Repository: terrapower/armi
Branch: main
Commit: 65813317319d
Files: 703
Total size: 6.8 MB
Directory structure:
gitextract_528z7ijz/
├── .github/
│ ├── .codecov.yml
│ ├── pull_request_template.md
│ └── workflows/
│ ├── coverage.yaml
│ ├── docs.yaml
│ ├── find_test_crumbs.py
│ ├── licensechecker.yaml
│ ├── linting.yaml
│ ├── mac_tests.yaml
│ ├── stale.yaml
│ ├── unittests.yaml
│ ├── validatemanifest.py
│ ├── validatemanifest.yaml
│ ├── wheels.yaml
│ └── wintests.yaml
├── .gitignore
├── .gitmodules
├── .licenserc.json
├── AUTHORS
├── CONTRIBUTING.md
├── LICENSE.md
├── README.rst
├── armi/
│ ├── __init__.py
│ ├── __main__.py
│ ├── _bootstrap.py
│ ├── apps.py
│ ├── bookkeeping/
│ │ ├── __init__.py
│ │ ├── db/
│ │ │ ├── __init__.py
│ │ │ ├── compareDB3.py
│ │ │ ├── database.py
│ │ │ ├── databaseInterface.py
│ │ │ ├── factory.py
│ │ │ ├── jaggedArray.py
│ │ │ ├── layout.py
│ │ │ ├── passiveDBLoadPlugin.py
│ │ │ ├── permissions.py
│ │ │ ├── tests/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── test_comparedb3.py
│ │ │ │ ├── test_database.py
│ │ │ │ ├── test_databaseInterface.py
│ │ │ │ ├── test_jaggedArray.py
│ │ │ │ ├── test_layout.py
│ │ │ │ └── test_passiveDBLoadPlugin.py
│ │ │ └── typedefs.py
│ │ ├── historyTracker.py
│ │ ├── mainInterface.py
│ │ ├── memoryProfiler.py
│ │ ├── report/
│ │ │ ├── __init__.py
│ │ │ ├── data.py
│ │ │ ├── reportInterface.py
│ │ │ ├── reportingUtils.py
│ │ │ └── tests/
│ │ │ ├── __init__.py
│ │ │ └── test_report.py
│ │ ├── snapshotInterface.py
│ │ ├── tests/
│ │ │ ├── __init__.py
│ │ │ ├── _constants.py
│ │ │ ├── test_historyTracker.py
│ │ │ ├── test_memoryProfiler.py
│ │ │ └── test_snapshot.py
│ │ └── visualization/
│ │ ├── __init__.py
│ │ ├── dumper.py
│ │ ├── entryPoint.py
│ │ ├── tests/
│ │ │ ├── __init__.py
│ │ │ ├── test_vis.py
│ │ │ └── test_xdmf.py
│ │ ├── utils.py
│ │ ├── vtk.py
│ │ └── xdmf.py
│ ├── cases/
│ │ ├── __init__.py
│ │ ├── case.py
│ │ ├── inputModifiers/
│ │ │ ├── __init__.py
│ │ │ ├── inputModifiers.py
│ │ │ ├── neutronicsModifiers.py
│ │ │ ├── pinTypeInputModifiers.py
│ │ │ └── tests/
│ │ │ ├── __init__.py
│ │ │ ├── test_inputModifiers.py
│ │ │ └── test_pinTypeInputModifiers.py
│ │ ├── suite.py
│ │ ├── suiteBuilder.py
│ │ └── tests/
│ │ ├── __init__.py
│ │ ├── test_cases.py
│ │ └── test_suiteBuilder.py
│ ├── cli/
│ │ ├── __init__.py
│ │ ├── checkInputs.py
│ │ ├── cleanTemps.py
│ │ ├── clone.py
│ │ ├── compareCases.py
│ │ ├── database.py
│ │ ├── entryPoint.py
│ │ ├── gridGui.py
│ │ ├── migrateInputs.py
│ │ ├── modify.py
│ │ ├── reportsEntryPoint.py
│ │ ├── run.py
│ │ ├── runSuite.py
│ │ └── tests/
│ │ ├── __init__.py
│ │ ├── test_runEntryPoint.py
│ │ └── test_runSuite.py
│ ├── conftest.py
│ ├── context.py
│ ├── interfaces.py
│ ├── matProps/
│ │ ├── __init__.py
│ │ ├── constituent.py
│ │ ├── function.py
│ │ ├── interpolationFunctions.py
│ │ ├── material.py
│ │ ├── materialType.py
│ │ ├── piecewiseFunction.py
│ │ ├── point.py
│ │ ├── prop.py
│ │ ├── reference.py
│ │ ├── symbolicFunction.py
│ │ ├── tableFunction.py
│ │ ├── tableFunction1D.py
│ │ ├── tableFunction2D.py
│ │ └── tests/
│ │ ├── __init__.py
│ │ ├── invalidTestFiles/
│ │ │ ├── badFileFormat.YAML
│ │ │ ├── badProperty.yaml
│ │ │ └── duplicateComposition.yaml
│ │ ├── testDir1/
│ │ │ ├── a.yaml
│ │ │ └── b.yaml
│ │ ├── testDir2/
│ │ │ ├── c.yml
│ │ │ └── d.yaml
│ │ ├── testDir3/
│ │ │ ├── a.yaml
│ │ │ └── e.yaml
│ │ ├── testDir4/
│ │ │ └── sampleProperty.yaml
│ │ ├── testMaterialsData/
│ │ │ ├── materialA.yaml
│ │ │ ├── materialB.yaml
│ │ │ └── materialsSubDir/
│ │ │ ├── materialC.yaml
│ │ │ └── materialD.yaml
│ │ ├── test_1DSymbolicFunction.py
│ │ ├── test_composition.py
│ │ ├── test_constituent.py
│ │ ├── test_functions.py
│ │ ├── test_hashing.py
│ │ ├── test_interpolationFunctions.py
│ │ ├── test_material.py
│ │ ├── test_materialType.py
│ │ ├── test_parsing.py
│ │ ├── test_performance.py
│ │ ├── test_piecewiseFunction.py
│ │ ├── test_point.py
│ │ ├── test_property.py
│ │ ├── test_references.py
│ │ ├── test_symbolicFunction.py
│ │ └── test_tableFunctions.py
│ ├── materials/
│ │ ├── __init__.py
│ │ ├── air.py
│ │ ├── alloy200.py
│ │ ├── b4c.py
│ │ ├── be9.py
│ │ ├── caH2.py
│ │ ├── californium.py
│ │ ├── concrete.py
│ │ ├── copper.py
│ │ ├── cs.py
│ │ ├── custom.py
│ │ ├── graphite.py
│ │ ├── hafnium.py
│ │ ├── hastelloyN.py
│ │ ├── ht9.py
│ │ ├── inconel.py
│ │ ├── inconel600.py
│ │ ├── inconel625.py
│ │ ├── inconel800.py
│ │ ├── inconelPE16.py
│ │ ├── inconelX750.py
│ │ ├── lead.py
│ │ ├── leadBismuth.py
│ │ ├── lithium.py
│ │ ├── magnesium.py
│ │ ├── material.py
│ │ ├── mgO.py
│ │ ├── mixture.py
│ │ ├── molybdenum.py
│ │ ├── mox.py
│ │ ├── nZ.py
│ │ ├── potassium.py
│ │ ├── scandiumOxide.py
│ │ ├── siC.py
│ │ ├── sodium.py
│ │ ├── sodiumChloride.py
│ │ ├── sulfur.py
│ │ ├── tZM.py
│ │ ├── tantalum.py
│ │ ├── tests/
│ │ │ ├── __init__.py
│ │ │ ├── test__init__.py
│ │ │ ├── test_air.py
│ │ │ ├── test_b4c.py
│ │ │ ├── test_be9.py
│ │ │ ├── test_fluids.py
│ │ │ ├── test_graphite.py
│ │ │ ├── test_lithium.py
│ │ │ ├── test_materials.py
│ │ │ ├── test_sic.py
│ │ │ ├── test_sulfur.py
│ │ │ ├── test_thoriumOxide.py
│ │ │ ├── test_uZr.py
│ │ │ └── test_water.py
│ │ ├── thU.py
│ │ ├── thorium.py
│ │ ├── thoriumOxide.py
│ │ ├── uZr.py
│ │ ├── uranium.py
│ │ ├── uraniumOxide.py
│ │ ├── void.py
│ │ ├── water.py
│ │ ├── yttriumOxide.py
│ │ ├── zincOxide.py
│ │ └── zr.py
│ ├── meta.py
│ ├── migration/
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── m0_1_3.py
│ │ ├── m0_1_6.py
│ │ └── tests/
│ │ ├── __init__.py
│ │ ├── test_m0_1_6.py
│ │ └── test_migration_base.py
│ ├── mpiActions.py
│ ├── nucDirectory/
│ │ ├── __init__.py
│ │ ├── elements.py
│ │ ├── nucDir.py
│ │ ├── nuclideBases.py
│ │ ├── tests/
│ │ │ ├── __init__.py
│ │ │ ├── test_elements.py
│ │ │ ├── test_nucDirectory.py
│ │ │ ├── test_nuclideBases.py
│ │ │ ├── test_thermalScattering.py
│ │ │ └── test_transmutations.py
│ │ ├── thermalScattering.py
│ │ └── transmutations.py
│ ├── nuclearDataIO/
│ │ ├── __init__.py
│ │ ├── cccc/
│ │ │ ├── __init__.py
│ │ │ ├── cccc.py
│ │ │ ├── compxs.py
│ │ │ ├── dif3d.py
│ │ │ ├── fixsrc.py
│ │ │ ├── gamiso.py
│ │ │ ├── geodst.py
│ │ │ ├── isotxs.py
│ │ │ ├── labels.py
│ │ │ ├── nhflux.py
│ │ │ ├── pmatrx.py
│ │ │ ├── pwdint.py
│ │ │ ├── rtflux.py
│ │ │ ├── rzflux.py
│ │ │ └── tests/
│ │ │ ├── __init__.py
│ │ │ ├── fixtures/
│ │ │ │ ├── labels.binary
│ │ │ │ ├── simple_cartesian.pwdint
│ │ │ │ ├── simple_cartesian.rtflux
│ │ │ │ ├── simple_cartesian.rzflux
│ │ │ │ ├── simple_hexz.dif3d
│ │ │ │ ├── simple_hexz.geodst
│ │ │ │ ├── simple_hexz.nhflux
│ │ │ │ └── simple_hexz.nhflux.variant
│ │ │ ├── test_cccc.py
│ │ │ ├── test_compxs.py
│ │ │ ├── test_dif3d.py
│ │ │ ├── test_fixsrc.py
│ │ │ ├── test_gamiso.py
│ │ │ ├── test_geodst.py
│ │ │ ├── test_isotxs.py
│ │ │ ├── test_labels.py
│ │ │ ├── test_nhflux.py
│ │ │ ├── test_pmatrx.py
│ │ │ ├── test_pwdint.py
│ │ │ ├── test_rtflux.py
│ │ │ └── test_rzflux.py
│ │ ├── nuclearFileMetadata.py
│ │ ├── tests/
│ │ │ ├── __init__.py
│ │ │ ├── fixtures/
│ │ │ │ ├── AA.gamiso
│ │ │ │ ├── AA.pmatrx
│ │ │ │ ├── AB.gamiso
│ │ │ │ ├── AB.pmatrx
│ │ │ │ ├── ISOAA
│ │ │ │ ├── ISOAB
│ │ │ │ ├── combined-AA-AB.gamiso
│ │ │ │ ├── combined-AA-AB.isotxs
│ │ │ │ ├── combined-AA-AB.pmatrx
│ │ │ │ ├── combined-and-lumped-AA-AB.gamiso
│ │ │ │ ├── combined-and-lumped-AA-AB.isotxs
│ │ │ │ ├── combined-and-lumped-AA-AB.pmatrx
│ │ │ │ ├── mc2v3-AA.gamiso
│ │ │ │ ├── mc2v3-AA.isotxs
│ │ │ │ ├── mc2v3-AA.pmatrx
│ │ │ │ ├── mc2v3-AB.gamiso
│ │ │ │ ├── mc2v3-AB.isotxs
│ │ │ │ └── mc2v3-AB.pmatrx
│ │ │ ├── library-file-generation/
│ │ │ │ ├── combine-AA-AB.inp
│ │ │ │ ├── combine-and-lump-AA-AB.inp
│ │ │ │ ├── mc2v3-AA.inp
│ │ │ │ └── mc2v3-AB.inp
│ │ │ ├── simple_hexz.inp
│ │ │ ├── test_xsCollections.py
│ │ │ ├── test_xsLibraries.py
│ │ │ └── test_xsNuclides.py
│ │ ├── xsCollections.py
│ │ ├── xsLibraries.py
│ │ └── xsNuclides.py
│ ├── operators/
│ │ ├── __init__.py
│ │ ├── operator.py
│ │ ├── operatorMPI.py
│ │ ├── runTypes.py
│ │ ├── snapshots.py
│ │ └── tests/
│ │ ├── __init__.py
│ │ ├── test_operatorSnapshots.py
│ │ └── test_operators.py
│ ├── physics/
│ │ ├── __init__.py
│ │ ├── constants.py
│ │ ├── executers.py
│ │ ├── fuelCycle/
│ │ │ ├── __init__.py
│ │ │ ├── assemblyRotationAlgorithms.py
│ │ │ ├── fuelHandlerFactory.py
│ │ │ ├── fuelHandlerInterface.py
│ │ │ ├── fuelHandlers.py
│ │ │ ├── hexAssemblyFuelMgmtUtils.py
│ │ │ ├── settings.py
│ │ │ ├── tests/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── _customFuelHandlerModule.py
│ │ │ │ ├── test_assemblyRotationAlgorithms.py
│ │ │ │ ├── test_fuelHandlerFactory.py
│ │ │ │ ├── test_fuelHandlers.py
│ │ │ │ ├── test_hexAssemblyFuelMgmtUtils.py
│ │ │ │ └── test_utils.py
│ │ │ └── utils.py
│ │ ├── fuelPerformance/
│ │ │ ├── __init__.py
│ │ │ ├── executers.py
│ │ │ ├── parameters.py
│ │ │ ├── plugin.py
│ │ │ ├── settings.py
│ │ │ ├── tests/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── test_executers.py
│ │ │ │ ├── test_fuelPerformancePlugin.py
│ │ │ │ ├── test_fuelPerformanceSymmetry.py
│ │ │ │ └── test_fuelPerformanceUtils.py
│ │ │ └── utils.py
│ │ ├── neutronics/
│ │ │ ├── __init__.py
│ │ │ ├── const.py
│ │ │ ├── crossSectionGroupManager.py
│ │ │ ├── crossSectionSettings.py
│ │ │ ├── diffIsotxs.py
│ │ │ ├── energyGroups.py
│ │ │ ├── fissionProductModel/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── fissionProductModel.py
│ │ │ │ ├── fissionProductModelSettings.py
│ │ │ │ ├── lumpedFissionProduct.py
│ │ │ │ └── tests/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── test_fissionProductModel.py
│ │ │ │ └── test_lumpedFissionProduct.py
│ │ │ ├── globalFlux/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── globalFluxInterface.py
│ │ │ │ └── tests/
│ │ │ │ ├── __init__.py
│ │ │ │ └── test_globalFluxInterface.py
│ │ │ ├── isotopicDepletion/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── crossSectionTable.py
│ │ │ │ └── isotopicDepletionInterface.py
│ │ │ ├── latticePhysics/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── latticePhysicsInterface.py
│ │ │ │ ├── latticePhysicsWriter.py
│ │ │ │ └── tests/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── test_latticeInterface.py
│ │ │ │ └── test_latticeWriter.py
│ │ │ ├── macroXSGenerationInterface.py
│ │ │ ├── parameters.py
│ │ │ ├── plugin.py
│ │ │ ├── settings.py
│ │ │ └── tests/
│ │ │ ├── ISOXA
│ │ │ ├── __init__.py
│ │ │ ├── rzmflxYA
│ │ │ ├── test_crossSectionManager.py
│ │ │ ├── test_crossSectionSettings.py
│ │ │ ├── test_crossSectionTable.py
│ │ │ ├── test_energyGroups.py
│ │ │ ├── test_macroXSGenerationInterface.py
│ │ │ ├── test_neutronicsPlugin.py
│ │ │ └── test_neutronicsSymmetry.py
│ │ ├── safety/
│ │ │ └── __init__.py
│ │ ├── tests/
│ │ │ ├── __init__.py
│ │ │ └── test_executers.py
│ │ └── thermalHydraulics/
│ │ ├── __init__.py
│ │ ├── const.py
│ │ ├── parameters.py
│ │ ├── plugin.py
│ │ └── tests/
│ │ ├── __init__.py
│ │ └── test_thermalHydraulicsSymmetry.py
│ ├── pluginManager.py
│ ├── plugins.py
│ ├── reactor/
│ │ ├── __init__.py
│ │ ├── assemblies.py
│ │ ├── assemblyParameters.py
│ │ ├── blockParameters.py
│ │ ├── blocks/
│ │ │ ├── __init__.py
│ │ │ ├── block.py
│ │ │ ├── cartesianBlock.py
│ │ │ ├── hexBlock.py
│ │ │ └── thRZBlock.py
│ │ ├── blueprints/
│ │ │ ├── __init__.py
│ │ │ ├── assemblyBlueprint.py
│ │ │ ├── blockBlueprint.py
│ │ │ ├── componentBlueprint.py
│ │ │ ├── gridBlueprint.py
│ │ │ ├── isotopicOptions.py
│ │ │ ├── reactorBlueprint.py
│ │ │ └── tests/
│ │ │ ├── __init__.py
│ │ │ ├── test_assemblyBlueprints.py
│ │ │ ├── test_blockBlueprints.py
│ │ │ ├── test_blueprints.py
│ │ │ ├── test_componentBlueprint.py
│ │ │ ├── test_customIsotopics.py
│ │ │ ├── test_gridBlueprints.py
│ │ │ ├── test_materialModifications.py
│ │ │ └── test_reactorBlueprints.py
│ │ ├── components/
│ │ │ ├── __init__.py
│ │ │ ├── basicShapes.py
│ │ │ ├── complexShapes.py
│ │ │ ├── component.py
│ │ │ ├── componentParameters.py
│ │ │ ├── tests/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── test_basicShapes.py
│ │ │ │ └── test_complexShapes.py
│ │ │ └── volumetricShapes.py
│ │ ├── composites.py
│ │ ├── converters/
│ │ │ ├── __init__.py
│ │ │ ├── axialExpansionChanger/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── assemblyAxialLinkage.py
│ │ │ │ ├── axialExpansionChanger.py
│ │ │ │ ├── expansionData.py
│ │ │ │ └── redistributeMass.py
│ │ │ ├── blockConverters.py
│ │ │ ├── geometryConverters.py
│ │ │ ├── meshConverters.py
│ │ │ ├── parameterSweeps/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── generalParameterSweepConverters.py
│ │ │ │ └── tests/
│ │ │ │ ├── __init__.py
│ │ │ │ └── test_paramSweepConverters.py
│ │ │ ├── pinTypeBlockConverters.py
│ │ │ ├── tests/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── test_assemblyAxialLinkage.py
│ │ │ │ ├── test_axialExpansionChanger.py
│ │ │ │ ├── test_axialExpansionChanger_MultiPin.py
│ │ │ │ ├── test_blockConverter.py
│ │ │ │ ├── test_geometryConverters.py
│ │ │ │ ├── test_meshConverters.py
│ │ │ │ ├── test_pinTypeBlockConverters.py
│ │ │ │ └── test_uniformMesh.py
│ │ │ └── uniformMesh.py
│ │ ├── cores.py
│ │ ├── excoreStructure.py
│ │ ├── flags.py
│ │ ├── geometry.py
│ │ ├── grids/
│ │ │ ├── __init__.py
│ │ │ ├── axial.py
│ │ │ ├── cartesian.py
│ │ │ ├── constants.py
│ │ │ ├── grid.py
│ │ │ ├── hexagonal.py
│ │ │ ├── locations.py
│ │ │ ├── structuredGrid.py
│ │ │ ├── tests/
│ │ │ │ ├── __init__.py
│ │ │ │ └── test_grids.py
│ │ │ └── thetarz.py
│ │ ├── parameters/
│ │ │ ├── __init__.py
│ │ │ ├── exceptions.py
│ │ │ ├── parameterCollections.py
│ │ │ ├── parameterDefinitions.py
│ │ │ └── resolveCollections.py
│ │ ├── reactorParameters.py
│ │ ├── reactors.py
│ │ ├── spentFuelPool.py
│ │ ├── tests/
│ │ │ ├── __init__.py
│ │ │ ├── test_assemblies.py
│ │ │ ├── test_blocks.py
│ │ │ ├── test_components.py
│ │ │ ├── test_composites.py
│ │ │ ├── test_cores.py
│ │ │ ├── test_excoreStructures.py
│ │ │ ├── test_flags.py
│ │ │ ├── test_geometry.py
│ │ │ ├── test_hexBlockRotate.py
│ │ │ ├── test_parameters.py
│ │ │ ├── test_reactors.py
│ │ │ ├── test_rz_reactors.py
│ │ │ ├── test_zones.py
│ │ │ └── zonesFile.yaml
│ │ └── zones.py
│ ├── resources/
│ │ ├── burn-chain.yaml
│ │ └── mcc-nuclides.yaml
│ ├── runLog.py
│ ├── settings/
│ │ ├── __init__.py
│ │ ├── caseSettings.py
│ │ ├── fwSettings/
│ │ │ ├── __init__.py
│ │ │ ├── databaseSettings.py
│ │ │ ├── globalSettings.py
│ │ │ ├── reportSettings.py
│ │ │ ├── tests/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── test_fwSettings.py
│ │ │ │ └── test_tightCouplingSettings.py
│ │ │ └── tightCouplingSettings.py
│ │ ├── setting.py
│ │ ├── settingsIO.py
│ │ ├── settingsValidation.py
│ │ └── tests/
│ │ ├── __init__.py
│ │ ├── test_inspectors.py
│ │ ├── test_settings.py
│ │ └── test_settingsIO.py
│ ├── testing/
│ │ ├── __init__.py
│ │ ├── reactors/
│ │ │ ├── anl-afci-177/
│ │ │ │ ├── anl-afci-177-blueprints.yaml
│ │ │ │ ├── anl-afci-177-coreMap.yaml
│ │ │ │ ├── anl-afci-177-fuelManagement.py
│ │ │ │ └── anl-afci-177.yaml
│ │ │ ├── c5g7/
│ │ │ │ ├── c5g7-blueprints.yaml
│ │ │ │ └── c5g7-settings.yaml
│ │ │ ├── godiva/
│ │ │ │ ├── godiva-blueprints.yaml
│ │ │ │ └── godiva.armi.unittest.yaml
│ │ │ ├── smallHexReactor/
│ │ │ │ ├── smallHexReactor-bp.yaml
│ │ │ │ └── smallHexReactor.yaml
│ │ │ └── thirdSmallHexReactor/
│ │ │ ├── thirdSmallHexReactor-bp.yaml
│ │ │ └── thirdSmallHexReactor.yaml
│ │ ├── resources/
│ │ │ └── armiRun-SHUFFLES.yaml
│ │ ├── singleMixedAssembly.py
│ │ ├── symmetryTesting.py
│ │ └── tests/
│ │ ├── __init__.py
│ │ └── test_symmetryTesting.py
│ ├── tests/
│ │ ├── 1DslabXSByCompTest.yaml
│ │ ├── ISOAA
│ │ ├── __init__.py
│ │ ├── armiRun.yaml
│ │ ├── detailedAxialExpansion/
│ │ │ ├── armiRun.yaml
│ │ │ ├── refSmallCoreGrid.yaml
│ │ │ ├── refSmallReactor.yaml
│ │ │ └── refSmallReactorBase.yaml
│ │ ├── mockRunLogs.py
│ │ ├── refSmallCartesian.yaml
│ │ ├── refSmallCoreGrid.yaml
│ │ ├── refSmallReactor.yaml
│ │ ├── refSmallReactorBase.yaml
│ │ ├── refSmallReactorShuffleLogic.py
│ │ ├── refSmallSfpGrid.yaml
│ │ ├── refTestCartesian.yaml
│ │ ├── smallestTestReactor/
│ │ │ ├── armiRunSmallest.yaml
│ │ │ ├── refOneBlockReactor.yaml
│ │ │ └── refSmallestReactor.yaml
│ │ ├── test_apps.py
│ │ ├── test_armiTestHelper.py
│ │ ├── test_cartesian.py
│ │ ├── test_context.py
│ │ ├── test_interfaces.py
│ │ ├── test_lwrInputs.py
│ │ ├── test_mpiActions.py
│ │ ├── test_mpiFeatures.py
│ │ ├── test_mpiParameters.py
│ │ ├── test_plugins.py
│ │ ├── test_runLog.py
│ │ ├── test_symmetry.py
│ │ ├── test_tests.py
│ │ ├── test_user_plugins.py
│ │ ├── tutorials/
│ │ │ ├── data_model.ipynb
│ │ │ ├── param_sweep.ipynb
│ │ │ └── pin-rotations.ipynb
│ │ ├── zpprTest.yaml
│ │ └── zpprTestGeom.yaml
│ └── utils/
│ ├── __init__.py
│ ├── asciimaps.py
│ ├── codeTiming.py
│ ├── customExceptions.py
│ ├── densityTools.py
│ ├── directoryChangers.py
│ ├── directoryChangersMpi.py
│ ├── dynamicImporter.py
│ ├── flags.py
│ ├── gridEditor.py
│ ├── hexagon.py
│ ├── iterables.py
│ ├── mathematics.py
│ ├── outputCache.py
│ ├── parsing.py
│ ├── pathTools.py
│ ├── plotting.py
│ ├── properties.py
│ ├── reportPlotting.py
│ ├── tabulate.py
│ ├── tests/
│ │ ├── __init__.py
│ │ ├── resources/
│ │ │ ├── lower/
│ │ │ │ ├── includeA.yaml
│ │ │ │ └── includeB.yaml
│ │ │ └── root.yaml
│ │ ├── test_asciimaps.py
│ │ ├── test_codeTiming.py
│ │ ├── test_custom_exceptions.py
│ │ ├── test_densityTools.py
│ │ ├── test_directoryChangers.py
│ │ ├── test_directoryChangersMpi.py
│ │ ├── test_flags.py
│ │ ├── test_hexagon.py
│ │ ├── test_iterables.py
│ │ ├── test_mathematics.py
│ │ ├── test_outputCache.py
│ │ ├── test_parsing.py
│ │ ├── test_pathTools.py
│ │ ├── test_plotting.py
│ │ ├── test_properties.py
│ │ ├── test_reportPlotting.py
│ │ ├── test_tabulate.py
│ │ ├── test_textProcessors.py
│ │ ├── test_triangle.py
│ │ ├── test_units.py
│ │ └── test_utils.py
│ ├── textProcessors.py
│ ├── triangle.py
│ └── units.py
├── doc/
│ ├── .static/
│ │ ├── __init__.py
│ │ ├── automateScr.py
│ │ ├── cleanup_test_results.py
│ │ ├── css/
│ │ │ └── theme_fixes.css
│ │ ├── dochelpers.py
│ │ ├── looseCouplingIllustration.dot
│ │ └── tightCouplingIllustration.dot
│ ├── Makefile
│ ├── __init__.py
│ ├── conf.py
│ ├── developer/
│ │ ├── documenting.rst
│ │ ├── entrypoints.rst
│ │ ├── first_time_contributors.rst
│ │ ├── guide.rst
│ │ ├── index.rst
│ │ ├── making_armi_based_apps.rst
│ │ ├── parallel_coding.rst
│ │ ├── profiling.rst
│ │ ├── standards_and_practices.rst
│ │ ├── testing.rst
│ │ └── tooling.rst
│ ├── gallery-src/
│ │ ├── README.rst
│ │ ├── analysis/
│ │ │ ├── README.rst
│ │ │ ├── run_blockMcnpMaterialCard.py
│ │ │ ├── run_hexBlockToRZConversion.py
│ │ │ └── run_hexReactorToRZ.py
│ │ └── framework/
│ │ ├── README.rst
│ │ ├── run_blockVolumeFractions.py
│ │ ├── run_chartOfNuclides.py
│ │ ├── run_computeReactionRates.py
│ │ ├── run_fuelManagement.py
│ │ ├── run_grids1_hex.py
│ │ ├── run_grids2_cartesian.py
│ │ ├── run_grids3_rzt.py
│ │ ├── run_isotxs.py
│ │ ├── run_isotxs2_matrix.py
│ │ ├── run_materials.py
│ │ ├── run_programmaticReactorDefinition.py
│ │ ├── run_reactorFacemap.py
│ │ └── run_transmutationMatrix.py
│ ├── getTestResults.py
│ ├── glossary.rst
│ ├── index.rst
│ ├── installation.rst
│ ├── make.bat
│ ├── qa_docs/
│ │ ├── index.rst
│ │ ├── scr/
│ │ │ ├── 0.1.rst
│ │ │ ├── 0.2.rst
│ │ │ ├── 0.3.rst
│ │ │ ├── 0.4.rst
│ │ │ ├── 0.5.rst
│ │ │ ├── 0.6.rst
│ │ │ ├── index.rst
│ │ │ └── latest_scr.rst
│ │ ├── sdid.rst
│ │ ├── srsd/
│ │ │ ├── bookkeeping_reqs.rst
│ │ │ ├── cases_reqs.rst
│ │ │ ├── cli_reqs.rst
│ │ │ ├── framework_reqs.rst
│ │ │ ├── materials_reqs.rst
│ │ │ ├── nucDirectory_reqs.rst
│ │ │ ├── nuclearDataIO_reqs.rst
│ │ │ ├── physics_reqs.rst
│ │ │ ├── reactors_reqs.rst
│ │ │ ├── runLog_reqs.rst
│ │ │ ├── settings_reqs.rst
│ │ │ └── utils_reqs.rst
│ │ ├── srsd.rst
│ │ └── str.rst
│ ├── readme.rst
│ ├── release/
│ │ └── index.rst
│ ├── skip_str.py
│ ├── tutorials/
│ │ ├── data_model.nblink
│ │ ├── index.rst
│ │ ├── making_your_first_app.rst
│ │ ├── materials_demo.ipynb
│ │ ├── nuclide_demo.ipynb
│ │ ├── param_sweep.nblink
│ │ ├── pin-rotations.nblink
│ │ ├── walkthrough_inputs.rst
│ │ └── walkthrough_lwr_inputs.rst
│ └── user/
│ ├── _gallery/
│ │ └── index.rst
│ ├── accessingEntryPoints.rst
│ ├── index.rst
│ ├── inputs.rst
│ ├── manual_data_access.rst
│ ├── outputs.rst
│ ├── params_report.rst
│ ├── physics_coupling.rst
│ ├── radial_and_axial_expansion.rst
│ ├── settings_report.rst
│ ├── spatial_block_data.rst
│ ├── symmetry_handling.rst
│ └── user_install.rst
└── pyproject.toml
================================================
FILE CONTENTS
================================================
================================================
FILE: .github/.codecov.yml
================================================
coverage:
status:
project:
default:
target: 80% # the required coverage value
threshold: 1% # allows a 1% drop from the previous base commit coverage
================================================
FILE: .github/pull_request_template.md
================================================
## What is the change? Why is it being made?
## SCR Information
One-Sentence Rationale: TBD
One-line Impact on Requirements: NA
---
## Checklist
- [ ] This PR has only [one purpose or idea](https://terrapower.github.io/armi/developer/tooling.html#one-idea-one-pr).
- [ ] [Tests](https://terrapower.github.io/armi/developer/tooling.html#test-it) have been added/updated to verify any new/changed code.
- [ ] The [documentation](https://terrapower.github.io/armi/developer/tooling.html#document-it) is still up-to-date in the `doc` folder.
- [ ] The code style follows [good practices](https://terrapower.github.io/armi/developer/standards_and_practices.html).
- [ ] The dependencies are still up-to-date in `pyproject.toml`.
================================================
FILE: .github/workflows/coverage.yaml
================================================
name: Coverage
permissions:
contents: read
on:
push:
branches:
- main
paths-ignore:
- 'doc/**'
pull_request:
paths-ignore:
- 'doc/**'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
build:
# Deploying coverage to codecov.io should not happen on forks
if: github.repository == 'terrapower/armi'
runs-on: ubuntu-24.04
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
steps:
- uses: actions/checkout@v2
- name: Setup Python
uses: actions/setup-python@v2
with:
python-version: '3.13'
- name: Install ARMI and MPI
run: |
sudo apt-get -y install libopenmpi-dev
pip install -e .[memprof,mpi,test]
pip install codecov
- name: Run Coverage
run: |
set -x
coverage run --rcfile=pyproject.toml -m pytest -n 4 --cov=armi --cov-config=pyproject.toml --cov-report=xml --ignore=venv armi
mpiexec -n 2 --use-hwthread-cpus coverage run --rcfile=pyproject.toml -m pytest --cov=armi --cov-config=pyproject.toml --cov-report=xml --cov-append --ignore=venv armi/tests/test_mpiFeatures.py || true
mpiexec -n 2 --use-hwthread-cpus coverage run --rcfile=pyproject.toml -m pytest --cov=armi --cov-config=pyproject.toml --cov-report=xml --cov-append --ignore=venv armi/tests/test_mpiParameters.py || true
mpiexec -n 2 --use-hwthread-cpus coverage run --rcfile=pyproject.toml -m pytest --cov=armi --cov-config=pyproject.toml --cov-report=xml --cov-append --ignore=venv armi/utils/tests/test_directoryChangersMpi.py || true
coverage combine --rcfile=pyproject.toml --keep -a
coverage report --rcfile=pyproject.toml -i --skip-empty --skip-covered --sort=cover --fail-under=90
- name: Publish to codecov.io
continue-on-error: true
if: github.ref == 'refs/heads/main'
uses: codecov/codecov-action@v5
with:
fail_ci_if_error: false
token: ${{ secrets.CODECOV_TOKEN }}
================================================
FILE: .github/workflows/docs.yaml
================================================
name: Documentation
on:
push:
branches:
- main
pull_request:
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
build:
# Building and deploying docs is broken on forked repos
if: github.repository == 'terrapower/armi'
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
- name: Setup Python
uses: actions/setup-python@v5
with:
python-version: 3.13
- name: Update package index
run: sudo apt-get update
- name: Install apt-get libs
run: sudo apt-get -y install texlive-xetex=2021.20220204-1 texlive-latex-base=2021.20220204-1 texlive-fonts-recommended=2021.20220204-1 texlive-latex-extra=2021.20220204-1 texlive-full=2021.20220204-1 pandoc libopenmpi-dev
- name: Setup Graphviz
uses: ts-graphviz/setup-graphviz@v2.0.2
- name: Make html/pdf Docs
continue-on-error: true
env:
GH_TOKEN: ${{ github.token }}
PR_NUMBER: ${{ github.ref == 'refs/heads/main' && -1 || github.event.number }}
GIT_COMMIT: ${{ github.sha }}
run: |
echo "Installing ARMI..."
set -x
pip install -U pip
pip install -e .[memprof,mpi,test,docs]
echo "Run unit tests..."
pytest --junit-xml=test_results.xml -v -n 4 armi > pytest_verbose.log
mpiexec -n 2 --use-hwthread-cpus pytest --junit-xml=test_results_mpi1.xml armi/tests/test_mpiFeatures.py > pytest_verbose_mpi1.log
mpiexec -n 2 --use-hwthread-cpus pytest --junit-xml=test_results_mpi2.xml armi/tests/test_mpiParameters.py > pytest_verbose_mpi2.log
mpiexec -n 2 --use-hwthread-cpus pytest --junit-xml=test_results_mpi3.xml armi/utils/tests/test_directoryChangersMpi.py > pytest_verbose_mpi3.log
python doc/.static/cleanup_test_results.py test_results.xml
echo "Git magic so the SCR will build on GitHub Actions..."
git fetch --depth=2000
echo "Build HTML docs..."
cd doc
git submodule init
git submodule update
make html
echo "Build PDF docs..."
make latex
cd _build/latex/
latexmk -pdf -f -interaction=nonstopmode ARMI.tex
- name: Deploy
if: github.ref == 'refs/heads/main'
uses: JamesIves/github-pages-deploy-action@v4.6.1
with:
token: ${{ secrets.ACCESS_TOKEN }}
repository-name: ${{ github.repository_owner }}/terrapower.github.io
branch: main
folder: doc/_build/html
target-folder: armi
- name: Archive HTML Docs
if: github.ref != 'refs/heads/main'
uses: actions/upload-artifact@v4
with:
name: html-docs
path: doc/_build/html
retention-days: 5
- name: Archive PDF Docs
uses: actions/upload-artifact@v4
with:
name: pdf-docs
path: doc/_build/latex/ARMI.pdf
retention-days: 5
================================================
FILE: .github/workflows/find_test_crumbs.py
================================================
# Copyright 2024 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script exists so we can determine if new tests in CI are leaving crumbs."""
import subprocess
# A list of objects we expect during a run, and don't mind (like pycache dirs).
# Any untracked path whose text contains one of these substrings is filtered
# out before the workspace is judged "dirty".
IGNORED_OBJECTS = [
    ".pytest_cache",
    ".tox",
    "__pycache__",
    "armi.egg-info",
    "logs/",
]
def main():
    """Fail if the test run left untracked files ("crumbs") in the workspace.

    Runs ``git clean -xnd`` (a dry run) to list every non-tracked object git
    would delete, filters out objects we expect to see (the entries in
    ``IGNORED_OBJECTS``, e.g. ``__pycache__`` dirs), prints anything left
    over, and fails hard if the leftover list is non-empty.

    Raises
    ------
    ValueError
        If any unexpected untracked file or directory remains.
    subprocess.CalledProcessError
        If the ``git clean`` dry run itself fails.
    """
    # use "git clean" (dry run) to find all non-tracked files;
    # check=True so a failing git invocation is not mistaken for a clean tree
    proc = subprocess.run(["git", "clean", "-xnd"], stdout=subprocess.PIPE, check=True)
    lines = proc.stdout.decode("utf-8").split("\n")

    # clean up the whitespace
    lines = [ln.strip() for ln in lines if len(ln.strip())]

    # ignore certain untracked objects, like __pycache__ dirs
    for ignore in IGNORED_OBJECTS:
        lines = [ln for ln in lines if ignore not in ln]

    # fail hard if there are still untracked files
    if lines:
        for line in lines:
            print(line)
        raise ValueError("The workspace is dirty; the tests are leaving crumbs!")


if __name__ == "__main__":
    main()
================================================
FILE: .github/workflows/licensechecker.yaml
================================================
name: Check License Lines
permissions:
contents: read
on: [push]
jobs:
check-license-lines:
runs-on: ubuntu-24.04
steps:
- uses: actions/checkout@master
- name: Check License Lines
uses: kt3k/license_checker@v1.0.6
================================================
FILE: .github/workflows/linting.yaml
================================================
name: Linting
permissions:
contents: read
on: [push]
jobs:
build:
runs-on: ubuntu-24.04
steps:
- uses: actions/checkout@v4
- name: Setup Python
uses: actions/setup-python@v5
with:
python-version: '3.13'
- name: Run Linter
run: |
set -x
pip install -e .[test]
ruff format --check .
ruff check .
================================================
FILE: .github/workflows/mac_tests.yaml
================================================
name: ARMI MacOS Tests
permissions:
contents: read
on:
push:
branches:
- main
paths-ignore:
- 'doc/**'
pull_request:
paths-ignore:
- 'doc/**'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
build:
if: github.repository == 'terrapower/armi'
runs-on: macos-14
steps:
- uses: actions/checkout@v2
- name: Setup Python
uses: actions/setup-python@v2
with:
python-version: '3.11'
- name: Upgrade PIP
run: python -m pip install --upgrade pip
- name: Run Unit Tests on MacOS
run: |
brew install openmpi
pip install -e .[memprof,mpi,test]
pytest -n 4 armi
================================================
FILE: .github/workflows/stale.yaml
================================================
# This workflow warns and then closes PRs that have had no activity for a specified amount of time.
#
# You can adjust the behavior by modifying this file.
# For more information, see: https://github.com/actions/stale
name: Mark Stale PRs
on:
schedule:
# once a day at 3:14 AM
- cron: '14 3 * * *'
permissions:
pull-requests: write
jobs:
stale:
# This workflow is not designed to make sense on forks
if: github.repository == 'terrapower/armi'
runs-on: ubuntu-24.04
steps:
- uses: actions/stale@v8
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-pr-message: "This pull request has been automatically marked as stale because it has not had any activity in the last 100 days. It will be closed in 7 days if no further activity occurs. Thank you for your contributions."
stale-pr-label: "stale"
days-before-pr-stale: 100
days-before-pr-close: 7
days-before-issue-stale: -1
operations-per-run: 100
================================================
FILE: .github/workflows/unittests.yaml
================================================
name: ARMI unit tests
permissions:
contents: read
on:
push:
paths-ignore:
- 'doc/**'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
build:
runs-on: ubuntu-24.04
strategy:
matrix:
python: [3.9, '3.10', '3.11', '3.12', '3.13', '3.14']
steps:
- uses: actions/checkout@v4
- name: Setup Python
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python }}
- name: Install mpi libs
run: sudo apt-get -y install libopenmpi-dev
- name: Run Tests
run: |
set -x
pip install -e .[memprof,mpi,test]
pytest -n 4 armi
mpiexec -n 2 --use-hwthread-cpus pytest armi/tests/test_mpiFeatures.py
mpiexec -n 2 --use-hwthread-cpus pytest armi/tests/test_mpiParameters.py
mpiexec -n 2 --use-hwthread-cpus pytest armi/utils/tests/test_directoryChangersMpi.py
================================================
FILE: .github/workflows/validatemanifest.py
================================================
# Copyright 2022 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Validating the package-data in the pyproject.toml.
Validate that we aren't trying to include files that don't exist.
"""
import os
from glob import glob
import toml
# CONSTANTS
ARMI_DIR = "armi/"  # repo-relative root that the package-data paths are joined onto
PRPROJECT = "pyproject.toml"  # the project file declaring the package-data entries
def main():
# parse the data files out of the pyproject.toml
txt = open(PRPROJECT, "r").read()
data = toml.loads(txt)
fileChunks = data["tool"]["setuptools"]["package-data"]["armi"]
# loop through each line in the package-data and find all the file paths
errors = []
for i, line in enumerate(fileChunks):
# make sure the file exists
path = ARMI_DIR + line.strip()
if "*" in path:
paths = [f for f in glob(path) if len(f) > 3]
if not len(paths):
errors.append((i, path))
else:
if not os.path.exists(path):
errors.append((i, path))
# If there were any missing files, raise an Error.
if errors:
for i, line in errors:
print("Nonexistant file on line {}: {}".format(i, line))
raise ValueError("Package-data file is incorrect: includes non-existant files.")
if __name__ == "__main__":
main()
================================================
FILE: .github/workflows/validatemanifest.yaml
================================================
name: Validate Manifest
permissions:
contents: read
on: [push]
jobs:
build:
runs-on: ubuntu-24.04
steps:
- uses: actions/checkout@v2
- name: Setup Python
uses: actions/setup-python@v2
with:
python-version: '3.11'
- name: Validate Manifest
run: |
pip install toml
python .github/workflows/validatemanifest.py
================================================
FILE: .github/workflows/wheels.yaml
================================================
name: Build Wheel
permissions:
contents: read
on:
push:
branches:
- main
jobs:
build:
if: github.repository == 'terrapower/armi'
runs-on: ubuntu-24.04
steps:
- uses: actions/checkout@v4
- name: Setup Python
uses: actions/setup-python@v5
with:
python-version: "3.13"
- name: Install PIP Packages
run: |
pip install -U pip
pip install -e .
pip install -U wheel
- name: Build Wheels
run: |
mkdir dist
pip wheel . -w dist/
chmod 664 dist/armi*.whl
- name: Archive PIP wheel artifacts
uses: actions/upload-artifact@v4
with:
name: armi-wheels
path: |
dist/armi*.whl
retention-days: 7
================================================
FILE: .github/workflows/wintests.yaml
================================================
name: ARMI Windows tests
permissions:
contents: read
on:
push:
branches:
- main
paths-ignore:
- 'doc/**'
pull_request:
paths-ignore:
- 'doc/**'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
build:
runs-on: windows-2022
steps:
- uses: actions/checkout@v2
- name: Setup Python
uses: actions/setup-python@v2
with:
python-version: '3.11'
- name: Upgrade PIP
run: python -m pip install --upgrade pip
- name: Run Unit Tests on Windows
run: |
pip install mpi4py==3.1.6
pip install -e .[memprof,mpi,test]
pytest -n 4 armi
- name: Find Test Crumbs
run: python .github/workflows/find_test_crumbs.py
================================================
FILE: .gitignore
================================================
# No non-source python resources
*.pyc
*.pyd
*.pyo
*.pyx
# No build artifacts
*.aux
*.dll
*.fdb_latexmk
*.fls
*.lib
armi/tests/tutorials/case-suite
bdist*/
bin
build
coverage.lcov
coverage.xml
coverage_results.*
dist*/
doc/.apidocs
doc/_build
doc/anl-afci-177
doc/gallery
doc/gallery-src/framework/*.yaml
doc/tutorials/anl-afci-177*
doc/tutorials/case-suite
doc/user/tutorials
htmlcov/
monkeytype.*
test_results.*
wheelhouse
# No workspace crumbs
**/.coverage*
**/__pycache__
**/logs/*
*.ascii
*.egg-info/
*.sublime-project
*.sublime-workspace
*.temp
*~
.*.swp
.cache/
.coverage
.DS_Store
.externalToolBuilders/
.hypothesis/
.idea/
.ipynb_checkpoints
.metadata
.mypy_cache/
.project
.pydevproject
.pytest_cache/
.ruff_cache/
.settings
.tox
.vim-bookmarks
.vscode
armi-venv/*
dump-temp-*
dump-tests*
phabricator-lint.txt
pytest_verbose.log
pytestdebug.log
python_details.log
reportsOutputFiles/
system_info.log
tags
temp-*
venv*/
# Ignore certain data files
*.avi
*.diff
*.flux_bg
*.flux_ufg
*.h5
*.html
*.mp4
*.nucdata
*.out
*.ppm
*.sum
*.txt
*.vtd
*.vtu
*.xdmf
*dlayxs*
================================================
FILE: .gitmodules
================================================
[submodule "doc/tutorials/armi-example-app"]
path = doc/tutorials/armi-example-app
url = https://github.com/terrapower/armi-example-app.git
================================================
FILE: .licenserc.json
================================================
{
"**/*.py": "# Copyright "
}
================================================
FILE: AUTHORS
================================================
# This is the list of ARMI's contributors.
#
# This may not list everyone who has ever contributed code, important ideas, or discussions to ARMI. But this is a good
# faith attempt to give credit where it is due.
TerraPower, LLC
Aaron Reynolds (aaronjamesreynolds)
Aidan McDonald (AidanMcDonald)
Alex James (alexhjames)
Antoine Margeride (amargeride)
Arrielle Opotowsky (opotowsky)
Ashley Thompson (Ashlita6)
Bharat Medasani (mbk-tp)
Brandon LaFleur (bdlafleur)
Brian Sculac (bsculac)
Casey Stocking (clstocking)
Chris Keckler (keckler)
Chris Wong (crswong888)
Christen McKenzie (chris10mckenz)
David Pham (dpham-materials)
Drew Johnson (drewejohnson, drewj-tp, drewj-usnctech)
Dustin Langewisch (dlangewisch)
Evan Albright
Graham Malmgren
Hunter Smith (HunterPSmith)
Jacob Hader (jakehader)
James Marshall
Jason Meng (jasonbmeng)
Jeff Baylor (jeffbaylor)
Jinan Yang (jyang-TP)
John Stilley (john-science)
Jonathon Shimwell (shimwell)
Joshua Chen (joshuavictorchen)
Kayla Clements (clemekay)
Lim Swee Kiat (greentfrapp)
Mark Onufer (onufer)
Michael Castillo (kasticrunch, mcastillo10)
Michael Huang (LMikeH)
Michael Jarrett (mgjarrett)
Michael Johnson (mikepjohnson)
Mitch Young (youngmit)
Nick Touran (ntouran, partofthething)
Nicole Powell (nipowell)
Paul Romano (paulromano)
Peter McNabb
Samual Miller (sammiller11235)
Scott Yak (scottyak)
Tian Jing (TianJingwd)
Tommy Cisneros (sombrereau)
Tony Alberti (albeanth)
Virinder Sandhu (Nebbychadnezzar)
Wyatt Scherer (wcscherer)
Zachary Prince (zachmprince)
================================================
FILE: CONTRIBUTING.md
================================================
# Contribution License Agreement
For information on how to contribute to ARMI, see [our official documentation](https://terrapower.github.io/armi/developer/first_time_contributors.html).
This Contribution License Agreement (**"Agreement"**) is agreed to by the party signing below (**"You"**), and conveys certain license rights to TerraPower, LLC and its affiliates (**"TerraPower"**) for Your contributions to TerraPower open source projects. This Agreement is effective as of the latest signature date below.
## 1. Definitions.
**"Code"** means the computer software code, whether in human-readable or machine-executable form, that is delivered by You to TerraPower under this Agreement.
**"Project"** means any of the projects owned or managed by TerraPower in which software is offered under a license approved by the Open Source Initiative (OSI) ([www.opensource.org](http://www.opensource.org)) and documentation offered under an OSI or a Creative Commons license (https://creativecommons.org/licenses).
**"Submit"** is the act of uploading, submitting, transmitting, or distributing code or other content to any Project, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Project for the purpose of discussing and improving that Project, but excluding communication that is conspicuously marked or otherwise designated in writing by You as "Not a Submission."
**"Submission"** means the Code and any other copyrightable material Submitted by You, including any associated comments and documentation.
## 2. Your Submission.
You must agree to the terms of this Agreement before making a Submission to any Project. This Agreement covers any and all Submissions that You, now or in the future (except as described in Section 4 below), Submit to any Project.
## 3. Originality of Work.
You represent that each of Your Submissions is entirely Your original work. Should You wish to Submit materials that are not Your original work, You may Submit them separately to the Project if You (a) retain all copyright and license information that was in the materials as You received them, (b) in the description accompanying Your Submission, include the phrase "Submission containing materials of a third party:" followed by the names of the third party and any licenses or other restrictions of which You are aware, and (c) follow any other instructions in the Project’s written guidelines concerning Submissions.
## 4. Your Employer.
References to "employer" in this Agreement include Your employer or anyone else for whom You are acting in making Your Submission, e.g. as a contractor, vendor, or agent. If Your Submission is made in the course of Your work for an employer or Your employer has intellectual property rights in Your Submission by contract or applicable law, You must secure permission from Your employer to make the Submission before signing this Agreement. In that case, the term "You" in this Agreement will refer to You and the employer collectively. If You change employers in the future and desire to Submit additional Submissions for the new employer, then You agree to sign a new Agreement and secure permission from the new employer before Submitting those Submissions.
## 5. Licenses.
### a. Copyright License.
You grant TerraPower, and those who receive the Submission directly or indirectly from TerraPower, a perpetual, worldwide, non-exclusive, royalty-free, irrevocable license in the Submission to reproduce, prepare derivative works of, publicly display, publicly perform, and distribute the Submission and such derivative works, and to sublicense any or all of the foregoing rights to third parties.
### b. Patent License.
You grant TerraPower, and those who receive the Submission directly or indirectly from TerraPower, a perpetual, worldwide, non-exclusive, royalty-free, irrevocable license under Your patent claims that are necessarily infringed by the Submission or the combination of the Submission with the Project to which it was Submitted to make, have made, use, offer to sell, sell and import or otherwise dispose of the Submission alone or with the Project.
### c. Other Rights Reserved.
Each party reserves all rights not expressly granted in this Agreement. No additional licenses or rights whatsoever (including, without limitation, any implied licenses) are granted by implication, exhaustion, estoppel or otherwise.
## 6. Representations and Warranties.
You represent that You are legally entitled to grant the above licenses. You represent that each of Your Submissions is entirely Your original work (except as You may have disclosed under Section 3). You represent that You have secured permission from Your employer to make the Submission in cases where Your Submission is made in the course of Your work for Your employer or Your employer has intellectual property rights in Your Submission by contract or applicable law. If You are signing this Agreement on behalf of Your employer, You represent and warrant that You have the necessary authority to bind the listed employer to the obligations contained in this Agreement. You are not expected to provide support for Your Submission, unless You choose to do so. UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING, AND EXCEPT FOR THE WARRANTIES EXPRESSLY STATED IN SECTIONS 3, 4, AND 6, THE SUBMISSION PROVIDED UNDER THIS AGREEMENT IS PROVIDED WITHOUT WARRANTY OF ANY KIND, INCLUDING, BUT NOT LIMITED TO, ANY WARRANTY OF NONINFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
## 7. Notice to TerraPower.
You agree to notify TerraPower in writing of any facts or circumstances of which You later become aware that would make Your representations in this Agreement inaccurate in any respect.
## 8. Information about Submissions.
You agree that contributions to Projects and information about contributions may be maintained indefinitely and disclosed publicly, including Your name and other information that You submit with Your Submission.
## 9. Governing Law/Jurisdiction.
This Agreement is governed by the laws of the State of Washington, USA and the parties consent to exclusive jurisdiction and venue in the federal courts located in King County, Washington, USA unless no federal subject matter jurisdiction exists, in which case the parties consent to exclusive jurisdiction and venue in the Superior Court of King County, Washington, USA. The parties waive all defenses of lack of personal jurisdiction and forum non-conveniens.
## 10. Entire Agreement/Assignment.
This Agreement is the entire agreement between the parties, and supersedes any and all prior agreements, understandings or communications, written or oral, between the parties relating to the subject matter hereof. This Agreement may be assigned by TerraPower.
Please select one of the options below and sign as indicated. By signing, You accept and agree to the terms of this Contribution License Agreement for Your present and future Submissions to TerraPower.
================================================
FILE: LICENSE.md
================================================
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2020 TerraPower, LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================================================
FILE: README.rst
================================================
|Build Status| |Code Coverage| |Commit Activity| |Good First Issues|
#################
ARMI Introduction
#################
The Advanced Reactor Modeling Interface (ARMI\ :sup:`®`) is an open-source tool that streamlines your nuclear reactor
design/analysis needs by providing a software *reactor at your fingertips* and a rich ecosystem of utilities working in
concert. It is made for and by professional reactor analysis teams and is maintained by
`TerraPower LLC <https://terrapower.com/>`_, a nuclear technology development company.
ARMI:
* Provides a hub-and-spoke mechanism to standardize communication and coupling between physics kernels and the
specialist analysts who use them,
* Facilitates the creation and execution of detailed models and complex analysis methodologies,
* Provides an ecosystem within which to rapidly and collaboratively build new analysis and physics simulation
capabilities, and
* Provides useful utilities to assist in reactor development.
A few demos of ARMI can be seen in the `ARMI example gallery `_.
Using ARMI plus a collection of ARMI-aware physics plugins, an engineering team can perform a full analysis of a reactor
system and then repeat the same level of analysis with some changed input parameters for almost no additional cost. Even
better, thousands of perturbed cases can be executed in parallel on large clusters, helping conceptual design teams home
in on an optimal design. Or design teams can analyze sensitivities all the way from, for example, an impurity in a
control material to the peak structural temperature in a design-basis transient.
.. note:: ARMI does not come with a full selection of physics kernels. They will need to be acquired or developed for
your specific project in order to make full use of this tool. Many of the example use-cases discussed in this manual
require functionality that is not included in the open-source ARMI Framework.
In general, ARMI aims to enhance the quality, ease, and rigor of computational nuclear reactor design and analysis.
Additional high-level overview about this system can be found in [#touranarmi]_.
.. list-table:: Quick links
:widths: 30 70
* - Source code
- https://github.com/terrapower/armi
* - Documentation
- https://terrapower.github.io/armi
* - First time contributor's guide
- https://terrapower.github.io/armi/developer/first_time_contributors.html
* - Bug tracker
- https://github.com/terrapower/armi/issues
* - Plugin directory
- https://github.com/terrapower/armi-plugin-directory
* - Contact
- armi-devs@terrapower.com
Quick start
***********
Before starting, you need to have `Python <https://www.python.org/downloads/>`_ 3.9+.
Get the ARMI code, install the prerequisites, and fire up the launcher with the following commands. You probably want to
do this in a virtual environment as described in the
`Installation documentation `_. Otherwise, the dependencies could
conflict with your system dependencies.
First, upgrade your version of pip::
$ pip install -U pip>=22.1
Now clone and install ARMI::
$ git clone https://github.com/terrapower/armi
$ cd armi
$ pip install -e .
$ armi --help
The ARMI tests are meant to be run using `pytest <https://docs.pytest.org/en/latest/>`_ locally::
$ pip install -e ".[test]"
$ pytest -n 4 armi
From here, we recommend going through a few of our
`gallery examples `_ and
`tutorials `_ to start touring the features and capabilities and
then move on to the `User Manual `_.
Background
**********
Nuclear reactor design requires, among other things, answers to the following questions:
* Where are the neutrons? How fast are they moving? In which direction?
* How quickly are atomic nuclei splitting? How long until the fuel runs out? How many atoms in the structure are being
energetically displaced?
* How much heat do these reactions produce? How quickly must coolant flow past the fuel to maintain appropriate
temperatures? What are the temperatures of the fuel, coolant, and structure?
* Can the structural arrangement support itself given the temperatures and pressures induced by the flowing coolant? For
how long?
* If a pump loses power or a control rod accidentally withdraws, how quickly will the chain reaction stop while keeping
radiation contained?
* How much used nuclear fuel is generated per useful energy produced? How long until it decays to stability?
* Where and when should we move the fuel to most economically maintain the chain reaction?
* What's the dose and activation above the head and in the secondary loop?
* How does containment handle various postulated accidents?
* How does the building handle earthquakes?
Digital computers have assisted in nuclear technology development since the days of the ENIAC in the 1940s. We now
understand reactor physics well enough to build detailed simulations, which can answer many of these design questions in
a cost-effective, and flexible manner. This allows us to simulate all kinds of different reactors with different fuels,
coolants, moderators, power levels, safety systems, and power cycles. We can run our virtual reactors through the
decades, tossing various off-normal conditions at them now and then, to see how they perform in terms of capability,
economics, and safety.
Perhaps surprisingly, some nuclear software written in the 1960s is still in use today. These codes are validated
against physical experiments that no longer exist. Meanwhile, new cutting-edge nuclear software is being developed for
today's powerful computers. Both old and new, these tools are often challenging to operate and coordinate to produce a
full reactor analysis.
The ARMI approach was born out of this situation: how can we best leverage an eclectic mix of legacy and modern tools
with a small team to do full-scope analysis? We built a framework that lets us automate the tedious, uncoupled, and
error-prone parts of reactor engineering/analysis work. We can turn around a very meaningful and detailed core analysis
given a major change (e.g. change power by 50%) in just a few weeks. We can dispatch hundreds of parameter sweeps to
multiple machines and then perform multi-objective optimization on the resulting design space.
The ARMI system is largely written in the Python programming language. Its high-level nature allows nuclear and
mechanical engineers to rapidly automate their analysis tasks from their sub-specialties. This helps eliminate the
translation step between computer-scientists and power plant design engineers. This allows good division of labor: the
computer scientists can focus on the overall performance and maintainability of the framework, while the power plant
engineers focus on power plant engineering.
We have spent over 10 years developing this system. Because of ARMI's high-level nature, we believe we can collaborate
effectively with all ongoing reactor software developments.
Communication and coupling
**************************
ARMI provides a central place for all physics kernels to interact: the Reactor Model. All modules read *state*
information from this Reactor and write their output to it. This common interface allows seamless communication and
coupling between different physics sub-specialties. If you plug one new physics kernel into ARMI, it becomes coupled to
N other kernels. The ARMI Framework, depicted in green below, is the majority of the open source package. Several
skeletal analysis routines are included as well to perform basic data management and to help align efforts on external
physics kernels.
.. figure:: https://terrapower.github.io/armi/_static/armiSchematicView.png
:figclass: align-center
**Figure 1.** The schematic representation of the ARMI data model.
Automation
**********
ARMI can quickly and easily produce complex input files with high levels of detail in various approximations. This
enables users to perform rapid high-fidelity analyses to make sure all important physics are captured. It also enables
sensitivity studies of different modeling approximations (e.g. symmetries, transport vs. diffusion vs. Monte Carlo,
subchannel vs. CFD, etc.).
.. figure:: https://terrapower.github.io/armi/_static/armiGeometries.png
:figclass: align-center
**Figure 2.** A variety of approximations in hexagonal geometry (1/3-core, full core, pin detailed, etc.) are shown,
all derived from one consistent input file. ARMI supports Cartesian, Hex, RZ, and RZTheta geometric grids and
includes many geometric components. Additionally, users can provide custom geometric elements.
New analysis and physics capabilities
*************************************
The ARMI reactor model is fully accessible via a Python-based API, meaning that power-users and developers have full
access to the details of the plant at all times. Developers adding new physics features can take advantage of the ARMI
data management structure by simply reading and writing to the Reactor state. Leveraging the infrastructure of ARMI,
progress can be made rapidly.
Power-user analysts can modify the plant in many ways. For instance, removing all sodium coolant is a one-liner::
core.setNumberDensity('NA23',0.0)
and finding the peak power density is easy::
core.getMaxParam('pdens')
Any ARMI state can be written out to whichever format the user desires, meaning that nominally identical cases can be
produced for multiple similar codes in sensitivity studies. To read power densities, simply read them off the assembly
objects. Instead of producing spreadsheets and making plots manually, analysts may write scripts to generate output
reports that run automatically.
Writing a module within ARMI automatically features access to the ARMI API, including:
* Cross section processing
* Material properties
* Thermal expansion
* Database persistence
* Data visualization
* A code testing, documentation, and version control system
Use cases
*********
Given an input describing a reactor, a typical ARMI run loops over a set of plugins in a certain sequence. Some plugins
trigger third-party simulation codes, producing input files for them, executing them, and translating the output back
onto the reactor model. Other plugins perform physics simulations directly.
For example, one ARMI sequence may involve the calculation of:
* nuclear cross sections
* global flux and power
* subchannel temperatures
* duct wall pressures
* cladding strain and wastage
* fission gas pressure
* reactivity feedbacks
* flow orificing
* the equilibrium fuel cycle
* control rod worth
* shutdown margin
* frequency stability margins
* peak cladding temperature
* transient analysis
* total levelized cost of electricity for the run
Another ARMI sequence may simply compute the cost of feed uranium and enrichment in an initial core and quit.
Larger simulations may also run through the multi-objective design optimization system, which runs many cases with
input perturbations to help find the best overall system, considering all important physics at the same time.
Other interest may come from the following:
The Research Scientist
======================
A nuclear reactor research scientist, at a national lab or university, may benefit from ARMI. An ARMI workflow can
reduce the time spent on data management. ARMI can handle the tedium so that researchers can better focus on designing
and testing their research.
For example, if an ARMI input file describing the FFTF reactor is provided, the researcher can start running benchmark
cases with their new code method very rapidly, rather than spending the time building their own FFTF model.
If someone wants to try varying nuclear cross sections by a percent here and there to compute sensitivities, ARMI is a
perfect platform upon which to operate.
If a reactor designer wants to try out a new Machine Learning algorithm for fuel management, plugging it into ARMI and
having it run on all the physics kernels of the ARMI ecosystem will be a great way to prove its true value (note that
this requires a rich ARMI physics ecosystem).
The Nuclear Startup Engineer
============================
As various companies evaluate their ideas, they need tools for analysis. They can pick up ARMI and save 10 years of
development and hit the ground running by plugging in their design-specific physics kernels and proprietary design
inputs. ARMI's parameter sweep features, reactor model, and parallel utilities will all come in handy immediately.
Operating and Vendor Engineers
==============================
People at well-established utilities or vendors can hook ARMI into their legacy systems and increase their overall
productivity.
The Enthusiast
==============
If an enthusiast wants to try out a reactor idea they have, they can use ARMI (plus some physics kernels) to quickly get
some performance metrics. They can see if their idea has wings, and if it does, they can then find a way to bring it to
engineering and commercial reality.
History of ARMI
***************
ARMI was originally created by TerraPower, LLC near Seattle WA starting in 2009. Its founding mission was to determine
the optimal fuel management operations required to transition a fresh Traveling Wave Reactor core from startup into an
equilibrium state. It started out automating the Argonne National Lab (ANL) fast reactor neutronics codes, MC2 and
REBUS. The reactor model design was made with the intention of adding other physics capabilities later. Soon, simple
thermal hydraulics were added and it's grown ever since. It has continuously evolved towards a general reactor analysis
framework.
Following requests by outside parties to use ARMI, we started working on a more modular architecture for ARMI, allowing
some of the intertwined physics capabilities to be separated out as plugins from the standalone framework.
The nuclear industry is small, and it faces many challenges. It also has a tradition of secrecy. As a result, there is
risk of overlapping work being done by other entities.
We hypothesize that collaborating on software systems can help align some efforts worldwide, increasing quality and
efficiency. In reactor development, the idea is generally cheap. It's the shakedown, technology and supply chain
development, engineering demo, and commercial demo that are the hard parts.
Thus, ARMI was released under an open-source license in 2019 to facilitate mutually beneficial collaboration across the
nuclear industry, where many teams are independently developing similar reactor analysis/automation frameworks.
We also hope that if more people can rapidly analyze the performance of their reactor ideas, limited available funding
can be spent more effectively.
System Requirements
*******************
Being largely written in the Python programming language, the ARMI system works on most platforms. It can perform
meaningful analysis on a single laptop, but the full value of design optimization and large problems is realized with
parallel runs over large clusters (using the optional ``mpi4py`` library).
.. _getting-help:
Getting Help
************
You can get help with ARMI by either making issues on our `GitHub page <https://github.com/terrapower/armi/issues>`_ or
by e-mailing armi-devs@terrapower.com.
Disclaimers
***********
Due to TerraPower goals and priorities, many ARMI modules were developed with sodium-cooled fast reactors as a
target, and are not necessarily yet optimized for other plants. This is a known issue with code organization and we are
working on it. On the other hand, the framework is sufficiently general that people have modeled other reactor types
with ARMI, including thermal reactors.
ARMI was developed within a rapidly changing R&D environment. It evolved accordingly, and naturally carries some legacy.
We continuously attempt to identify and update problematic parts of the code. Users should understand that ARMI is not a
polished consumer software product, but rather a powerful and flexible engineering tool. It has the potential to
accelerate work on many kinds of reactors.
ARMI has been written to support specific engineering/design tasks. As such, polish in the GUIs and output is somewhat
lacking.
The ARMI framework uses the ``camelCase`` style, which is not the standard style for Python. As this is an issue of
style, it is not considered worth the API-breaking cost to our downstream users to change it.
License
*******
TerraPower and ARMI are registered trademarks of TerraPower, LLC. Other trademarks and registered trademarks used in
this Manual are the property of the respective trademark holders.
The ARMI system is licensed as follows:
.. code-block:: none
Copyright 2009 TerraPower, LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Be careful when including any dependency in ARMI (say in the ``pyproject.toml`` file) not to include anything with a
license that supersedes our Apache license. For instance, any third-party Python library included in ARMI with a GPL
license will make the whole project fall under the GPL license. But a lot of potential users of ARMI will want to keep
some of their work private, so we can't allow any GPL dependencies.
For that reason, it is generally considered best-practice in the ARMI ecosystem to only use third-party Python libraries
that have MIT or BSD licenses.
.. [#touranarmi] Touran, Nicholas W., et al. "Computational tools for the integrated design of advanced nuclear reactors."
Engineering 3.4 (2017): 518-526. https://doi.org/10.1016/J.ENG.2017.04.016
.. |Build Status| image:: https://github.com/terrapower/armi/actions/workflows/unittests.yaml/badge.svg?branch=main
:target: https://github.com/terrapower/armi/actions/workflows/unittests.yaml
.. |Code Coverage| image:: https://codecov.io/gh/terrapower/armi/branch/main/graph/badge.svg
:target: https://app.codecov.io/gh/terrapower/armi/tree/main
.. |Commit Activity| image:: https://img.shields.io/github/commit-activity/m/terrapower/armi
:target: https://github.com/terrapower/armi/pulse
.. |Good First Issues| image:: https://img.shields.io/github/issues/terrapower/armi/good%20first%20issue
:target: https://github.com/terrapower/armi/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22
================================================
FILE: armi/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Welcome to the Advanced Reactor Modeling Interface (ARMI).
This module initializes the ARMI platform. The bootstrap process is broken into several phases:
* Import fundamental dependencies in Python library and some third-party libs
* Investigate environment: Check Python version, code version, MPI situation, and TTY/GUI/interactivity,
* Set up temp dirs
* Set up printout table formats (in preparation of logging info)
* Initialize all possible nuclide objects in the nuclide directory
* Discover and register available built-in :py:mod:`plugins ` (custom ones are registered after inputs)
* Discover and define all potential configuration settings from available plugins
* Read input files
* Update :py:mod:`nuclide directory ` with depletion info based on config
* Discover and define all state :py:mod:`Parameters ` on data model (maybe dependent on config)
* Discover :py:mod:`Entry points ` from plugins
* Choose entry point based on user command
If using the ``run`` entry point, additional work is done:
* Build :py:mod:`reactor model ` based on input
* Build :py:mod:`operator object ` with specific calculation loop
* Build ordered interface stack based on configuration settings
* Begin looping over interface stack, operating upon data model according to operator design
* Loop until complete
* Wrap up
* Quit
"""
# ruff: noqa: F401
import atexit
import datetime
import importlib
import os
import signal
import subprocess
import sys
import traceback
import warnings
from typing import List, Optional, Type
import __main__ as main
# The _bootstrap module performs operations that may need to occur before it is necessarily safe to import the rest of
# the ARMI system. Things like:
# - configure the MPI environment
# - detect the nature of interaction with the user (terminal UI, GUI, unsupervized, etc)
# - Initialize the nuclide database
import armi._bootstrap
from armi import apps, cli, context, pluginManager, plugins, runLog
from armi.context import (
APP_DATA,
CURRENT_MODE,
DOC,
MPI_COMM,
MPI_DISTRIBUTABLE,
MPI_NODENAME,
MPI_NODENAMES,
MPI_RANK,
MPI_SIZE,
RES,
ROOT,
START_TIME,
USER,
Mode,
)
from armi.meta import __version__
from armi.nucDirectory import nuclideBases
from armi.reactor import flags, parameters
# ARMI does not configure its own application by default. This is mostly to catch issues involving calling code that
# requires the framework to be configured before that has explicitly taken place. An application should call
# `configure()` with its App class in order for ARMI to work properly
_app: Optional[apps.App] = None
_ARMI_CONFIGURE_CONTEXT: Optional[str] = None
# Advanced flag used in documentation builds to avoid isConfigured guards.
_ignoreConfigures = False
def disableFutureConfigures():
    """Permanently suppress any subsequent ``armi.configure()`` calls.

    Sets the module-level ``_ignoreConfigures`` flag, turning later calls to
    ``configure()`` into no-ops (an advanced hook used by documentation builds
    to avoid the configured-more-than-once guard).
    """
    global _ignoreConfigures
    _ignoreConfigures = True
def isStableReleaseVersion(version=None):
    """Return True when *version* (default: the current ARMI version) is a stable release.

    Any version string containing a hyphen (pre-release / dev suffixes) is
    considered unstable.
    """
    ver = version or __version__
    return ver.find("-") == -1
def init(fName=None, cs=None, skipInspection=False, choice=None):
    """
    Scan a directory for armi inputs and load one to interact with.

    .. impl:: Settings are used to define an ARMI run.
        :id: I_ARMI_SETTING1
        :implements: R_ARMI_SETTING

        This method initializes an ARMI run, and if successful returns an Operator. That operator is designed to
        drive the reactor simulation through time steps to simulate its operation. This method takes in a settings
        file or object to initialize the operator. Whether a settings file or object is supplied, the operator will
        be built based on those settings. Because the total collection of settings can be modified by developers
        of ARMI applications, providing these settings allows ARMI end-users to granularly define their
        simulations.

    Parameters
    ----------
    fName : str, optional
        The path to a settings file to load: my_case.yaml
    cs : Settings, optional
        If supplied, this CS object will supersede the other case input methods and use the object directly.
    skipInspection : bool, optional
        Whether or not the inputs should be checked for valid settings. Default is False.
    choice : int, optional
        Automatically run with this item out of the menu that would be produced by the existing YAML files.

    Examples
    --------
    >>> o = armi.init()
    """
    from armi import cases, settings

    if cs is None:
        if fName is None:
            # no settings supplied at all: prompt the user to pick a YAML file
            fName = settings.promptForSettingsFile(choice)
        cs = settings.Settings(fName)

    armiCase = cases.Case(cs=cs)
    if not skipInspection:
        armiCase.checkInputs()

    try:
        return armiCase.initializeOperator()
    except:  # deliberately bare: flush the log for *any* failure, then re-raise
        runLog.close()
        raise
def getDefaultPlugins() -> List[Type[plugins.ArmiPlugin]]:
    """
    Return the default set of ARMI Framework plugin classes.

    Useful for an application that wants to fold the ARMI Framework's
    capabilities into its own plugin collection.
    """
    from armi import bookkeeping, cli, reactor
    from armi.physics import fuelCycle, neutronics, safety

    return [
        cli.EntryPointsPlugin,
        bookkeeping.BookkeepingPlugin,
        fuelCycle.FuelHandlerPlugin,
        neutronics.NeutronicsPlugin,
        safety.SafetyPlugin,
        reactor.ReactorPlugin,
    ]
def getDefaultPluginManager() -> pluginManager.ArmiPluginManager:
    """
    Return a new plugin manager holding the default ARMI Framework plugins.

    Useful when using standalone facilities of ARMI without a specific application.
    """
    manager = plugins.getNewPluginManager()
    for pluginClass in getDefaultPlugins():
        manager.register(pluginClass)
    return manager
def isConfigured():
    """Return True once ARMI has been configured with an App via ``configure()``."""
    return _app is not None
def getPluginManager() -> Optional[pluginManager.ArmiPluginManager]:
    """Return the configured App's plugin manager, or None if no App is configured."""
    # plain read of the module-level _app; no `global` statement is needed
    return None if _app is None else _app.pluginManager
def getPluginManagerOrFail() -> pluginManager.ArmiPluginManager:
    """Return the plugin manager of the configured App.

    Raises
    ------
    AssertionError
        If no App has been configured (``armi.configure()`` was never called).
    """
    # Raise explicitly rather than relying on an ``assert`` statement so that
    # this guard still fires when Python runs with optimizations (-O strips
    # asserts). The exception type and message are unchanged for callers.
    if _app is None:
        raise AssertionError(
            "The ARMI plugin manager was requested, no App has been configured. Ensure that `armi.configure()` has been "
            "called before attempting to interact with the plugin manager."
        )
    return _app.pluginManager
def getApp() -> Optional[apps.App]:
    """Return the currently configured App, or None if ARMI is unconfigured."""
    return _app
def _cleanupOnCancel(signum, _frame):
    """Signal handler: clean temporary files, flush streams, and terminate."""
    message = f"Caught Cancel signal ({signum}); cleaning temporary files and exiting..."
    print(message, file=sys.stderr)
    context.cleanFastPathAfterSimulation()
    for stream in (sys.stdout, sys.stderr):
        stream.flush()
    # since we're handling the signal ourselves, we must exit explicitly
    sys.exit(1)
def _liveInterpreter():
"""Return whether we are running within a live/interactive python interpreter."""
return not hasattr(main, "__file__")
def configure(app: Optional[apps.App] = None, permissive=False):
    """
    Set the plugin manager for the Framework and configure internals to those plugins.

    Parameters
    ----------
    app :
        An :py:class:`armi.apps.App` instance with which the framework is to be configured. If it is not provided,
        then the default ARMI App will be used.
    permissive :
        Whether or not an error should be produced if ``configure`` is called more than once. This should only be
        set to ``True`` under testing or demonstration purposes, where the contents of otherwise independent
        scripts need to be run under the same python instance.

    Important
    ---------
    Since this affects the behavior of several modules at their import time, it is generally not safe to
    re-configure the ARMI framework once it has been configured. Therefore this will raise a ``RuntimeError`` if
    such a re-configuration is attempted, unless ``permissive`` is set to ``True``.

    Notes
    -----
    We are planning on encapsulating much of the global ARMI state that gets configured with an App into the App
    object itself (with some other things going into the Case object). This will provide a number of benefits, the
    main one being that it will become trivial to re-configure the framework, which is currently not possible.
    """
    global _app
    global _ARMI_CONFIGURE_CONTEXT

    # Documentation builds set this flag (via disableFutureConfigures) to bypass the one-shot guard entirely.
    if _ignoreConfigures:
        return

    app = app or apps.App()

    if _app is not None:
        # A repeat call is tolerated only in permissive mode with a plain App instance.
        if permissive and isinstance(app, apps.App):
            return
        else:
            raise RuntimeError(
                f"Multiple calls to armi.configure() are not allowed. Previous call from:\n{_ARMI_CONFIGURE_CONTEXT}"
            )

    assert not context.BLUEPRINTS_IMPORTED, (
        "ARMI can no longer be configured after blueprints have been imported. Blueprints were imported from"
        f":\n{context.BLUEPRINTS_IMPORT_CONTEXT}"
    )

    # Record where configuration happened so a second call can report the offending traceback.
    _ARMI_CONFIGURE_CONTEXT = "".join(traceback.format_stack())

    _app = app
    context.APP_NAME = app.name

    # In an interactive session there is no CLI entry point, so start logging here and show the splash.
    if _liveInterpreter():
        runLog.LOG.startLog(name=f"interactive-{app.name}")
        cli.splash()

    # Fold plugin-defined parameters into the parameter system and register plugin flags.
    pm = app.pluginManager
    parameters.collectPluginParameters(pm)
    parameters.applyAllParameters()
    _app.registerPluginFlags()
def applyAsyncioWindowsWorkaround() -> None:
    """
    Apply an asyncio workaround for Windows with Python 3.8+.

    This prevents a NotImplementedError on Windows with Python 3.8. The error
    showed up during jupyter notebook build tests and documentation builds.
    See https://bugs.python.org/issue37373
    """
    import asyncio

    onWindows = sys.platform.startswith("win")
    modernPython = sys.version_info[0] == 3 and sys.version_info[1] >= 8
    if onWindows and modernPython:
        asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
# Apply the event-loop workaround at import time so later asyncio use is safe.
applyAsyncioWindowsWorkaround()

# The ``atexit`` handler is like putting it in a finally after everything.
atexit.register(context.cleanFastPathAfterSimulation)

# Register cleanups upon HPC cancellations. Linux clusters will send a different signal.
# SIGBREAK doesn't exist on non-Windows platforms. NOTE: this actually doesn't work in
# MPI runs because MSMPI's mpiexec does not pass signals through.
if os.name == "nt":
    signal.signal(signal.SIGBREAK, _cleanupOnCancel)
signal.signal(signal.SIGINT, _cleanupOnCancel)
================================================
FILE: armi/__main__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Primary entry point into ARMI.
There are a variety of entry points in the ``cli`` package that define the various run options.
This invokes them according to command-line user input.
"""
import sys
import traceback
from armi import apps, configure, context, isConfigured, runLog
from armi.cli import ArmiCLI
def main():
    """Main entry point into ARMI: configure the default App if needed, then run the CLI."""
    try:
        if not isConfigured():
            configure(apps.App())
        code = ArmiCLI().run()
        # sys.exit interprets None as 0
        sys.exit(code)
    except Exception:
        # Make sure not to catch all BaseExceptions, lest we catch the expected SystemExit exception
        runLog.error(
            f"Unhandled exception in __main__, rank {context.MPI_RANK} on {context.MPI_NODENAME}.",
            file=sys.__stderr__,
        )
        runLog.error(traceback.format_exc(), file=sys.__stderr__)
        if context.MPI_SIZE > 1:
            runLog.error(
                f"Killing all MPI tasks from __main__, rank {context.MPI_RANK}.",
                file=sys.__stderr__,
            )
            # cleanFastPathAfterSimulation has @atexit.register so it should be called at the end, but
            # MPI_COMM.Abort will not allow @atexit.register or except/finally code to run, so we
            # call the cleanup explicitly here as well.
            context.cleanFastPathAfterSimulation()
            # .Abort will not allow for @atexit.register or except/finally code to be called
            context.MPI_COMM.Abort(errorcode=-1)
        raise SystemExit(1)
# Standard script entry guard: only run the CLI when invoked directly (e.g. ``python -m armi``).
if __name__ == "__main__":
    main()
================================================
FILE: armi/_bootstrap.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code that needs to be executed before most ARMI components are safe to import."""
from armi.nucDirectory import nuclideBases  # noqa: E402

# Nuclide bases get built explicitly here to have better determinism
# about when they get instantiated. The burn chain is not applied
# at this point, but only after input is read. Nuclides need to be built super early
# because some import-time code needs them to function. Namely, Block parameter
# collection uses them to create number density params.
nuclideBases.factory()
================================================
FILE: armi/apps.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The base ARMI App class.
This module defines the :py:class:`App` class, which is used to configure the ARMI
Framework for a specific application. An ``App`` implements a simple interface for
customizing much of the Framework's behavior.
"""
# ruff: noqa: E402
import collections
import importlib
import sys
from typing import Dict, List, Optional, Tuple
from armi import context, meta, pluginManager, plugins, settings
from armi.reactor import parameters
from armi.reactor.flags import Flags
from armi.settings import Setting, fwSettings
class App:
"""
The highest-level of abstraction for defining what happens during an ARMI run.
.. impl:: An App has a plugin manager.
:id: I_ARMI_APP_PLUGINS
:implements: R_ARMI_APP_PLUGINS
The App class is intended to be subclassed in order to customize the functionality
and look-and-feel of the ARMI Framework for a specific use case. An App contains a
plugin manager, which should be populated in ``__init__()`` with a collection of
plugins that are deemed suitable for a given application, as well as other methods
which provide further customization.
The base App class is also a good place to expose some more convenient ways to get
data out of the Plugin API; calling the ``pluggy`` hooks directly can sometimes be a
pain, as the results returned by the individual plugins may need to be merged and/or
checked for errors. Adding that logic here reduces boilerplate throughout the rest
of the code.
"""
name = "armi"
"""
The program name of the app. This should be the actual name of the python entry
point that loads the app, or the name of the module that contains the appropriate
__main__ function. For example, if the app is expected to be invoked with ``python
-m myapp``, ``name`` should be ``"myapp"``
"""
def __init__(self):
"""
This mostly initializes the default plugin manager. Subclasses are free to adopt
this plugin manager and register more plugins of their own, or to throw it away
and start from scratch if they do not wish to use the default Framework plugins.
For a description of the things that an ARMI plugin can do, see the
:py:mod:`armi.plugins` module.
"""
self._pluginFlagsRegistered: bool = False
self._pm: Optional[pluginManager.ArmiPluginManager] = None
self._paramRenames: Optional[Tuple[Dict[str, str], int]] = None
self.__initNewPlugins()
def __initNewPlugins(self):
from armi import bookkeeping, cli, reactor
from armi.physics import (
fuelCycle,
fuelPerformance,
neutronics,
safety,
thermalHydraulics,
)
self._pm = plugins.getNewPluginManager()
for plugin in (
cli.EntryPointsPlugin,
bookkeeping.BookkeepingPlugin,
fuelCycle.FuelHandlerPlugin,
fuelPerformance.FuelPerformancePlugin,
neutronics.NeutronicsPlugin,
safety.SafetyPlugin,
thermalHydraulics.ThermalHydraulicsPlugin,
reactor.ReactorPlugin,
):
self._pm.register(plugin)
self._paramRenames = None
@property
def version(self) -> str:
"""Grab the version of this app (defaults to ARMI version).
Notes
-----
This is designed to be over-ridable by Application developers.
"""
return meta.__version__
@property
def pluginManager(self) -> pluginManager.ArmiPluginManager:
"""Return the App's PluginManager."""
return self._pm
    def getSettings(self) -> Dict[str, Setting]:
        """Return a dictionary containing all Settings defined by the framework and all plugins.

        Returns
        -------
        Dict[str, Setting]
            Mapping from setting name to Setting object, merged from the framework
            settings and every plugin's ``defineSettings`` hook.

        Raises
        ------
        ValueError
            If a plugin redefines an existing setting, or provides an Option/Default
            for a setting that no plugin ever defines.
        TypeError
            If a plugin's ``defineSettings`` hook returns an object that is not a
            Setting, Option, or Default.
        """
        # Start with framework settings
        settingDefs = {setting.name: setting for setting in fwSettings.getFrameworkSettings()}

        # The optionsCache stores options that may have come from a plugin before the setting to
        # which they apply. Whenever a new setting is added, we check to see if there are any
        # options in the cache, popping them out and adding them to the setting. If all plugins'
        # settings have been processed and the cache is not empty, that's an error, because a plugin
        # must have provided options to a setting that doesn't exist.
        optionsCache: Dict[str, List[settings.Option]] = collections.defaultdict(list)
        defaultsCache: Dict[str, settings.Default] = {}

        for pluginSettings in self._pm.hook.defineSettings():
            for pluginSetting in pluginSettings:
                if isinstance(pluginSetting, settings.Setting):
                    name = pluginSetting.name
                    if name in settingDefs:
                        raise ValueError(f"The setting {pluginSetting.name} already exists and cannot be redefined.")
                    settingDefs[name] = pluginSetting
                    # handle when new setting has modifier in the cache (modifier loaded first)
                    if name in optionsCache:
                        settingDefs[name].addOptions(optionsCache.pop(name))
                    if name in defaultsCache:
                        settingDefs[name].changeDefault(defaultsCache.pop(name))
                elif isinstance(pluginSetting, settings.Option):
                    if pluginSetting.settingName in settingDefs:
                        # modifier loaded after setting, so just apply it (no cache needed)
                        settingDefs[pluginSetting.settingName].addOption(pluginSetting)
                    else:
                        # no setting yet, cache it and apply when it arrives
                        optionsCache[pluginSetting.settingName].append(pluginSetting)
                elif isinstance(pluginSetting, settings.Default):
                    if pluginSetting.settingName in settingDefs:
                        # modifier loaded after setting, so just apply it (no cache needed)
                        settingDefs[pluginSetting.settingName].changeDefault(pluginSetting)
                    else:
                        # no setting yet, cache it and apply when it arrives
                        defaultsCache[pluginSetting.settingName] = pluginSetting
                else:
                    raise TypeError(
                        "Invalid setting definition found: {} ({})".format(pluginSetting, type(pluginSetting))
                    )

        # anything left in either cache targeted a setting that was never defined
        if optionsCache:
            raise ValueError(
                "The following options were provided for settings that do "
                "not exist. Make sure that the set of active plugins is "
                "consistent.\n{}".format(optionsCache)
            )
        if defaultsCache:
            raise ValueError(
                "The following defaults were provided for settings that do "
                "not exist. Make sure that the set of active plugins is "
                "consistent.\n{}".format(defaultsCache)
            )
        return settingDefs
def getParamRenames(self) -> Dict[str, str]:
"""
Return the parameter renames from all registered plugins.
This renders a merged dictionary containing all parameter renames from all of the registered
plugins. It also performs simple error checking. The result of this operation is cached,
since it is somewhat expensive to perform. If the App detects that its plugin manager's set
of registered plugins has changed, the cache will be invalidated and recomputed.
"""
cacheInvalid = False
if self._paramRenames is not None:
renames, counter = self._paramRenames
if counter != self._pm.counter:
cacheInvalid = True
else:
cacheInvalid = True
if cacheInvalid:
currentNames = {pd.name for pd in parameters.ALL_DEFINITIONS}
renames = dict()
for pluginRenames in self._pm.hook.defineParameterRenames():
collisions = currentNames & pluginRenames.keys()
if collisions:
raise plugins.PluginError(
"The following parameter renames from a plugin collide with "
"currently-defined parameters:\n{}".format(collisions)
)
pluginCollisions = renames.keys() & pluginRenames.keys()
if pluginCollisions:
raise plugins.PluginError(
"The following parameter renames are already defined by another plugin:\n{}".format(
pluginCollisions
)
)
renames.update(pluginRenames)
self._paramRenames = renames, self._pm.counter
return renames
def registerPluginFlags(self):
"""
Apply flags specified in the passed ``PluginManager`` to the ``Flags`` class.
See Also
--------
armi.plugins.ArmiPlugin.defineFlags
"""
if self._pluginFlagsRegistered:
raise RuntimeError("Plugin flags have already been registered. Cannot do it twice!")
for pluginFlags in self._pm.hook.defineFlags():
Flags.extend(pluginFlags)
self._pluginFlagsRegistered = True
def registerUserPlugins(self, pluginPaths):
    r"""
    Register additional plugins passed in by importable paths.

    These plugins may be provided, e.g., by an application during startup based
    on user input. The expected format is a list of full namespaces to plugin
    classes, with commas between individual plugins and dots representing the
    file path or importable python namespace.

    Examples
    --------
    importable namespace:
        ``armi.stuff.plugindir.pluginMod.pluginCls,armi.whatever.plugMod2.plugCls2``
    or on Linux/Unix:
        ``/path/to/pluginMod.py:pluginCls,/path/to/plugMod2.py:plugCls2``
    or on Windows:
        ``C:\\path\\to\\pluginMod.py:pluginCls,C:\\\\path\\to\\plugMod2.py:plugCls2``

    Notes
    -----
    These paths are meant to be taken from a settings file, though this method
    is public. The idea is that these "user plugins" differ from regular plugins
    because they are defined during run time, not import time. As such, we
    restrict their flexibility and power as compared to the usual ArmiPlugins.
    """
    for path in pluginPaths:
        # anything the plugin manager already knows about is skipped silently
        if self._isPluginRegistered(path):
            continue
        if ".py:" in path:
            # file-system form: /path/to/why.py:MyPlugin
            self.__registerUserPluginsAbsPath(path)
        else:
            # importable-namespace form: armi.thing.what.MyPlugin
            self.__registerUserPluginsInternalImport(path)
def _isPluginRegistered(self, pluginPath: str):
r"""
Check if the plugin at the provided path is already registered.
The expected path formats are:
------------------------------
importable namespace:
``armi.stuff.plugindir.pluginMod.pluginCls``
or on Linux/Unix:
``/path/to/pluginMod.py:pluginCls``
or on Windows:
``C:\\path\\to\\pluginMod.py:pluginCls``
Parameters
----------
pluginPath : str
String path to a userPlugin.
Returns
-------
bool
Whether or not the plugin name is already registered with the manager.
"""
if ":" in pluginPath:
pluginName = pluginPath.strip().split(":")[-1]
else:
pluginName = pluginPath.strip().split(".")[-1]
return self._pm.has_plugin(pluginName)
def __registerUserPluginsAbsPath(self, pluginPath):
    """Helper method to register a single UserPlugin via absolute path.

    Here the given path is of the form: /path/to/why.py:MyPlugin

    Parameters
    ----------
    pluginPath : str
        File-system path to a Python module, a ``:``, then the plugin class name.
    """
    assert pluginPath.count(".py:") == 1, f"Invalid plugin path: {pluginPath}"
    # split the settings string into file path and class name
    filePath, className = pluginPath.split(".py:")
    filePath += ".py"
    # Standard importlib machinery: build a spec from the file, create the
    # module object, publish it in sys.modules *before* executing it (as the
    # import protocol requires), then execute the module body.
    spec = importlib.util.spec_from_file_location(className, filePath)
    mod = importlib.util.module_from_spec(spec)
    sys.modules[spec.name] = mod
    spec.loader.exec_module(mod)
    # pull the plugin class out of the freshly-executed module and register it
    plugin = getattr(mod, className)
    assert issubclass(plugin, plugins.UserPlugin)
    self._pm.register(plugin)
    # ensure UserPlugin flags are loaded
    newFlags = plugin.defineFlags()
    if newFlags:
        Flags.extend(newFlags)
def __registerUserPluginsInternalImport(self, pluginPath):
    """Helper method to register a single UserPlugin via internal import.

    Here the given path is of the form: armi.thing.what.MyPlugin
    """
    # everything before the final dot is the module path; after it, the class
    modPath, _, clsName = pluginPath.strip().rpartition(".")
    mod = importlib.import_module(modPath)
    plugin = getattr(mod, clsName)
    assert issubclass(plugin, plugins.UserPlugin)
    self._pm.register(plugin)
    # ensure UserPlugin flags are loaded
    newFlags = plugin.defineFlags()
    if newFlags:
        Flags.extend(newFlags)
@property
def splashText(self):
    """
    Return a textual splash screen.

    Specific applications will want to customize this, but by default the ARMI one
    is produced, with extra data on the App name and version, if available.
    """
    # typical ARMI splash text
    # NOTE: the banner literal is whitespace-sensitive (each line is part of the
    # rendered ASCII art inside a 53-character border) — do not re-wrap or re-indent.
    splash = r"""
+===================================================+
|            _      ____     __  __    ___          |
|           / \    |  _ \   |  \/  |  |_ _|         |
|          / _ \   | |_) |  | |\/| |   | |          |
|         / ___ \  |  _ <   | |  | |   | |          |
|        /_/   \_\ |_| \_\  |_|  |_|  |___|         |
|       Advanced  Reactor  Modeling Interface       |
|                                                   |
|                    version {0:10s}             |
|                                                   |""".format(meta.__version__)
    # add the name/version of the current App, if it's not the default
    if context.APP_NAME != "armi":
        # local import to avoid a circular import at module load time
        from armi import getApp

        splash += r"""
|---------------------------------------------------|
| {0:>17s} app version {1:10s}          |""".format(context.APP_NAME, getApp().version)
    # bottom border of the splash
    splash += r"""
+===================================================+
"""
    return splash
================================================
FILE: armi/bookkeeping/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The bookkeeping package handles data persistence, reporting, and some debugging."""
from armi import plugins
class BookkeepingPlugin(plugins.ArmiPlugin):
    """Plugin that wires bookkeeping interfaces, CLI entry points, and database
    case dependencies into the ARMI framework."""

    @staticmethod
    @plugins.HOOKIMPL
    def exposeInterfaces(cs):
        """Collect and return interface descriptions from all bookkeeping modules.

        Imports are local so interface modules are only loaded when the hook runs.
        """
        from armi.bookkeeping import (
            historyTracker,
            mainInterface,
            memoryProfiler,
            snapshotInterface,
        )
        from armi.bookkeeping.db import databaseInterface
        from armi.bookkeeping.report import reportInterface

        interfaceInfo = []
        # order matters: it determines the order of interface registration
        for module in (
            mainInterface,
            databaseInterface,
            historyTracker,
            memoryProfiler,
            reportInterface,
            snapshotInterface,
        ):
            interfaceInfo += plugins.collectInterfaceDescriptions(module, cs)
        return interfaceInfo

    @staticmethod
    @plugins.HOOKIMPL
    def defineEntryPoints():
        """Return the CLI entry points provided by bookkeeping (DB tools and visualization)."""
        from armi.bookkeeping import visualization
        from armi.cli import database

        return [
            database.ExtractInputs,
            database.InjectInputs,
            visualization.VisFileEntryPoint,
        ]

    @staticmethod
    @plugins.HOOKIMPL
    def defineCaseDependencies(case, suite):
        """Declare that a ``fromDB`` case depends on the case that produced its source DB.

        Returns None when the case does not load from a database.
        """
        if case.cs["loadStyle"] == "fromDB":
            # BUGFIX: the two named groups had lost their names ("(?P" with no
            # "<name>"), which is a syntactically invalid regex that raises
            # re.error when matched. Restore the group names expected by
            # Case.getPotentialParentFromSettingValue.
            # The ([^\/\\]) capture basically gets the file name portion and
            # excludes any directory separator.
            return case.getPotentialParentFromSettingValue(
                case.cs["reloadDBName"],
                r"^(?P<dirName>.*[\/\\])?(?P<title>[^\/\\]+?)(\.[hH]5)?$",
            )
        return None

    @staticmethod
    @plugins.HOOKIMPL
    def mpiActionRequiresReset(cmd) -> bool:
        """
        Prevent reactor resets after certain mpi actions.

        * Memory profiling is small enough that we don't want to reset
        * distributing state would be undone by this so we don't want that.

        See Also
        --------
        armi.operators.operatorMPI.OperatorMPI.workerOperate
        """
        from armi import mpiActions
        from armi.bookkeeping import memoryProfiler

        if isinstance(cmd, mpiActions.MpiAction):
            for donotReset in (
                mpiActions.DistributeStateAction,
                mpiActions.DistributionAction,
                memoryProfiler.PrintSystemMemoryUsageAction,
                memoryProfiler.ProfileMemoryUsageAction,
            ):
                if isinstance(cmd, donotReset):
                    return False
        return True
================================================
FILE: armi/bookkeeping/db/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The db package is responsible for reading and writing the state of the reactor to/from disk.
As an ARMI run progresses, this is periodically updated as the primary output file.
It can also be an input file for follow-on analysis or restart runs.
This module contains factories for selecting and building DB-related objects.
When updating a db version
--------------------------
The code associated with reading and writing database files may not benefit from Don't
Repeat Yourself (DRY) practices in the same way as other code. Therefore, do not share
code between different major versions of the databases. Create a new module if you are
creating a new major database version.
Database revision changelog
---------------------------
- 1: Originally, calculation results were stored in a SQL database.
- 2: The storage format was changed to HDF5. This required less external
infrastructure than SQL. However, the implementation did not store a complete
model of a reactor, but a ghost of assembly, block, and reactor parameters that
could be applied to an existing reactor model (so long as the dimensions were
consistent). This was inconvenient and error prone.
- 3: The HDF5 format was kept, but the schema was made more flexible to permit
storing the entire reactor model. All objects in the ARMI Composite Model are
written to the database, and the model can be completely recovered from just the
HDF5 file.
- 3.1: Improved the handling of reading/writing grids.
- 3.2: Changed the strategy for storing large attributes to using a special
string starting with an "@" symbol (e.g., "@/c00n00/attrs/5_linkedDims"). This
was done to support copying time node datasets from one file to another without
invalidating the references. Support was maintained for reading previous
versions, by performing a ``mergeHistory()`` and converting to the new naming
strategy, but the old version cannot be written.
- 3.3: Compressed the way locations are stored in the database and allow
MultiIndex locations to be read and written.
- 3.4: Modified the way locations are stored in the database to include complete
indices for indices that can be composed from multiple grids. Having complete
indices allows for more efficient means of extracting information based on
location, without having to compose the full model.
"""
import os
from armi import runLog
from armi.bookkeeping.db.compareDB3 import compareDatabases
# re-export package components for easier import
from armi.bookkeeping.db.database import Database
from armi.bookkeeping.db.databaseInterface import DatabaseInterface
from armi.bookkeeping.db.factory import databaseFactory
__all__ = [
"Database",
"DatabaseInterface",
"compareDatabases",
"databaseFactory",
]
def loadOperator(
    pathToDb,
    loadCycle,
    loadNode,
    statePointName=None,
    allowMissing=False,
    handleInvalids=True,
    callReactorConstructionHook=False,
):
    """
    Return an operator given the path to a database.

    Parameters
    ----------
    pathToDb : str
        The path of the database to load from.
    loadCycle : int
        The cycle to load the reactor state from.
    loadNode : int
        The time node to load the reactor from.
    statePointName: str
        State point name at the end, E.G. `EOC` or `EOL`.
        Full name would be C0N2EOC, see database.getH5GroupName
    allowMissing : bool
        Whether to emit a warning, rather than crash if reading a database
        with undefined parameters. Default False.
    handleInvalids : bool
        Whether to check for invalid settings. Default True.
    callReactorConstructionHook : bool
        Flag for whether the beforeReactorConstruction plugin hook should be
        executed. Default is False.

    See Also
    --------
    armi.operator.Operator.loadState:
        A method for loading reactor state that is useful if you already have an
        operator and a reactor object. loadOperator varies in that it supplies
        these given only a database file. loadState should be used if you are in
        the middle of an ARMI calculation and need load a different time step.

    Notes
    -----
    The operator will have a reactor attached that is loaded to the specified
    cycle and node. The operator will not be in the same state that it was at
    that cycle and node, only the reactor.

    Examples
    --------
    >>> o = db.loadOperator(r"pathToDatabase", 0, 1)
    >>> r = o.r
    >>> cs = o.cs
    >>> r.p.timeNode
    1
    >>> r.getFPMass()  # Note since it is loaded from step 1 there are fission products.
    12345.67
    """
    # `import armi` doesn't work if imported at top
    from armi import cases

    if not os.path.exists(pathToDb):
        raise ValueError(
            f"Specified database at path {pathToDb} does not exist. \n\n"
            "Double check that escape characters were correctly processed.\n"
            "Consider sending the full path, or change directory to be the directory "
            "of the database."
        )

    dbFile = Database(pathToDb, "r")
    with dbFile:
        # init Case here as it keeps track of execution time and assigns a reactor
        # attribute. This attribute includes the time it takes to initialize the reactor
        # so creating a reactor from the database should be included.
        cs = dbFile.loadCS(handleInvalids=handleInvalids)
        thisCase = cases.Case(cs)

        r = dbFile.load(
            loadCycle,
            loadNode,
            cs=cs,
            statePointName=statePointName,
            allowMissing=allowMissing,
            handleInvalids=handleInvalids,
            callReactorConstructionHook=callReactorConstructionHook,
        )

    o = thisCase.initializeOperator(r=r)
    runLog.important(
        "The operator will not be in the same state that it was at that cycle and "
        "node, only the reactor.\n"
        "The operator should have access to the same interface stack, but the "
        "interfaces will not be in the same state (they will be fresh instances "
        "of each interface as if __init__ was just called rather than the state "
        "during the run at this time node.)\n"
        "ARMI does not support loading operator states, as they are not stored."
    )
    return o
def _getH5File(db):
    """Return the underlying h5py File that provides the backing storage for a database.

    This is done here because HDF5 isn't an official aspect of the base Database
    abstraction, and thus making this part of the base Database class interface
    wouldn't be ideal. **However**, we violate this assumption when working with
    "auxiliary" data, which use HDF5 features directly. To be able to convert, we
    need to be able to access and copy these groups, so we need access to the
    HDF5 file under the hood. To avoid this, we would need to come up with our
    own formalization of what a storage-agnostic aux data concept looks like. We
    can tackle that if/when we decide that we want to start using protobufs or
    whatever.

    All this being said, we are probably violating this already with
    genAuxiliaryData, but we have to start somewhere.
    """
    # guard clause: anything that is not our Database cannot expose h5db
    if not isinstance(db, Database):
        raise TypeError("Unsupported Database type ({})!".format(type(db)))
    return db.h5db
================================================
FILE: armi/bookkeeping/db/compareDB3.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Use the generic database class to compare two ARMI databases.
This assumes some intimate knowledge about how the database is structured internally.
For instance, it knows that the database is composed of HDF5 data (the attrs of a
dataset are used, and h5py Groups are indexed), and it knows how special data is
structured within the HDF5 dataset and what the corresponding attributes are used for.
Some of this could be easily pulled up to the public interfaces of the Database class,
which may allow for cross-version database checking, but there is probably little value
in doing so if one is able to convert between versions.
Speaking of conversions, there are some common issues that may arise from comparing
against databases that were converted from an old version. The process of reading in the
old database values can sometimes lead to more parameters being written out to the new
database than were in the original database (set to the parameter's default value). That
means that one generally should not be worried about a converted database having more
parameters in it that the one produced directly may not, assuming that the extra
converted parameters are the default. Also, especially at the Component level, some of
the parameters are expected to be different. Specifically the following:
* temperatures: The old database format simply did not store these on the component
level, so when converting a database, the components in a block will uniformly get
whatever the Block temperature was.
* serial numbers: At all levels, we cannot really expect the serial numbers to line
up from object to object. These are not really supposed to be the same.
* volume: Component volumes also are not stored on the database, and come from
temperatures
* memory usage: Relatively self-evident. Resource usage will vary from run to run,
even if the code hasn't changed.
"""
import collections
import os
import re
import traceback
from typing import Optional, Pattern, Sequence, Tuple
import h5py
import numpy as np
from armi import runLog
from armi.bookkeeping.db import database
from armi.bookkeeping.db.database import Database
from armi.bookkeeping.db.factory import databaseFactory
from armi.bookkeeping.db.permissions import Permissions
from armi.reactor.composites import ArmiObject
from armi.utils.tabulate import tabulate
class OutputWriter:
    """Tee-style writer: each line goes both to runLog and to an output file.

    The file is opened on context entry and closed on exit; use as
    ``with OutputWriter(path) as out: out.writeln(...)``.
    """

    def __init__(self, fname):
        self.fname = fname  # destination path; opened lazily in __enter__
        self._stream = None  # file handle, only valid inside the context

    def __enter__(self):
        self._stream = open(self.fname, "w")
        return self

    def __exit__(self, *args):
        self._stream.close()

    def writeln(self, msg: str) -> None:
        """Log ``msg`` and append it, newline-terminated, to the output file."""
        runLog.info(msg)
        self._stream.write(msg + "\n")
class DiffResults:
    """Accumulator for differences between reference and source database data.

    Differences are recorded per "<componentType>/<paramName> <metric>" key,
    one list entry per time step; entries are ``None`` when a diff did not
    exceed the configured tolerance. Structural issues (missing datasets,
    incompatible dimensions, ...) are tallied separately per time step.

    The diff metric used throughout is a relative difference normalized by the
    mean of the two values, 2*(C-E)/(C+E), which stays finite when exactly one
    of the values is zero.
    """

    def __init__(self, tolerance):
        self._columns = []  # time-step names, one per addTimeStep call
        self._structureDiffs = []  # structural diff counts, parallel to _columns
        self.tolerance = tolerance
        # keys are "<type>/<param> <metric>"; values are per-time-step diffs,
        # with None wherever the diff stayed within tolerance
        self.diffs = collections.defaultdict(self._getDefault)

    def addDiff(self, compType: str, paramName: str, absMean: float, mean: float, absMax: float) -> None:
        """Record the three diff metrics, masking any that are within tolerance."""
        for metric, value, magnitude in (
            ("mean(abs(diff))", absMean, absMean),
            ("mean(diff)", mean, abs(mean)),
            ("max(abs(diff))", absMax, absMax),
        ):
            key = "{}/{} {}".format(compType, paramName, metric)
            self.diffs[key].append(value if magnitude > self.tolerance else None)

    def addStructureDiffs(self, nDiffs: int) -> None:
        """Accumulate structural differences onto the current time step."""
        if not self._structureDiffs:
            # no time step registered yet; open an implicit first slot
            self._structureDiffs = [0]
        self._structureDiffs[-1] += nDiffs

    def addTimeStep(self, tsName: str) -> None:
        """Open a new time-step column for subsequent diffs."""
        self._structureDiffs.append(0)
        self._columns.append(tsName)

    def _getDefault(self) -> list:
        # a brand-new key gets a None placeholder for each completed time step
        return [None] * (len(self._columns) - 1)

    def reportDiffs(self, stream: OutputWriter) -> None:
        """Print out a well-formatted table of the non-zero diffs."""
        nonEmpty = {key: vals for key, vals in self.diffs.items() if any(v is not None for v in vals)}
        stream.writeln(
            tabulate(
                [key.split() + vals for key, vals in sorted(nonEmpty.items())],
                headers=self._columns,
            )
        )

    def nDiffs(self) -> int:
        """Return the number of differences that exceeded the tolerance."""
        paramDiffs = sum(1 for vals in self.diffs.values() if any(v is not None for v in vals))
        return paramDiffs + sum(self._structureDiffs)
def compareDatabases(
    refFileName: str,
    srcFileName: str,
    exclusions: Optional[Sequence[str]] = None,
    tolerance: float = 0.0,
    timestepCompare: Optional[Sequence[Tuple[int, int]]] = None,
) -> Optional[DiffResults]:
    """High-level method to compare two ARMI H5 files, given file paths.

    Returns a populated DiffResults, or None if the two files' time-step
    structures differ and no explicit time-step list was requested.
    """
    compiledExclusions = [re.compile(ex) for ex in exclusions] if exclusions is not None else None

    outputName = "{}_vs_{}.txt".format(os.path.basename(refFileName), os.path.basename(srcFileName))
    diffResults = DiffResults(tolerance)

    with OutputWriter(outputName) as out:
        ref = databaseFactory(refFileName, Permissions.READ_ONLY_FME)
        src = databaseFactory(srcFileName, Permissions.READ_ONLY_FME)
        if not (isinstance(ref, Database) and isinstance(src, Database)):
            raise TypeError(
                "This database comparer only knows how to deal with database version 3; received {} and {}".format(
                    type(ref), type(src)
                )
            )

        with ref, src:
            if not timestepCompare:
                # without an explicit time-step list, the two files must have
                # identical time-step structure to be comparable at all
                _, nDiff = _compareH5Groups(out, ref, src, "timesteps")
                if nDiff > 0:
                    runLog.warning(
                        "{} and {} have differing timestep groups, and are "
                        "probably not safe to compare. This is likely due to one of "
                        "the cases having failed to complete.".format(ref, src)
                    )
                    return None

            for refGroup, srcGroup in zip(
                ref.genTimeStepGroups(timeSteps=timestepCompare),
                src.genTimeStepGroups(timeSteps=timestepCompare),
            ):
                runLog.info(
                    f"Comparing ref time step {refGroup.name.split('/')[1]} to src time "
                    f"step {srcGroup.name.split('/')[1]}"
                )
                diffResults.addTimeStep(refGroup.name)
                _compareTimeStep(out, refGroup, srcGroup, diffResults, exclusions=compiledExclusions)

        diffResults.reportDiffs(out)

    return diffResults
def _compareH5Groups(out: OutputWriter, ref: h5py.Group, src: h5py.Group, name: str) -> Tuple[Sequence[str], int]:
    """Compare the keys of two HDF5 groups.

    Returns the sorted shared keys and the count of keys present on only one side.
    """
    refKeys = set(ref.keys())
    srcKeys = set(src.keys())
    nDiffs = _compareSets(srcKeys, refKeys, out, name)
    return sorted(refKeys & srcKeys), nDiffs
def _compareTimeStep(
    out: OutputWriter,
    refGroup: h5py.Group,
    srcGroup: h5py.Group,
    diffResults: DiffResults,
    exclusions: Optional[Sequence[Pattern]] = None,
):
    """Compare all component parameter data and auxiliary data in one time-step group."""
    groupNames, structDiffs = _compareH5Groups(out, refGroup, srcGroup, "composite objects/auxiliary data")
    diffResults.addStructureDiffs(structDiffs)

    # partition shared names into ArmiObject component types vs. everything else;
    # "layout" describes model structure and is not auxiliary data
    componentTypes = {groupName for groupName in groupNames if groupName in ArmiObject.TYPES}
    auxData = set(groupNames) - componentTypes
    auxData.discard("layout")

    for componentType in componentTypes:
        _compareComponentData(
            out, refGroup[componentType], srcGroup[componentType], diffResults, exclusions=exclusions
        )

    for aux in auxData:
        _compareAuxData(out, refGroup[aux], srcGroup[aux], diffResults)
def _compareAuxData(
    out: OutputWriter,
    refGroup: h5py.Group,
    srcGroup: h5py.Group,
    diffResults: DiffResults,
):
    """
    Compare auxiliary datasets, which aren't stored as Parameters on the Composite model.

    Some parts of ARMI directly create HDF5 groups under the time step group to store
    arbitrary data. These still need to be compared. Missing datasets will be treated
    as structure differences and reported.
    """

    def collectDatasets(group):
        # gather every dataset beneath the group, keyed by its relative path
        found = dict()

        def visitor(name, obj):
            if isinstance(obj, h5py.Dataset):
                found[name] = obj

        group.visititems(visitor)
        return found

    refData = collectDatasets(refGroup)
    srcData = collectDatasets(srcGroup)

    nStructDiffs = _compareSets(set(srcData.keys()), set(refData.keys()), out, name="auxiliary dataset")
    diffResults.addStructureDiffs(nStructDiffs)

    for name in set(srcData.keys()) & set(refData.keys()):
        _diffSimpleData(refData[name], srcData[name], diffResults)
def _compareSets(src: set, ref: set, out: OutputWriter, name: Optional[str] = None) -> int:
nDiffs = 0
printName = "" if name is None else name + " "
if ref - src:
nDiffs += len(ref - src)
out.writeln("ref has {}not in src: {}".format(printName, list(ref - src)))
if src - ref:
nDiffs += len(src - ref)
out.writeln("src has {}not in ref: {}".format(printName, list(src - ref)))
return nDiffs
def _diffSpecialData(
    refData: h5py.Dataset,
    srcData: h5py.Dataset,
    out: OutputWriter,
    diffResults: DiffResults,
):
    """
    Compare specially-formatted datasets.

    This employs the pack/unpackSpecialData functions to reconstitute complicated
    datasets for comparison. These usually don't behave well as giant numpy arrays, so
    we go element-by-element to calculate the diffs, then concatenate them.
    """
    name = refData.name
    # the dataset path encodes ".../<componentType>/<paramName>"
    paramName = refData.name.split("/")[-1]
    compName = refData.name.split("/")[-2]

    # the special-formatting attribute *keys* must agree before anything else
    nDiffs = _compareSets(set(srcData.attrs.keys()), set(refData.attrs.keys()), out, "formatting data")
    keysMatch = nDiffs == 0
    diffResults.addStructureDiffs(nDiffs)

    if not keysMatch:
        # attribute keys disagree; record an infinite diff and bail out
        diffResults.addDiff(name, name, np.inf, np.inf, np.inf)
        return

    if srcData.attrs.get("dict", False):
        # dictionary-packed data has no meaningful numeric diff; skip it
        out.writeln(f"Not comparing {name} as it is a dictionary.")
        return

    # now compare the attribute *values*; a mismatch means the two datasets
    # cannot be unpacked the same way, so they are reported as infinitely different
    attrsMatch = True
    for k, srcAttr in srcData.attrs.items():
        refAttr = refData.attrs[k]
        if isinstance(srcAttr, np.ndarray) and isinstance(refAttr, np.ndarray):
            srcFlat = srcAttr.flatten()
            refFlat = refAttr.flatten()
            if len(srcFlat) != len(refFlat):
                same = False
            else:
                same = all(srcFlat == refFlat)
        else:
            same = srcAttr == refAttr

        if not same:
            attrsMatch = False
            out.writeln(
                "Special formatting parameters for {} do not match for {}. Src: {} Ref: {}".format(
                    name, k, srcData.attrs[k], refData.attrs[k]
                )
            )
            break

    if not attrsMatch:
        diffResults.addDiff(compName, paramName, np.inf, np.inf, np.inf)
        return

    try:
        src = database.unpackSpecialData(srcData[()], srcData.attrs, paramName)
        ref = database.unpackSpecialData(refData[()], refData.attrs, paramName)
    except Exception:
        # unpacking failures are logged but do not abort the whole comparison
        runLog.error(
            f"Unable to unpack special data for paramName {paramName}. {traceback.format_exc()}",
        )
        return

    # element-by-element relative diffs, accumulated and concatenated at the end
    diff = []
    for dSrc, dRef in zip(src.tolist(), ref.tolist()):
        if isinstance(dSrc, np.ndarray) and isinstance(dRef, np.ndarray):
            if dSrc.shape != dRef.shape:
                out.writeln("Shapes did not match for {}".format(refData))
                diffResults.addDiff(compName, paramName, np.inf, np.inf, np.inf)
                return

            if dSrc.dtype.type == np.bytes_ or dRef.dtype.type == np.bytes_:
                # data is byte strings; can't be diffed like numbers
                if np.array_equal(dSrc, dRef):
                    diffResults.addDiff(name, name, 0.0, 0.0, 0.0)
                else:
                    diffResults.addDiff(name, name, np.inf, np.inf, np.inf)
                return

            # Make sure not to try to compare empty arrays. Numpy is mediocre at these;
            # they are super degenerate and cannot participate in concatenation.
            if 0 not in dSrc.shape:
                # Use the mean of the two to calc relative error. This is more robust to
                # changes that cause one of the values to be zero, while the other is
                # non-zero, leading to infinite relative error
                dMean = (dSrc + dRef) / 2
                diff.append((dSrc - dRef) / dMean)
            continue

        if (dSrc is None) ^ (dRef is None):
            # exactly one side is None: irreconcilable difference
            out.writeln("Mismatched Nones for {} in {}".format(paramName, compName))
            diff.append([np.inf])
            continue

        if dSrc is None:
            # both None (the xor above rules out the one-sided case): no diff
            diff.append([0.0])
            continue

        try:
            # Use mean to avoid some infinities; see above
            dMean = (dSrc + dRef) / 2
            diff.append([(dSrc - dRef) / dMean])
        except ZeroDivisionError:
            # plain Python scalars raise on 0/0 (unlike numpy arrays)
            if dSrc == dRef:
                diff.append([0.0])
            else:
                diff.append([np.inf])

    if diff:
        try:
            diff = [np.array(d).flatten() for d in diff]
            diff = np.concatenate(diff)
        except ValueError as e:
            out.writeln("Failed to concatenate diff data for {} in {}: {}".format(paramName, compName, diff))
            out.writeln("Because: {}".format(e))
            return
        absDiff = np.abs(diff)
        mean = np.nanmean(diff)
        absMax = np.nanmax(absDiff)
        absMean = np.nanmean(absDiff)
        diffResults.addDiff(compName, paramName, absMean, mean, absMax)
def _diffSimpleData(ref: h5py.Dataset, src: h5py.Dataset, diffResults: DiffResults):
paramName = ref.name.split("/")[-1]
compName = ref.name.split("/")[-2]
try:
# use mean to avoid some unnecessary infinities
mean = (src[()] + ref[()]) / 2.0
diff = (src[()] - ref[()]) / mean
except TypeError:
# Strings are persnickety
if src.dtype.kind == ref.dtype.kind and src.dtype.kind in {"U", "S"}:
return
else:
runLog.error("Failed to compare {} in {}".format(paramName, compName))
runLog.error("source: {}".format(src))
runLog.error("reference: {}".format(ref))
diff = np.array([np.inf])
except ValueError:
runLog.error("Failed to compare {} in {}".format(paramName, compName))
runLog.error("source: {}".format(src))
runLog.error("reference: {}".format(ref))
diff = np.array([np.inf])
if 0 in diff.shape:
# Empty list, no diff
return
absDiff = np.abs(diff)
mean = np.nanmean(diff)
absMax = np.nanmax(absDiff)
absMean = np.nanmean(absDiff)
diffResults.addDiff(compName, paramName, absMean, mean, absMax)
def _compareComponentData(
    out: OutputWriter,
    refGroup: h5py.Group,
    srcGroup: h5py.Group,
    diffResults: DiffResults,
    exclusions: Optional[Sequence[Pattern]] = None,
):
    """Compare every parameter dataset of one component type, dispatching to the
    special- or simple-data differs as indicated by each dataset's attributes."""
    exclusions = exclusions or []
    compName = refGroup.name
    paramNames, nDiff = _compareH5Groups(out, refGroup, srcGroup, "{} parameters".format(compName))
    diffResults.addStructureDiffs(nDiff)

    for paramName in paramNames:
        fullName = "/".join((refGroup.name, paramName))
        if any(pattern.match(fullName) for pattern in exclusions):
            runLog.debug("Skipping comparison of {} since it is being ignored.".format(fullName))
            continue

        refDataset = refGroup[paramName]
        srcDataset = srcGroup[paramName]
        srcSpecial = srcDataset.attrs.get("specialFormatting", False)
        refSpecial = refDataset.attrs.get("specialFormatting", False)

        if srcSpecial != refSpecial:
            # one side is specially formatted and the other is not: incomparable
            out.writeln(
                "Could not compare data for parameter {} because one uses special "
                "formatting, and the other does not. Ref: {} Src: {}".format(paramName, refSpecial, srcSpecial)
            )
            diffResults.addDiff(refGroup.name, paramName, np.inf, np.inf, np.inf)
        elif refSpecial:
            # both specially formatted
            _diffSpecialData(refDataset, srcDataset, out, diffResults)
        else:
            _diffSimpleData(refDataset, srcDataset, diffResults)
================================================
FILE: armi/bookkeeping/db/database.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
ARMI Database implementation, version 3.4.
A reactor model should be fully recoverable from the database; all the way down to the component level. As a result, the
structure of the underlying data is bound to the hierarchical Composite Reactor Model. Furthermore, this database format
is intended to be more dynamic, permitting as-yet undeveloped levels and classes in the Composite Reactor Model to be
supported as they are added. More high-level discussion is contained in :ref:`database-file`.
The :py:class:`Database` class contains most of the functionality for interacting with the underlying data. This
includes things like dumping a Reactor state to the database and loading it back again, as well as extracting historical
data for a given object or collection of object from the database file. However, for the nitty-gritty details of how the
hierarchical Composite Reactor Model is translated to the flat file database, please refer to
:py:mod:`armi.bookkeeping.db.layout`.
Refer to :py:mod:`armi.bookkeeping.db` for information about versioning.
"""
import collections
import copy
import gc
import io
import itertools
import os
import pathlib
import re
import shutil
import subprocess
import sys
from platform import uname
from typing import (
Any,
Dict,
Generator,
List,
Optional,
Sequence,
Tuple,
Type,
Union,
)
import h5py
import numpy as np
from armi import context, getApp, getPluginManagerOrFail, meta, runLog, settings
from armi.bookkeeping.db.jaggedArray import JaggedArray
from armi.bookkeeping.db.layout import (
DB_VERSION,
LOC_COORD,
Layout,
replaceNonesWithNonsense,
replaceNonsenseWithNones,
)
from armi.bookkeeping.db.typedefs import Histories, History
from armi.physics.neutronics.settings import CONF_LOADING_FILE
from armi.reactor import grids, parameters
from armi.reactor.assemblies import Assembly
from armi.reactor.blocks import Block
from armi.reactor.components import Component
from armi.reactor.composites import ArmiObject
from armi.reactor.parameters import parameterCollections
from armi.reactor.reactorParameters import makeParametersReadOnly
from armi.reactor.reactors import Core, Reactor
from armi.settings.fwSettings.globalSettings import (
CONF_GROW_TO_FULL_CORE_AFTER_LOAD,
CONF_SORT_REACTOR,
)
from armi.utils import getNodesPerCycle, safeCopy, safeMove
from armi.utils.textProcessors import resolveMarkupInclusions
# CONSTANTS
_SERIALIZER_NAME = "serializerName"
_SERIALIZER_VERSION = "serializerVersion"
def getH5GroupName(cycle: int, timeNode: int, statePointName: str = None) -> str:
    """
    Build the canonical HDF5 group name for a simulation state point.

    ARMI groups simulation data by cycle and time node using the naming convention
    ``cXXnYY``, optionally followed by a statepoint label (e.g. ``c01n02EOL``).
    """
    suffix = statePointName if statePointName else ""
    return f"c{cycle:0>2}n{timeNode:0>2}{suffix}"
class Database:
"""
ARMI Database, handling serialization and loading of Reactor states.
This implementation of the database pushes all objects in the Composite Reactor Model into the database. This
process is aided by the ``Layout`` class, which handles the packing and unpacking of the structure of the objects,
their relationships, and their non-parameter attributes.
.. impl:: The database files are H5, and thus language agnostic.
:id: I_ARMI_DB_H51
:implements: R_ARMI_DB_H5
This class implements a light wrapper around H5 files, so they can be used to store ARMI outputs. H5 files are
commonly used in scientific applications in Fortran and C++. As such, they are entirely language agnostic binary
files. The implementation here is that ARMI wraps the ``h5py`` library, and uses its extensive tooling, instead
of re-inventing the wheel.
See Also
--------
`doc/user/outputs/database` for more details.
"""
# Allows matching for, e.g., c01n02EOL
timeNodeGroupPattern = re.compile(r"^c(\d\d)n(\d\d).*$")
def __init__(self, fileName: os.PathLike, permission: str = "r"):
    """
    Create a new Database object.

    Parameters
    ----------
    fileName:
        name of the file
    permission:
        file permissions, write ("w") or read ("r")
    """
    self._fileName = fileName
    # The full path is resolved in open(): write-mode files live in FAST_PATH
    # until they are closed, so it cannot be known yet.
    self._fullPath: Optional[str] = None
    self._permission = permission
    self.h5db: Optional[h5py.File] = None
    # Tracks nested context-manager entries so an already-open file is neither
    # reopened nor closed prematurely; lets open and closed databases be treated
    # uniformly inside ``with`` blocks.
    self._openCount: int = 0
    if permission != "w":
        # Version metadata will be read from the file when it is opened.
        self._version = None
        self._versionMajor = None
        self._versionMinor = None
    else:
        # New files are always written at the current schema version.
        self.version = DB_VERSION
@property
def version(self) -> str:
    """Database schema version string (e.g. "3.4"); None until set or read from file."""
    return self._version
@version.setter
def version(self, value: str):
self._version = value
self._versionMajor, self._versionMinor = (int(v) for v in value.split("."))
if self.versionMajor != 3:
raise ValueError(f"This version of ARMI only supports version 3 of the ARMI DB, found {self.versionMajor}.")
@property
def versionMajor(self):
    """Major component of the DB schema version (the "3" in "3.4")."""
    return self._versionMajor
@property
def versionMinor(self):
    """Minor component of the DB schema version (the "4" in "3.4")."""
    return self._versionMinor
def __repr__(self):
return "<{} {}>".format(self.__class__.__name__, repr(self.h5db).replace("<", "").replace(">", ""))
def open(self):
    """Open the HDF5 file backing this Database.

    Read ("r") and append ("a") modes open the file in place and read the schema
    version from its attributes. Write ("w") mode creates the file in the
    fast-path scratch directory and stamps it with version, system, and plugin
    metadata; the file is moved to its final location in close().

    Raises
    ------
    ValueError
        If the database is already open, or the permission string is unrecognized.
    """
    if self.h5db is not None:
        raise ValueError("This database is already open; make sure to close it before trying to open it again.")
    filePath = self._fileName
    self._openCount += 1
    if self._permission in {"r", "a"}:
        self._fullPath = os.path.abspath(filePath)
        self.h5db = h5py.File(filePath, self._permission)
        # existing files carry their schema version; the property setter validates it
        self.version = self.h5db.attrs["databaseVersion"]
        return
    if self._permission == "w":
        # assume fast path!
        filePath = os.path.join(context.getFastPath(), filePath)
        self._fullPath = os.path.abspath(filePath)
    else:
        runLog.error(f"Unrecognized file permissions `{self._permission}`")
        raise ValueError(f"Cannot open database with permission `{self._permission}`")
    # open the database, and write a bunch of metadata to it
    runLog.info("Opening database file at {}".format(os.path.abspath(filePath)))
    self.h5db = h5py.File(filePath, self._permission)
    # flipped to True in close() only when the run finishes successfully
    self.h5db.attrs["successfulCompletion"] = False
    self.h5db.attrs["version"] = meta.__version__
    self.h5db.attrs["databaseVersion"] = self.version
    self.writeSystemAttributes(self.h5db)
    # store app and plugin data
    app = getApp()
    self.h5db.attrs["appName"] = app.name
    plugins = app.pluginManager.list_name_plugin()
    ps = [(os.path.abspath(sys.modules[p[1].__module__].__file__), p[1].__name__) for p in plugins]
    # h5py attrs require fixed-width byte strings, so encode as "S"
    ps = np.array([str(p[0]) + ":" + str(p[1]) for p in ps]).astype("S")
    self.h5db.attrs["pluginPaths"] = ps
    self.h5db.attrs["localCommitHash"] = Database.grabLocalCommitHash()
def isOpen(self):
return self.h5db is not None
@staticmethod
def writeSystemAttributes(h5db):
    """Write system attributes to the database.

    .. impl:: Add system attributes to the database.
        :id: I_ARMI_DB_QA
        :implements: R_ARMI_DB_QA

        This method writes some basic system information to the H5 file, so users can see
        information about the system their simulations were run on. As ARMI is used on
        Windows and Linux, the tooling here has to be platform independent. The two major
        sources of information are the ARMI :py:mod:`context <armi.context>` module and
        the Python standard library ``platform``.
    """
    h5db.attrs["user"] = context.USER
    h5db.attrs["python"] = sys.version
    h5db.attrs["armiLocation"] = os.path.dirname(context.ROOT)
    h5db.attrs["startTime"] = context.START_TIME
    # h5py attrs require byte strings for string arrays
    h5db.attrs["machines"] = np.array(context.MPI_NODENAMES).astype("S")
    # store platform data
    sysInfo = uname()
    for attrName, attrValue in (
        ("platform", sysInfo.system),
        ("hostname", sysInfo.node),
        ("platformRelease", sysInfo.release),
        ("platformVersion", sysInfo.version),
        ("platformArch", sysInfo.processor),
    ):
        h5db.attrs[attrName] = attrValue
@staticmethod
def grabLocalCommitHash():
"""
Try to determine the local Git commit.
We have to be sure to handle the errors where the code is run on a system that doesn't have Git installed. Or if
the code is simply not run from inside a repo.
Returns
-------
str
The commit hash if it exists, otherwise "unknown".
"""
unknown = "unknown"
if not shutil.which("git"):
# no git available. cannot check git info
return unknown
repo_exists = (
subprocess.run(
"git rev-parse --git-dir".split(),
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
).returncode
== 0
and subprocess.run(
["git", "describe"],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
).returncode
== 0
)
if repo_exists:
try:
commit_hash = subprocess.check_output(["git", "describe"])
return commit_hash.decode("utf-8").strip()
except Exception:
return unknown
else:
return unknown
def close(self, completedSuccessfully=False):
"""Close the DB and perform cleanups and auto-conversions."""
self._openCount = 0
if self.h5db is None:
return
if self._permission == "w":
self.h5db.attrs["successfulCompletion"] = completedSuccessfully
# a bit redundant to call flush, but with unreliable IO issues, why not?
self.h5db.flush()
self.h5db.close()
self.h5db = None
if self._permission == "w":
# move out of the FAST_PATH and into the working directory
newPath = safeMove(self._fullPath, self._fileName)
self._fullPath = os.path.abspath(newPath)
def splitDatabase(self, keepTimeSteps: Sequence[Tuple[int, int]], label: str) -> str:
    """
    Discard all data except for specific time steps, retaining old data in a separate file.

    This is useful when performing more exotic analyses, where each "time step" may not represent a specific point
    in time, but something more nuanced. For example, equilibrium cases store a new "cycle" for each iteration as it
    attempts to converge the equilibrium cycle. At the end of the run, the last "cycle" is the converged equilibrium
    cycle, whereas the previous cycles constitute the path to convergence, which we typically wish to discard before
    further analysis.

    Parameters
    ----------
    keepTimeSteps
        A collection of the time steps to retain
    label
        An informative label for the backed-up database. Usually something like "-all-iterations". Will be
        interposed between the source name and the ".h5" extension.

    Returns
    -------
    str
        The name of the new, backed-up database file.
    """
    if self.h5db is None:
        raise ValueError("There is no open database to split.")
    self.h5db.close()
    # e.g. "case.h5" with label "-all-iterations" -> "case-all-iterations.h5"
    backupDBPath = os.path.abspath(label.join(os.path.splitext(self._fileName)))
    runLog.info(f"Retaining full database history in {backupDBPath}")
    if self._fullPath is not None:
        safeMove(self._fullPath, backupDBPath)
    # reopen the working file at the original path; the full history now lives in the backup
    self.h5db = h5py.File(self._fullPath, self._permission)
    dbOut = self.h5db
    with h5py.File(backupDBPath, "r") as dbIn:
        dbOut.attrs.update(dbIn.attrs)
        # Copy everything except time node data
        timeSteps = set()
        for groupName, _ in dbIn.items():
            m = self.timeNodeGroupPattern.match(groupName)
            if m:
                timeSteps.add((int(m.group(1)), int(m.group(2))))
            else:
                # non-time-node groups (e.g. inputs) are copied wholesale
                dbIn.copy(groupName, dbOut)
        if not set(keepTimeSteps).issubset(timeSteps):
            raise ValueError(f"Not all desired time steps ({keepTimeSteps}) are even present in the database")
        # renumber retained cycles so the earliest kept cycle becomes cycle 0
        minCycle = next(iter(sorted(keepTimeSteps)))[0]
        for cycle, node in keepTimeSteps:
            offsetCycle = cycle - minCycle
            offsetGroupName = getH5GroupName(offsetCycle, node)
            dbIn.copy(getH5GroupName(cycle, node), dbOut, name=offsetGroupName)
            # keep the stored cycle parameter consistent with the renumbered group name
            dbOut[offsetGroupName + "/Reactor/cycle"][()] = offsetCycle
    return backupDBPath
@property
def fileName(self):
    """Name of the file backing this database (as passed to the constructor)."""
    return self._fileName
@fileName.setter
def fileName(self, fName):
if self.h5db is not None:
raise RuntimeError("Cannot change Database file name while it's opened!")
self._fileName = fName
def loadCS(self, handleInvalids=True):
    """Attempt to load settings from the database file.

    Parameters
    ----------
    handleInvalids : bool
        Whether to check for invalid settings. Default True.

    Notes
    -----
    There are no guarantees here. If the database was written from a different version of ARMI than you are using,
    these results may not be usable. Or if the database was written using a custom Application you do not have
    access to, the DB may not be usable.
    """
    loadedSettings = settings.Settings()
    loadedSettings.path = self.fileName
    settingsYaml = self.h5db["inputs/settings"].asstr()[()]
    loadedSettings.loadFromString(settingsYaml, handleInvalids=handleInvalids)
    return loadedSettings
def loadBlueprints(self, cs=None):
    """Attempt to load reactor blueprints from the database file.

    Notes
    -----
    There are no guarantees here. If the database was written from a different version of ARMI than you are using,
    these results may not be usable. Or if the database was written using a custom Application you do not have
    access to, the DB may not be usable.
    """
    # Blueprints use the yamlize package, which uses class attributes to define much of the class's behavior through
    # metaclassing. Therefore, we need to be able to import all plugins before importing blueprints.
    from armi.reactor.blueprints import Blueprints

    try:
        bpString = self.h5db["inputs/blueprints"].asstr()[()]
    except KeyError:
        # not all reactors need to be created from blueprints, so they may not exist
        bpString = None
    else:
        # Need to update the blueprint file to be the database so that its not pointing at a source that doesn't
        # exist anymore (the original blueprints yaml).
        if cs:
            # Update the settings to point at where the file was actually read from
            cs[CONF_LOADING_FILE] = os.path.basename(self.fileName)
    if not bpString:
        # looks like no blueprints contents
        return None
    migrated = Blueprints.migrate(io.StringIO(bpString))
    return Blueprints.load(migrated)
def writeInputsToDB(self, cs, csString=None, bpString=None):
    """
    Write inputs into the database based on the Settings.

    This is not DRY on purpose. The goal is that any particular Database implementation should be very stable, so we
    don't want it to be easy to change one Database implementation's behavior when trying to change another's.

    .. impl:: The run settings are saved to the settings file.
        :id: I_ARMI_DB_CS
        :implements: R_ARMI_DB_CS

        A ``Settings`` object is passed into this method, and then the settings are converted into a YAML string
        stream. That stream is then written to the H5 file. Optionally, this method can take a pre-built settings
        string to be written directly to the file.

    .. impl:: The reactor blueprints are saved to the settings file.
        :id: I_ARMI_DB_BP
        :implements: R_ARMI_DB_BP

        A ``Blueprints`` string is optionally passed into this method, and then written to the H5 file. If it is not
        passed in, this method will attempt to find the blueprints input file in the settings, and read the contents
        of that file into a stream to be written to the H5 file.

    Notes
    -----
    This is hard-coded to read the entire file contents into memory and write that directly into the database. We
    could have the cs/blueprints/geom write to a string, however the ARMI log file contains a hash of each files'
    contents. In the future, we should be able to reproduce a calculation with confidence that the inputs are
    identical.
    """
    caseTitle = cs.caseTitle if cs is not None else os.path.splitext(self.fileName)[0]
    self.h5db.attrs["caseTitle"] = caseTitle
    if csString is None:
        # Don't read file; use what's in the cs now. Sometimes settings are modified in tests.
        stream = io.StringIO()
        cs.writeToYamlStream(stream)
        stream.seek(0)
        csString = stream.read()
    if bpString is None:
        bpPath = pathlib.Path(cs.inputDirectory) / cs[CONF_LOADING_FILE]
        if bpPath.suffix.lower() in (".h5", ".hdf5"):
            # The blueprints are in a database file; read them out of it. Use a context manager
            # so the source file handle is not leaked (it was previously never closed).
            with h5py.File(bpPath, "r") as db:
                try:
                    bpString = db["inputs/blueprints"].asstr()[()]
                except KeyError:
                    # not all reactors need to be created from blueprints, so they may not exist
                    bpString = ""
        else:
            # The blueprints are a standard blueprints yaml that can be read.
            if bpPath.exists() and bpPath.is_file():
                # Only store blueprints if we actually loaded from them. Ensure that the input as stored in the DB
                # is complete
                bpString = resolveMarkupInclusions(bpPath).read()
            else:
                bpString = ""
    self.h5db["inputs/settings"] = csString
    self.h5db["inputs/blueprints"] = bpString
def readInputsFromDB(self):
return (
self.h5db["inputs/settings"].asstr()[()],
self.h5db["inputs/blueprints"].asstr()[()],
)
def mergeHistory(self, inputDB, startCycle, startNode):
"""
Copy time step data up to, but not including the passed cycle and node.
Notes
-----
This is used for restart runs with the standard operator for example. The current time step (being loaded from)
should not be copied, as that time steps data will be written at the end of the time step.
"""
if self.versionMajor != 3:
raise ValueError(f"Only version 3 of the ARMI DB is supported, found {self.versionMajor}.")
elif inputDB.versionMajor != 3:
raise ValueError(f"Only version 3 of the ARMI DB is supported, found {inputDB.versionMajor}.")
# iterate over the top level H5Groups and copy
for time, h5ts in zip(inputDB.genTimeSteps(), inputDB.genTimeStepGroups()):
cyc, tn = time
if cyc == startCycle and tn == startNode:
# all data up to current state are merged
return
self.h5db.copy(h5ts, h5ts.name)
def __enter__(self):
"""Context management support."""
if self._openCount == 0:
# open also increments _openCount
self.open()
else:
self._openCount += 1
return self
def __exit__(self, type, value, traceback):
"""Typically we don't care why it broke but we want the DB to close."""
self._openCount -= 1
# always close if there is a traceback.
if self._openCount == 0 or traceback:
self.close(all(i is None for i in (type, value, traceback)))
def __del__(self):
if self.h5db is not None:
self.close(False)
def __delitem__(self, tn: Tuple[int, int, Optional[str]]):
    """Delete the H5 group keyed by a (cycle, timeNode, statePointName) tuple."""
    cycle, timeNode, statePointName = tn
    groupName = getH5GroupName(cycle, timeNode, statePointName)
    if self.h5db is not None:
        del self.h5db[groupName]
def genTimeStepGroups(
self, timeSteps: Sequence[Tuple[int, int]] = None
) -> Generator[h5py._hl.group.Group, None, None]:
"""Returns a generator of HDF5 Groups for all time nodes, or for the passed selection."""
assert self.h5db is not None, "Must open the database before calling genTimeStepGroups"
if timeSteps is None:
for groupName, h5TimeNodeGroup in sorted(self.h5db.items()):
match = self.timeNodeGroupPattern.match(groupName)
if match:
yield h5TimeNodeGroup
else:
for step in timeSteps:
yield self.h5db[getH5GroupName(*step)]
def getLayout(self, cycle, node):
    """Return a Layout object representing the requested cycle and time node."""
    dbVersion = (self._versionMajor, self._versionMinor)
    return Layout(dbVersion, self.h5db[getH5GroupName(cycle, node)])
def genTimeSteps(self) -> Generator[Tuple[int, int], None, None]:
"""Returns a generator of (cycle, node) tuples that are present in the DB."""
assert self.h5db is not None, "Must open the database before calling genTimeSteps"
for groupName in sorted(self.h5db.keys()):
match = self.timeNodeGroupPattern.match(groupName)
if match:
cycle = int(match.groups()[0])
node = int(match.groups()[1])
yield (cycle, node)
def genAuxiliaryData(self, ts: Tuple[int, int]) -> Generator[str, None, None]:
    """Returns a generator of names of auxiliary data on the requested time point."""
    assert self.h5db is not None, "Must open the database before calling genAuxiliaryData"
    groupName = getH5GroupName(*ts)
    timeGroup = self.h5db[groupName]
    # composite-type groups and the layout group are primary data, not auxiliary
    exclude = set(ArmiObject.TYPES.keys()) | {"layout"}
    return (groupName + "/" + key for key in timeGroup.keys() if key not in exclude)
@staticmethod
def getAuxiliaryDataPath(ts: Tuple[int, int], name: str) -> str:
    """Build the full H5 path for named auxiliary data at time step ``ts``."""
    return "{}/{}".format(getH5GroupName(*ts), name)
def keys(self):
return (g.name for g in self.genTimeStepGroups())
def getH5Group(self, r, statePointName=None):
    """
    Get the H5Group for the current ARMI timestep.

    This method can be used to allow other interfaces to place data into the database at the correct timestep.
    """
    groupName = getH5GroupName(r.p.cycle, r.p.timeNode, statePointName)
    try:
        return self.h5db[groupName]
    except KeyError:
        # first visit to this state point: create and label the group
        group = self.h5db.create_group(groupName, track_order=True)
        group.attrs["cycle"] = r.p.cycle
        group.attrs["timeNode"] = r.p.timeNode
        return group
def hasTimeStep(self, cycle, timeNode, statePointName=""):
    """Returns True if (cycle, timeNode, statePointName) is contained in the database."""
    groupName = getH5GroupName(cycle, timeNode, statePointName)
    return groupName in self.h5db
def writeToDB(self, reactor, statePointName=None):
    """Write the reactor's layout and parameters to the current time-node group."""
    assert self.h5db is not None, "Database must be open before writing."
    # _createLayout is recursive
    h5group = self.getH5Group(reactor, statePointName)
    runLog.info(f"Writing to database for statepoint: {h5group.name}")
    layout = Layout((self.versionMajor, self.versionMinor), comp=reactor)
    layout.writeToDB(h5group)
    for comps in layout.groupedComps.values():
        self._writeParams(h5group, comps)
def syncToSharedFolder(self):
    """
    Copy DB to run working directory.

    Needed when multiple MPI processes need to read the same db, for example when a history is needed from
    independent runs (e.g. for fuel performance on a variety of assemblies).

    Notes
    -----
    At some future point, we may implement a client-server like DB system which would render this kind of operation
    unnecessary.
    """
    runLog.extra("Copying DB to shared working directory.")
    self.h5db.flush()
    # Close the h5 file so it can be copied
    self.h5db.close()
    self.h5db = None
    safeCopy(self._fullPath, self._fileName)
    # Garbage collect so we don't have multiple databases hanging around in memory
    gc.collect()
    # Reload the file in append mode and continue on our merry way
    self.h5db = h5py.File(self._fullPath, "r+")
def load(
    self,
    cycle,
    node,
    cs=None,
    bp=None,
    statePointName=None,
    allowMissing=False,
    handleInvalids=True,
    callReactorConstructionHook=False,
):
    """Load a new reactor from a DB at (cycle, node).

    Case settings and blueprints can be provided, or read from the database. Providing can be useful for snapshot
    runs or when you want to change settings mid-simulation. Geometry is read from the database.

    .. impl:: Users can load a reactor from a DB.
        :id: I_ARMI_DB_TIME1
        :implements: R_ARMI_DB_TIME

        This method creates a ``Reactor`` object by reading the reactor state out of an ARMI database file. This is
        done by passing in mandatory arguments that specify the exact place in time you want to load the reactor
        from. (That is, the cycle and node numbers.) Users can either pass the settings and blueprints directly into
        this method, or it will attempt to read them from the database file. The primary work done here is to read
        the hierarchy of reactor objects from the data file, then reconstruct them in the correct order.

    Parameters
    ----------
    cycle : int
        Cycle number
    node : int
        Time node. If value is negative, will be indexed from EOC backwards like a list.
    cs : armi.settings.Settings, optional
        If not provided one is read from the database
    bp : armi.reactor.Blueprints, optional
        If not provided one is read from the database
    statePointName : str, optional
        Statepoint name (e.g., "special" for "c00n00-special/")
    allowMissing : bool, optional
        Whether to emit a warning, rather than crash if reading a database
        with undefined parameters. Default False.
    handleInvalids : bool
        Whether to check for invalid settings. Default True.
    callReactorConstructionHook : bool
        Flag for whether the beforeReactorConstruction plugin hook should be executed. Default is False.

    Returns
    -------
    root : Reactor
        The top-level object stored in the database; a Reactor.
    """
    runLog.info(f"Loading reactor state for time node ({cycle}, {node})")
    if cs is None:
        cs = self.loadCS(handleInvalids=handleInvalids)
    if bp is None:
        bp = self.loadBlueprints(cs)
    if callReactorConstructionHook:
        getPluginManagerOrFail().hook.beforeReactorConstruction(cs=cs)
    if node < 0:
        # negative node indexes from the end of the cycle, list-style
        numNodes = getNodesPerCycle(cs)[cycle]
        if (node + numNodes) < 0:
            raise ValueError(f"Node {node} specified does not exist for cycle {cycle}")
        node = numNodes + node
    h5group = self.h5db[getH5GroupName(cycle, node, statePointName)]
    layout = Layout((self.versionMajor, self.versionMinor), h5group=h5group)
    comps, groupedComps = layout._initComps(cs.caseTitle, bp)
    # populate data onto initialized components
    for compType, compTypeList in groupedComps.items():
        self._readParams(h5group, compType, compTypeList, allowMissing=allowMissing)
    # assign params from blueprints
    if bp is not None:
        self._assignBlueprintsParams(bp, groupedComps)
    # stitch together
    self._compose(iter(comps), cs)
    # also, make sure to update the global serial number so we don't reuse a number
    parameterCollections.GLOBAL_SERIAL_NUM = max(parameterCollections.GLOBAL_SERIAL_NUM, layout.serialNum.max())
    # the first entry of the flat composite list is the top-level object (the Reactor)
    root = comps[0][0]
    # return a Reactor object
    if cs[CONF_SORT_REACTOR]:
        root.sort()
    else:
        runLog.warning(
            "DeprecationWarning: This Reactor is not being sorted on DB load. Due to the setting "
            f"{CONF_SORT_REACTOR}, this Reactor is unsorted. But this feature is temporary and will be removed by "
            "2024."
        )
    if cs[CONF_GROW_TO_FULL_CORE_AFTER_LOAD] and not root.core.isFullCore:
        root.core.growToFullCore(cs)
    return root
def loadReadOnly(self, cycle, node, statePointName=None):
    """Load a new reactor, in read-only mode from a DB at (cycle, node).

    Parameters
    ----------
    cycle : int
        Cycle number
    node : int
        Time node. If value is negative, will be indexed from EOC backwards like a list.
    statePointName : str, optional
        Statepoint name (e.g., "special" for "c00n00-special/")

    Returns
    -------
    Reactor
        The top-level object stored in the database; a Reactor.
    """
    reactor = self.load(cycle, node, statePointName=statePointName, allowMissing=True)
    # derived params (e.g. component volume) must be populated before the freeze
    self._setParamsBeforeFreezing(reactor)
    makeParametersReadOnly(reactor)
    return reactor
@staticmethod
def _setParamsBeforeFreezing(r: Reactor):
    """Set some special case parameters before they are made read-only."""
    componentsOnly = lambda c: isinstance(c, Component)
    for component in r.iterChildren(deep=True, predicate=componentsOnly):
        # calling Component.getVolume() sets the volume parameter
        component.getVolume()
@staticmethod
def _assignBlueprintsParams(blueprints, groupedComps):
    """Push parameters in the ``assignInBlueprints`` category from design objects onto composites."""
    typeToDesigns = (
        (Block, blueprints.blockDesigns),
        (Assembly, blueprints.assemDesigns),
    )
    for compType, designs in typeToDesigns:
        paramNames = {pDef.name for pDef in compType.pDefs.inCategory(parameters.Category.assignInBlueprints)}
        for comp in groupedComps[compType]:
            design = designs[comp.p.type]
            for paramName in paramNames:
                designValue = getattr(design, paramName)
                if designValue is not None:
                    comp.p[paramName] = designValue
def _compose(self, comps, cs, parent=None):
    """Given a flat collection of all of the ArmiObjects in the model, reconstitute the hierarchy.

    Parameters
    ----------
    comps : iterator
        Pre-order iterator of (comp, <unused>, numChildren, location, locationType) tuples;
        each call consumes one entry, then recurses to consume its children's entries.
    cs : Settings
        Case settings, forwarded to Core.processLoading.
    parent : ArmiObject, optional
        The parent to attach the newly composed object to.
    """
    comp, _, numChildren, location, locationType = next(comps)
    # attach the parent early, if provided; some cases need the parent attached for the rest of _compose to work
    # properly.
    comp.parent = parent
    # The Reactor adds a Core child by default, this is not ideal
    for spontaneousChild in list(comp):
        comp.remove(spontaneousChild)
    if isinstance(comp, Core):
        pass
    elif isinstance(comp, Assembly):
        # Assemblies force their name to be something based on assemNum. When the assembly is created it gets a new
        # assemNum, and throws out the correct name read from the DB.
        comp.name = comp.makeNameFromAssemNum(comp.p.assemNum)
        comp.lastLocationLabel = Assembly.DATABASE
    # set the spatialLocators on each component
    if location is not None:
        if parent is not None and parent.spatialGrid is not None:
            if locationType != LOC_COORD:
                # We can directly index into the spatial grid for IndexLocation and MultiIndexLocators to get
                # equivalent spatial locators
                comp.spatialLocator = parent.spatialGrid[location]
            else:
                comp.spatialLocator = grids.CoordinateLocation(
                    location[0], location[1], location[2], parent.spatialGrid
                )
        else:
            # no parent grid to hang off of; store absolute coordinates with no grid
            comp.spatialLocator = grids.CoordinateLocation(location[0], location[1], location[2], None)
    # Need to keep a collection of Component instances for linked dimension resolution, before they can be add()ed
    # to their parents. Not just filtering out of `children`, since resolveLinkedDims() needs a dict
    childComponents = collections.OrderedDict()
    children = []
    for _ in range(numChildren):
        # the recursive call consumes exactly this child's subtree from the iterator
        child = self._compose(comps, cs, parent=comp)
        children.append(child)
        if isinstance(child, Component):
            childComponents[child.name] = child
    for _childName, child in childComponents.items():
        child.resolveLinkedDims(childComponents)
    for child in children:
        comp.add(child)
    # type-specific post-load fixups
    if isinstance(comp, Core):
        comp.processLoading(cs, dbLoad=True)
    elif isinstance(comp, Assembly):
        comp.calculateZCoords()
    elif isinstance(comp, Component):
        comp.finalizeLoadingFromDB()
    return comp
@staticmethod
def _getArrayShape(arr: Union[np.ndarray, List, Tuple]):
"""Get the shape of a np.ndarray, list, or tuple."""
if isinstance(arr, np.ndarray):
return arr.shape
elif isinstance(arr, (list, tuple)):
return (len(arr),)
else:
# not a list, tuple, or array (likely int, float, or None)
return 1
def _writeParams(self, h5group, comps) -> None:
    """Write the DB-bound parameters of a group of composites as datasets.

    One dataset per parameter is created under a subgroup named after the composite
    class; each dataset holds that parameter's value for every composite in ``comps``,
    in order. (Note: the previous ``-> tuple`` annotation was wrong; nothing is returned.)

    Parameters
    ----------
    h5group
        The time-node H5 group to write into.
    comps : list
        Composites to write; presumably all of one type (they come from
        ``layout.groupedComps``) — the subgroup name is taken from ``comps[0]``.
    """
    c = comps[0]
    groupName = c.__class__.__name__
    if groupName not in h5group:
        # Only create the group if it doesn't already exist. This happens when re-writing params in the same time
        # node (e.g. something changed between EveryNode and EOC).
        g = h5group.create_group(groupName, track_order=True)
    else:
        g = h5group[groupName]
    for paramDef in c.p.paramDefs.toWriteToDB():
        attrs = {}
        if hasattr(c, "DIMENSION_NAMES") and paramDef.name in c.DIMENSION_NAMES:
            # Dimension params may be links to another component's dimension (stored as a
            # tuple); record the link as "name.dimension" and store the resolved value.
            # NOTE: this loop rebinds `c`; the isinstance check at the bottom then sees the
            # last comp, which is harmless if all comps share a type.
            linkedDims = []
            data = []
            for _, c in enumerate(comps):
                val = c.p[paramDef.name]
                if isinstance(val, tuple):
                    linkedDims.append("{}.{}".format(val[0].name, val[1]))
                    data.append(val[0].getDimension(val[1]))
                else:
                    linkedDims.append("")
                    data.append(val)
            data = np.array(data)
            if any(linkedDims):
                attrs["linkedDims"] = np.array(linkedDims).astype("S")
        else:
            # NOTE: after loading, the previously unset values will be defaulted
            temp = [c.p.get(paramDef.name, paramDef.default) for c in comps]
            if paramDef.serializer is not None:
                # custom serializer: record its name/version so reads can unpack symmetrically
                data, sAttrs = paramDef.serializer.pack(temp)
                assert data.dtype.kind != "O", "{} failed to convert {} to a numpy-supported type.".format(
                    paramDef.serializer.__name__, paramDef.name
                )
                attrs.update(sAttrs)
                attrs[_SERIALIZER_NAME] = paramDef.serializer.__name__
                attrs[_SERIALIZER_VERSION] = paramDef.serializer.version
            else:
                # check if temp is a jagged array
                if any(isinstance(x, (np.ndarray, list)) for x in temp):
                    jagged = len(set([self._getArrayShape(x) for x in temp])) != 1
                else:
                    jagged = False
                data = JaggedArray(temp, paramDef.name) if jagged else np.array(temp)
                del temp
        # - Check to see if the array is jagged. If so, flatten, store the data offsets and array shapes, and None
        #   locations as attrs.
        # - If not jagged, all top-level ndarrays are the same shape, so it is easier to replace Nones with ndarrays
        #   filled with special values.
        if isinstance(data, JaggedArray):
            data, specialAttrs = packSpecialData(data, paramDef.name)
            attrs.update(specialAttrs)
        else:  # np.ndarray
            # Convert Unicode to byte-string
            if data.dtype.kind == "U":
                data = data.astype("S")
            if data.dtype.kind == "O":
                # Something was added to the data array that caused np to want to treat it as a general-purpose
                # Object array. This usually happens because:
                # - the data contain NoDefaults
                # - the data contain one or more Nones,
                # - the data contain special types like tuples, dicts, etc
                # - there is some sort of honest-to-goodness weird object
                # We want to support the first two cases with minimal intrusion, since these should be pretty easy
                # to faithfully represent in the db. The last case isn't really worth supporting.
                if parameters.NoDefault in data:
                    # fully unset parameter: skip writing it entirely (see `data is None` below)
                    data = None
                else:
                    data, specialAttrs = packSpecialData(data, paramDef.name)
                    attrs.update(specialAttrs)
        if data is None:
            continue
        try:
            if paramDef.name in g:
                raise ValueError(f"`{paramDef.name}` was already in `{g}`. This time node should have been empty")
            dataset = g.create_dataset(paramDef.name, data=data, compression="gzip", track_order=True)
            if any(attrs):
                Database._writeAttrs(dataset, h5group, attrs)
        except Exception:
            runLog.error(f"Failed to write {paramDef.name} to database. Data: {data}")
            raise
    if isinstance(c, Block):
        self._addHomogenizedNumberDensityParams(comps, g)
@staticmethod
def _addHomogenizedNumberDensityParams(blocks, h5group):
    """
    Create on-the-fly block homog. number density params for XTVIEW viewing.

    See Also
    --------
    collectBlockNumberDensities
    """
    for nucName, numDens in collectBlockNumberDensities(blocks).items():
        h5group.create_dataset(nucName, data=numDens, compression="gzip", track_order=True)
@staticmethod
def _readParams(h5group, compTypeName, comps, allowMissing=False):
    """
    Read every parameter dataset for one component type and assign values onto ``comps``.

    Parameters
    ----------
    h5group : h5py.Group
        The time-node group containing one subgroup per component type.
    compTypeName : str
        Name of the component-type subgroup to read (e.g. ``"Block"``).
    comps : list of ArmiObject
        Composites, in layout order, that the stored values map onto one-to-one.
    allowMissing : bool
        If True, warn (instead of raising) when a database parameter is not defined
        in the running application.
    """
    g = h5group[compTypeName]
    renames = getApp().getParamRenames()
    pDefs = comps[0].pDefs
    # this can also be made faster by specializing the method by type
    for paramName, dataSet in g.items():
        # Honor historical databases where the parameters may have changed names since.
        while paramName in renames:
            paramName = renames[paramName]
        try:
            pDef = pDefs[paramName]
        except KeyError:
            if re.match(r"^n[A-Z][a-z]?\d*", paramName):
                # This is a temporary viz param (number density) made by
                # _addHomogenizedNumberDensityParams; ignore it safely.
                continue
            else:
                # If a parameter exists in the database but not in the application reading it, we can technically
                # keep going. Since this may lead to potential correctness issues, raise a warning
                if allowMissing:
                    runLog.warning(
                        "Found `{}` parameter `{}` in the database, which is not defined. Ignoring it.".format(
                            compTypeName, paramName
                        )
                    )
                    continue
                else:
                    raise
        data = dataSet[:]
        attrs = Database._resolveAttrs(dataSet.attrs, h5group)
        if pDef.serializer is not None:
            # Round-trip through the custom serializer: confirm the stored data was written
            # by the same serializer (and record its version) before unpacking.
            assert _SERIALIZER_NAME in dataSet.attrs
            assert dataSet.attrs[_SERIALIZER_NAME] == pDef.serializer.__name__
            assert _SERIALIZER_VERSION in dataSet.attrs
            data = np.array(pDef.serializer.unpack(data, dataSet.attrs[_SERIALIZER_VERSION], attrs))
        # nuclides are a special case where we want to keep in np.bytes_ format
        if data.dtype.type is np.bytes_ and "nuclides" not in paramName.lower():
            data = np.char.decode(data)
        if attrs.get("specialFormatting", False):
            # undo the packing applied by packSpecialData (dicts, jagged arrays, Nones)
            data = unpackSpecialData(data, attrs, paramName)
        linkedDims = []
        if "linkedDims" in attrs:
            linkedDims = np.char.decode(attrs["linkedDims"])
        unpackedData = data.tolist()
        if len(comps) != len(unpackedData):
            msg = (
                "While unpacking special data for {}, encountered composites and parameter "
                "data with unmatched sizes.\nLength of composites list = {}\nLength of data "
                "list = {}\nThis could indicate an error in data unpacking, which could "
                "result in faulty data on the resulting reactor model.".format(
                    paramName, len(comps), len(unpackedData)
                )
            )
            runLog.error(msg)
            raise ValueError(msg)
        if paramName == "numberDensities" and attrs.get("dict", False):
            # migrate old-style (<= v0.5.1) dict number densities into the two-array format
            Database._applyComponentNumberDensitiesMigration(comps, unpackedData)
        else:
            # iterating of np is not fast...
            for c, val, linkedDim in itertools.zip_longest(comps, unpackedData, linkedDims, fillvalue=""):
                try:
                    if linkedDim != "":
                        c.p[paramName] = linkedDim
                    else:
                        c.p[paramName] = val
                except AssertionError as ae:
                    # happens when a param was deprecated but being loaded from old DB
                    runLog.warning(
                        f"{str(ae)}\nSkipping load of invalid param `{paramName}` (possibly loading from old DB)\n"
                    )
def getHistoryByLocation(
    self,
    comp: ArmiObject,
    params: Optional[List[str]] = None,
    timeSteps: Optional[Sequence[Tuple[int, int]]] = None,
) -> History:
    """Get the parameter histories at the location of a single component."""
    histories = self.getHistoriesByLocation([comp], params=params, timeSteps=timeSteps)
    return histories[comp]
def getHistoriesByLocation(
    self,
    comps: Sequence[ArmiObject],
    params: Optional[List[str]] = None,
    timeSteps: Optional[Sequence[Tuple[int, int]]] = None,
) -> Histories:
    """
    Get the parameter histories at specific locations.

    This has a number of limitations, which should in practice not be too limiting:

    - The passed objects must have IndexLocations. This type of operation doesn't make much sense otherwise.
    - The passed objects must exist in a hierarchy that leads to a Core object, which serves as an anchor that can
      fully define all index locations. This could possibly be made more general by extending grids, but that gets
      a little more complicated.
    - All requested objects must exist under the **same** anchor object, and at the same depth below it.
    - All requested objects must have the same type.

    Parameters
    ----------
    comps : list of ArmiObject
        The components/composites that currently occupy the location that you want histories at. ArmiObjects are
        passed, rather than locations, because this makes it easier to figure out things related to layout.
    params : List of str, optional
        The parameter names for the parameters that we want the history of. If None, all parameter history is given.
    timeSteps : List of (cycle, node) tuples, optional
        The time nodes that you want history for. If None, all available time nodes will be returned.

    Raises
    ------
    ValueError
        If the database version is unsupported, or the passed objects do not share a
        single Core anchor at a single depth.
    TypeError
        If the passed objects are not all the same type.
    """
    # Location-based lookup relies on layout data only present in DB >= 3.4.
    if self.versionMajor != 3:
        raise ValueError(f"This version of ARMI only supports version 3 of the ARMI DB, found {self.versionMajor}.")
    elif self.versionMinor < 4:
        raise ValueError(
            "Location-based histories are only supported for db version 3.4 and greater. This database is version "
            f"{self.versionMajor}, {self.versionMinor}."
        )

    locations = [c.spatialLocator.getCompleteIndices() for c in comps]
    histData: Histories = {c: collections.defaultdict(collections.OrderedDict) for c in comps}

    # Check our assumptions about the passed locations: All locations must have the same parent and bear the same
    # relationship to the anchor object.
    anchors = {obj.getAncestorAndDistance(lambda a: isinstance(a, Core)) for obj in comps}
    if len(anchors) != 1:
        raise ValueError(
            "The passed objects do not have the same anchor or distance to that anchor; encountered the following: "
            f"{anchors}"
        )
    anchorInfo = anchors.pop()
    if anchorInfo is not None:
        anchor, anchorDistance = anchorInfo
    else:
        raise ValueError("Could not determine an anchor object for the passed components")
    anchorSerialNum = anchor.p.serialNum

    # All objects of the same type
    objectTypes = {type(obj) for obj in comps}
    if len(objectTypes) != 1:
        raise TypeError(f"The passed objects must be the same type; got objects of types `{objectTypes}`")
    compType = objectTypes.pop()
    objClassName = compType.__name__

    locToComp = {c.spatialLocator.getCompleteIndices(): c for c in comps}

    for h5TimeNodeGroup in self.genTimeStepGroups(timeSteps):
        if "layout" not in h5TimeNodeGroup:
            # Layout hasn't been written for this time step, so we can't get anything useful here. Perhaps the
            # current value is of use, in which case the DatabaseInterface should be used.
            continue
        cycle = h5TimeNodeGroup.attrs["cycle"]
        timeNode = h5TimeNodeGroup.attrs["timeNode"]
        layout = Layout((self.versionMajor, self.versionMinor), h5group=h5TimeNodeGroup)
        ancestors = layout.computeAncestors(layout.serialNum, layout.numChildren, depth=anchorDistance)
        lLocation = layout.location

        # filter for objects that live under the desired ancestor and at a desired location
        objectIndicesInLayout = np.array(
            [
                i
                for i, (ancestor, loc) in enumerate(zip(ancestors, lLocation))
                if ancestor == anchorSerialNum and loc in locations
            ]
        )

        # This could also be way more efficient if lLocation were a numpy array
        objectLocationsInLayout = [lLocation[i] for i in objectIndicesInLayout]
        objectIndicesInData = np.array(layout.indexInData)[objectIndicesInLayout].tolist()

        try:
            h5GroupForType = h5TimeNodeGroup[objClassName]
        except KeyError as ee:
            runLog.error(f"{objClassName} not found in {h5TimeNodeGroup} of {self}")
            raise ee

        for paramName in params or h5GroupForType.keys():
            if paramName == "location":
                # location is special, since it is stored in layout/
                data = np.array(layout.location)[objectIndicesInLayout]
            elif paramName in h5GroupForType:
                dataSet = h5GroupForType[paramName]
                try:
                    data = dataSet[objectIndicesInData]
                except Exception:
                    # narrowed from a bare except; log the failing index and re-raise
                    runLog.error(f"Failed to load index {objectIndicesInData} from {dataSet}@{(cycle, timeNode)}")
                    raise

                if data.dtype.type is np.bytes_:
                    data = np.char.decode(data)

                if dataSet.attrs.get("specialFormatting", False):
                    if dataSet.attrs.get("nones", False):
                        data = replaceNonsenseWithNones(data, paramName)
                    else:
                        raise ValueError(
                            "History tracking for non-None, special-formatted parameters is not supported: "
                            "{}, {}".format(paramName, {k: v for k, v in dataSet.attrs.items()})
                        )
            else:
                # Nothing in the database for this param, so use the default value
                data = np.repeat(
                    parameters.byNameAndType(paramName, compType).default,
                    len(comps),
                )

            # store data to the appropriate comps. This is where taking components as the argument (rather than
            # locations) is a little bit peculiar.
            #
            # At this point, `data` are arranged by the order of elements in `objectIndicesInData`, which
            # corresponds to the order of `objectIndicesInLayout`
            for loc, val in zip(objectLocationsInLayout, data.tolist()):
                comp = locToComp[loc]
                histData[comp][paramName][cycle, timeNode] = val
    return histData
def getHistory(
    self,
    comp: ArmiObject,
    params: Optional[Sequence[str]] = None,
    timeSteps: Optional[Sequence[Tuple[int, int]]] = None,
) -> History:
    """
    Get parameter history for a single ARMI Object.

    Thin convenience wrapper around :py:meth:`getHistories` for one object.

    Parameters
    ----------
    comp : ArmiObject
        An individual ArmiObject.
    params : sequence of str, optional
        Parameters to gather. If None, all parameters found in the database are gathered.
    timeSteps : sequence of (cycle, node) tuples, optional
        Time nodes to get data for. If omitted, the full history is returned.

    Returns
    -------
    dict
        Dictionary of str/list pairs.
    """
    return self.getHistories([comp], params, timeSteps)[comp]
def getHistories(
    self,
    comps: Sequence[ArmiObject],
    params: Optional[Sequence[str]] = None,
    timeSteps: Optional[Sequence[Tuple[int, int]]] = None,
) -> Histories:
    """
    Get the parameter histories for a sequence of ARMI Objects.

    This implementation is unaware of the state of the reactor outside of the database itself, and is therefore not
    usually what client code should be calling directly during normal ARMI operation. It only knows about historical
    data that have actually been written to the database. Usually one wants to be able to get historical, plus
    current data, for which the similar method on the DatabaseInterface may be more useful.

    Parameters
    ----------
    comps
        Something that is iterable multiple times
    params
        parameters to gather.
    timeSteps
        Selection of time nodes to get data for. If omitted, return full history

    Returns
    -------
    dict
        Dictionary ArmiObject (input): dict of str/list pairs containing ((cycle, node), value).
    """
    histData: Histories = {c: collections.defaultdict(collections.OrderedDict) for c in comps}
    types = {c.__class__ for c in comps}
    compsByTypeThenSerialNum: Dict[Type[ArmiObject], Dict[int, ArmiObject]] = {t: dict() for t in types}

    for c in comps:
        compsByTypeThenSerialNum[c.__class__][c.p.serialNum] = c

    for h5TimeNodeGroup in self.genTimeStepGroups(timeSteps):
        if "layout" not in h5TimeNodeGroup:
            # Layout hasn't been written for this time step, so whatever is in there didn't come from the
            # DatabaseInterface. Probably because it's the current time step and something has created the group to
            # store aux data
            continue

        # might save as int or np.int64, so forcing int keeps things predictable
        cycle = int(h5TimeNodeGroup.attrs["cycle"])
        timeNode = int(h5TimeNodeGroup.attrs["timeNode"])
        layout = Layout((self.versionMajor, self.versionMinor), h5group=h5TimeNodeGroup)

        for compType, compsBySerialNum in compsByTypeThenSerialNum.items():
            compTypeName = compType.__name__
            try:
                h5GroupForType = h5TimeNodeGroup[compTypeName]
            except KeyError as ee:
                runLog.error("{} not found in {} of {}".format(compTypeName, h5TimeNodeGroup, self))
                raise ee

            layoutIndicesForType = np.where(layout.type == compTypeName)[0]
            serialNumsForType = layout.serialNum[layoutIndicesForType].tolist()
            layoutIndexInData = layout.indexInData[layoutIndicesForType].tolist()

            # keep only the layout entries that match one of the requested components
            indexInData = []
            reorderedComps = []
            for ii, sn in zip(layoutIndexInData, serialNumsForType):
                d = compsBySerialNum.get(sn, None)
                if d is not None:
                    indexInData.append(ii)
                    reorderedComps.append(d)
            if not indexInData:
                continue

            # note this is very similar to _readParams but there are some important differences.
            # 1) we are not assigning to p[paramName]
            # 2) not using linkedDims at all
            # 3) not performing parameter renaming. This may become necessary
            for paramName in params or h5GroupForType.keys():
                if paramName == "location":
                    # location is special, since it is stored in layout/
                    # (renamed loop variable; original shadowed the builtin `id`)
                    locs = [layout.location[layoutIndicesForType[dataIdx]] for dataIdx in indexInData]
                    data = np.array(locs)
                elif paramName in h5GroupForType:
                    dataSet = h5GroupForType[paramName]
                    try:
                        data = dataSet[indexInData]
                    except Exception:
                        # narrowed from a bare except; log the failing index and re-raise
                        runLog.error(
                            "Failed to load index {} from {}@{}".format(indexInData, dataSet, (cycle, timeNode))
                        )
                        raise

                    if data.dtype.type is np.bytes_:
                        data = np.char.decode(data)

                    if dataSet.attrs.get("specialFormatting", False):
                        if dataSet.attrs.get("nones", False):
                            data = replaceNonsenseWithNones(data, paramName)
                        else:
                            raise ValueError(
                                "History tracking for non-none special formatting not supported: {}, {}".format(
                                    paramName,
                                    {k: v for k, v in dataSet.attrs.items()},
                                )
                            )
                else:
                    # Nothing in the database, so use the default value
                    data = np.repeat(
                        parameters.byNameAndType(paramName, compType).default,
                        len(reorderedComps),
                    )

                # iterating of np is not fast..
                for c, val in zip(reorderedComps, data.tolist()):
                    if paramName == "location":
                        val = tuple(val)
                    elif isinstance(val, list):
                        val = np.array(val)
                    histData[c][paramName][cycle, timeNode] = val

    # fill in the current time node from the in-memory reactor state if the DB lacked it
    r = comps[0].getAncestor(lambda c: isinstance(c, Reactor))
    cycleNode = r.p.cycle, r.p.timeNode
    for c, paramHistories in histData.items():
        for paramName, hist in paramHistories.items():
            if cycleNode not in hist:
                try:
                    hist[cycleNode] = c.p[paramName]
                except Exception:
                    if paramName == "location":
                        hist[cycleNode] = tuple(c.spatialLocator.indices)
    return histData
@staticmethod
def _writeAttrs(obj, group, attrs):
    """
    Handle safely writing attributes to a dataset, handling large data if necessary.

    This will attempt to store attributes directly onto an HDF5 object if possible, falling back to proper datasets
    and reference attributes if necessary. This is needed because HDF5 tries to fit attributes into the object
    header, which has limited space. If an attribute is too large, h5py raises a RuntimeError. In such cases, this
    will store the attribute data in a proper dataset and place a reference to that dataset in the attribute
    instead.

    In practice, this takes ``linkedDims`` attrs from a particular component type (like ``c00n00/Circle/id``) and
    stores them in new datasets (like ``c00n00/attrs/1_linkedDims``, ``c00n00/attrs/2_linkedDims``) and then sets
    the object's attrs to links to those datasets.
    """
    for key, value in attrs.items():
        try:
            obj.attrs[key] = value
        except RuntimeError as err:
            # only handle the header-overflow failure; anything else is a real error
            if "object header message is too large" not in err.args[0]:
                raise

            # fixed grammar in log message ("it's" -> "its")
            runLog.info(f"Storing attribute `{key}` for `{obj}` into its own dataset within `{group}/attrs`")

            if "attrs" not in group:
                attrGroup = group.create_group("attrs")
            else:
                attrGroup = group["attrs"]
            dataName = str(len(attrGroup)) + "_" + key
            attrGroup[dataName] = value

            # using a soft link here allows us to cheaply copy time nodes without needing to crawl through and
            # update object references.
            linkName = attrGroup[dataName].name
            obj.attrs[key] = "@{}".format(linkName)
@staticmethod
def _resolveAttrs(attrs, group):
    """
    Reverse the action of _writeAttrs.

    Walks the stored attributes; any that are HDF5 references or ``@``-prefixed soft
    links are dereferenced, pulling the real data out of the dataset they point to.
    """
    linkPattern = re.compile("^@(.*)$")
    resolved = {}
    for key, val in attrs.items():
        try:
            if isinstance(val, h5py.h5r.Reference):
                # Old style object reference. If this cannot be dereferenced, it is likely because mergeHistory was
                # used to get the current database, which does not preserve references.
                resolved[key] = group[val]
                continue
            if isinstance(val, str):
                match = linkPattern.match(val)
                if match:
                    # dereference the path to get the data out of the dataset.
                    resolved[key] = group[match.group(1)][()]
                    continue
            # plain value: keep as-is
            resolved[key] = val
        except ValueError:
            runLog.error(f"HDF error loading {key} : {val}\nGroup: {group}")
            raise
    return resolved
@staticmethod
def _applyComponentNumberDensitiesMigration(comps, unpackedData):
    """
    Special migration from <= v0.5.1 component numberDensities parameter data type.

    old format: dict[str: float]
    new format: two numpy arrays

    - nuclides = np.array(dtype="S6")
    - numberDensities = np.array(dtype=np.float64)
    """
    for comp, densityMap in zip(comps, unpackedData):
        # split the old dict into parallel nuclide-name / density arrays
        comp.p.nuclides = np.array(list(densityMap.keys()), dtype="S6")
        comp.p.numberDensities = np.array(list(densityMap.values()), dtype=np.float64)
@staticmethod
def getCycleNodeAtTime(dbPath, startTime, endTime, errorIfNotExactlyOne=True):
    """Given the path to an ARMI database file and a start and end time (in years), return the full set of all time
    nodes that correspond to that time period in the database.

    Parameters
    ----------
    dbPath : str
        File path to an ARMI database.
    startTime : int
        In years, start of the desired interval.
    endTime : int
        In years, end of the desired interval.
    errorIfNotExactlyOne : boolean
        Raise an error if more than one cycle/node combination is returned. Default is True.

    Returns
    -------
    list of strings
        A list of strings to the desired time interval, e.g.: ["c01n08", "c14n18EOL"]
    """
    # basic sanity checks
    assert startTime >= 0.0, f"The start time cannot be negative: {startTime}."
    assert endTime >= startTime, f"The end time ({endTime}) is not greater than the start time ({startTime})."

    # open the H5 file directly
    with h5py.File(dbPath, "r") as h5:
        # walk the time-node groups in file order, collecting those inside the interval
        thisTime = 0.0
        cycleNodes = []
        for h5Key in h5.keys():
            if h5Key == "inputs":
                continue
            thisTime = h5[h5Key]["Reactor"]["time"][0]
            # endTime >= startTime, so a node at/after endTime is also collected, then we stop
            if thisTime >= startTime:
                cycleNodes.append(h5Key)
            if thisTime >= endTime:
                break

    # more validation
    if not cycleNodes:
        raise ValueError(f"Provided start time ({startTime}) was greater than the modeled period: {thisTime}.")
    elif errorIfNotExactlyOne and len(cycleNodes) != 1:
        raise ValueError(f"Did not find exactly one cycle/node pair: {cycleNodes}")

    return cycleNodes
def packSpecialData(
    arrayData: "Union[np.ndarray, JaggedArray]", paramName: str
) -> Tuple[Optional[np.ndarray], Dict[str, Any]]:
    """
    Reduce data that wouldn't otherwise play nicely with HDF5/numpy arrays to a format that will.

    This is the main entry point for conforming "strange" data into something that will both fit
    into a numpy array/HDF5 dataset, and be recoverable to its original-ish state when reading it
    back in. This is accomplished by detecting a handful of known offenders and using various HDF5
    attributes to store necessary auxiliary data. It is important to keep in mind that the data that
    is passed in has already been converted to a numpy array, so the top dimension is always
    representing the collection of composites that are storing the parameters. For instance, if we
    are dealing with a Block parameter, the first index in the numpy array of data is the block
    index; so if each block has a parameter that is a dictionary, ``data`` would be a ndarray,
    where each element is a dictionary. This routine supports a number of different things:

    * Dict[str, float]: These are stored by finding the set of all keys for all instances, and
      storing those keys as a list in an attribute. The data themselves are stored as arrays indexed
      by object, then key index. Dictionaries lacking data for a key store a nan in its place. This
      will work well in instances where most objects have data for most keys.
    * Jagged arrays: These are stored by concatenating all of the data into a single, one-
      dimensional array, and storing attributes to describe the shapes of each object's data, and an
      offset into the beginning of each object's data.
    * Arrays with ``None`` in them: These are stored by replacing each instance of ``None`` with a
      magical value that shouldn't be encountered in realistic scenarios.

    Parameters
    ----------
    arrayData
        An ndarray or JaggedArray object storing the data that we want to stuff into the database.
        If the data is jagged, a special JaggedArray instance is passed in, which contains a 1D
        array with offsets and shapes.
    paramName
        The parameter name that we are trying to store. This is mostly used for diagnostics.

    Returns
    -------
    tuple
        The conformed ndarray (or None if everything was None), and the dict of HDF5
        attributes needed to reverse the packing.

    See Also
    --------
    unpackSpecialData
    """
    if isinstance(arrayData, JaggedArray):
        # the JaggedArray has already flattened itself; store its 1-D backing array
        data = arrayData.flattenedArray
    else:
        # Check to make sure that we even need to do this. If the numpy data type is not "O",
        # chances are we have nice, clean data.
        if arrayData.dtype != "O":
            return arrayData, {}
        else:
            data = arrayData

    attrs: Dict[str, Any] = {"specialFormatting": True}

    # make a copy of the data, so that the original is unchanged
    data = copy.copy(data)

    # Find locations of Nones.
    nones = np.where([d is None for d in data])[0]

    if len(nones) == data.shape[0]:
        # Everything is None, so why bother?
        return None, attrs

    if len(nones) > 0:
        attrs["nones"] = True

    # Pack different types of data
    if any(isinstance(d, dict) for d in data):
        # We're assuming that a dict is {str: float}.
        attrs["dict"] = True
        keys = sorted({k for d in data for k in d})
        data = np.array([[d.get(k, np.nan) for k in keys] for d in data])
        if data.dtype == "O":
            raise TypeError(f"Unable to coerce dictionary data into usable numpy array for {paramName}")
        # We store the union of all of the keys for all of the objects as a special "keys"
        # attribute, and store a value for all of those keys for all objects, whether or not there
        # is actually data associated with that key
        attrs["keys"] = np.array(keys).astype("S")
        return data, attrs
    elif isinstance(arrayData, JaggedArray):
        # record the bookkeeping needed to rebuild the jagged structure on read
        attrs["jagged"] = True
        attrs["offsets"] = arrayData.offsets
        attrs["shapes"] = arrayData.shapes
        attrs["noneLocations"] = arrayData.nones
        return data, attrs

    # conform non-numpy arrays to numpy
    for i, val in enumerate(data):
        if isinstance(val, (list, tuple)):
            data[i] = np.array(val)

    if not any(isinstance(d, np.ndarray) for d in data):
        # looks like 1-D plain-old-data
        data = replaceNonesWithNonsense(data, paramName, nones)
        return data, attrs
    elif any(isinstance(d, (tuple, list, np.ndarray)) for d in data):
        data = replaceNonesWithNonsense(data, paramName, nones)
        return data, attrs

    if len(nones) == 0:
        raise TypeError(f"Cannot write {paramName} to the database, it did not resolve to a numpy/HDF5 type.")

    runLog.error(f"Data unable to find special none value: {data}")
    raise TypeError(f"Failed to process special data for {paramName}")
def unpackSpecialData(data: np.ndarray, attrs, paramName: str) -> np.ndarray:
    """
    Extract data from a specially-formatted HDF5 dataset into a numpy array.

    This should invert the operations performed by :py:func:`packSpecialData`.

    Parameters
    ----------
    data
        Specially-formatted data array straight from the database.
    attrs
        The attributes associated with the dataset that contained the data.
    paramName
        The name of the parameter that is being unpacked. Only used for diagnostics.

    Returns
    -------
    np.ndarray
        An ndarray containing the closest possible representation of the data that was originally written to the
        database.

    See Also
    --------
    packSpecialData
    """
    if not attrs.get("specialFormatting", False):
        # Nothing special was done on write; hand the array straight back.
        assert data.dtype != "O"
        return data

    # Order matters: plain None-substitution first, then jagged, then dict.
    if attrs.get("nones", False) and not attrs.get("jagged", False):
        return replaceNonsenseWithNones(data, paramName)

    if attrs.get("jagged", False):
        # rebuild the jagged structure from the flat array + bookkeeping attrs
        return JaggedArray.fromH5(
            data,
            attrs["offsets"],
            attrs["shapes"],
            attrs["noneLocations"],
            data.dtype,
            paramName,
        )

    if attrs.get("dict", False):
        keys = np.char.decode(attrs["keys"])
        assert data.ndim == 2
        # NaN marks "key absent for this object"; drop those entries when rebuilding dicts
        rebuilt = [{key: value for key, value in zip(keys, row) if not np.isnan(value)} for row in data]
        return np.array(rebuilt)

    raise ValueError(
        "Do not recognize the type of special formatting that was applied to {}. Attrs: {}".format(
            paramName, {k: v for k, v in attrs.items()}
        )
    )
def collectBlockNumberDensities(blocks) -> Dict[str, np.ndarray]:
    """
    Collect block-by-block homogenized number densities for each nuclide.

    Homogenize the component-level to the block level. These are written to the database and useful for visualization.
    """
    # grab the first available NuclideBases collection from the blocks (None if absent everywhere)
    nuclideBases = next((b.nuclideBases for b in blocks if b.nuclideBases is not None), None)
    if not nuclideBases:
        return {}

    nucNames = sorted({nucName for b in blocks for nucName in b.getNuclides()})
    nucBases = [nuclideBases.byName[nn] for nn in nucNames]

    # It's faster to loop over blocks first and get all number densities from each than it is to get one nuclide at a
    # time from each block because of area fraction calculations. So we use some RAM here instead.
    nucDensityMatrix = np.array([b.getNuclideNumberDensities(nucNames) for b in blocks])

    # each column is a vector of one nuclide's densities across all blocks
    return {nb.getDatabaseName(): nucDensityMatrix[:, ni] for ni, nb in enumerate(nucBases)}
================================================
FILE: armi/bookkeeping/db/databaseInterface.py
================================================
# Copyright 2022 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The database interface provides a way to save the reactor state to a file, throughout
a simulation.
"""
import copy
import os
import pathlib
import time
from typing import (
MutableSequence,
Optional,
Sequence,
Tuple,
)
from armi import context, interfaces, runLog
from armi.bookkeeping.db.database import Database, getH5GroupName
from armi.bookkeeping.db.typedefs import Histories, History
from armi.reactor.composites import ArmiObject
from armi.reactor.parameters import parameterDefinitions
from armi.settings.fwSettings.databaseSettings import (
CONF_FORCE_DB_PARAMS,
CONF_SYNC_AFTER_WRITE,
)
from armi.utils import getPreviousTimeNode, getStepLengths
ORDER = interfaces.STACK_ORDER.BOOKKEEPING


def describeInterfaces(cs):
    """Expose the DatabaseInterface to the framework, enabled per the ``db`` setting."""
    enabled = cs["db"]
    return (DatabaseInterface, {"enabled": enabled})
class DatabaseInterface(interfaces.Interface):
"""
Handles interactions between the ARMI data model and the persistent data storage
system.
This reads/writes the ARMI state to/from the database and helps derive state
information that can be derived.
"""
name = "database"
def __init__(self, r, cs):
    """Set up empty DB state; optionally force user-listed parameters to be saved to the DB."""
    interfaces.Interface.__init__(self, r, cs)
    self._db = None
    self._dbPath: Optional[pathlib.Path] = None

    if cs[CONF_FORCE_DB_PARAMS]:
        # Gather every parameter definition matching each forced name, then flip saveToDB on for all of them.
        toSet = {paramName: set() for paramName in cs[CONF_FORCE_DB_PARAMS]}
        for (name, _), pDef in parameterDefinitions.ALL_DEFINITIONS.items():
            if name in toSet:
                toSet[name].add(pDef)

        for name, pDefs in toSet.items():
            runLog.info("Forcing parameter {} to be written to the database, per user input".format(name))
            for pDef in pDefs:
                pDef.saveToDB = True
def __repr__(self):
    """Short debug representation including the wrapped database object."""
    parts = (self.__class__.__name__, self.name, repr(self._db))
    return "<{} '{}' {} >".format(*parts)
@property
def database(self):
    """Present the internal database object, if it exists."""
    if self._db is None:
        # no DB yet; it is created by interactBOL/initDB or loadState
        raise RuntimeError(
            "The Database interface has not yet created a database "
            "object. InteractBOL or loadState must be called first."
        )
    return self._db
def interactBOL(self):
    """Initialize the database if the main interface was not available. (Beginning of Life)."""
    if self._db:
        # a database already exists (e.g. created by the Main Interface); nothing to do
        return
    self.initDB()
def initDB(self, fName: Optional[os.PathLike] = None):
    """
    Open the underlying database to be written to, and write input files to DB.

    Notes
    -----
    Main Interface calls this so that the database is available as early as possible in the run.
    The database interface interacts near the end of the interface stack (so that all the
    parameters have been updated) while the Main Interface interacts first.
    """
    # default the output path to "<caseTitle>.h5" when no explicit name is given
    self._dbPath = pathlib.Path(fName) if fName is not None else pathlib.Path(self.cs.caseTitle + ".h5")

    if self.cs["reloadDBName"].lower() == str(self._dbPath).lower():
        raise ValueError(
            "It appears that reloadDBName is the same as the case title. "
            "This could lead to data loss! Rename the reload DB or the case."
        )

    self._db = Database(self._dbPath, "w")
    self._db.open()
    self._db.writeInputsToDB(self.cs)
def interactEveryNode(self, cycle, node):
    """
    Write to database.

    DBs should receive the state information of the run at each node.

    Notes
    -----
    - If tight coupling is enabled, the DB will be written in ``Operator::_timeNodeLoop`` via
      writeDBEveryNode, because h5 can't handle overwriting a node written mid-iteration.
    """
    if not self.o.cs["tightCoupling"]:
        self.writeDBEveryNode()
def writeDBEveryNode(self):
    """Write the database at the end of the time node."""
    # keep the wall-clock bookkeeping parameter up to date before persisting
    elapsedMinutes = (time.time() - self.r.core.timeOfStart) / 60.0
    self.r.core.p.minutesSinceStart = elapsedMinutes
    self._db.writeToDB(self.r)
    if self.cs[CONF_SYNC_AFTER_WRITE]:
        self._db.syncToSharedFolder()
def interactEOC(self, cycle=None):
    """
    Deliberately skip writing at end-of-cycle; this state tends not to be important since it is only a decay step.

    Notes
    -----
    The same time is available at start of next cycle.
    """
    return None
def interactEOL(self):
    """Write the final state and close the DB at run's end. (End of Life)."""
    # minutesSinceStart should include as much of the ARMI run as possible, so update it at EOL too
    self.r.core.p.minutesSinceStart = (time.time() - self.r.core.timeOfStart) / 60.0
    self._db.writeToDB(self.r, "EOL")
    self.closeDB()
def closeDB(self):
    """Close the DB, flushing its contents to file."""
    self._db.close(True)
def interactError(self):
    """Capture shutdown state information even if the run encounters an error."""
    try:
        self.r.core.p.minutesSinceStart = (time.time() - self.r.core.timeOfStart) / 60.0
        # this can result in a double-error if the error occurred in the database
        # writing
        self._db.writeToDB(self.r, "error")
        self._db.close(False)
    except Exception:
        # deliberately best-effort: we're already responding to an error
        pass
def interactDistributeState(self) -> None:
    """
    Reconnect to pre-existing database.

    The DB is created and managed by the primary node only, but workers can still connect
    to it (read-only) to enable things like history tracking.
    """
    if not context.MPI_RANK > 0:
        # primary node already owns the writable DB
        return
    # DB may not exist if distribute state is called early.
    if self._dbPath is not None and os.path.exists(self._dbPath):
        self._db = Database(self._dbPath, "r")
        self._db.open()
def distributable(self):
    """Skip broadcasting this interface; workers reconnect via interactDistributeState instead."""
    return self.Distribute.SKIP
def prepRestartRun(self):
    """
    Load the data history from the database requested in the case setting
    `reloadDBName`.

    Reactor state is put at the cycle/node requested in the case settings
    `startCycle` and `startNode`, having loaded the state from all cycles prior
    to that in the requested database.

    .. impl:: Runs at a particular timenode can be re-instantiated for a snapshot.
        :id: I_ARMI_SNAPSHOT_RESTART
        :implements: R_ARMI_SNAPSHOT_RESTART

        This method loads the state of a reactor from a particular point in time
        from a standard ARMI
        :py:class:`Database <armi.bookkeeping.db.database.Database>`. This is a
        major use-case for having ARMI databases in the first place. And restarting
        from such a database is easy, you just need to set a few settings::

            * reloadDBName - Path to existing H5 file to reload from.
            * startCycle - Operational cycle to restart from.
            * startNode - Time node to start from.

    Notes
    -----
    Mixing the use of simple vs detailed cycles settings is allowed, provided
    that the cycle histories prior to `startCycle`/`startNode` are equivalent.

    ARMI expects the reload DB to have been made in the same version of ARMI as you
    are running. ARMI does not guarantee that a DB from a decade ago will be easily
    used to restart a run.
    """
    reloadDBName = self.cs["reloadDBName"]
    runLog.info(f"Merging database history from {reloadDBName} for restart analysis.")
    startCycle = self.cs["startCycle"]
    startNode = self.cs["startNode"]

    with Database(reloadDBName, "r") as inputDB:
        loadDbCs = inputDB.loadCS()

        # pull the history up to the cycle/node prior to `startCycle`/`startNode`
        dbCycle, dbNode = getPreviousTimeNode(
            startCycle,
            startNode,
            self.cs,
        )

        # validate the reload DB's cycle history against this case before merging
        self._checkThatCyclesHistoriesAreEquivalentUpToRestartTime(loadDbCs, dbCycle, dbNode)

        self._db.mergeHistory(inputDB, startCycle, startNode)
    self.loadState(dbCycle, dbNode)
def _checkThatCyclesHistoriesAreEquivalentUpToRestartTime(self, loadDbCs, dbCycle, dbNode):
    """Check that cycle histories are equivalent up to this point."""
    dbStepLengths = getStepLengths(loadDbCs)
    caseStepLengths = getStepLengths(self.cs)
    try:
        # every cycle before the restart cycle must match in full...
        dbStepHistory = [dbStepLengths[c] for c in range(dbCycle)]
        caseStepHistory = [caseStepLengths[c] for c in range(dbCycle)]
        # ...and the restart cycle itself must match up to the restart node
        dbStepHistory.append(dbStepLengths[dbCycle][:dbNode])
        caseStepHistory.append(caseStepLengths[dbCycle][:dbNode])
    except IndexError:
        runLog.error(f"DB cannot be loaded to this time: cycle={dbCycle}, node={dbNode}")
        raise
    if dbStepHistory != caseStepHistory:
        raise ValueError("The cycle history up to the restart cycle/node must be equivalent.")
def _getLoadDB(self, fileName):
    """
    Yield the database(s) to load from, in order of preference.

    Notes
    -----
    If filename is present only returns one database since specifically instructed to load from that database.
    """
    if fileName is None:
        # no explicit file requested: prefer the in-memory DB, then the reload DB on disk
        if self._db is not None:
            yield self._db
        if os.path.exists(self.cs["reloadDBName"]):
            yield Database(self.cs["reloadDBName"], "r")
        return
    # a specific file was requested, so yield at most that single database
    if self._db is not None and fileName == self._db._fileName:
        yield self._db
    elif os.path.exists(fileName):
        yield Database(fileName, "r")
def loadState(self, cycle, timeNode, timeStepName="", fileName=None):
    """
    Loads a fresh reactor and applies it to the Operator.

    Notes
    -----
    Will load preferentially from the ``fileName`` if passed. Otherwise will load from
    existing database in memory or ``cs["reloadDBName"]`` in that order.

    Raises
    ------
    RuntimeError
        If fileName is specified and that file does not have the time step.
        If fileName is not specified and neither the database in memory, nor the
        ``cs["reloadDBName"]`` have the time step specified.
    """
    for candidate in self._getLoadDB(fileName):
        with candidate as loadDB:
            if not loadDB.hasTimeStep(cycle, timeNode, statePointName=timeStepName):
                continue
            freshReactor = loadDB.load(
                cycle,
                timeNode,
                statePointName=timeStepName,
                cs=self.cs,
                allowMissing=True,
            )
            # swap the freshly loaded reactor into the operator
            self.o.reattach(freshReactor, self.cs)
            return

    # no candidate database contained the requested time step, so fail loudly
    groupName = getH5GroupName(cycle, timeNode, timeStepName)
    if fileName:
        raise RuntimeError("Cannot load state from specified file {} @ {}".format(fileName, groupName))
    raise RuntimeError("Cannot load state from @ {}".format(groupName))
def getHistory(
    self,
    comp: ArmiObject,
    params: Optional[Sequence[str]] = None,
    timeSteps: Optional[MutableSequence[Tuple[int, int]]] = None,
    byLocation: bool = False,
) -> History:
    """
    Get historical parameter values for a single object.

    This wraps ``Database.getHistory`` (or ``getHistoryByLocation``) and, when the
    current cycle/node is requested, fills in its values from the in-memory reactor.

    See Also
    --------
    Database.getHistory
    """
    currentStep = (self.r.p.cycle, self.r.p.timeNode)

    # operate on a copy so the caller's list is never mutated
    timeSteps = copy.copy(timeSteps)
    if timeSteps is None:
        includeNow = True
    elif currentStep in timeSteps:
        includeNow = True
        timeSteps.remove(currentStep)
    else:
        includeNow = False

    getter = self.database.getHistoryByLocation if byLocation else self.database.getHistory
    history = getter(comp, params, timeSteps)

    if includeNow:
        for param in params or history.keys():
            if param == "location":
                # might save as int or np.int64, so forcing int keeps things predictable
                history[param][currentStep] = tuple(int(i) for i in comp.spatialLocator.indices)
            else:
                history[param][currentStep] = comp.p[param]

    return history
def getHistories(
    self,
    comps: Sequence[ArmiObject],
    params: Optional[Sequence[str]] = None,
    timeSteps: Optional[MutableSequence[Tuple[int, int]]] = None,
    byLocation: bool = False,
) -> Histories:
    """
    Get historical parameter values for one or more objects.

    This wraps ``Database.getHistories`` (or ``getHistoriesByLocation``) and, when
    the current cycle/node is requested, fills in values from the in-memory reactor.

    See Also
    --------
    Database.getHistories
    """
    currentStep = (self.r.p.cycle, self.r.p.timeNode)
    includeNow = timeSteps is None
    if timeSteps is not None:
        # copy so the caller's sequence stays untouched if we drop the current step
        timeSteps = copy.copy(timeSteps)
        if currentStep in timeSteps:
            includeNow = True
            timeSteps.remove(currentStep)

    getter = self.database.getHistoriesByLocation if byLocation else self.database.getHistories
    histories = getter(comps, params, timeSteps)

    if includeNow:
        for comp in comps:
            for param in params or histories[comp].keys():
                if param == "location":
                    # force plain ints for predictable output
                    histories[comp][param][currentStep] = tuple(int(i) for i in comp.spatialLocator.indices)
                else:
                    histories[comp][param][currentStep] = comp.p[param]

    return histories
================================================
FILE: armi/bookkeeping/db/factory.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from typing import Optional
import h5py
from armi.bookkeeping.db import permissions
from armi.bookkeeping.db.database import Database
def databaseFactory(dbName: str, permission: str, version: Optional[str] = None):
    """
    Return an appropriate object for interacting with a database file.

    Parameters
    ----------
    dbName: str
        Path to db file, e.g. `baseCase.h5`
    permission: str
        String defining permission, `r` for read only. See armi.bookkeeping.db.permissions
    version: str, optional
        Version of database you want to read or write. In most cases ARMI will
        auto-detect. For advanced users.

    Raises
    ------
    RuntimeError
        If the file is not an ``.h5`` file.
    ValueError
        If a version is supplied when reading, the file doesn't exist, or the
        version is 2 or unrecognized.

    Notes
    -----
    This is not a proper factory, as the different database versions do not present a
    common interface. However, this is useful code, since it at least creates an object
    based on some knowledge of how to probe around. This allows client code to just
    interrogate the type of the returned object to figure out to do based on whatever it
    needs.
    """
    dbPath = pathlib.Path(dbName)

    # if it's not an hdf5 file, we dont even know where to start...
    if dbPath.suffix != ".h5":
        raise RuntimeError("Unknown database format for {}".format(dbName))

    if permission in permissions.Permissions.read:
        if version is not None:
            raise ValueError("Cannot specify version when reading a database.")
        if not dbPath.exists() or not dbPath.is_file():
            raise ValueError("Database file `{}` does not appear to be a file.".format(dbName))

        # Probe for the database version. We started adding these with "database 3",
        # so if databaseVersion is not present, assume it's the "old" version.
        # Use a context manager so the HDF5 handle is closed deterministically; the
        # previous `del` relied on garbage collection to release the file lock.
        version = "2"
        with h5py.File(dbPath, "r") as tempDb:
            if "databaseVersion" in tempDb.attrs:
                version = tempDb.attrs["databaseVersion"]

        majorversion = version.split(".")[0] if version else "2"
        if majorversion == "2":
            raise ValueError(
                'Database version 2 ("XTView database") is no longer '
                "supported. To migrate to a newer version, use version 0.1.5."
            )
        if majorversion == "3":
            return Database(dbPath, permission)

        raise ValueError("Unable to determine Database version for {}".format(dbName))
    elif permission in permissions.Permissions.write:
        majorversion = version.split(".")[0] if version else "3"
        if majorversion == "2":
            raise ValueError(
                'Database version 2 ("XTView database") is no longer '
                "supported. To migrate to a newer version, use version 0.1.5 to migrate."
            )
        if majorversion == "3":
            return Database(dbPath, permission)

    # unknown permission, or an unrecognized write version: historical behavior is
    # to return None silently rather than raise
    return None
================================================
FILE: armi/bookkeeping/db/jaggedArray.py
================================================
# Copyright 2024 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tooling to help flatten jagged (non rectangular) data into rectangular arrays.
The goal here is to support jagged data for NumPy arrays to be written into the ARMI databases.
"""
from typing import List, Optional
import numpy as np
from armi import runLog
class JaggedArray:
    """
    Take a list of numpy arrays or lists and flatten them into a single 1D array.

    This implementation can preserve the structure of a multi-dimensional numpy array
    by storing the dimensions in self.shapes and then re-populating a numpy array of
    that shape from the flattened 1D array. However, it can only preserve one layer of
    jaggedness in a list of lists (or other iterables). For example, a list of tuples
    with varying lengths can be flattened and reconstituted exactly. But, if a list of
    lists of tuples is passed in, the tuples in that final layer of nesting will all be
    flattened to a single 1D numpy array after a round trip. No structure is retained
    from nested lists of jagged lists or tuples.
    """

    def __init__(self, jaggedData, paramName):
        """
        JaggedArray constructor.

        Parameters
        ----------
        jaggedData: list of np.ndarray
            A list of numpy arrays (or lists or tuples) to be flattened into a single array
        paramName: str
            The name of the parameter represented by this data
        """
        offset = 0
        flattenedArray = []
        offsets = []
        shapes = []
        nones = []
        for i, arr in enumerate(jaggedData):
            if isinstance(arr, (np.ndarray, list, tuple)):
                if len(arr) == 0:
                    # empty iterables are recorded the same way as explicit Nones
                    nones.append(i)
                else:
                    offsets.append(offset)
                    try:
                        numpyArray = np.array(arr)
                        shapes.append(numpyArray.shape)
                        offset += numpyArray.size
                        flattenedArray.extend(numpyArray.flatten())
                    except Exception:
                        # np.array raises for ragged input; fall back to a recursive
                        # flatten. Only the total element count can be preserved.
                        flattenedList = self.flatten(arr)
                        # Store a 1-tuple so `shapes` remains a list of shape tuples.
                        # (A bare int was previously appended here due to a trailing
                        # comma, which made `shapes` inconsistent and broke the
                        # unpack() round-trip for jagged fallback data.)
                        shapes.append((len(flattenedList),))
                        offset += len(flattenedList)
                        flattenedArray.extend(flattenedList)
            elif isinstance(arr, (int, float)):
                # NOTE(review): numpy integer scalars (e.g. np.int64) are not
                # instances of int and would be silently dropped here — confirm
                # callers only pass builtin scalars.
                offsets.append(offset)
                shapes.append((1,))
                offset += 1
                flattenedArray.append(arr)
            elif arr is None:
                nones.append(i)

        self.flattenedArray = np.array(flattenedArray)
        self.offsets = np.array(offsets)
        try:
            self.shapes = np.array(shapes)
        except ValueError:
            runLog.error(
                "Error! It seems like ARMI may have tried to flatten a jagged array "
                "where the elements have different numbers of dimensions. `shapes` "
                "attribute of the JaggedArray for {} cannot be made into a numpy "
                "array; it might be jagged.".format(paramName)
            )
            runLog.error(shapes)
            # re-raise as-is to preserve the original traceback
            raise
        self.nones = np.array(nones)
        self.dtype = self.flattenedArray.dtype
        self.paramName = paramName

    def __iter__(self):
        """Iterate over the unpacked list."""
        return iter(self.unpack())

    def __contains__(self, other):
        # membership is checked against the flattened data, not the nested structure
        return other in self.flattenedArray

    @staticmethod
    def flatten(x):
        """
        Recursively flatten an iterable (list, tuple, or numpy.ndarray).

        x : list, tuple, np.ndarray
            An iterable. Can be a nested iterable in which the elements
            themselves are also iterable.
        """
        if isinstance(x, (list, tuple, np.ndarray)):
            if len(x) == 0:
                return []
            first, rest = x[0], x[1:]
            return JaggedArray.flatten(first) + JaggedArray.flatten(rest)
        else:
            return [x]

    @classmethod
    def fromH5(cls, data, offsets, shapes, nones, dtype, paramName):
        """
        Create a JaggedArray instance from an HDF5 dataset.

        The JaggedArray is stored in HDF5 as a flat 1D array with accompanying
        attributes of "offsets" and "shapes" to define how to reconstitute the
        original data.

        Parameters
        ----------
        data: np.ndarray
            A flattened 1D numpy array read in from an HDF5 file
        offsets: np.ndarray
            Offset indices for the zeroth element of each constituent array
        shapes: np.ndarray
            The shape of each constituent array
        nones: np.ndarray
            The location of Nones
        dtype: np.dtype
            The data type for the array
        paramName: str
            The name of the parameter represented by this data

        Returns
        -------
        obj: JaggedArray An instance of JaggedArray populated with the input data
        """
        # build an empty instance, then overwrite its attributes wholesale
        obj = cls([], paramName)
        obj.flattenedArray = np.array(data)
        obj.offsets = np.array(offsets)
        obj.shapes = np.array(shapes)
        obj.nones = np.array(nones)
        obj.dtype = dtype
        obj.paramName = paramName
        return obj

    def tolist(self):
        """Alias for unpack() to make this class respond like a np.ndarray."""
        return self.unpack()

    def unpack(self):
        """
        Unpack a JaggedArray object into a list of arrays.

        Returns
        -------
        unpackedJaggedData: list of np.ndarray
            List of numpy arrays with varying dimensions (i.e., jagged arrays)
        """
        unpackedJaggedData: List[Optional[np.ndarray]] = []
        # rows of `shapes` with a nonzero element count correspond to stored arrays
        shapeIndices = [i for i, x in enumerate(self.shapes) if sum(x) != 0]
        numElements = len(shapeIndices) + len(self.nones)
        j = 0  # non-None element counter
        for i in range(numElements):
            if i in self.nones:
                unpackedJaggedData.append(None)
            else:
                k = shapeIndices[j]
                # view into the flattened buffer starting at this element's offset
                unpackedJaggedData.append(
                    np.ndarray(
                        self.shapes[k],
                        dtype=self.dtype,
                        buffer=self.flattenedArray[self.offsets[k] :],
                    )
                )
                j += 1

        return unpackedJaggedData
================================================
FILE: armi/bookkeeping/db/layout.py
================================================
# Copyright 2022 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Groundwork for ARMI Database, version 3.4.
When interacting with the database file, the :py:class:`Layout` class is used to help
map the hierarchical Composite Reactor Model to the flat representation in
:py:class:`Database `.
This module also stores packing/packing tools to support
:py:class:`Database `, as well as database
versioning information.
"""
import collections
from typing import (
Any,
Dict,
List,
Optional,
Tuple,
Type,
)
import numpy as np
from armi import runLog
from armi.reactor import grids
from armi.reactor.components import Component
from armi.reactor.composites import ArmiObject
from armi.reactor.excoreStructure import ExcoreStructure
from armi.reactor.reactors import Core, Reactor
# Here we store the Database version information.
DB_MAJOR = 3
DB_MINOR = 4
DB_VERSION = f"{DB_MAJOR}.{DB_MINOR}"

# CONSTANTS USED TO PACK AND UNPACK DATA
# Single-character labels keep the stored locationType strings compact.
LOC_NONE = "N"
LOC_COORD = "C"
LOC_INDEX = "I"
LOC_MULTI = "M:"

LOCATION_TYPE_LABELS = {
    type(None): LOC_NONE,
    grids.CoordinateLocation: LOC_COORD,
    grids.IndexLocation: LOC_INDEX,
    grids.MultiIndexLocation: LOC_MULTI,
}

# NONE_MAP maps each scalar type to the sentinel value used to stand in for None
# in rectangular HDF5 datasets (see replaceNonesWithNonsense/replaceNonsenseWithNones).
# NOTE: Here we assume no one assigns min(int)+2 as a meaningful value
NONE_MAP = {float: float("nan"), str: ""}
# signed integer types: sentinel near the minimum representable value
NONE_MAP.update(
    {
        intType: np.iinfo(intType).min + 2
        for intType in (
            int,
            np.int8,
            np.int16,
            np.int32,
            np.int64,
        )
    }
)
# unsigned integer types: sentinel near the maximum representable value
NONE_MAP.update(
    {
        intType: np.iinfo(intType).max - 2
        for intType in (
            np.uint,
            np.uint8,
            np.uint16,
            np.uint32,
            np.uint64,
        )
    }
)
# float types: NaN marks None (re-asserts the builtin float entry from above)
NONE_MAP.update({floatType: floatType("nan") for floatType in (float, np.float64)})
class Layout:
    """
    The Layout class describes the hierarchical layout of the Composite Reactor model
    in a flat representation for
    :py:class:`Database <armi.bookkeeping.db.database.Database>`.

    A Layout is built by starting at the root of a composite tree and recursively
    appending each node in the tree to a list of data. So the data will be ordered by
    depth-first search: [r, c, a1, a1b1, a1b1c1, a1b1c2, a1b2, a1b2c1, ..., a2, ...].

    The layout is also responsible for storing Component attributes, like location,
    material, and temperatures, which aren't stored as Parameters. Temperatures,
    specifically, are rather complicated in ARMI.

    Notes
    -----
    * Elements in Layout are stored in depth-first order. This permits use of
      algorithms such as Pre-Order Tree Traversal for efficient traversal of regions
      of the model.
    * ``indexInData`` increases monotonically within each object ``type``. For
      example, the data for all ``HexBlock`` children of a given parent are stored
      contiguously within the ``HexBlock`` group, and will not be interleaved with
      data from the ``HexBlock`` children of any of the parent's siblings.
    * Aside from the hierarchy, there is no guarantee what order objects are stored
      in the layout. The ``Core`` is not necessarily the first child of the
      ``Reactor``, and is not guaranteed to use the zeroth grid.
    """

    def __init__(self, version: Tuple[int, int], h5group=None, comp=None):
        # (major, minor) DB version; the minor version controls location packing
        self.type: List[str] = []
        self.name: List[str] = []
        self.serialNum: List[int] = []
        # The index into the parameter datasets corresponding to each object's class.
        # E.g., the 5th HexBlock object in the tree would get 5; to look up its
        # "someParameter" value, you would extract cXXnYY/HexBlock/someParameter[5].
        self.indexInData: List[int] = []
        # The number of direct children this object has.
        self.numChildren: List[int] = []
        # The type of location that specifies the object's physical location; see the
        # associated pack/unpackLocation functions for more information about how
        # locations are handled.
        self.locationType: List[str] = []
        # There is a minor asymmetry here in that before writing to the DB, this is
        # truly a flat list of tuples. However when reading, this may contain lists of
        # tuples, which represent MI locations. This comes from the fact that we map the
        # tuples to Location objects in Database._compose, but map from Locations to
        # tuples in Layout._createLayout. Ideally we would handle both directions in the
        # same place so this can be less surprising. Resolving this would require
        # changing the interface of the various pack/unpack functions, which have
        # multiple versions, so the update would need to be done with care.
        self.location: List[Tuple[int, int, int]] = []
        # Which grid, as stored in the database, this object uses to arrange its
        # children
        self.gridIndex: List[int] = []
        self.temperatures: List[float] = []
        self.material: List[str] = []
        # Used to cache all of the spatial locators so that we can pack them all at
        # once. The benefit here is that the version checking can happen up front and
        # less branching down below
        self._spatialLocators: List[grids.LocationBase] = []
        # set of grid parameters that have been seen in _createLayout. For efficient
        # checks for uniqueness
        self._seenGridParams: Dict[Any, Any] = dict()
        # actual list of grid parameters, with stable order for safe indexing
        self.gridParams: List[Any] = []
        self.version = version

        self.groupedComps: Dict[Type[ArmiObject], List[ArmiObject]] = collections.defaultdict(list)

        # it should be noted, one of the two inputs must be non-None: comp/h5group
        if comp is not None:
            # building a Layout from a live reactor model (write path)
            self._createLayout(comp)
            self.locationType, self.location = _packLocations(self._spatialLocators)
        else:
            # building a Layout from an existing database (read path)
            self._readLayout(h5group)

        self._snToLayoutIndex = {sn: i for i, sn in enumerate(self.serialNum)}

        # find all subclasses of Grid
        self.gridClasses = {c.__name__: c for c in Layout.allSubclasses(grids.Grid)}
        self.gridClasses["Grid"] = grids.Grid

    def __getitem__(self, sn):
        """Return the per-object layout record for the given serial number."""
        layoutIndex = self._snToLayoutIndex[sn]
        return (
            self.type[layoutIndex],
            self.name[layoutIndex],
            self.serialNum[layoutIndex],
            self.indexInData[layoutIndex],
            self.numChildren[layoutIndex],
            self.locationType[layoutIndex],
            self.location[layoutIndex],
            self.temperatures[layoutIndex],
            self.material[layoutIndex],
        )

    def _createLayout(self, comp):
        """
        Populate a hierarchical representation and group the reactor model items by type.

        This is used when writing a reactor model to the database.

        Notes
        -----
        This is recursive.

        See Also
        --------
        _readLayout : does the opposite
        """
        compList = self.groupedComps[type(comp)]
        compList.append(comp)

        self.type.append(comp.__class__.__name__)
        self.name.append(comp.name)
        self.serialNum.append(comp.p.serialNum)
        self.indexInData.append(len(compList) - 1)
        self.numChildren.append(len(comp))

        # determine how many components have been read in, to set the grid index
        if comp.spatialGrid is not None:
            gridType = type(comp.spatialGrid).__name__
            gridParams = (gridType, comp.spatialGrid.reduce())
            if gridParams not in self._seenGridParams:
                self._seenGridParams[gridParams] = len(self.gridParams)
                self.gridParams.append(gridParams)
            self.gridIndex.append(self._seenGridParams[gridParams])
        else:
            self.gridIndex.append(None)

        self._spatialLocators.append(comp.spatialLocator)

        # set the materials and temperatures
        try:
            self.temperatures.append((comp.inputTemperatureInC, comp.temperatureInC))
            self.material.append(comp.material.__class__.__name__)
        except Exception:
            # non-Component objects (e.g. assemblies, blocks) lack these attributes
            self.temperatures.append((-900, -900))  # an impossible temperature
            self.material.append("")

        try:
            comps = sorted(list(comp))
        except ValueError:
            runLog.error(
                "Failed to sort some collection of ArmiObjects for database output: {} value {}".format(
                    type(comp), list(comp)
                )
            )
            raise

        # depth-first search recursion of all components
        for c in comps:
            self._createLayout(c)

    def _readLayout(self, h5group):
        """
        Populate a hierarchical representation and group the reactor model items by type.

        This is used when reading a reactor model from a database.

        See Also
        --------
        _createLayout : does the opposite
        """
        try:
            # location is either an index, or a point
            # iter over list is faster
            locations = h5group["layout/location"][:].tolist()
            self.locationType = np.char.decode(h5group["layout/locationType"][:]).tolist()
            self.location = _unpackLocations(self.locationType, locations, self.version[1])
            self.type = np.char.decode(h5group["layout/type"][:])
            self.name = np.char.decode(h5group["layout/name"][:])
            self.serialNum = h5group["layout/serialNum"][:]
            self.indexInData = h5group["layout/indexInData"][:]
            self.numChildren = h5group["layout/numChildren"][:]
            self.material = np.char.decode(h5group["layout/material"][:])
            self.temperatures = h5group["layout/temperatures"][:]
            # gridIndex uses a sentinel integer for None; map it back
            self.gridIndex = replaceNonsenseWithNones(h5group["layout/gridIndex"][:], "layout/gridIndex")

            gridGroup = h5group["layout/grids"]
            gridTypes = [t.decode() for t in gridGroup["type"][:]]

            self.gridParams = []
            for iGrid, gridType in enumerate(gridTypes):
                thisGroup = gridGroup[str(iGrid)]

                unitSteps = thisGroup["unitSteps"][:]
                bounds = []
                for ibound in range(3):
                    boundName = "bounds_{}".format(ibound)
                    if boundName in thisGroup:
                        bounds.append(thisGroup[boundName][:])
                    else:
                        bounds.append(None)
                unitStepLimits = thisGroup["unitStepLimits"][:]

                # the "offset" attribute flags whether an offset dataset was written
                offset = thisGroup["offset"][:] if thisGroup.attrs["offset"] else None
                geomType = thisGroup["geomType"].asstr()[()] if "geomType" in thisGroup else None
                symmetry = thisGroup["symmetry"].asstr()[()] if "symmetry" in thisGroup else None

                self.gridParams.append(
                    (
                        gridType,
                        grids.GridParameters(
                            unitSteps,
                            bounds,
                            unitStepLimits,
                            offset,
                            geomType,
                            symmetry,
                        ),
                    )
                )
        except KeyError as e:
            runLog.error("Failed to get layout information from group: {}".format(h5group.name))
            raise e

    def _initComps(self, caseTitle, bp):
        """
        Instantiate bare composite objects from the stored layout data.

        Returns a list of (comp, serialNum, numChildren, location, locationType)
        tuples plus the same objects grouped by type name; the caller is expected
        to stitch the hierarchy together and load parameters afterwards.
        """
        comps = []
        groupedComps = collections.defaultdict(list)

        for (
            compType,
            name,
            serialNum,
            numChildren,
            location,
            locationType,
            material,
            temperatures,
            gridIndex,
        ) in zip(
            self.type,
            self.name,
            self.serialNum,
            self.numChildren,
            self.location,
            self.locationType,
            self.material,
            self.temperatures,
            self.gridIndex,
        ):
            Klass = ArmiObject.TYPES[compType]

            if issubclass(Klass, Reactor):
                comp = Klass(caseTitle, bp)
            elif issubclass(Klass, Core):
                comp = Klass(name)
            elif issubclass(Klass, ExcoreStructure):
                comp = Klass(name)
            elif issubclass(Klass, Component):
                # init all dimensions to 0, they will be loaded and assigned after load
                kwargs = dict.fromkeys(Klass.DIMENSION_NAMES, 0)
                kwargs["modArea"] = None
                kwargs["material"] = material
                kwargs["name"] = name
                kwargs["Tinput"] = temperatures[0]
                kwargs["Thot"] = temperatures[1]
                comp = Klass(**kwargs)
            else:
                comp = Klass(name)

            if gridIndex is not None:
                gridParams = self.gridParams[gridIndex]
                comp.spatialGrid = self.gridClasses[gridParams[0]](*gridParams[1], armiObject=comp)

            comps.append((comp, serialNum, numChildren, location, locationType))
            groupedComps[compType].append(comp)

        return comps, groupedComps

    def writeToDB(self, h5group):
        """Write a chunk of data to the database.

        .. impl:: Write data to the DB for a given time step.
            :id: I_ARMI_DB_TIME0
            :implements: R_ARMI_DB_TIME

            This method writes a snapshot of the current state of the reactor to the
            database. It takes a pointer to an existing HDF5 file as input, and it
            writes the reactor data model to the file in depth-first search order.
            Other than this search order, there are no guarantees as to what order the
            objects are written to the file. Though, this turns out to still be very
            powerful. For instance, the data for all ``HexBlock`` children of a given
            parent are stored contiguously within the ``HexBlock`` group, and will not
            be interleaved with data from the ``HexBlock`` children of any of the parent's siblings.
        """
        if "layout/type" in h5group:
            # It looks like we have already written the layout to DB, skip for now
            return

        try:
            h5group.create_dataset(
                "layout/type",
                data=np.array(self.type).astype("S"),
                compression="gzip",
            )
            h5group.create_dataset(
                "layout/name",
                data=np.array(self.name).astype("S"),
                compression="gzip",
            )
            h5group.create_dataset("layout/serialNum", data=self.serialNum, compression="gzip")
            h5group.create_dataset("layout/indexInData", data=self.indexInData, compression="gzip")
            h5group.create_dataset(
                "layout/numChildren",
                data=self.numChildren,
                compression="gzip",
                track_order=True,
            )
            h5group.create_dataset(
                "layout/location",
                data=self.location,
                compression="gzip",
                track_order=True,
            )
            h5group.create_dataset(
                "layout/locationType",
                data=np.array(self.locationType).astype("S"),
                compression="gzip",
                track_order=True,
            )
            h5group.create_dataset(
                "layout/material",
                data=np.array(self.material).astype("S"),
                compression="gzip",
                track_order=True,
            )
            h5group.create_dataset(
                "layout/temperatures",
                data=self.temperatures,
                compression="gzip",
                track_order=True,
            )
            # None grid indices are stored as a sentinel integer (see NONE_MAP)
            h5group.create_dataset(
                "layout/gridIndex",
                data=replaceNonesWithNonsense(np.array(self.gridIndex), "layout/gridIndex"),
                compression="gzip",
            )

            gridsGroup = h5group.create_group("layout/grids", track_order=True)
            gridsGroup.attrs["nGrids"] = len(self.gridParams)
            gridsGroup.create_dataset(
                "type",
                data=np.array([gp[0] for gp in self.gridParams]).astype("S"),
                track_order=True,
            )

            for igrid, gridParams in enumerate(gp[1] for gp in self.gridParams):
                thisGroup = gridsGroup.create_group(str(igrid), track_order=True)
                thisGroup.create_dataset("unitSteps", data=gridParams.unitSteps, track_order=True)

                for ibound, bound in enumerate(gridParams.bounds):
                    if bound is not None:
                        bound = np.array(bound)
                        thisGroup.create_dataset("bounds_{}".format(ibound), data=bound, track_order=True)

                thisGroup.create_dataset("unitStepLimits", data=gridParams.unitStepLimits, track_order=True)

                offset = gridParams.offset
                # a boolean attribute records whether an offset dataset exists
                thisGroup.attrs["offset"] = offset is not None
                if offset is not None:
                    thisGroup.create_dataset("offset", data=offset, track_order=True)
                thisGroup.create_dataset("geomType", data=gridParams.geomType, track_order=True)
                thisGroup.create_dataset("symmetry", data=gridParams.symmetry, track_order=True)
        except RuntimeError:
            runLog.error("Failed to create datasets in: {}".format(h5group))
            raise

    @staticmethod
    def computeAncestors(serialNum, numChildren, depth=1) -> List[Optional[int]]:
        """
        Return a list containing the serial number of the parent corresponding to each
        object at the given depth.

        Depth in this case means how many layers to reach up to find the desired
        ancestor. A depth of 1 will yield the direct parent of each element, depth of 2
        would yield the element's parent's parent, and so on.

        The zero-th element will always be None, as the first object is the root element
        and so has no parent. Subsequent depths will result in more Nones.

        This function is useful for forming a lightweight sense of how the database
        contents stitch together, without having to go to the trouble of fully unpacking
        the Reactor model.

        Parameters
        ----------
        serialNum : List of int
            List of serial numbers for each object/element, as laid out in Layout
        numChildren : List of int
            List of numbers of children for each object/element, as laid out in Layout

        Note
        ----
        This is not using a recursive approach for a couple of reasons. First, the
        iterative form isn't so bad; we just need two stacks. Second, the interface of
        the recursive function would be pretty unwieldy. We are progressively
        consuming two lists, of which we would need to keep passing down with an
        index/cursor, or progressively slice them as we go, which would be pretty
        inefficient.
        """
        ancestors: List[Optional[int]] = [None]

        snStack = [serialNum[0]]
        ncStack = [numChildren[0]]

        for sn, nc in zip(serialNum[1:], numChildren[1:]):
            # the current element consumes one child slot of the stack top
            ncStack[-1] -= 1
            if nc > 0:
                # this element has children of its own; push it onto the stacks
                ancestors.append(snStack[-1])
                snStack.append(sn)
                ncStack.append(nc)
            else:
                ancestors.append(snStack[-1])
                # pop any fully-consumed ancestors off the stacks
                while ncStack and ncStack[-1] == 0:
                    snStack.pop()
                    ncStack.pop()

        if depth > 1:
            # handle deeper scenarios. This is a bit tricky. Store the original
            # ancestors for the first generation, since that ultimately contains all of
            # the information that we need. Then in a loop, keep hopping one more layer
            # of indirection, and indexing into the corresponding location in the
            # original ancestor array
            indexMap = {sn: i for i, sn in enumerate(serialNum)}
            origAncestors = ancestors
            for _ in range(depth - 1):
                ancestors = [origAncestors[indexMap[ia]] if ia is not None else None for ia in ancestors]

        return ancestors

    @staticmethod
    def allSubclasses(cls) -> set:
        """Find all subclasses of the given class, in any namespace."""
        return set(cls.__subclasses__()).union([s for c in cls.__subclasses__() for s in Layout.allSubclasses(c)])
def _packLocations(
    locations: List[grids.LocationBase], minorVersion: int = DB_MINOR
) -> Tuple[List[str], List[Tuple[int, int, int]]]:
    """
    Extract information from a location needed to write it to this DB.

    Each locator has one locationType and up to N location-defining datums,
    where N is the number of entries in a possible multiindex, or just 1
    for everything else.

    Shrink grid locator names for storage efficiency.

    Notes
    -----
    Contains some conditionals to still load databases made before
    db version 3.3 which can be removed once no users care about
    those DBs anymore.
    """
    # dispatch on the minor DB version; every integer falls in one of these buckets
    if minorVersion > 3:
        packer = _packLocationsV3
    elif minorVersion == 3:
        packer = _packLocationsV2
    elif minorVersion <= 2:
        packer = _packLocationsV1
    else:
        raise ValueError("Unsupported minor version: {}".format(minorVersion))
    return packer(locations)
def _packLocationsV1(
    locations: List[grids.LocationBase],
) -> Tuple[List[str], List[Tuple[int, int, int]]]:
    """Delete when reading v <=3.2 DB's no longer wanted."""
    locTypes = []
    locData: List[Tuple[int, int, int]] = []
    for loc in locations:
        if loc is None:
            # Nones are stored under a literal "None" label with placeholder coords
            locTypes.append("None")
            locData.append((0.0, 0.0, 0.0))
        elif isinstance(loc, grids.IndexLocation):
            # store the concrete class name so subclasses round-trip
            locTypes.append(type(loc).__name__)
            locData.append(loc.indices)
        else:
            raise ValueError(f"Invalid location type: {loc}")
    return locTypes, locData
def _packLocationsV2(
    locations: List[grids.LocationBase],
) -> Tuple[List[str], List[Tuple[int, int, int]]]:
    """Location packing implementation for minor version 3. See module docstring above."""
    locTypes = []
    locData: List[Tuple[int, int, int]] = []
    for loc in locations:
        label = LOCATION_TYPE_LABELS[type(loc)]
        if loc is None:
            entries = [(0.0, 0.0, 0.0)]
        elif type(loc) in (grids.CoordinateLocation, grids.IndexLocation):
            entries = [loc.indices]
        elif type(loc) is grids.MultiIndexLocation:
            # encode number of sub-locations to allow in-line unpacking.
            label += f"{len(loc)}"
            entries = [subloc.indices for subloc in loc]
        else:
            raise ValueError(f"Invalid location type: {loc}")
        locTypes.append(label)
        locData.extend(entries)
    return locTypes, locData
def _packLocationsV3(
locations: List[grids.LocationBase],
) -> Tuple[List[str], List[Tuple[int, int, int]]]:
"""Location packing implementation for minor version 4. See module docstring above."""
locTypes = []
locData: List[Tuple[int, int, int]] = []
for loc in locations:
locationType = LOCATION_TYPE_LABELS[type(loc)]
if loc is None:
locDatum = [(0.0, 0.0, 0.0)]
elif type(loc) is grids.IndexLocation:
locDatum = [loc.getCompleteIndices()]
elif type(loc) is grids.CoordinateLocation:
# CoordinateLocations do not implement getCompleteIndices properly, and we
# do not really have a motivation to store them as we do with index
# locations.
locDatum = [loc.indices]
elif type(loc) is grids.MultiIndexLocation:
locationType += f"{len(loc)}"
locDatum = [subloc.indices for subloc in loc]
else:
raise ValueError(f"Invalid location type: {loc}")
locTypes.append(locationType)
locData.extend(locDatum)
return locTypes, locData
def _unpackLocations(locationTypes, locData, minorVersion: int = DB_MINOR):
    """
    Convert location data as read from DB back into data structure for building reactor model.

    location and locationType will only have different lengths when multiindex locations
    are used.
    """
    # DBs older than minor version 3 store full class names; newer ones use short labels
    unpack = _unpackLocationsV1 if minorVersion < 3 else _unpackLocationsV2
    return unpack(locationTypes, locData)
def _unpackLocationsV1(locationTypes, locData):
"""Delete when reading v <=3.2 DB's no longer wanted."""
locsIter = iter(locData)
unpackedLocs = []
for lt in locationTypes:
if lt == "None":
loc = next(locsIter)
unpackedLocs.append(None)
elif lt == "IndexLocation":
loc = next(locsIter)
# the data is stored as float, so cast back to int
unpackedLocs.append(tuple(int(i) for i in loc))
else:
loc = next(locsIter)
unpackedLocs.append(tuple(loc))
return unpackedLocs
def _unpackLocationsV2(locationTypes, locData):
"""Location unpacking implementation for minor version 3+. See module docstring above."""
locsIter = iter(locData)
unpackedLocs = []
for lt in locationTypes:
if lt == LOC_NONE:
loc = next(locsIter)
unpackedLocs.append(None)
elif lt == LOC_INDEX:
loc = next(locsIter)
# the data is stored as float, so cast back to int
unpackedLocs.append(tuple(int(i) for i in loc))
elif lt == LOC_COORD:
loc = next(locsIter)
unpackedLocs.append(tuple(loc))
elif lt.startswith(LOC_MULTI):
# extract number of sublocations from e.g. "M:345" string.
numSubLocs = int(lt.split(":")[1])
multiLocs = []
for _ in range(numSubLocs):
subLoc = next(locsIter)
# All multiindexes sublocs are index locs
multiLocs.append(tuple(int(i) for i in subLoc))
unpackedLocs.append(multiLocs)
else:
raise ValueError(f"Read unknown location type {lt}. Invalid DB.")
return unpackedLocs
def replaceNonesWithNonsense(data: np.ndarray, paramName: str, nones: np.ndarray = None) -> np.ndarray:
    """
    Replace instances of ``None`` with nonsense values that can be detected/recovered
    when reading.

    Parameters
    ----------
    data
        The numpy array containing ``None`` values that need to be replaced.
    paramName
        The name of the parameter who's data we are treating. Only used for diagnostics.
    nones
        An array containing the index locations on the ``None`` elements. It is a little
        strange to pass these, in but we find these indices to determine whether we need
        to call this function in the first place, so might as well pass it in, so that
        we don't need to perform the operation again.

    Returns
    -------
    np.ndarray
        ``data`` with each ``None`` replaced by the ``NONE_MAP`` sentinel for the data's
        element type, coerced to a concrete (non-object) dtype.

    Notes
    -----
    This only supports situations where the data is a straight-up ``None``, or a valid,
    database-storable numpy array (or easily convertible to one (e.g. tuples/lists with
    numerical values)). This does not support, for instance, a numpy ndarray with some
    Nones in it.

    For example, the following is supported::

        [[1, 2, 3], None, [7, 8, 9]]

    However, the following is not::

        [[1, 2, 3], [4, None, 6], [7, 8, 9]]

    See Also
    --------
    replaceNonsenseWithNones
        Reverses this operation.
    """
    if nones is None:
        nones = np.where([d is None for d in data])[0]
    try:
        # loop to find what the default value should be. This is the first non-None
        # value that we can find.
        defaultValue = None
        realType = None
        val = None
        for val in data:
            if isinstance(val, np.ndarray):
                # if multi-dimensional, val[0] could still be an array, val.flat is
                # a flattened iterator, so next(val.flat) gives the first value in
                # an n-dimensional array
                realType = type(next(val.flat))
                if realType is type(None):
                    continue
                # make the sentinel match this entry's shape so it can stand in for it
                defaultValue = np.reshape(np.repeat(NONE_MAP[realType], val.size), val.shape)
                break
            else:
                realType = type(val)
                if realType is type(None):
                    continue
                defaultValue = NONE_MAP[realType]
                break
        else:
            # Couldn't find any non-None entries, so it really doesn't matter what type we
            # use. Using float, because NaN is nice.
            realType = float
            defaultValue = NONE_MAP[realType]
        # NOTE: `val` here is whatever the loop above examined last, so this branch
        # distinguishes array-of-arrays data from flat data.
        if isinstance(val, np.ndarray):
            # entries are arrays, so rebuild element-wise instead of fancy-indexing
            data = np.array([d if d is not None else defaultValue for d in data])
        else:
            data[nones] = defaultValue
    except Exception as ee:
        runLog.error(
            "Error while attempting to determine default for {}.\nvalue: {}\nError: {}".format(paramName, val, ee)
        )
        raise TypeError(
            "Could not determine None replacement for {} with type {}, val {}, default {}".format(
                paramName, realType, val, defaultValue
            )
        )
    try:
        data = data.astype(realType)
    except Exception:
        raise ValueError("Could not coerce data for {} to {}, data:\n{}".format(paramName, realType, data))
    if data.dtype.kind == "O":
        # object arrays cannot be written to HDF5; the coercion above did not succeed
        raise TypeError("Failed to convert data to valid HDF5 type {}, data:{}".format(paramName, data))
    return data
def replaceNonsenseWithNones(data: np.ndarray, paramName: str) -> np.ndarray:
    """
    Replace special nonsense values with ``None``.

    This essentially reverses the operations performed by
    :py:func:`replaceNonesWithNonsense`.

    Parameters
    ----------
    data
        The array from the database that contains special ``None`` nonsense values.
    paramName
        The param name who's data we are dealing with. Only used for diagnostics.

    See Also
    --------
    replaceNonesWithNonsense
    """
    # NOTE: This is closely-related to the NONE_MAP.
    dtype = data.dtype
    if np.issubdtype(dtype, np.floating):
        noneMask = np.isnan(data)
    elif np.issubdtype(dtype, np.integer):
        noneMask = data == np.iinfo(dtype).min + 2
    elif np.issubdtype(dtype, np.str_):
        noneMask = data == ""
    else:
        raise TypeError("Unable to resolve values that should be None for `{}`".format(paramName))

    if data.ndim <= 1:
        # flat data: replace sentinels directly in an object-typed copy
        result = np.ndarray(data.shape, dtype=np.dtype("O"))
        result[:] = data
        result[noneMask] = None
        return result

    # multi-dimensional data: handle each row on its own so that an all-sentinel
    # row collapses to a single None entry
    result = np.ndarray(data.shape[0], dtype=np.dtype("O"))
    for rowIndex, row in enumerate(data):
        rowMask = noneMask[rowIndex]
        if rowMask.all():
            result[rowIndex] = None
        elif rowMask.any():
            # mixed row: promote to object dtype so individual entries can be None
            objRow = np.array(row, dtype=np.dtype("O"))
            objRow[rowMask] = None
            result[rowIndex] = objRow
        else:
            result[rowIndex] = row
    return result
================================================
FILE: armi/bookkeeping/db/passiveDBLoadPlugin.py
================================================
# Copyright 2024 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Provides the ability to ignore parameters and sections of blueprint files.
This plugin can allow you to more easily open a database, because you can ignore sections of the
blueprint files, and ignore any parameters as you want.
This was designed to allow loading an ARMI database without the application that created it.
"""
import yamlize
from armi import plugins
from armi.reactor import parameters
from armi.reactor.parameters import ParamLocation
from armi.utils import units
class PassThroughYamlize(yamlize.Object):
    """Helper for PassiveDBLoadPlugin: parses any blueprint section into an empty object,
    effectively ignoring the section's contents.
    """

    @classmethod
    def from_yaml(cls, loader, node, round_trip_data=None):
        # Discard the node's contents before delegating, so whatever YAML the
        # section held is never interpreted.
        node.value = []
        # Call the base implementation unbound (via __func__), explicitly passing
        # PassThroughYamlize as the class rather than `cls`.
        return yamlize.Object.from_yaml.__func__(PassThroughYamlize, loader, node, round_trip_data)
class PassiveDBLoadPlugin(plugins.ArmiPlugin):
    """Allows a reactor data model to be passively loaded from an ARMI DB even when some
    parameters and blueprint sections are unknown to the current app.

    Two things can be configured, by setting class variables before instantiating the
    ARMI App:

    1. ``SKIP_BP_SECTIONS``: a list of BP section names (strings) to ignore entirely.
    2. ``UNKNOWN_PARAMS``: a mapping from param class to names, e.g. ``{Core: ["a", "b", "c"]}``;
       these parameters are loaded without units or underlying metadata.

    Notes
    -----
    Ignoring unknown parameters and whole blueprint sections necessarily loses
    information. There is no way to use this plugin and still claim full fidelity of
    your understanding of the reactor. ARMI does not support any such claims.
    """

    SKIP_BP_SECTIONS = []
    UNKNOWN_PARAMS = {}

    @staticmethod
    @plugins.HOOKIMPL
    def defineBlueprintsSections():
        """Ignore a pre-determined set of blueprint sections."""
        return [
            (
                sectionName.replace(" ", ""),
                yamlize.Attribute(key=sectionName, type=PassThroughYamlize, default=None),
                PassThroughYamlize,
            )
            for sectionName in PassiveDBLoadPlugin.SKIP_BP_SECTIONS
        ]

    @staticmethod
    @plugins.HOOKIMPL
    def defineParameters():
        """Define parameters for the plugin."""
        # build all the parameters we are missing in default ARMI, skipping empty entries
        return {
            dataClass: PassiveDBLoadPlugin.buildParamColl(paramNames)
            for dataClass, paramNames in PassiveDBLoadPlugin.UNKNOWN_PARAMS.items()
            if len(paramNames)
        }

    @staticmethod
    def buildParamColl(names):
        """Try replacing any missing parameters with unitless nonsense."""
        # build a collection of defaulted parameters to passively ignore
        desc = "This is just a placeholder Parameter; it's meaning is unknown."
        pDefs = parameters.ParameterDefinitionCollection()
        with pDefs.createBuilder(location=ParamLocation.AVERAGE) as builder:
            for name in names:
                builder.defParam(name, units=units.UNITLESS, description=desc, saveToDB=False)
        return pDefs
================================================
FILE: armi/bookkeeping/db/permissions.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Permissions:
    """Mappings to HDF5 permissions flags."""

    READ_ONLY_FME = "r"  # File Must Exist
    READ_WRITE_FME = "r+"  # File Must Exist
    CREATE_FILE_TIE = "w"  # Truncate If Exists
    CREATE_FILE_FIE = "w-"  # Fail If Exists
    CREATE_FILE_FIE2 = "x"  # Fail If Exists, Alternate option
    READ_WRITE_CREATE = "a"

    DEFAULT = READ_WRITE_CREATE

    # Strictly reading, not writing or creating a file if it doesn't exist
    read = {READ_ONLY_FME, READ_WRITE_FME}
    # Modes that may create a new file
    create = {CREATE_FILE_TIE, CREATE_FILE_FIE, CREATE_FILE_FIE2, READ_WRITE_CREATE}
    # Every mode that permits writing
    write = {READ_WRITE_FME} | create
    # Every recognized mode flag
    all = read | write
================================================
FILE: armi/bookkeeping/db/tests/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Database tests."""
================================================
FILE: armi/bookkeeping/db/tests/test_comparedb3.py
================================================
# Copyright 2021 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the compareDB3 module."""
import unittest
import warnings
from unittest.mock import patch
import h5py
import numpy as np
from armi.bookkeeping.db.compareDB3 import (
DiffResults,
OutputWriter,
_compareAuxData,
_compareSets,
_diffSimpleData,
_diffSpecialData,
compareDatabases,
)
from armi.bookkeeping.db.databaseInterface import DatabaseInterface
from armi.reactor.tests import test_reactors
from armi.tests import TEST_ROOT, mockRunLogs
from armi.utils.directoryChangers import TemporaryDirectoryChanger
class TestCompareDB3(unittest.TestCase):
"""Tests for the compareDB3 module."""
    def setUp(self):
        # Run each test in a fresh temporary directory so output/DB files are isolated.
        self.td = TemporaryDirectoryChanger()
        self.td.__enter__()
    def tearDown(self):
        # Leave (and clean up) the temporary directory created in setUp.
        self.td.__exit__(None, None, None)
def test_outputWriter(self):
fileName = "test_outputWriter.txt"
with OutputWriter(fileName) as out:
out.writeln("Rubber Baby Buggy Bumpers")
txt = open(fileName, "r").read()
self.assertIn("Rubber", txt)
def test_compareSets(self):
shorter = set({1, 2, 3})
longer = set({1, 2, 3, 4})
fileName = "fakeOutWriter.txt"
with OutputWriter(fileName) as out:
nDiffs = _compareSets(shorter, longer, out, name="number")
self.assertEqual(nDiffs, 1)
nDiffs = _compareSets(longer, shorter, out, name="number")
self.assertEqual(nDiffs, 1)
def test_diffResultsBasic(self):
# init an instance of the class
dr = DiffResults(0.01)
self.assertEqual(len(dr._columns), 0)
self.assertEqual(len(dr._structureDiffs), 0)
self.assertEqual(len(dr.diffs), 0)
# simple test of addDiff
dr.addDiff("thing", "what", 123.4, 122.2345, 555)
self.assertEqual(len(dr._columns), 0)
self.assertEqual(len(dr._structureDiffs), 0)
self.assertEqual(len(dr.diffs), 3)
self.assertEqual(dr.diffs["thing/what mean(abs(diff))"][0], 123.4)
self.assertEqual(dr.diffs["thing/what mean(diff)"][0], 122.2345)
self.assertEqual(dr.diffs["thing/what max(abs(diff))"][0], 555)
# simple test of addTimeStep
dr.addTimeStep("timeStep")
self.assertEqual(dr._structureDiffs[0], 0)
self.assertEqual(dr._columns[0], "timeStep")
# simple test of addStructureDiffs
dr.addStructureDiffs(7)
self.assertEqual(len(dr._structureDiffs), 1)
self.assertEqual(dr._structureDiffs[0], 7)
# simple test of _getDefault
self.assertEqual(len(dr._getDefault()), 0)
# simple test of nDiffs
self.assertEqual(dr.nDiffs(), 10)
    def test_compareDatabaseDuplicate(self):
        """End-to-end test of compareDatabases() on a photocopy database."""
        # build two super-simple H5 files for testing
        o, r = test_reactors.loadTestReactor(
            TEST_ROOT,
            customSettings={"reloadDBName": "reloadingDB.h5"},
            inputFileName="smallestTestReactor/armiRunSmallest.yaml",
        )
        # create two DBs, identical but for file names
        dbs = []
        for i in range(2):
            # create the tests DB
            dbi = DatabaseInterface(r, o.cs)
            dbi.initDB(fName=self._testMethodName + str(i) + ".h5")
            db = dbi.database
            # validate the file exists, and force it to be readable again
            b = h5py.File(db._fullPath, "r")
            self.assertEqual(list(b.keys()), ["inputs"])
            self.assertEqual(sorted(b["inputs"].keys()), ["blueprints", "settings"])
            b.close()
            # append to lists
            dbs.append(db)
        # end-to-end validation that comparing a photocopy database works:
        # identical databases must produce zero diffs
        diffs = compareDatabases(dbs[0]._fullPath, dbs[1]._fullPath)
        self.assertEqual(len(diffs.diffs), 0)
        self.assertEqual(diffs.nDiffs(), 0)
    def test_compareDatabaseSim(self):
        """End-to-end test of compareDatabases() on very similar databases."""
        # build two super-simple H5 files for testing
        o, r = test_reactors.loadTestReactor(
            TEST_ROOT,
            customSettings={"reloadDBName": "reloadingDB.h5"},
            inputFileName="smallestTestReactor/armiRunSmallest.yaml",
        )
        # create two DBs, identical but for file names and cycle lengths
        dbs = []
        for lenCycle in range(1, 3):
            # build some test data
            days = 100
            cs = o.cs.modified(
                newSettings={
                    "cycles": [{"step days": [days, days], "power fractions": [1, 0.5]}],
                    "reloadDBName": "something_fake.h5",
                }
            )
            # create the tests DB
            dbi = DatabaseInterface(r, cs)
            dbi.initDB(fName=self._testMethodName + str(lenCycle) + ".h5")
            db = dbi.database
            # populate the db with something
            r.p.cycle = 0
            for node in range(2):
                r.p.timeNode = node
                # cycleLength differs between the two DBs; it drives the diffs below
                r.p.cycleLength = days * lenCycle
                db.writeToDB(r)
            # validate the file exists, and force it to be readable again
            b = h5py.File(db._fullPath, "r")
            dbKeys = sorted(b.keys())
            self.assertEqual(len(dbKeys), 3)
            self.assertIn("inputs", dbKeys)
            self.assertIn("c00n00", dbKeys)
            self.assertEqual(sorted(b["inputs"].keys()), ["blueprints", "settings"])
            b.close()
            # append to lists
            dbs.append(db)
        # end-to-end validation that comparing a photocopy database works
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            diffs = compareDatabases(
                dbs[0]._fullPath,
                dbs[1]._fullPath,
                timestepCompare=[(0, 0), (0, 1)],
            )
        # spot check the diffs
        self.assertGreater(len(diffs.diffs), 200)
        self.assertLess(len(diffs.diffs), 800)
        self.assertIn("/c00n00", diffs._columns)
        self.assertIn("/c00n01", diffs._columns)
        self.assertIn(0, diffs._structureDiffs)
        self.assertEqual(sum(diffs._structureDiffs), 0)
        self.assertEqual(diffs.tolerance, 0)
        self.assertIn("SpentFuelPool/flags max(abs(diff))", diffs.diffs)
        self.assertIn("Circle/volume mean(diff)", diffs.diffs)
        self.assertIn("Reactor/flags mean(diff)", diffs.diffs)
        self.assertEqual(diffs.nDiffs(), 3)
def test_diffSpecialData(self):
dr = DiffResults(0.01)
fileName = "test_diffSpecialData.txt"
with OutputWriter(fileName) as out:
# spin up one example H5 Dataset
f1 = h5py.File("test_diffSpecialData1.hdf5", "w")
a1 = np.arange(100, dtype=""'],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
).returncode
if code == 128:
# GitHub Actions blocks certain kinds of Git commands
return
# create a tag off our new commit
code = subprocess.run(
["git", "tag", "thanks", "-m", '"you_rock"'],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
).returncode
self.assertEqual(code, 0)
# test that we recover the correct commit hash
localHash = Database.grabLocalCommitHash()
self.assertEqual(localHash, "thanks")
# delete the .git directory
code = subprocess.run(["git", "clean", "-f"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL).returncode
self.assertEqual(code, 0)
code = subprocess.run(
["git", "clean", "-f", "-d"],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
).returncode
self.assertEqual(code, 0)
def test_fileName(self):
# test the file name getter
self.assertEqual(str(self.db.fileName), "test_fileName.h5")
# test the file name setter
self.db.close()
self.db.fileName = "thing.h5"
self.assertEqual(str(self.db.fileName), "thing.h5")
    def test_readInputsFromDB(self):
        """Test that we can read inputs from the database.

        .. test:: Save and retrieve settings from the database.
            :id: T_ARMI_DB_CS
            :tests: R_ARMI_DB_CS

        .. test:: Save and retrieve blueprints from the database.
            :id: T_ARMI_DB_BP
            :tests: R_ARMI_DB_BP
        """
        # two input documents come back: (settings text, blueprints text)
        inputs = self.db.readInputsFromDB()
        self.assertEqual(len(inputs), 2)
        # settings
        self.assertGreater(len(inputs[0]), 100)
        self.assertIn("settings:", inputs[0])
        # blueprints
        self.assertGreater(len(inputs[1]), 2400)
        self.assertIn("blocks:", inputs[1])
def test_deleting(self):
self.assertTrue(isinstance(self.db, Database))
del self.db
self.assertFalse(hasattr(self, "db"))
self.db = self.dbi.database
def test_open(self):
self.assertTrue(self.db.isOpen())
with self.assertRaises(ValueError):
self.db.open()
def test_loadCS(self):
cs = self.db.loadCS()
self.assertEqual(cs["nTasks"], 1)
self.assertEqual(cs["nCycles"], 2)
def test_loadBlueprints(self):
bp = self.db.loadBlueprints()
self.assertIsNone(bp.nuclideFlags)
self.assertEqual(len(bp.assemblies), 0)
    def test_prepRestartRun(self):
        """
        This test is based on the armiRun.yaml case that is loaded during the `setUp` above. In that cs, `reloadDBName`
        is set to 'reloadingDB.h5', `startCycle` = 1, and `startNode` = 2. The nonexistent 'reloadingDB.h5' must first
        be created here for this test.

        .. test:: Runs can be restarted from a snapshot.
            :id: T_ARMI_SNAPSHOT_RESTART
            :tests: R_ARMI_SNAPSHOT_RESTART
        """
        # first successfully call to prepRestartRun
        o, r = loadTestReactor(
            inputFilePath=TESTING_ROOT,
            inputFileName="reactors/thirdSmallHexReactor/thirdSmallHexReactor.yaml",
            customSettings={"reloadDBName": "reloadingDB.h5"},
        )
        cs = o.cs
        ratedPower = cs["power"]
        startCycle = cs["startCycle"]
        startNode = cs["startNode"]
        cyclesSetting = [
            {"step days": [1000, 1000], "power fractions": [1, 1]},
            {"step days": [1000, 1000], "power fractions": [1, 1]},
            {"step days": [1000, 1000], "power fractions": [1, 1]},
        ]
        # mark the node just before the restart point with a distinctive power fraction
        # so we can verify below that exactly that state was reloaded
        cycleP, nodeP = getPreviousTimeNode(startCycle, startNode, cs)
        cyclesSetting[cycleP]["power fractions"][nodeP] = 0.5
        numCycles = 2
        numNodes = 2
        cs = cs.modified(
            newSettings={
                "nCycles": numCycles,
                "cycles": cyclesSetting,
                "reloadDBName": "something_fake.h5",
            }
        )
        # create a db based on the cs
        dbi = DatabaseInterface(r, cs)
        dbi.initDB(fName="reloadingDB.h5")
        db = dbi.database
        # populate the db with some things
        for cycle, node in ((cycle, node) for cycle in range(numCycles) for node in range(numNodes)):
            r.p.cycle = cycle
            r.p.timeNode = node
            r.p.cycleLength = sum(cyclesSetting[cycle]["step days"])
            r.core.p.power = ratedPower * cyclesSetting[cycle]["power fractions"][node]
            db.writeToDB(r)
        self.assertTrue(db.isOpen())
        db.close()
        self.assertFalse(db.isOpen())
        self.dbi.prepRestartRun()
        # prove that the reloaded reactor has the correct power
        self.assertEqual(self.o.r.p.cycle, cycleP)
        self.assertEqual(self.o.r.p.timeNode, nodeP)
        self.assertEqual(cyclesSetting[cycleP]["power fractions"][nodeP], 0.5)
        self.assertEqual(
            self.o.r.core.p.power,
            ratedPower * cyclesSetting[cycleP]["power fractions"][nodeP],
        )
        # now make the cycle histories clash and confirm that an error is thrown
        cs = cs.modified(
            newSettings={
                "cycles": [
                    {"step days": [666, 666], "power fractions": [1, 1]},
                    {"step days": [666, 666], "power fractions": [1, 1]},
                    {"step days": [666, 666], "power fractions": [1, 1]},
                ],
            }
        )
        # create a db based on the cs
        dbi = DatabaseInterface(r, cs)
        dbi.initDB(fName="reloadingDB.h5")
        db = dbi.database
        # populate the db with something
        for cycle, node in ((cycle, node) for cycle in range(numCycles) for node in range(numNodes)):
            r.p.cycle = cycle
            r.p.timeNode = node
            r.p.cycleLength = 2000
            db.writeToDB(r)
        self.assertTrue(db.isOpen())
        db.close()
        self.assertFalse(db.isOpen())
        # the DB's cycle history now disagrees with the cs, so the restart must fail
        with self.assertRaises(ValueError):
            self.dbi.prepRestartRun()
def test_computeParents(self):
# The below arrays represent a tree structure like this:
# 71 -----------------------.
# | \
# 12--.-----.------. 72
# / | \ \ \
# 22 30 4---. 6 18-.
# / | | | \ \ / | \
# 8 17 2 32 52 62 1 9 10
#
# This should cover a handful of corner cases
numChildren = [2, 5, 2, 0, 0, 1, 0, 3, 0, 0, 0, 0, 3, 0, 0, 0, 0]
serialNums = [71, 12, 22, 8, 17, 30, 2, 4, 32, 53, 62, 6, 18, 1, 9, 10, 72]
expected_1 = [None, 71, 12, 22, 22, 12, 30, 12, 4, 4, 4, 12, 12, 18, 18, 18, 71]
expected_2 = [
None,
None,
71,
12,
12,
71,
12,
71,
12,
12,
12,
71,
71,
12,
12,
12,
None,
]
expected_3 = [
None,
None,
None,
71,
71,
None,
71,
None,
71,
71,
71,
None,
None,
71,
71,
71,
None,
]
self.assertEqual(database.Layout.computeAncestors(serialNums, numChildren), expected_1)
self.assertEqual(database.Layout.computeAncestors(serialNums, numChildren, 2), expected_2)
self.assertEqual(database.Layout.computeAncestors(serialNums, numChildren, 3), expected_3)
class TestWriteReadDatabase(unittest.TestCase):
    """Round-trip tests that we can write/read data to and from a Database."""

    # Blueprints overlay: adds an SFP and an ex-core vessel storage (EVST) grid
    # to the one-block test reactor, so ex-core structures get round-tripped too.
    SMALL_YAML = """!include refOneBlockReactor.yaml
systems:
    core:
        grid name: core
        origin:
            x: 0.0
            y: 0.0
            z: 0.0
    sfp:
        type: sfp
        grid name: sfp
        origin:
            x: 1000.0
            y: 1000.0
            z: 1000.0
    evst:
        type: excore
        grid name: evst
        origin:
            x: 2000.0
            y: 2000.0
            z: 2000.0
grids:
    core:
        geom: hex_corners_up
        lattice map: |
            IC
        symmetry: full
    evst:
        lattice pitch:
            x: 32.0
            y: 32.0
        geom: hex
        symmetry: full
"""

    def setUp(self):
        self.td = TemporaryDirectoryChanger()
        self.td.__enter__()
        # copy these test files over, so we can edit them
        thisDir = self.td.destination
        yamls = glob(os.path.join(TEST_ROOT, "smallestTestReactor", "*.yaml"))
        for yam in yamls:
            safeCopy(os.path.join(TEST_ROOT, "smallestTestReactor", yam), thisDir)
        # Add an EVST to this reactor
        with open("refSmallestReactor.yaml", "w") as f:
            f.write(self.SMALL_YAML)
        self.o, self.r = loadTestReactor(thisDir, inputFileName="armiRunSmallest.yaml")
        self.dbi = DatabaseInterface(self.r, self.o.cs)
        self.dbi.initDB(fName=f"{self._testMethodName}.h5")
        self.db: Database = self.dbi.database

    def tearDown(self):
        # close the DB before the temporary directory is torn down
        self.db.close()
        self.td.__exit__(None, None, None)

    def test_readWriteRoundTrip(self):
        """Test DB some round tripping, writing some data to a DB, then reading from it.

        In particular, we test some parameters on the reactor, core, and blocks. And we move an assembly from the core
        to an EVST between timenodes, and test that worked.
        """
        # put some data in the DB, for timenode 0
        self.r.p.cycle = 0
        self.r.p.timeNode = 0
        self.r.core.p.keff = 0.99
        b = self.r.core.getFirstBlock()
        self.assertIsInstance(b[0].spatialLocator, MultiIndexLocation)
        self.assertIsInstance(b[-1].spatialLocator, CoordinateLocation)
        b.p.power = 12345.6
        self.db.writeToDB(self.r)
        # put some data in the DB, for timenode 1
        self.r.p.timeNode = 1
        self.r.core.p.keff = 1.01
        # move the assembly from the core to the EVST
        a = self.r.core.getFirstAssembly()
        loc = self.r.excore.evst.spatialGrid[(0, 0, 0)]
        self.r.core.remove(a)
        self.r.excore.evst.add(a, loc)
        self.db.writeToDB(self.r)
        # close the DB
        self.db.close()
        # open the DB and verify, the first timenode
        with Database(self.db.fileName) as db:
            r0 = db.load(0, 0, allowMissing=True)
            self.assertEqual(r0.p.cycle, 0)
            self.assertEqual(r0.p.timeNode, 0)
            self.assertEqual(r0.core.p.keff, 0.99)
            # check the types of the data model objects
            self.assertTrue(isinstance(r0, Reactor))
            self.assertTrue(isinstance(r0.core, Core))
            self.assertTrue(isinstance(r0.excore, ExcoreCollection))
            self.assertTrue(isinstance(r0.excore.evst, ExcoreStructure))
            self.assertTrue(isinstance(r0.excore.sfp, SpentFuelPool))
            # Prove our one special block is in the core
            self.assertEqual(len(r0.core.getChildren()), 1)
            b0 = r0.core.getFirstBlock()
            self.assertEqual(b0.p.power, 12345.6)
            # spatial locators (including multi-index) must survive the round trip
            self.assertIsInstance(b0[0].spatialLocator, MultiIndexLocation)
            np.testing.assert_array_equal(b[0].spatialLocator.indices, b0[0].spatialLocator.indices)
            self.assertIsInstance(b0[-1].spatialLocator, CoordinateLocation)
            np.testing.assert_array_equal(b[-1].spatialLocator.indices, b0[-1].spatialLocator.indices)
            # the ex-core structures should be empty
            self.assertEqual(len(r0.excore["sfp"].getChildren()), 0)
            self.assertEqual(len(r0.excore["evst"].getChildren()), 0)
        # open the DB and verify, the second timenode
        with Database(self.db.fileName, "r") as db:
            r1 = db.load(0, 1, allowMissing=True)
            self.assertEqual(r1.p.cycle, 0)
            self.assertEqual(r1.p.timeNode, 1)
            self.assertEqual(r1.core.p.keff, 1.01)
            # check the types of the data model objects
            self.assertTrue(isinstance(r1, Reactor))
            self.assertTrue(isinstance(r1.core, Core))
            self.assertTrue(isinstance(r1.excore, ExcoreCollection))
            self.assertTrue(isinstance(r1.excore.evst, ExcoreStructure))
            self.assertTrue(isinstance(r1.excore.sfp, SpentFuelPool))
            # Prove our one special block is NOT in the core, or the SFP
            self.assertEqual(len(r1.core.getChildren()), 0)
            self.assertEqual(len(r1.excore["sfp"].getChildren()), 0)
            self.assertEqual(len(r1.excore.sfp.getChildren()), 0)
            # Prove our one special block is in the EVST
            evst = r1.excore["evst"]
            self.assertEqual(len(evst.getChildren()), 1)
            b1 = evst.getChildren()[0].getChildren()[0]
            self.assertEqual(b1.p.power, 12345.6)

    def test_badData(self):
        """A corrupted dataset in the DB should raise a ValueError on load."""
        # create a DB to be modified
        self.db.writeToDB(self.r)
        self.db.close()
        # modify the HDF5 file to corrupt a dataset
        with h5py.File(self.db.fileName, "r+") as hf:
            circleGroup = hf["c00n00"]["Circle"]
            circleMass = np.array(circleGroup["massHmBOL"][()])
            # drop the final entry so the dataset no longer matches the layout
            badData = circleMass[:-1]
            del circleGroup["massHmBOL"]
            circleGroup.create_dataset("massHmBOL", data=badData)
        with self.assertRaises(ValueError):
            with Database(self.db.fileName, "r") as db:
                _r = db.load(0, 0, allowMissing=True)
class TestSimplestDatabaseItems(unittest.TestCase):
    """The tests here are simple, direct tests of Database, that don't need a DatabaseInterface or Reactor."""

    def setUp(self):
        self.td = TemporaryDirectoryChanger()
        self.td.__enter__()

    def tearDown(self):
        self.td.__exit__(None, None, None)

    def test_open(self):
        """A Database with a bogus permission string cannot be opened."""
        db = Database("test_open.h5", "w")
        # constructing a Database does not open the underlying file
        self.assertFalse(db.isOpen())
        db._permission = "mock"
        with self.assertRaises(ValueError):
            db.open()
class TestStaticDatabaseItems(unittest.TestCase):
    """Tests of static Database helpers."""

    def test_applyComponentNumberDensitiesMigration(self):
        """The migration should pack nuclide names/densities onto component parameters."""
        block = loadTestBlock()
        components = [block[0], block[1]]
        densityMaps = [
            {"U235": 1.23e-3, "U238": 2.34e-3},
            {"PU239": 5.6e-4, "PU240": 7.8e-4},
        ]
        Database._applyComponentNumberDensitiesMigration(components, densityMaps)
        for component, densities in zip(components, densityMaps):
            expectedNames = np.array(list(densities.keys()), dtype="S6")
            expectedDens = np.array(list(densities.values()), dtype=np.float64)
            # verify nuclide names and dtype
            self.assertTrue(np.array_equal(component.p["nuclides"], expectedNames))
            self.assertEqual(component.p["nuclides"].dtype, np.dtype("S6"))
            # verify number densities and dtype
            self.assertTrue(np.allclose(component.p["numberDensities"], expectedDens))
            self.assertEqual(component.p["numberDensities"].dtype, np.float64)
================================================
FILE: armi/bookkeeping/db/tests/test_databaseInterface.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests of the Database Interface."""
import os
import types
import unittest
import h5py
import numpy as np
from numpy.testing import assert_allclose, assert_equal
from armi import __version__ as version
from armi import interfaces, runLog, settings
from armi.bookkeeping.db.database import Database
from armi.bookkeeping.db.databaseInterface import DatabaseInterface
from armi.cases import case
from armi.context import PROJECT_ROOT
from armi.physics.neutronics.settings import CONF_LOADING_FILE
from armi.reactor import blueprints, grids
from armi.reactor.blueprints import loadFromCs
from armi.reactor.flags import Flags
from armi.reactor.reactors import Reactor
from armi.testing import TESTING_ROOT, loadTestReactor, reduceTestReactorRings
from armi.tests import TEST_ROOT
from armi.utils import directoryChangers
def getSimpleDBOperator(cs):
    """
    Return a very simple operator that covers most of the database interactions.

    Notes
    -----
    This reactor has only 1 assembly with 1 type of block.
    It's used to make the db unit tests run very quickly.
    """
    # Build the settings overrides as one literal instead of key-by-key assignment.
    overrides = {
        CONF_LOADING_FILE: "smallestTestReactor/refSmallestReactor.yaml",
        "verbosity": "important",
        "db": True,
        "runType": "Standard",
        "nCycles": 1,
    }
    cs = cs.modified(newSettings=overrides)
    dbCase = case.Case(cs)
    runLog.setVerbosity("info")
    operator = dbCase.initializeOperator()
    # Strip the operator down to just the interfaces the DB tests exercise.
    wanted = {"database", "main"}
    operator.interfaces = [iface for iface in operator.interfaces if iface.name in wanted]
    return operator, cs
class MockInterface(interfaces.Interface):
    """Minimal interface that forwards every-node interactions to an injected callback."""

    name = "mockInterface"

    def __init__(self, r, cs, action=None):
        """Store the callable to invoke at each time node (may be None if unused)."""
        super().__init__(r, cs)
        self.action = action

    def interactEveryNode(self, cycle, node):
        """Delegate the time-node hook straight to the injected callable."""
        self.action(cycle, node)
class TestDatabaseInterfaceBOL(unittest.TestCase):
    """Test the DatabaseInterface class at the BOL."""

    def test_interactBOL(self):
        """This test is in its own class, because of temporary directory issues."""
        with directoryChangers.TemporaryDirectoryChanger():
            # Build a minimal reactor and attach a fresh DatabaseInterface to it.
            self.o, self.r = loadTestReactor(TEST_ROOT, inputFileName="smallestTestReactor/armiRunSmallest.yaml")
            self.dbi = DatabaseInterface(self.r, self.o.cs)
            # Name the DB file after the test so runs don't collide.
            dbName = f"{self._testMethodName}.h5"
            self.dbi.initDB(fName=dbName)
            self.db: Database = self.dbi.database
            self.stateRetainer = self.r.retainState().__enter__()

            # interactBOL requires a live DB handle; verify it exists before and
            # explicitly drop it after closing.
            self.assertIsNotNone(self.dbi._db)
            self.dbi.interactBOL()
            self.dbi.closeDB()
            self.dbi._db = None
            self.assertIsNone(self.dbi._db)

            # Clean up the DB file if it was left behind in this directory.
            if os.path.exists(dbName):
                os.remove(dbName)
class TestDatabaseInterface(unittest.TestCase):
    """Tests for the DatabaseInterface class."""

    def setUp(self):
        # Each test runs in its own temp dir with a DB file named after the test.
        self.td = directoryChangers.TemporaryDirectoryChanger()
        self.td.__enter__()
        self.o, self.r = loadTestReactor(TEST_ROOT, inputFileName="smallestTestReactor/armiRunSmallest.yaml")
        self.dbi = DatabaseInterface(self.r, self.o.cs)
        self.dbi.initDB(fName=self._testMethodName + ".h5")
        self.db: Database = self.dbi.database
        self.stateRetainer = self.r.retainState().__enter__()

    def tearDown(self):
        self.db.close()
        self.stateRetainer.__exit__()
        self.td.__exit__(None, None, None)

        # test_interactBOL leaves behind some dirt (accessible after db close) that the
        # TempDirChanger is not catching
        bolDirt = [
            os.path.join(PROJECT_ROOT, "armiRun.h5"),
            os.path.join(PROJECT_ROOT, "armiRunSmallest.h5"),
        ]
        for dirt in bolDirt:
            if os.path.exists(dirt):
                os.remove(dirt)

    def test_distributable(self):
        """The DB interface's distributability should be unaffected by state distribution."""
        self.assertEqual(self.dbi.distributable(), 4)
        self.dbi.interactDistributeState()
        self.assertEqual(self.dbi.distributable(), 4)

    def test_demonstrateWritingInteractions(self):
        """Test what nodes are written to the database during the interaction calls."""
        self.o.cs["burnSteps"] = 2  # make test insensitive to burn steps
        r = self.r

        # BOC/BOL doesn't write anything
        r.p.cycle, r.p.timeNode = 0, 0
        self.assertFalse(self.dbi.database.hasTimeStep(0, 0))
        self.dbi.interactBOL()
        self.assertFalse(self.dbi.database.hasTimeStep(0, 0))
        self.dbi.interactBOC(0)
        self.assertFalse(self.dbi.database.hasTimeStep(0, 0))

        # but the first time node does
        self.dbi.interactEveryNode(0, 0)
        self.assertTrue(self.dbi.database.hasTimeStep(0, 0))

        # EOC 0 shouldn't write, its written by last time node
        r.p.cycle, r.p.timeNode = 0, self.o.cs["burnSteps"]
        self.assertFalse(self.dbi.database.hasTimeStep(r.p.cycle, r.p.timeNode))
        self.dbi.interactEOC(r.p.cycle)
        self.assertFalse(self.dbi.database.hasTimeStep(r.p.cycle, r.p.timeNode))

        # The last node of the step should write though
        self.assertFalse(self.dbi.database.hasTimeStep(r.p.cycle, r.p.timeNode))
        self.dbi.interactEveryNode(r.p.cycle, r.p.timeNode)
        self.assertTrue(self.dbi.database.hasTimeStep(r.p.cycle, r.p.timeNode))

        # EOL should also write, but lets write last time node first
        r.p.cycle, r.p.timeNode = self.o.cs["nCycles"] - 1, self.o.cs["burnSteps"]
        self.assertFalse(self.dbi.database.hasTimeStep(r.p.cycle, r.p.timeNode))
        self.dbi.interactEveryNode(r.p.cycle, r.p.timeNode)
        self.assertTrue(self.dbi.database.hasTimeStep(r.p.cycle, r.p.timeNode))

        # now write EOL
        self.assertFalse(self.dbi.database.hasTimeStep(r.p.cycle, r.p.timeNode, "EOL"))
        self.dbi.interactEOL()  # this also saves and closes db

        # reopen db to show EOL is written
        with Database(self._testMethodName + ".h5", "r") as db:
            self.assertTrue(db.hasTimeStep(r.p.cycle, r.p.timeNode, "EOL"))
            # and confirm that last time node is still there/separate
            self.assertTrue(db.hasTimeStep(r.p.cycle, r.p.timeNode))

    def test_interactEveryNodeReturnTightCoupling(self):
        """Test that the DB is NOT written to if cs["tightCoupling"] = True."""
        self.o.cs["tightCoupling"] = True
        self.dbi.interactEveryNode(0, 0)
        self.assertFalse(self.dbi.database.hasTimeStep(0, 0))

    def test_timeNodeLoop_tightCoupling(self):
        """Test that database is written out after the coupling loop has completed."""
        # clear out interfaces (no need to run physics) but leave database
        self.o.interfaces = [self.dbi]
        self.o.cs["tightCoupling"] = True
        self.assertFalse(self.dbi._db.hasTimeStep(0, 0))
        self.o._timeNodeLoop(0, 0)
        self.assertTrue(self.dbi._db.hasTimeStep(0, 0))

    def test_syncDbAfterWrite(self):
        """
        Test to ensure that the fast-path database is copied to working
        directory at every time node when ``syncDbAfterWrite`` is ``True``.
        """
        r = self.r
        self.o.cs["syncDbAfterWrite"] = True
        self.o.cs["burnSteps"] = 2  # make test insensitive to burn steps

        # BOL alone should not produce a working-directory copy yet.
        self.dbi.interactBOL()
        self.assertFalse(os.path.exists(self.dbi.database.fileName))

        # Go through a few time nodes to ensure appending is working
        for timeNode in range(self.o.cs["burnSteps"]):
            r.p.cycle = 0
            r.p.timeNode = timeNode
            self.dbi.interactEveryNode(r.p.cycle, r.p.timeNode)

            # The file should have been copied to working directory
            self.assertTrue(os.path.exists(self.dbi.database.fileName))

            # The copied file should have the newest time node
            with Database(self.dbi.database.fileName, "r") as db:
                for tn in range(timeNode + 1):
                    self.assertTrue(db.hasTimeStep(r.p.cycle, tn))

            # The in-memory database should have been reloaded properly
            for tn in range(timeNode + 1):
                self.assertTrue(self.dbi.database.hasTimeStep(r.p.cycle, tn))

        # Make sure EOL runs smoothly
        self.dbi.interactEOL()
        self.assertTrue(os.path.exists(self.dbi.database.fileName))

    def test_noSyncDbAfterWrite(self):
        """
        Test to ensure that the fast-path database is NOT copied to working
        directory at every time node when ``syncDbAfterWrite`` is ``False``.
        """
        self.o.cs["syncDbAfterWrite"] = False
        self.dbi.interactBOL()
        self.assertFalse(os.path.exists(self.dbi.database.fileName))
        self.dbi.interactEveryNode(0, 0)
        # No working-directory copy until EOL in this mode.
        self.assertFalse(os.path.exists(self.dbi.database.fileName))
        self.dbi.interactEOL()
        self.assertTrue(os.path.exists(self.dbi.database.fileName))

    def test_writeDBFromDBLoadSameDir(self):
        """
        Test to ensure that a reactor loaded from a database can be written to a
        working database file (one that has case settings and blueprints if applicable).
        """
        # Write this reactor to a database file.
        dbi = DatabaseInterface(self.r, self.o.cs)
        dbi.initDB(fName="testDB1.h5")
        db = dbi.database
        db.writeToDB(self.r)
        db.close()

        # Now load the db again
        with Database("testDB1.h5", "r") as db:
            cs2 = db.loadCS()
            r2 = db.load(0, 0, cs=cs2)
        self.assertIsInstance(cs2, settings.Settings)
        self.assertIsInstance(r2, Reactor)

        # Now write this db to this folder
        dbi = DatabaseInterface(r2, cs2)
        dbi.initDB(fName="testDB2.h5")
        db = dbi.database
        db.writeToDB(r2)
        db.close()

        # Now load this db. It should load
        with Database("testDB2.h5", "r") as db:
            cs3 = db.loadCS()
            # Explicitly reconstruct blueprints from the settings this time around.
            bp3 = loadFromCs(cs3)
            self.assertIsInstance(bp3, blueprints.Blueprints)
            r3 = db.load(0, 0, cs=cs3, bp=bp3)
        self.assertIsInstance(cs3, settings.Settings)
        self.assertIsInstance(r3, Reactor)

    def test_writeDBFromDBLoadDifDir(self):
        """
        Test to ensure that a reactor loaded from a database can be written to a
        working database file (one that has case settings and blueprints if applicable).

        The directory is changed between writing and loading.
        """
        # Write this reactor to a database file.
        dbi = DatabaseInterface(self.r, self.o.cs)
        dbi.initDB(fName="testDB1.h5")
        db = dbi.database
        db.writeToDB(self.r)
        db.close()

        # Let's move to a different folder
        os.makedirs("sub", exist_ok=True)
        os.chdir("sub")

        # Now load the db again
        with Database(os.path.join(os.pardir, "testDB1.h5"), "r") as db:
            cs2 = db.loadCS()
            r2 = db.load(0, 0, cs=cs2)
        self.assertIsInstance(cs2, settings.Settings)
        self.assertIsInstance(r2, Reactor)

        # Now write this db to this folder
        dbi = DatabaseInterface(r2, cs2)
        dbi.initDB(fName="testDB2.h5")
        db = dbi.database
        db.writeToDB(r2)
        db.close()

        # Now load this db. It should load
        with Database("testDB2.h5", "r") as db:
            cs3 = db.loadCS()
            r3 = db.load(0, 0, cs=cs3)
        self.assertIsInstance(cs3, settings.Settings)
        self.assertIsInstance(r3, Reactor)
class TestDatabaseWriter(unittest.TestCase):
    """Tests that run a small operator end-to-end and inspect the resulting DB file."""

    def setUp(self):
        self.td = directoryChangers.TemporaryDirectoryChanger()
        self.td.__enter__()
        cs = settings.Settings(os.path.join(TEST_ROOT, "armiRun.yaml"))
        # Start with zero power; powerDensity lets the run compute power itself.
        cs = cs.modified(newSettings={"power": 0.0, "powerDensity": 9e4})
        self.o, cs = getSimpleDBOperator(cs)
        self.r = self.o.r
        self.stateRetainer = self.r.retainState().__enter__()

    def tearDown(self):
        self.td.__exit__(None, None, None)
        self.stateRetainer.__exit__()

    def test_writeSystemAttributes(self):
        """Test the writeSystemAttributes method.

        .. test:: Validate that we can directly write system attributes to a database file.
            :id: T_ARMI_DB_QA0
            :tests: R_ARMI_DB_QA
        """
        with h5py.File("test_writeSystemAttributes.h5", "w") as h5:
            Database.writeSystemAttributes(h5)

        with h5py.File("test_writeSystemAttributes.h5", "r") as h5:
            self.assertIn("user", h5.attrs)
            self.assertIn("python", h5.attrs)
            self.assertIn("armiLocation", h5.attrs)
            self.assertIn("startTime", h5.attrs)
            self.assertIn("machines", h5.attrs)
            self.assertIn("platform", h5.attrs)
            self.assertIn("hostname", h5.attrs)
            self.assertIn("platformRelease", h5.attrs)
            self.assertIn("platformVersion", h5.attrs)
            self.assertIn("platformArch", h5.attrs)

    def test_metaData_endSuccessfully(self):
        """Test databases have the correct metadata in them.

        .. test:: Validate that databases have system attributes written to them during the usual workflow.
            :id: T_ARMI_DB_QA1
            :tests: R_ARMI_DB_QA
        """
        # the power should start at zero
        self.assertEqual(self.r.core.p.power, 0)

        def goodMethod(cycle, node):
            pass

        self.o.interfaces.append(MockInterface(self.o.r, self.o.cs, goodMethod))
        with self.o:
            self.o.operate()

        self.assertEqual(0, self.r.p.cycle)
        self.assertEqual(2, self.r.p.timeNode)

        with h5py.File(self.o.cs.caseTitle + ".h5", "r") as h5:
            self.assertTrue(h5.attrs["successfulCompletion"])
            self.assertEqual(h5.attrs["version"], version)
            self.assertIn("caseTitle", h5.attrs)
            self.assertIn("settings", h5["inputs"])
            self.assertIn("blueprints", h5["inputs"])

            # validate system attributes
            self.assertIn("user", h5.attrs)
            self.assertIn("python", h5.attrs)
            self.assertIn("armiLocation", h5.attrs)
            self.assertIn("startTime", h5.attrs)
            self.assertIn("machines", h5.attrs)
            self.assertIn("platform", h5.attrs)
            self.assertIn("hostname", h5.attrs)
            self.assertIn("platformRelease", h5.attrs)
            self.assertIn("platformVersion", h5.attrs)
            self.assertIn("platformArch", h5.attrs)

        # after operating, the power will be greater than zero
        self.assertGreater(self.r.core.p.power, 1e9)

    def test_metaDataEndFail(self):
        """A run that dies mid-operation should record successfulCompletion=False."""

        def failMethod(cycle, node):
            # Force a failure partway through the run (cycle 0, node 1).
            if cycle == 0 and node == 1:
                raise Exception("forcing failure")

        self.o.interfaces.append(MockInterface(self.o.r, self.o.cs, failMethod))
        with self.assertRaises(Exception):
            with self.o:
                self.o.operate()

        self.assertEqual(0, self.r.p.cycle)
        self.assertEqual(1, self.r.p.timeNode)

        with h5py.File(self.o.cs.caseTitle + ".h5", "r") as h5:
            self.assertFalse(h5.attrs["successfulCompletion"])
            self.assertEqual(h5.attrs["version"], version)
            self.assertIn("caseTitle", h5.attrs)

    def test_getHistory(self):
        """Verify that block parameter history can be read back mid-run."""
        expectedFluxes0 = {}
        expectedFluxes7 = {}

        def setFluxAwesome(cycle, node):
            # Stamp each block's flux with a value unique to (block, cycle, node).
            for bi, b in enumerate(self.r.core.iterBlocks()):
                b.p.flux = 1e6 * bi + 1e3 * cycle + node
                if bi == 0:
                    expectedFluxes0[cycle, node] = b.p.flux
                if bi == 7:
                    expectedFluxes7[cycle, node] = b.p.flux

        # use as attribute so it is accessible within getFluxAwesome
        self.called = False

        def getFluxAwesome(cycle, node):
            if cycle != 0 or node != 2:
                return

            b0 = next(self.r.core.iterBlocks())
            db = self.o.getInterface("database")._db

            # we are now in cycle 0, node 2 ... AFTER setFluxAwesome, but BEFORE writeToDB
            actualFluxes0 = db.getHistory(b0)["flux"]
            self.assertEqual(expectedFluxes0, actualFluxes0)
            self.called = True

        self.o.interfaces.insert(0, MockInterface(self.o.r, self.o.cs, setFluxAwesome))
        self.o.interfaces.insert(1, MockInterface(self.o.r, self.o.cs, getFluxAwesome))

        with self.o:
            self.o.operate()

        self.assertTrue(self.called)

    def test_getHistoryByLocation(self):
        """Smoke-test reading a single-parameter history during a run."""

        def setFluxAwesome(cycle, node):
            for bi, b in enumerate(self.r.core.iterBlocks()):
                b.p.flux = 1e6 * bi + 1e3 * cycle + node

        def getFluxAwesome(cycle, node):
            # NOTE(review): getSimpleDBOperator sets nCycles=1, so cycle only ever
            # equals 0 — this guard looks like it makes the body unreachable.
            # Confirm whether the intent was `cycle != 0`.
            if cycle != 1 or node != 2:
                return

            b = next(self.r.core.iterBlocks())
            db = self.o.getInterface("database").database

            # we are now in cycle 1, node 2 ... AFTER setFluxAwesome
            _fluxes = db.getHistory(b, params=["flux"])

        self.o.interfaces.append(MockInterface(self.o.r, self.o.cs, setFluxAwesome))
        self.o.interfaces.append(MockInterface(self.o.r, self.o.cs, getFluxAwesome))

        with self.o:
            self.o.operate()

        with h5py.File(self.o.cs.caseTitle + ".h5", "r") as h5:
            self.assertEqual(h5.attrs["version"], version)
class TestDatabaseReading(unittest.TestCase):
    """Run one small reactor to completion, then load its DB back in various ways."""

    @classmethod
    def setUpClass(cls):
        cls.td = directoryChangers.TemporaryDirectoryChanger()
        cls.td.__enter__()

        # The database writes the settings object to the DB rather than the original input file.
        # This allows settings to be changed in memory like this and survive for testing.
        newSettings = {"verbosity": "extra"}
        cls.nCycles = 2
        newSettings["nCycles"] = cls.nCycles
        newSettings["burnSteps"] = 2
        o, r = loadTestReactor(
            inputFilePath=TESTING_ROOT,
            inputFileName="reactors/thirdSmallHexReactor/thirdSmallHexReactor.yaml",
            customSettings=newSettings,
        )
        reduceTestReactorRings(r, o.cs, 3)

        # Keep only the database interface; no physics needed for these tests.
        o.interfaces = [i for i in o.interfaces if isinstance(i, (DatabaseInterface))]
        dbi = o.getInterface("database")
        dbi.enabled(True)
        dbi.initDB()  # Main Interface normally does this

        # update a few parameters
        def writeFlux(cycle, node):
            # Deterministic per-block flux so reads can be checked exactly later.
            for bi, b in enumerate(o.r.core.iterBlocks()):
                b.p.flux = 1e6 * bi + cycle * 100 + node
                b.p.mgFlux = np.repeat(b.p.flux / 33, 33)

        o.interfaces.insert(0, MockInterface(o.r, o.cs, writeFlux))
        with o:
            o.operate()

        cls.cs = o.cs
        cls.bp = o.r.blueprints
        cls.dbName = o.cs.caseTitle + ".h5"

        # needed for test_readWritten
        cls.r = o.r

    @classmethod
    def tearDownClass(cls):
        cls.td.__exit__(None, None, None)
        del cls.r
        cls.r = None

    def _fullCoreSizeChecker(self, r):
        # Expected sizes for the 3-ring reactor after growing to full core.
        self.assertEqual(r.core.numRings, 3)
        self.assertEqual(r.p.cycle, 0)
        self.assertEqual(len(r.core.assembliesByName), 19)
        self.assertEqual(len(r.core.circularRingList), 0)
        self.assertEqual(len(r.core.blocksByName), 57)

    def test_loadReadOnly(self):
        with Database(self.dbName, "r") as db:
            r = db.loadReadOnly(0, 0)

        # now show we can no longer edit those parameters
        with self.assertRaises(RuntimeError):
            r.core.p.keff = 0.99

        b = r.core.getFirstBlock()
        with self.assertRaises(RuntimeError):
            b.p.power = 432.1

        # Geometry should still be fully resolved in read-only mode.
        for c in b:
            self.assertGreater(c.getVolume(), 0)

    def test_growToFullCore(self):
        with Database(self.dbName, "r") as db:
            r = db.load(0, 0, allowMissing=True)

        # test partial core values
        self.assertEqual(r.core.numRings, 3)
        self.assertEqual(r.p.cycle, 0)
        self.assertEqual(len(r.core.assembliesByName), 7)
        self.assertEqual(len(r.core.circularRingList), 0)
        self.assertEqual(len(r.core.blocksByName), 21)

        r.core.growToFullCore(None)
        self._fullCoreSizeChecker(r)

    def test_growToFullCoreWithCS(self):
        with Database(self.dbName, "r") as db:
            r = db.load(0, 0, allowMissing=True)

        r.core.growToFullCore(self.cs)
        self._fullCoreSizeChecker(r)

    def test_growToFullCoreFromFactory(self):
        from armi.bookkeeping.db import databaseFactory

        db = databaseFactory(self.dbName, "r")
        with db:
            r = db.load(0, 0, allowMissing=True)

        r.core.growToFullCore(None)
        self._fullCoreSizeChecker(r)

    def test_growToFullCoreFromFactoryWithCS(self):
        from armi.bookkeeping.db import databaseFactory

        db = databaseFactory(self.dbName, "r")
        with db:
            r = db.load(0, 0, allowMissing=True)

        r.core.growToFullCore(self.cs)
        self._fullCoreSizeChecker(r)

    def test_readWritten(self):
        """Deep structural comparison between the in-memory reactor and a DB reload."""
        with Database(self.dbName, "r") as db:
            r2 = db.load(0, 0, self.cs)

        for a1, a2 in zip(self.r.core, r2.core):
            # assemblies assign a name based on assemNum at initialization
            self.assertEqual(a1.name, a2.name)
            assert_equal(a1.spatialLocator.indices, a2.spatialLocator.indices)
            self.assertEqual(a1.p.assemNum, a2.p.assemNum)
            self.assertEqual(a1.p.serialNum, a2.p.serialNum)

            for b1, b2 in zip(a1, a2):
                # blocks assign a name based on assemNum at initialization
                self.assertEqual(b1.name, b2.name)
                assert_equal(b1.spatialLocator.indices, b2.spatialLocator.indices)
                self.assertEqual(b1.p.serialNum, b2.p.serialNum)

                for c1, c2 in zip(sorted(b1), sorted(b2)):
                    self.assertEqual(c1.name, c2.name)
                    # Multi-index locators must be coerced to arrays for comparison.
                    if isinstance(c1.spatialLocator, grids.MultiIndexLocation):
                        assert_equal(
                            np.array(c1.spatialLocator.indices),
                            np.array(c2.spatialLocator.indices),
                        )
                    else:
                        assert_equal(c1.spatialLocator.indices, c2.spatialLocator.indices)
                    self.assertEqual(c1.p.serialNum, c2.p.serialNum)

                # volume is pretty difficult to get right. it relies upon linked dimensions
                v1 = b1.getVolume()
                v2 = b2.getVolume()
                assert_allclose(v1, v2)
                self.assertEqual(b1.p.serialNum, b2.p.serialNum)

        self.assertEqual(
            self.r.core.childrenByLocator[0, 0, 0].p.serialNum,
            r2.core.childrenByLocator[0, 0, 0].p.serialNum,
        )

    def test_readWithoutInputs(self):
        with Database(self.dbName, "r") as db:
            r2 = db.load(0, 0)

        for b1, b2 in zip(self.r.core.iterBlocks(), r2.core.iterBlocks()):
            for c1, c2 in zip(sorted(b1), sorted(b2)):
                self.assertEqual(c1.name, c2.name)

        # Node (0, 0) fluxes were stamped as 1e6 * blockIndex by writeFlux.
        for bi, b in enumerate(r2.core.iterBlocks()):
            assert_allclose(b.p.flux, 1e6 * bi)

    def test_variousTypesWork(self):
        with Database(self.dbName, "r") as db:
            r2 = db.load(1, 1)

        b1 = self.r.core.getFirstBlock(Flags.FUEL)
        b2 = r2.core.getFirstBlock(Flags.FUEL)

        self.assertIsInstance(b1.p.mgFlux, np.ndarray)
        self.assertIsInstance(b2.p.mgFlux, np.ndarray)
        # NOTE(review): this compares the Block objects themselves; it looks like
        # the intent was assert_allclose(b1.p.mgFlux, b2.p.mgFlux) — confirm.
        assert_allclose(b1, b2)

        c1 = b1.getComponent(Flags.FUEL)
        c2 = b2.getComponent(Flags.FUEL)
        for i, v1 in enumerate(c1.p.numberDensities):
            self.assertAlmostEqual(v1, c2.p.numberDensities[i])

    def test_timesteps(self):
        with Database(self.dbName, "r") as db:
            # build time steps in the DB file
            timesteps = []
            for cycle in range(self.nCycles):
                for bStep in range(3):
                    timesteps.append(f"/c0{cycle}n0{bStep}")
            timesteps.append("/c01n02EOL")

            # verify the timesteps are correct, including the EOL
            self.assertEqual(list(db.keys()), timesteps)
class TestBadName(unittest.TestCase):
    """Guard against reloading from a DB whose name collides with the case output."""

    def test_badDBName(self):
        """Reloading from a DB named like the run's own output must be rejected."""
        baseSettings = settings.Settings(os.path.join(TEST_ROOT, "armiRun.yaml"))
        # Deliberately odd capitalization of the case's own output file name.
        baseSettings = baseSettings.modified(newSettings={"reloadDBName": "aRmIRuN.h5"})

        dbi = DatabaseInterface(None, baseSettings)
        # an error should be raised when the database loaded from
        # has the same name as the run to avoid overwriting.
        with self.assertRaises(ValueError):
            dbi.initDB()
class TestStandardFollowOn(unittest.TestCase):
    """Tests related to doing restart runs (loading from DB with Standard operator)."""

    @classmethod
    def setUpClass(cls):
        cls.td = directoryChangers.TemporaryDirectoryChanger()
        cls.td.__enter__()

        # make DB to load from
        o = cls._getOperatorThatChangesVariables(settings.Settings(os.path.join(TEST_ROOT, "armiRun.yaml")))
        with o:
            o.operate()

        # Remember the end time so the restart run can be checked against it.
        cls.FIRST_END_TIME = o.r.p.time
        if cls.FIRST_END_TIME == 0:
            # Can't use self.assertEqual in the class method but we still need this information
            raise RuntimeError("Time should have advanced by the end of the run.")

        # Rename the output so it can serve as the restart's reload source.
        cls.LOAD_DB_PATH = "loadFrom.h5"
        os.rename("armiRun.h5", cls.LOAD_DB_PATH)

    @classmethod
    def tearDownClass(cls):
        cls.td.__exit__(None, None, None)

    @staticmethod
    def _getOperatorThatChangesVariables(cs):
        """
        Return an operator that advances time so that restart runs can be tested.

        Notes
        -----
        Ensures that parameters are consistent between Standard runs and restart runs.
        """
        o, cs = getSimpleDBOperator(cs)

        mock = MockInterface(o.r, o.cs, None)

        def interactEveryNode(self, cycle, node):
            # Could use just += 1 but this will show more errors since it is less
            # susceptible to cancellation of errors off by one.
            self.r.p.time += self.r.p.timeNode + 1

        # Magic to change the method only on this instance of the class.
        mock.interactEveryNode = types.MethodType(interactEveryNode, mock)

        # insert 1 before the database interface so that changes are written to db.
        o.interfaces.insert(1, mock)
        return o

    def test_standardRestart(self):
        o = self._getRestartOperator()
        # the interact BOL has historically failed due to trying to write inputs
        # which are already in the DB from the _mergeStandardRunDB call
        with o:
            o.operate()
            self.assertEqual(
                self.FIRST_END_TIME,
                o.r.p.time,
                "End time should have been the same for the restart run.\n"
                "First end time: {},\nSecond End time: {}".format(self.FIRST_END_TIME, o.r.p.time),
            )

    def _getRestartOperator(self):
        """Build an operator configured to restart from the saved DB at (0, 1)."""
        cs = settings.Settings(os.path.join(TEST_ROOT, "armiRun.yaml"))
        newSettings = {}
        newSettings["loadStyle"] = "fromDB"
        newSettings["reloadDBName"] = self.LOAD_DB_PATH
        newSettings["startCycle"] = 0
        newSettings["startNode"] = 1
        cs = cs.modified(newSettings=newSettings)

        o = self._getOperatorThatChangesVariables(cs)
        return o
================================================
FILE: armi/bookkeeping/db/tests/test_jaggedArray.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the JaggedArray class."""
import unittest
import h5py
import numpy as np
from armi.bookkeeping.db.jaggedArray import JaggedArray
from armi.utils.directoryChangers import TemporaryDirectoryChanger
class TestJaggedArray(unittest.TestCase):
    """Tests for the JaggedArray class."""

    def setUp(self):
        self.td = TemporaryDirectoryChanger()
        self.td.__enter__()

    def tearDown(self):
        self.td.__exit__(None, None, None)

    def test_roundTrip(self):
        """Basic test that we handle Nones correctly in database read/writes."""
        # Mixed scalars, None, empty list, list/tuple/array — the full jagged zoo.
        dataSet = [1, 2.0, None, [], [3, 4], (5, 6, 7), np.array([8, 9, 10, 11])]
        self._compareRoundTrip(dataSet, "test-numbers")

    def test_roundTripBool(self):
        """Basic test that we handle Nones correctly in database read/writes."""
        dataSet = [True, True, [False, True, False]]
        self._compareRoundTrip(dataSet, "test-bool")

    def test_flatten(self):
        """Test the recursive flattening static method."""
        testdata = [(1, 2), [3, 4, 5], [], None, 6, np.array([7, 8, 9])]
        flatArray = JaggedArray.flatten(testdata)
        # Empty lists vanish; None survives as a placeholder.
        self.assertEqual(flatArray, [1, 2, 3, 4, 5, None, 6, 7, 8, 9])

    def test_backwardsCompatible(self):
        """
        Test that the new JaggedArray can unpack the old database jagged data format.

        The "old" database format contains shapes and offsets for locations that have None.
        The "new" database format only contains shapes and offsets for non-None values.
        The "new" unpacking routine is able to read either format.
        """
        paramName = "test_old"
        data = [[1, 2], None, [3, 4, 5], None, None, [6, 7, 8, 9]]
        # Hand-built "old format" arrays: Nones get zero-size shapes and
        # repeated offsets rather than being omitted.
        flattenedArray = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])
        shapes = [(2,), (0,), (3,), (0,), (0,), (4,)]
        offsets = [0, 2, 2, 5, 5, 5, 5]
        nones = [1, 3, 4]
        h5file = "test_oldFormat.h5"
        with h5py.File(h5file, "w") as hf:
            dset = hf.create_dataset(
                data=flattenedArray,
                name=paramName,
            )
            dset.attrs["jagged"] = True
            dset.attrs["offsets"] = offsets
            dset.attrs["shapes"] = shapes
            dset.attrs["noneLocations"] = nones
        with h5py.File(h5file, "r") as hf:
            dataset = hf[paramName]
            values = dataset[()]
            offsets = dataset.attrs["offsets"]
            shapes = dataset.attrs["shapes"]
            nones = dataset.attrs["noneLocations"]
        roundTrip = JaggedArray.fromH5(
            values,
            offsets,
            shapes,
            nones,
            dtype=flattenedArray.dtype,
            paramName=paramName,
        )
        self._compareArrays(data, roundTrip)

    def _compareRoundTrip(self, data, paramName):
        """Make sure that data is unchanged by packing/unpacking."""
        jaggedArray = JaggedArray(data, paramName)

        # write to HDF5
        h5file = "test_jaggedArray.h5"
        with h5py.File(h5file, "w") as hf:
            dset = hf.create_dataset(
                data=jaggedArray.flattenedArray,
                name=jaggedArray.paramName,
            )
            dset.attrs["jagged"] = True
            dset.attrs["offsets"] = jaggedArray.offsets
            dset.attrs["shapes"] = jaggedArray.shapes
            dset.attrs["noneLocations"] = jaggedArray.nones

        # read it back and rebuild the JaggedArray from the raw pieces
        with h5py.File(h5file, "r") as hf:
            dataset = hf[paramName]
            values = dataset[()]
            offsets = dataset.attrs["offsets"]
            shapes = dataset.attrs["shapes"]
            nones = dataset.attrs["noneLocations"]
        roundTrip = JaggedArray.fromH5(
            values,
            offsets,
            shapes,
            nones,
            dtype=jaggedArray.flattenedArray.dtype,
            paramName=paramName,
        )
        self._compareArrays(data, roundTrip)

    def _compareArrays(self, ref, src):
        """
        Compare two numpy arrays.

        Comparing numpy arrays that may have unsavory data (NaNs, Nones, jagged
        data, etc.) is really difficult. For now, convert to a list and compare
        element-by-element.

        Several types of data do not survive a round trip. The if-elif branch
        here converts the initial data into the format expected to be produced
        by the round trip. The conversions are:

        - For scalar values (int, float, etc.), the data becomes a numpy
          array with a dimension of 1 after the round trip.
        - Tuples and lists become numpy arrays
        - Empty lists become `None`
        """
        self.assertEqual(type(src), JaggedArray)
        if isinstance(ref, np.ndarray):
            ref = ref.tolist()
        src = src.tolist()

        for v1, v2 in zip(ref, src):
            # Entries may be None
            if isinstance(v1, np.ndarray):
                v1 = v1.tolist()
            elif isinstance(v1, tuple):
                v1 = list(v1)
            elif isinstance(v1, int):
                v1 = np.array([v1])
            elif isinstance(v1, float):
                v1 = np.array([v1], dtype=np.float64)
            elif v1 is None:
                pass
            elif len(v1) == 0:
                v1 = None
            if isinstance(v2, np.ndarray):
                v2 = v2.tolist()
            self.assertEqual(v1, v2)
================================================
FILE: armi/bookkeeping/db/tests/test_layout.py
================================================
# Copyright 2022 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the db Layout and associated tools."""
import os
import unittest
from armi import context
from armi.bookkeeping.db import database, layout
from armi.reactor import grids
from armi.utils.directoryChangers import TemporaryDirectoryChanger
class TestLocationPacking(unittest.TestCase):
    """Tests for database location."""

    def setUp(self):
        self.td = TemporaryDirectoryChanger()
        self.td.__enter__()

    def tearDown(self):
        self.td.__exit__(None, None, None)

    def test_locationPacking(self):
        """Pack/unpack round trip in the current (latest) location format."""
        # One of each locator flavor, including a 2-entry multi-index.
        loc1 = grids.IndexLocation(1, 2, 3, None)
        loc2 = grids.CoordinateLocation(4.0, 5.0, 6.0, None)
        loc3 = grids.MultiIndexLocation(None)
        loc3.append(grids.IndexLocation(7, 8, 9, None))
        loc3.append(grids.IndexLocation(10, 11, 12, None))

        locs = [loc1, loc2, loc3]
        tp, data = layout._packLocations(locs)

        self.assertEqual(tp[0], layout.LOC_INDEX)
        self.assertEqual(tp[1], layout.LOC_COORD)
        # Multi locations carry their entry count in the type tag.
        self.assertEqual(tp[2], layout.LOC_MULTI + "2")

        unpackedData = layout._unpackLocations(tp, data)

        self.assertEqual(unpackedData[0], (1, 2, 3))
        self.assertEqual(unpackedData[1], (4.0, 5.0, 6.0))
        self.assertEqual(unpackedData[2], [(7, 8, 9), (10, 11, 12)])

    def test_locationPackingOlderVersions(self):
        """Round trip using the minor-version 1 and 2 formats (full class-name tags)."""
        for version in [1, 2]:
            loc1 = grids.IndexLocation(1, 2, 3, None)
            loc2 = grids.CoordinateLocation(4.0, 5.0, 6.0, None)
            loc3 = grids.MultiIndexLocation(None)
            loc3.append(grids.IndexLocation(7, 8, 9, None))
            loc3.append(grids.IndexLocation(10, 11, 12, None))

            locs = [loc1, loc2, loc3]
            tp, data = layout._packLocations(locs, minorVersion=version)

            self.assertEqual(tp[0], "IndexLocation")
            self.assertEqual(tp[1], "CoordinateLocation")
            self.assertEqual(tp[2], "MultiIndexLocation")

            unpackedData = layout._unpackLocations(tp, data, minorVersion=version)

            self.assertEqual(unpackedData[0], (1, 2, 3))
            self.assertEqual(unpackedData[1], (4.0, 5.0, 6.0))
            # Older formats unpack multi-index entries as arrays, not tuples.
            self.assertEqual(unpackedData[2][0].tolist(), [7, 8, 9])
            self.assertEqual(unpackedData[2][1].tolist(), [10, 11, 12])

    def test_locationPackingOldVersion(self):
        """Round trip using the minor-version 3 format (single-letter tags)."""
        version = 3

        loc1 = grids.IndexLocation(1, 2, 3, None)
        loc2 = grids.CoordinateLocation(4.0, 5.0, 6.0, None)
        loc3 = grids.MultiIndexLocation(None)
        loc3.append(grids.IndexLocation(7, 8, 9, None))
        loc3.append(grids.IndexLocation(10, 11, 12, None))

        locs = [loc1, loc2, loc3]
        tp, data = layout._packLocations(locs, minorVersion=version)

        self.assertEqual(tp[0], "I")
        self.assertEqual(tp[1], "C")
        self.assertEqual(tp[2], "M:2")

        unpackedData = layout._unpackLocations(tp, data, minorVersion=version)

        self.assertEqual(unpackedData[0], (1, 2, 3))
        self.assertEqual(unpackedData[1], (4.0, 5.0, 6.0))
        self.assertEqual(unpackedData[2][0], (7, 8, 9))
        self.assertEqual(unpackedData[2][1], (10, 11, 12))

    def test_close(self):
        """Closing a DB should move the file from the fast path to the working dir."""
        intendedFileName = "xyz.h5"

        db = database.Database(intendedFileName, "w")
        self.assertEqual(db._fileName, intendedFileName)
        self.assertIsNone(db._fullPath)  # this isn't set until the db is opened

        db.open()
        self.assertEqual(db._fullPath, os.path.join(context.getFastPath(), intendedFileName))

        db.close()  # this should move the file out of the FAST_PATH
        self.assertEqual(db._fullPath, os.path.join(os.path.abspath("."), intendedFileName))
================================================
FILE: armi/bookkeeping/db/tests/test_passiveDBLoadPlugin.py
================================================
# Copyright 2025 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides functionality for testing the PassiveDBLoadPlugin."""
import unittest
from copy import deepcopy
from io import StringIO
from ruamel.yaml import RoundTripLoader
from ruamel.yaml.nodes import MappingNode, ScalarNode
from armi import context, getApp
from armi.bookkeeping.db.passiveDBLoadPlugin import (
PassiveDBLoadPlugin,
PassThroughYamlize,
)
from armi.reactor.blocks import Block
class TestPassiveDBLoadPlugin(unittest.TestCase):
    def setUp(self):
        """
        Manipulate the standard App. We can't just configure our own, since the
        pytest environment bleeds between tests.
        """
        self.app = getApp()
        self._backupApp = deepcopy(self.app)
        # Cache the class-level knobs so tearDown can restore them exactly.
        self._cacheBPSections = PassiveDBLoadPlugin.SKIP_BP_SECTIONS
        self._cacheUnkownParams = PassiveDBLoadPlugin.UNKNOWN_PARAMS
        PassiveDBLoadPlugin.SKIP_BP_SECTIONS = []
        PassiveDBLoadPlugin.UNKNOWN_PARAMS = {}

    def tearDown(self):
        """Restore the App to its original state."""
        import armi

        armi._app = self._backupApp
        context.APP_NAME = "armi"
        PassiveDBLoadPlugin.SKIP_BP_SECTIONS = self._cacheBPSections
        PassiveDBLoadPlugin.UNKNOWN_PARAMS = self._cacheUnkownParams

    def test_passiveDBLoadPlugin(self):
        """Exercise the plugin hooks in both the default (empty) and populated cases."""
        plug = PassiveDBLoadPlugin()

        # default case
        bpSections = plug.defineBlueprintsSections()
        self.assertEqual(len(bpSections), 0)
        params = plug.defineParameters()
        self.assertEqual(len(params), 0)

        # non-empty cases
        PassiveDBLoadPlugin.SKIP_BP_SECTIONS = ["hi", "mom"]
        PassiveDBLoadPlugin.UNKNOWN_PARAMS = {Block: ["fake1", "fake2"]}
        bpSections = plug.defineBlueprintsSections()
        self.assertEqual(len(bpSections), 2)
        # BUGFIX: was `assertTrue(type(bpSections[0]), tuple)`, which treats the
        # type object as the (always-truthy) assertion value and `tuple` as the
        # failure message, so it could never fail. assertIsInstance does the
        # type check that was intended.
        self.assertIsInstance(bpSections[0], tuple)
        self.assertEqual(bpSections[0][0], "hi")
        self.assertIsInstance(bpSections[1], tuple)
        self.assertEqual(bpSections[1][0], "mom")
        params = plug.defineParameters()
        self.assertEqual(len(params), 1)
        self.assertIn(Block, params)
class TestPassThroughYamlize(unittest.TestCase):
    def test_passThroughYamlizeExample1(self):
        """PassThroughYamlize should consume (clear) the mapping node handed to it."""
        strTag = "tag:yaml.org,2002:str"
        boolTag = "tag:yaml.org,2002:bool"

        # Build a node mirroring a known BP-style YAML object, from the inside out.
        settingPairs = [
            (
                ScalarNode(tag=strTag, value="fuel axial expansion"),
                ScalarNode(tag=boolTag, value="False"),
            ),
            (
                ScalarNode(tag=strTag, value="grid plate radial expansion"),
                ScalarNode(tag=boolTag, value="True"),
            ),
        ]
        coreWideSection = MappingNode(tag="tag:yaml.org,2002:map", value=settingPairs)
        node = MappingNode(
            "test_passThroughYamlizeExample1",
            [(ScalarNode(tag=strTag, value="core-wide"), coreWideSection)],
        )

        # Sanity check: the node is non-empty and leads with the "core-wide" section.
        self.assertEqual(node.value[0][0].value, "core-wide")

        # Pass the node through the yamlize pass-through.
        pty = PassThroughYamlize()
        loader = RoundTripLoader(StringIO(""))
        _p = pty.from_yaml(loader, node)

        # Prove the section has been cleared.
        self.assertEqual(len(node.value), 0)
================================================
FILE: armi/bookkeeping/db/typedefs.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, Tuple
from armi.reactor.composites import ArmiObject
from armi.reactor.grids import LocationBase
# Return type for the getHistories() method: maps a parameter name to a
# dictionary keyed by (cycle, timeNode) tuples whose values are the parameter
# values at those time steps (param -> {(cycle, node): value}).
History = Dict[str, Dict[Tuple[int, int], Any]]
# Histories for many objects at once, keyed by the ArmiObject itself.
Histories = Dict[ArmiObject, History]
# Histories keyed by grid location rather than by object.
LocationHistories = Dict[LocationBase, History]
================================================
FILE: armi/bookkeeping/historyTracker.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The History Tracker is a bookkeeping interface that accesses and reports time-dependent state information from the
database.
At the end of a run, these write text files to show the histories for various follow-on mechanical analysis, fuel
performance analysis, etc.
Other interfaces may find this useful as well, to get an assembly history for fuel performance analysis, etc. This is
particularly useful in equilibrium runs, where the ``EqHistoryTrackerInterface`` will unravel the full history from a
single equilibrium cycle.
Getting history information
---------------------------
Loop over blocks, keys, and timesteps of interest and use commands like this::
history.getBlockHistoryVal(armiBlock.getName(), key, ts)
Using the database-based history trackers
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
You can pre-load information before gathering it to get much better performance::
history.preloadBlockHistoryVals(blockNames, historyKeys, timeSteps)
This is essential for performance when history information is going to be accessed in loops over assemblies or blocks.
Reading each param directly from the database individually in loops is paralyzingly slow.
Specifying parameters to add to the EOL history report
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To add state parameters to the list of things that get their history reported, you need to define an interface method
called `getHistoryParams`. It should return a list of block parameters that will become available. For example::
def getHistoryParams(self):
return ["flux", "percentBu"]
When you'd like to access history information, you need to grab the history interface. The history interfaces is present
by default in your interface stack. To get it, just call::
history = self.getInterface("history")
Now you can do a few things, such as::
# get some info about what's stored in the history
assemsWithHistory = history.getDetailAssemblies()
timeStepsAvailable = history.getTimeIndices()
# now go out and get some time-dependent block params:
fluxAtTimeStep3 = history.getBlockHistoryVal("B1003A", "flux", 3)
Specifying blocks and assemblies to track
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
See :ref:`detail-assems`.
"""
import traceback
from typing import TYPE_CHECKING, List
from armi import interfaces, operators, runLog
from armi.reactor import grids
from armi.reactor.flags import Flags
from armi.utils import tabulate
ORDER = 2 * interfaces.STACK_ORDER.BEFORE + interfaces.STACK_ORDER.BOOKKEEPING
if TYPE_CHECKING:
from armi.reactor.assemblies import Assembly
from armi.reactor.blocks import Block
def describeInterfaces(cs):
    """Function for exposing interface(s) to other code.

    Returns the HistoryTrackerInterface (with no kwargs) for every run type
    except equilibrium runs, where it is suppressed.
    """
    # BUG FIX: the original wrote `not in (operators.RunTypes.EQUILIBRIUM)`,
    # which is NOT a one-element tuple — the parentheses are redundant, so this
    # performed a *substring* test against the run-type string. The trailing
    # comma makes the membership test explicit and correct.
    if cs["runType"] not in (operators.RunTypes.EQUILIBRIUM,):
        klass = HistoryTrackerInterface
        return (klass, {})
    return None
class HistoryTrackerInterface(interfaces.Interface):
    """
    Makes reports of the state that individual assemblies encounter.

    .. impl:: This interface allows users to retrieve run data from somewhere other
        than the database.
        :id: I_ARMI_HIST_TRACK
        :implements: R_ARMI_HIST_TRACK

        This is a special :py:class:`Interface ` that is designed to store assembly and cross
        section data throughout time. This is done directly, with time-based lists of assembly data, and dictionaries of
        cross- section data. Users turn this feature on or off using the ``"detailAllAssems"`` setting.

    Notes
    -----
    This pre-dates the ARMI database system, and we would like to stop supporting this. Please do not find new uses for
    this; use the databases.

    Attributes
    ----------
    detailAssemblyNames : list
        List of detail assembly names in the reactor
    time : list
        list of reactor time in years
    """

    name = "history"

    # Assemblies carrying any of these flags are auto-tracked when "detailAllAssems" is on.
    DETAILED_ASSEMBLY_FLAGS = [Flags.FUEL, Flags.CONTROL]

    def __init__(self, r, cs):
        """
        HistoryTracker that uses the database to look up parameter history rather than storing them in memory.

        Warning
        -------
        If the current timestep history is requested and the database has not yet been written this timestep, the
        current value of the requested parameter is provided. It is possible that this is not the value that will be
        written to the database during this time step since many interfaces that change parameters may interact between
        this call and the database write.
        """
        interfaces.Interface.__init__(self, r, cs)
        self.detailAssemblyNames = []
        # Cache filled by preloadBlockHistoryVals(); None means nothing is preloaded.
        self._preloadedBlockHistory = None

    def interactBOL(self):
        """Activate user-requested detail assemblies at beginning of life."""
        self.addDetailAssembliesBOL()

    def interactBOC(self, cycle=None):
        """Look for any new assemblies that are asked for and add them to tracking."""
        self.addDetailAssemsByAssemNums()
        if self.cs["detailAllAssems"]:
            self.addAllDetailedAssems()

    def interactEOL(self):
        """Generate the history reports."""
        self._writeDetailAssemblyHistories()

    def addDetailAssembliesBOL(self):
        """Find and activate assemblies that the user requested detailed treatment of."""
        if self.cs["detailAssemLocationsBOL"]:
            for locLabel in self.cs["detailAssemLocationsBOL"]:
                # Resolve the ring/position label into a grid locator, then into an assembly.
                ring, pos, _axial = grids.locatorLabelToIndices(locLabel)
                i, j = self.r.core.spatialGrid.getIndicesFromRingAndPos(ring, pos)
                aLoc = self.r.core.spatialGrid[i, j, 0]
                try:
                    a = self.r.core.childrenByLocator[aLoc]
                except KeyError:
                    runLog.error(
                        f"Detail assembly in location {locLabel} (requested via `detailAssemLocationsBOL`) is not in "
                        "the core. Update settings."
                    )
                    raise
                self.addDetailAssembly(a)

        if self.cs["detailAllAssems"]:
            self.addAllDetailedAssems()

        # This also gets called at BOC but we still do it here for operators that do not call BOC.
        self.addDetailAssemsByAssemNums()

    def addAllDetailedAssems(self):
        """Add all assems who have the DETAILED_ASSEMBLY_FLAGS as detail assems."""
        for a in self.r.core:
            if a.hasFlags(self.DETAILED_ASSEMBLY_FLAGS):
                self.addDetailAssembly(a)

    def addDetailAssemsByAssemNums(self):
        """
        Activate detail assemblies from input based on assembly number.

        This is used to activate detail assembly tracking on assemblies that are not present in the core at BOL.

        See Also
        --------
        addDetailAssembliesBOL : Similar but for BOL
        """
        detailAssemNums = self.cs["detailAssemNums"]
        if not detailAssemNums:
            return
        for a in self.r.core:
            thisNum = a.getNum()
            # check for new detail assemblies
            if thisNum in detailAssemNums:
                self.addDetailAssembly(a)

    def _writeDetailAssemblyHistories(self):
        """Write data file with assembly histories."""
        detailAssems = self.getDetailAssemblies()
        # Nothing tracked: nothing to write.
        if len(detailAssems) == 0:
            return

        allBlockHistories = self.getAssemHistories(detailAssems)
        dbi = self.getInterface("database")
        # Location history is fetched per-assembly (not per-block like the params above).
        locHistory = dbi.getHistories(detailAssems, ["location"])
        assemLocations = {a: locHistory[a]["location"] for a in detailAssems}
        self.writeAssemHistories(detailAssems, allBlockHistories, assemLocations)

    def _getAssemHistoryFileName(self, assem):
        """Build the history file name for one assembly ('a' marks assembly-level files)."""
        return self._getHistoryFileName(assem.getName(), "a")

    def _getHistoryFileName(self, label, letter):
        """Build a history file name of the form '<caseTitle>-<label>-<letter>Hist.txt'."""
        return f"{self.cs.caseTitle}-{label}-{letter}Hist.txt"

    def getTrackedParams(self):
        """Give the list of block parameters that are being tracked."""
        trackedParams = {"residence", "ztop", "zbottom"}

        # loop through interfaces to allow them to add custom params.
        for i in self.o.getInterfaces():
            for newParam in i.getHistoryParams():
                if newParam not in trackedParams:
                    trackedParams.add(newParam)

        return sorted(trackedParams)

    def addDetailAssembly(self, a: "Assembly"):
        """Track the name of assemblies that are flagged for detailed treatment."""
        aName = a.getName()
        if aName not in self.detailAssemblyNames:
            self.detailAssemblyNames.append(aName)

    def getDetailAssemblies(self) -> list["Assembly"]:
        """Returns the assemblies that have been signaled as detail assemblies."""
        assems = []
        if not self.detailAssemblyNames:
            runLog.info("No detail assemblies HistoryTrackerInterface")

        for name in self.detailAssemblyNames:
            try:
                assems.append(self.r.core.getAssemblyByName(name))
            except KeyError:
                # NOTE(review): this generic raise looks like leftover debug code —
                # it fires when the assembly exists in the core but is missing from
                # the by-name lookup table. Consider a proper error type/message.
                if name in {a.name for a in self.r.core}:
                    raise Exception("Found it")
                runLog.warning(
                    "Cannot find detail assembly {} in assemblies-by-name lookup table, which has {} entries".format(
                        name, len(self.r.core.assembliesByName)
                    )
                )

        return assems

    def getDetailBlocks(self) -> list["Block"]:
        """Get all blocks in all detail assemblies."""
        return [block for a in self.getDetailAssemblies() for block in a]

    def nonStationaryBlocks(self, a: "Assembly") -> list["Block"]:
        """Return the blocks of ``a`` that do not carry any stationary block flag."""
        return [b for b in a if not any(b.hasFlags(sbf) for sbf in self.r.core.stationaryBlockFlagsList)]

    def getAssemHistories(self, assemList: List["Assembly"]):
        """Get the histories for all blocks in detailed assemblies."""
        return self.getInterface("database").getHistories(
            [b for a in assemList for b in self.nonStationaryBlocks(a)],
            self.getTrackedParams(),
        )

    def writeAssemHistories(self, detailAssems, allBlockHistories, assemLocations):
        """Write detailed assembly histories to text files.

        One text file per assembly: a time row, one table per tracked parameter
        (one row per non-stationary block), the location history, EOL block
        elevations, and finally assembly/block identification info.
        """
        dbi = self.getInterface("database")
        times = dbi.getHistory(self.r, ["time"])["time"]
        params = self.getTrackedParams()
        for a in detailAssems:
            fName = self._getAssemHistoryFileName(a)
            with open(fName, "w") as out:
                # ts is a tuple, remove the spaces from the string representation so it is easy to load into a
                # spreadsheet or whatever
                headers = [str(ts).replace(" ", "") for ts in times.keys()]
                out.write(
                    tabulate.tabulate(
                        data=(times.values(),),
                        headers=headers,
                        tableFmt="plain",
                        floatFmt="11.5E",
                    )
                )
                out.write("\n")

                for param in params:
                    out.write("\n\nkey: {0}\n".format(param))
                    data = [allBlockHistories[b][param].values() for b in self.nonStationaryBlocks(a)]
                    out.write(tabulate.tabulate(data, tableFmt="plain", floatFmt="11.5E"))
                    out.write("\n")

                # loc is a tuple, remove the spaces from the string representation so it is easy to load into a
                # spreadsheet or whatever
                location = [str(loc).replace(" ", "") for loc in assemLocations[a].values()]
                out.write("\n\nkey: location\n")
                out.write(tabulate.tabulate((location,), tableFmt="plain"))

                out.write("\n\n\n")
                headers = "EOL bottom top center".split()
                data = [("", b.p.zbottom, b.p.ztop, b.p.z) for b in self.nonStationaryBlocks(a)]
                out.write(tabulate.tabulate(data, headers=headers, tableFmt="plain", floatFmt="10.3f"))

                out.write("\n\n\nAssembly info\n")
                out.write(f"{a.getName()} {a.getType()}\n")
                for b in self.nonStationaryBlocks(a):
                    out.write(f'"{b.getType()}" {b.p.xsType} {b.p.envGroup}\n')

    def preloadBlockHistoryVals(self, names, keys, timesteps):
        """
        Pre-load block data so it can be more quickly accessed in the future.

        Notes
        -----
        Pre-loading has value because the database is organized in a fashion that is easy/inexpensive to look up data
        for many of time steps simultaneously. These can then be stored and provided when the specific timestep is
        requested. The method ``getBlockHistoryVal`` still looks at the database if the preloaded values don't have the
        needed data, so the same results should be given if this method is not called.
        """
        try:
            dbi = self.getInterface("database")
            blocks = [self.r.core.getBlockByName(name) for name in names]
            # weird special stuff for loc, just leave it be.
            keys = [key for key in keys if key != "loc"]
            data = dbi.getHistories(blocks, keys, timesteps)
            self._preloadedBlockHistory = data
        except Exception:
            # fails during the beginning of standard runs, but that's ok
            runLog.info(f"Unable to pre-load block history values due to error:\n{traceback.format_exc()}")
            self.unloadBlockHistoryVals()

    def unloadBlockHistoryVals(self):
        """Remove all cached db reads."""
        self._preloadedBlockHistory = None

    def getBlockHistoryVal(self, name: str, paramName: str, ts: tuple[int, int]):
        """
        Use the database interface to return the parameter values for the supplied block names, and timesteps.

        Notes
        -----
        If the current timestep history is requested and the database has not yet been written this timestep, the
        current value of the requested parameter is returned.

        Parameters
        ----------
        name
            name of block
        paramName
            parameter keys of interest
        ts
            cycle and node from which to load data

        Raises
        ------
        KeyError
            When param not found in database.
        """
        block = self.r.core.getBlockByName(name)

        if self._isCurrentTimeStep(ts) and not self._databaseHasDataForTimeStep(ts):
            # Current timenode may not have been written to the DB. Use the current value in the param system. Works for
            # fuel performance, for some params, e.g. burnup, dpa.
            return block.p[paramName]

        try:
            val = self._preloadedBlockHistory[block][paramName][ts]
        # not in preloaded or preloaded failed
        except (TypeError, ValueError, KeyError, IndexError):
            # Fall back to a direct (slow) database read for this one block/param/timestep.
            dbi = self.getInterface("database")
            try:
                data = dbi.database.getHistory(block, [paramName], [ts])
                val = data[paramName][ts]
            except KeyError:
                runLog.error(f"No value in DB. param name: {paramName} requested index: {ts}")
                raise

        return val

    def _isCurrentTimeStep(self, ts: tuple[int, int]) -> bool:
        """Return True if the timestep requested is the current time step."""
        return ts == (self.r.p.cycle, self.r.p.timeNode)

    def _databaseHasDataForTimeStep(self, ts) -> bool:
        """Return True if the database has data for the requested time step."""
        dbi = self.getInterface("database")
        return ts in dbi.database.genTimeSteps()

    def getTimeSteps(self, a: "Assembly" = None) -> list[float]:
        """
        Given a fuel assembly, return list of time steps values (in years) that are available.

        Parameters
        ----------
        a
            A fuel assembly that has been designated a detail assem. If passed, only timesteps where this assembly is in
            the core will be tracked.

        Returns
        -------
        timeSteps
            times in years that are available in the history

        See Also
        --------
        getTimeIndices : gets indices where an assembly is in the core
        """
        dbi = self.getInterface("database")
        timeInYears = dbi.getHistory(self.r, ["time"])["time"]
        # remove the time step info. Clients don't want it
        # NOTE(review): writeAssemHistories treats this same getHistory result as a
        # dict, in which case iterating yields (cycle, node) keys and t[1] would be
        # the node index rather than a time in years — confirm the return type.
        timeInYears = [t[1] for t in timeInYears]
        if a:
            b = self._getBlockInAssembly(a)
            # NOTE(review): every other getHistory call passes a composite object
            # first (e.g. getHistory(self.r, ["time"])); this call omits it —
            # presumably it should be getHistory(b, ["id"]). TODO confirm.
            ids = dbi.getHistory(["id"])["id"]
            # NOTE(review): the comprehension variable `ids` shadows the outer `ids` list.
            timeInYears = [time for time, ids in zip(timeInYears, ids) if b.p.id in ids]

        return timeInYears

    @staticmethod
    def _getBlockInAssembly(a: "Assembly") -> "Block":
        """Get a representative fuel block from a fuel assembly.

        Raises
        ------
        RuntimeError
            If the assembly contains no fuel block.
        """
        b = a.getFirstBlock(Flags.FUEL)
        if not b:
            runLog.error(f"Assembly {a} does not contain fuel")
            for b in a:
                runLog.error(f"Block {b}")
            raise RuntimeError(
                "A tracked assembly does not contain fuel and has caused this error, see the details in stdout."
            )

        return b
================================================
FILE: armi/bookkeeping/mainInterface.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module performs some file manipulations, cleanups, state loads, etc.
It's a bit of a catch-all interface, and it's name is admittedly not very descriptive.
"""
import glob
import itertools
import os
import re
from armi import context, interfaces, runLog, utils
from armi.bookkeeping.db.database import Database
from armi.settings.fwSettings.globalSettings import (
CONF_COPY_FILES_FROM,
CONF_COPY_FILES_TO,
CONF_ZONE_DEFINITIONS,
CONF_ZONES_FILE,
)
from armi.utils import pathTools
from armi.utils.customExceptions import InputError
ORDER = interfaces.STACK_ORDER.PREPROCESSING
def describeInterfaces(_cs):
    """Function for exposing interface(s) to other code."""
    # The settings object is not consulted; this interface is always offered.
    kwargs = {"reverseAtEOL": True}
    return (MainInterface, kwargs)
class MainInterface(interfaces.Interface):
    """
    Do some basic manipulations, calls, Instantiates the database.

    Notes
    -----
    Interacts early so that the database is accessible as soon as possible in the run. The database
    interfaces runs near the end of the interface stack, but the main interface interacts first.
    """

    name = "main"

    @staticmethod
    def specifyInputs(cs):
        """Declare the zones file (if any) as a case input for this interface."""
        return {CONF_ZONES_FILE: [cs[CONF_ZONES_FILE]]}

    def interactBOL(self):
        """At beginning of life, run base-class BOL work then copy user-requested files."""
        interfaces.Interface.interactBOL(self)
        self._moveFiles()

    def _moveFiles(self):
        """
        At the start of each run, arbitrary lists of user-defined files can be copied around.

        This logic is controlled by the settings ``copyFilesFrom`` & ``copyFilesTo``.

        ``copyFilesFrom`` :
            - List of files to copy (cannot be directories).
            - Can be of length zero (that just means no files will be copied).
            - The file names listed can use the ``*`` glob syntax, to reference multiple files.

        ``copyFilesTo`` :
            - List of directories to copy the files into.
            - Can be of length zero; all files will be copied to the local dir.
            - Can be of length one; all files will be copied to that dir.
            - The only other valid length for this list _must_ be the same length as the "from" list.

        Notes
        -----
        If a provided "from" file is missing, this method will silently pass over that. It will only
        check if the length of the "from" and "to" lists are valid in the end.

        Raises
        ------
        InputError
            When ``copyFilesTo`` has an invalid length relative to ``copyFilesFrom``.
        """
        # handle a lot of asterisks and missing files
        # (glob silently drops non-matching patterns, giving the "silent pass" behavior above)
        copyFilesFrom = [
            filePath for possiblePath in self.cs[CONF_COPY_FILES_FROM] for filePath in glob.glob(possiblePath)
        ]
        copyFilesTo = self.cs[CONF_COPY_FILES_TO]

        if len(copyFilesTo) in (len(copyFilesFrom), 0, 1):
            # if any files to copy, then use the first as the default, i.e. len() == 1,
            # otherwise assume '.'
            default = copyFilesTo[0] if any(copyFilesTo) else "."
            # zip_longest pads the shorter "to" list with the default destination
            for filename, dest in itertools.zip_longest(copyFilesFrom, copyFilesTo, fillvalue=default):
                pathTools.copyOrWarn(CONF_COPY_FILES_FROM, filename, dest)
        else:
            runLog.error(
                f"cs['{CONF_COPY_FILES_TO}'] must either be length 0, 1, or have the same number "
                f"of entries as cs['{CONF_COPY_FILES_FROM}']. Actual values:\n"
                f"    {CONF_COPY_FILES_TO}   : {copyFilesTo}\n"
                f"    {CONF_COPY_FILES_FROM} : {copyFilesFrom}"
            )
            raise InputError(f"Failed to process {CONF_COPY_FILES_FROM}/{CONF_COPY_FILES_TO}")

    def interactBOC(self, cycle=None):
        """Typically the first interface to interact beginning of cycle."""
        runLog.important(f"Beginning of Cycle {cycle}")
        runLog.LOG.clearSingleLogs()

        if self.cs["rmExternalFilesAtBOC"]:
            self.cleanLastCycleFiles()

    def interactEveryNode(self, cycle, node):
        """Loads from db if necessary."""
        if self.cs["loadStyle"] == "fromDB" and self.cs["loadFromDBEveryNode"]:
            if cycle == 0 and node == 0:
                # skip at BOL because interactBOL handled it.
                pass
            else:
                with Database(self.cs["reloadDBName"], "r") as db:
                    r = db.load(cycle, node, self.cs)

                self.o.reattach(r, self.cs)

        # Rebuild user-defined zones each node, if any are configured.
        if self.cs[CONF_ZONES_FILE] or self.cs[CONF_ZONE_DEFINITIONS]:
            self.r.core.buildManualZones(self.cs)

    def interactEOL(self):
        """At end of life, optionally clean up temporary files and dump the warning report."""
        if self.cs["rmExternalFilesAtEOL"]:
            # successful run with rmExternalFilesAtEOL activated. Clean things up.
            self.cleanARMIFiles()
        runLog.warningReport()

    def cleanARMIFiles(self):
        """
        Delete temporary ARMI run files like simulation inputs/outputs.

        Useful if running a clean job that doesn't require restarts.

        Raises
        ------
        ValueError
            When called from a non-primary MPI rank.
        """
        if context.MPI_RANK != 0:
            # avoid inadvertently calling from worker nodes which could cause filesystem lockups.
            raise ValueError("Only the master node is allowed to clean files here.")
        runLog.important("Cleaning ARMI files due to rmExternalFilesAtEOL option")
        for fileName in os.listdir(os.getcwd()):
            # clean simulation inputs and outputs
            for candidate in [".BCD", ".inp", ".out", "ISOTXS-"]:
                if candidate in fileName:
                    # .htos.out and sassys.inp files are deliberately kept
                    if ".htos.out" in fileName:
                        continue
                    if "sassys.inp" in fileName:
                        continue

                    os.remove(fileName)

            if re.search("ISO..F?$", fileName):
                # clean intermediate XS
                os.remove(fileName)

        for snapText in self.cs["dumpSnapshot"]:
            # snapText is a CCCNNN with C=cycle and N=node
            cycle = int(snapText[0:3])
            node = int(snapText[3:])
            newFolder = "snapShot{0}_{1}".format(cycle, node)
            utils.pathTools.cleanPath(newFolder, forceClean=True)

        # delete database if it's SQLlite
        # no need to delete because the database won't have copied it back if using fastpath.

        # clean temp directories.
        if os.path.exists("shuffleBranches"):
            utils.pathTools.cleanPath("shuffleBranches")
            # Potentially, wait for all the processes to catch up.

        if os.path.exists("failedRuns"):
            utils.pathTools.cleanPath("failedRuns")

    def cleanLastCycleFiles(self):
        """Delete ARMI files from previous cycle that aren't necessary for the next cycle.

        Unless you're doing reloads, of course.
        """
        runLog.important("Cleaning ARMI files due to rmExternalFilesAtBOC option")
        for fileName in os.listdir(os.getcwd()):
            # clean MC**2 and REBUS inputs and outputs
            for candidate in [".BCD", ".inp", ".out", "ISOTXS-"]:
                if candidate in fileName:
                    # Do not remove .htos.out files.
                    if ".htos.out" in fileName:
                        continue
                    if re.search(r"mcc[A-Z0-9]+\.inp", fileName):
                        continue
                    # don't remove mccIA1.inp stuff in case we go out of a burnup bound.

                    try:
                        os.remove(fileName)
                    except OSError:
                        # Best-effort cleanup: log and keep going if a file is locked.
                        runLog.warning(
                            "Error removing file {0} during cleanup. It is still in use, probably".format(fileName)
                        )
================================================
FILE: armi/bookkeeping/memoryProfiler.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Interface to help diagnose memory issues during debugging/development.
There are many approaches to memory profiling.
1. You can ask psutil for the memory used by the process from an OS perspective.
This is great for top-down analysis. This module provides printouts
that show info from every process running. This is very fast.
2. You can use ``gc.get_objects()`` to list all objects that the garbage collector is tracking. If you want, you
can filter it down and get the counts and sizes of objects of interest (e.g. all armi objects).
This module has tools to do all of this. It should help you out.
NOTE: Psutil and sys.getsizeof will certainly report slightly different results.
NOTE: In Windows, it seems that even if your garbage is collected, Windows does not de-allocate all the memory.
So if you are a worker and you just got a 2GB reactor but then deleted it, Windows will keep you at 2GB for a while.
See Also
--------
https://pythonhosted.org/psutil/
https://docs.python.org/3/library/gc.html#gc.garbage
"""
import gc
import sys
from os import cpu_count
from typing import Optional
from armi import context, interfaces, mpiActions, runLog
from armi.reactor.composites import ArmiObject
from armi.utils import tabulate
from armi.utils.customExceptions import NonexistentSetting
try:
# psutil is an optional requirement, since it doesn't support MacOS very well
import psutil
_havePsutil = True
except ImportError:
runLog.warning("Failed to import psutil; MemoryProfiler will not provide meaningful data.")
_havePsutil = False
ORDER = interfaces.STACK_ORDER.POSTPROCESSING
REPORT_COUNT = 100000
def describeInterfaces(cs):
    """Function for exposing interface(s) to other code."""
    # The settings object is unused; the profiler is always offered with no kwargs.
    klass = MemoryProfiler
    return (klass, {})
def getTotalJobMemory(nTasks, cpusPerTask):
    """Function to calculate the total memory of a job. This is a constant during a simulation."""
    coresPerNode = cpu_count()
    # RAM available to a single CPU, in GB.
    gbPerCpu = psutil.virtual_memory().total / (1024**3) / coresPerNode
    return nTasks * cpusPerTask * gbPerCpu
def getCurrentMemoryUsage():
    """This scavenges the memory profiler in ARMI to get the current memory usage."""
    action = PrintSystemMemoryUsageAction()
    action.broadcast()
    usages = action.gather(SystemAndProcessMemoryUsage())
    # Grab virtual memory instead of physical. There is a large discrepancy, we will be conservative
    return sum(usage.processVirtualMemoryInMB for usage in usages)
class MemoryProfiler(interfaces.Interface):
name = "memoryProfiler"
def __init__(self, r, cs):
interfaces.Interface.__init__(self, r, cs)
self.sizes = {}
def interactBOL(self):
interfaces.Interface.interactBOL(self)
self.printCurrentMemoryState()
mpiAction = PrintSystemMemoryUsageAction()
mpiAction.broadcast().invoke(self.o, self.r, self.cs)
mpiAction.printUsage("BOL SYS_MEM")
# so we can debug mem profiler quickly
if self.cs["debugMem"]:
mpiAction = ProfileMemoryUsageAction("EveryNode")
mpiAction.broadcast().invoke(self.o, self.r, self.cs)
def interactEveryNode(self, cycle, node):
self.printCurrentMemoryState()
mp = PrintSystemMemoryUsageAction()
mp.broadcast()
mp.invoke(self.o, self.r, self.cs)
mp.printUsage("c{} n{} SYS_MEM".format(cycle, node))
self.r.core.p.minProcessMemoryInMB = round(mp.minProcessMemoryInMB * 10) / 10.0
self.r.core.p.maxProcessMemoryInMB = round(mp.maxProcessMemoryInMB * 10) / 10.0
if self.cs["debugMem"]:
mpiAction = ProfileMemoryUsageAction("EveryNode")
mpiAction.broadcast().invoke(self.o, self.r, self.cs)
def interactEOL(self):
"""End of life hook. Good place to wrap up or print out summary outputs."""
if self.cs["debugMem"]:
mpiAction = ProfileMemoryUsageAction("EOL")
mpiAction.broadcast().invoke(self.o, self.r, self.cs)
def printCurrentMemoryState(self):
"""Print the current memory footprint and available memory."""
try:
cpusPerTask = self.cs["cpusPerTask"]
except NonexistentSetting:
runLog.extra(
"To view memory consumed, remaining available, and total allocated for a case, "
"add the setting 'cpusPerTask' to your application."
)
return
nTasks = self.cs["nTasks"]
totalMemoryInGB = getTotalJobMemory(nTasks, cpusPerTask)
currentMemoryUsageInGB = getCurrentMemoryUsage() / 1024
availableMemoryInGB = totalMemoryInGB - currentMemoryUsageInGB
runLog.info(
f"Currently using {currentMemoryUsageInGB} GB of memory. "
f"There is {availableMemoryInGB} GB of memory left. "
f"There is a total allocation of {totalMemoryInGB} GB."
)
def displayMemoryUsage(self, timeDescription):
r"""
Print out some information to stdout about the memory usage of ARMI.
Useful when the debugMem setting is set to True.
Turn these on as appropriate to find all your problems.
"""
runLog.important("----- Memory Usage Report at {} -----".format(timeDescription))
self._printFullMemoryBreakdown(reportSize=self.cs["debugMemSize"])
self._reactorAssemblyTrackingBreakdown()
runLog.important("----- End Memory Usage Report at {} -----".format(timeDescription))
def _reactorAssemblyTrackingBreakdown(self):
runLog.important("Reactor attribute ArmiObject tracking count")
for attrName, attrObj in self.r.core.__dict__.items():
if not attrObj:
continue
if isinstance(attrObj, list) and isinstance(attrObj[0], ArmiObject):
runLog.important("List {:30s} has {:4d} ArmiObjects".format(attrName, len(attrObj)))
if isinstance(attrObj, dict) and isinstance(list(attrObj.values())[0], ArmiObject):
runLog.important("Dict {:30s} has {:4d} ArmiObjects".format(attrName, len(attrObj)))
if self.r.excore.get("sfp") is not None:
runLog.important("SFP has {:4d} ArmiObjects".format(len(self.r.excore["sfp"])))
def checkForDuplicateObjectsOnArmiModel(self, attrName, refObject):
"""Scans through ARMI model for duplicate objects."""
if self.r is None:
return
uniqueIds = set()
uniqueObjTypes = set()
def checkAttr(subObj):
if getattr(subObj, attrName, refObject) != refObject:
uniqueIds.add(id(getattr(subObj, attrName)))
uniqueObjTypes.add(subObj.__class__.__name__)
for a in self.r.core.getAssemblies(includeAll=True):
checkAttr(a)
for b in a:
checkAttr(b)
for c in b:
checkAttr(c)
checkAttr(c.material)
for i in self.o.getInterfaces():
checkAttr(i)
if i.name == "xsGroups":
for _, block in i.representativeBlocks.items():
checkAttr(block)
if len(uniqueIds) == 0:
runLog.important("There are no duplicate `.{}` attributes".format(attrName))
else:
runLog.error(
"There are {} unique objects stored as `.{}` attributes!\n"
"Expected id {}, but got {}.\nExpected object:{}\n"
"These types of objects had unique attributes: {}".format(
len(uniqueIds) + 1,
attrName,
id(refObject),
uniqueIds,
refObject,
", ".join(uniqueObjTypes),
)
)
raise RuntimeError
def _printFullMemoryBreakdown(self, reportSize=True, printReferrers=False):
    """
    Looks for any class from any module in the garbage collector and prints their count and size.

    Parameters
    ----------
    reportSize : bool, optional
        calculate size as well as counting individual objects.
    printReferrers : bool, optional
        if True, also print referrers of large classes and of gc garbage.

    Notes
    -----
    Just because you use startsWith=armi doesn't mean you'll capture all ARMI objects. Some are in lists
    and dictionaries.
    """
    cs = self.cs
    operator = self.o
    reactor = self.r

    if reportSize:
        # Detach the reactor so its footprint is measured independently of
        # the operator/interface graph; reattached at the end.
        self.o.detach()
        gc.collect()

    allObjects = gc.get_objects()
    runLog.info("GC returned {} objects".format(len(allObjects)))

    instanceCounters = KlassCounter(reportSize)
    instanceCounters.countObjects(allObjects)
    # counters sort by instance count (see InstanceCounter comparisons)
    for counter in sorted(instanceCounters.counters.values()):
        runLog.info(
            "UNIQUE_INSTANCE_COUNT: {:60s} {:10d} {:10.1f} MB".format(
                counter.classType.__name__,
                counter.count,
                counter.memSize / (1024**2.0),
            )
        )
        # only investigate referrers for classes holding > 100 MB
        if printReferrers and counter.memSize / (1024**2.0) > 100:
            referrers = gc.get_referrers(counter.first)
            runLog.info(" Referrers of first one: ")
            for referrer in referrers:
                runLog.info(" {}".format(repr(referrer)[:150]))

    runLog.info("gc garbage: {}".format(gc.garbage))
    if printReferrers:
        # if you want more info on the garbage referrers, run this. WARNING, it's generally like 1000000 lines.
        runLog.info("referrers")
        for o in gc.garbage:
            for r in gc.get_referrers(o):
                runLog.info("ref for {}: {}".format(o, r))

    if reportSize:
        # restore the reactor/settings detached above
        operator.reattach(reactor, cs)
@staticmethod
def getReferrers(obj):
    """Print referrers in a useful way (as opposed to gigabytes of text).

    Parameters
    ----------
    obj : object
        The object whose referrers will be printed (truncated to the first
        100 referrers, 100 characters each).
    """
    runLog.info("Printing first 100 character of first 100 referrers")
    for ref in gc.get_referrers(obj)[:100]:
        runLog.important("ref for {}: {}".format(obj, repr(ref)[:100]))
class KlassCounter:
    """
    Helper class, to allow us to count instances of various classes in the
    Python standard library garbage collector (gc).

    Counting can be done simply, or by memory footprint.
    """

    def __init__(self, reportSize):
        # {classType: InstanceCounter} — one counter per concrete type seen
        self.counters = dict()
        # when True, accumulate sys.getsizeof() totals as well as counts
        self.reportSize = reportSize
        # total number of unique objects counted so far
        self.count = 0

    def __getitem__(self, classType):
        # lazily create one InstanceCounter per class type
        if classType not in self.counters:
            self.counters[classType] = InstanceCounter(classType, self.reportSize)
        return self.counters[classType]

    def countObjects(self, ao):
        """
        Recursively find objects inside arbitrarily-deeply-nested containers.

        This is designed to work with the garbage collector, so it focuses on
        objects potentially being held in dict, tuple, list, or sets.
        """
        counter = self[type(ao)]
        if counter.add(ao):
            self.count += 1
            if self.count % REPORT_COUNT == 0:
                runLog.info("Counted {} items".format(self.count))
            # Only descend into a container the FIRST time it is seen. This
            # avoids redundant re-traversal and, more importantly, prevents
            # unbounded recursion on cyclic structures (e.g. a dict or list
            # that transitively contains itself, which gc.get_objects()
            # routinely returns).
            if isinstance(ao, dict):
                for k, v in ao.items():
                    self.countObjects(k)
                    self.countObjects(v)
            elif isinstance(ao, (list, tuple, set)):
                for v in iter(ao):
                    self.countObjects(v)
class InstanceCounter:
    """Counts (and optionally sizes) the instances of one class type.

    Instances are de-duplicated by ``id()`` so each object is counted once.
    Counters compare/sort by their ``count``.
    """

    def __init__(self, classType, reportSize):
        self.classType = classType
        self.count = 0
        self.reportSize = reportSize
        if reportSize:
            self.memSize = 0
        else:
            # NaN makes it obvious in reports that size was never measured
            self.memSize = float("nan")
        self.items = set()
        self.ids = set()
        # first instance seen; used by the memory profiler's referrer report
        self.first = None

    def add(self, item):
        """Count *item* if unseen; return True if it was newly counted."""
        itemId = id(item)
        if itemId in self.ids:
            return False
        self.ids.add(itemId)
        if self.count == 0:
            # Remember a representative instance so callers (e.g.
            # _printFullMemoryBreakdown) can inspect its referrers; previously
            # this was never assigned and stayed None.
            self.first = item
        if self.reportSize:
            # note: getsizeof is shallow — contained objects are not included
            self.memSize += sys.getsizeof(item)
        self.count += 1
        return True

    def __cmp__(self, that):
        # Python 2 relic; ignored by Python 3 but kept for compatibility.
        return (self.count > that.count) - (self.count < that.count)

    def __lt__(self, that):
        # was misspelled ``__ls__``; sorted() only worked via reflected __gt__
        return self.count < that.count

    def __gt__(self, that):
        return self.count > that.count
class ProfileMemoryUsageAction(mpiActions.MpiAction):
    """MPI action that triggers a memory-usage report on every rank."""

    def __init__(self, timeDescription):
        mpiActions.MpiAction.__init__(self)
        # label for the point in the run at which usage is being sampled
        self.timeDescription = timeDescription

    def invokeHook(self):
        """Look up the memory profiler interface and print its usage report."""
        profiler = self.o.getInterface("memoryProfiler")
        profiler.displayMemoryUsage(self.timeDescription)
class SystemAndProcessMemoryUsage:
    """Snapshot of node-level and process-level memory usage for one MPI rank."""

    def __init__(self):
        self.nodeName = context.MPI_NODENAME
        self.percentNodeRamUsed: Optional[float] = None
        self.processMemoryInMB: Optional[float] = None
        self.processVirtualMemoryInMB: Optional[float] = None
        # Without psutil no measurements can be taken; fields stay None.
        if _havePsutil:
            self.percentNodeRamUsed = psutil.virtual_memory().percent
            proc = psutil.Process()
            self.processMemoryInMB = proc.memory_info().rss / (1024.0**2)
            self.processVirtualMemoryInMB = proc.memory_info().vms / (1024.0**2)

    def __isub__(self, other):
        """In-place subtraction: turn this snapshot into deltas vs. *other*."""
        bothMeasured = self.percentNodeRamUsed is not None and other.percentNodeRamUsed is not None
        if bothMeasured:
            self.percentNodeRamUsed -= other.percentNodeRamUsed
            self.processMemoryInMB -= other.processMemoryInMB
            self.processVirtualMemoryInMB -= other.processVirtualMemoryInMB
        return self
class PrintSystemMemoryUsageAction(mpiActions.MpiAction):
    """MPI action that gathers per-rank memory snapshots and prints a
    per-node summary table.
    """

    def __init__(self):
        mpiActions.MpiAction.__init__(self)
        # gathered SystemAndProcessMemoryUsage objects, one per MPI rank
        self.usages = []
        # node RAM percentage from this rank's own snapshot
        self.percentNodeRamUsed: Optional[float] = None

    def __iter__(self):
        # iterate the gathered per-rank snapshots
        return iter(self.usages)

    def __isub__(self, other):
        """In-place subtraction: convert gathered data to deltas vs. *other*."""
        if self.percentNodeRamUsed is not None and other.percentNodeRamUsed is not None:
            self.percentNodeRamUsed -= other.percentNodeRamUsed
        for mine, theirs in zip(self, other):
            # delegates to SystemAndProcessMemoryUsage.__isub__ (mutates in place)
            mine -= theirs
        return self

    @property
    def minProcessMemoryInMB(self):
        # smallest per-process memory (MB) across ranks; 0.0 before gathering
        if len(self.usages) == 0:
            return 0.0
        return min(mu.processMemoryInMB or 0.0 for mu in self)

    @property
    def maxProcessMemoryInMB(self):
        # largest per-process memory (MB) across ranks; 0.0 before gathering
        if len(self.usages) == 0:
            return 0.0
        return max(mu.processMemoryInMB or 0.0 for mu in self)

    def invokeHook(self):
        # take a local snapshot, then gather all ranks' snapshots here
        spmu = SystemAndProcessMemoryUsage()
        self.percentNodeRamUsed = spmu.percentNodeRamUsed
        self.usages = self.gather(spmu)

    def printUsage(self, description=None):
        """This method prints the usage of all MPI nodes.

        The printout looks something like:

            SYS_MEM HOSTNAME 14.4% RAM. Proc mem (MB): 491 472 471 471 471 470
            SYS_MEM HOSTNAME 13.9% RAM. Proc mem (MB): 474 473 472 471 460 461
            SYS_MEM HOSTNAME ...
            SYS_MEM HOSTNAME ...
        """
        printedNodes = set()
        prefix = description or "SYS_MEM"

        memoryData = []
        for memoryUsage in self:
            # one row per node: all ranks on a node are aggregated together
            if memoryUsage.nodeName in printedNodes:
                continue
            printedNodes.add(memoryUsage.nodeName)
            nodeUsages = [mu for mu in self if mu.nodeName == memoryUsage.nodeName]
            sysMemAvg = sum(mu.percentNodeRamUsed or 0.0 for mu in nodeUsages) / len(nodeUsages)
            memoryData.append(
                (
                    "{:<24}".format(memoryUsage.nodeName),
                    "{:5.1f}%".format(sysMemAvg),
                    "{}".format(" ".join("{:5.0f}".format(mu.processMemoryInMB or 0.0) for mu in nodeUsages)),
                )
            )

        runLog.info(
            "Summary of the system memory usage at `{}`:\n".format(prefix)
            + tabulate.tabulate(
                memoryData,
                headers=[
                    "Machine",
                    "Average System RAM Usage",
                    "Processor Memory Usage (MB)",
                ],
                tableFmt="armi",
            )
        )
================================================
FILE: armi/bookkeeping/report/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Package for generating reports as printable groups and HTML in ARMI."""
from armi.bookkeeping.report import data
def setData(name, value, group=None, reports=None):
    """
    Stores data in accordance with the specified parameters for use later.

    Parameters
    ----------
    name : str
        Name under which the datum is stored.
    value : Object
        Any value desired.
    group : data.Group, optional
        Group the datum belongs to; defaults to UNGROUPED.
    reports : data.Report or list/set/tuple of data.Report, optional
        Report(s) the datum should appear in; ALL is always included.

    Raises
    ------
    AttributeError
        If name, group, or reports fail validation.
    """
    # deferred import to avoid a circular dependency at module load
    from armi.bookkeeping.report.reportInterface import ReportInterface

    if not name or not isinstance(name, str):
        raise AttributeError(f"Given name {name} not acceptable.")

    group = group or UNGROUPED
    if not isinstance(group, data.Group):
        raise AttributeError(f"Given group {group} not acceptable/approved.")

    reports = reports or []
    if not isinstance(reports, (list, set, tuple)):
        reports = [reports]
    elif not isinstance(reports, list):
        # sets and tuples have no append(); normalize to a list so the
        # ALL report can be added below (previously this crashed).
        reports = list(reports)
    if ALL not in reports:
        reports.append(ALL)
    if not all(isinstance(tag, data.Report) for tag in reports):
        raise AttributeError(f"Unapproved reports for {name}")

    for report in reports:
        if report not in ReportInterface.reports:
            ReportInterface.reports.add(report)
        report.addToReport(group, name, value)
# --------------------------------------------
# GROUP DEFINITIONS
# --------------------------------------------
# Tables of scalar report data; most describe the first fuel block's
# component dimensions.
BLOCK_AREA_FRACS = data.Table(
    "Assembly Area Fractions",
    " Of First Fuel Block",
    header=["Component", "Area (cm^2)", "Fraction"],
)
BOND_DIMS = data.Table("Bond Dimensions", " Of First Fuel Block")
CASE_CONTROLS = data.Table("Case Controls")
CASE_PARAMETERS = data.Table("Case Parameters")
CLAD_DIMS = data.Table("Cladding Dimensions", " Of First Fuel Block")
COOLANT_DIMS = data.Table("Coolant Dimensions", " Of First Fuel Block")
DUCT_DIMS = data.Table("Duct Dimensions", " Of First Fuel Block")
FUEL_DIMS = data.Table("Fuel Dimensions", " Of First Fuel Block")
GAP_DIMS = data.Table("Gap Dimensions", " Of First Fuel Block")
INTERCOOLANT_DIMS = data.Table("Intercoolant Dimensions", " Of First Fuel Block")
LINER_DIMS = data.Table("Liner Dimensions", " Of First Fuel Block")
NEUT_LOSS = data.Table("Neutron Loss")
NEUT_PROD = data.Table("Full Core Neutron Production", header=["", "n/s"])
PIN_ASSEM_DESIGN = data.Table("Pin/Assembly Design Summary (averages)")
RUN_META = data.Table("Run Meta")
UNGROUPED = data.Table("Ungrouped", "No grouping specified for the following information.")
WIRE_DIMS = data.Table("Wire Dimensions", " Of First Fuel Block")

# -----------------------------------------
# Image groups: plots and maps rendered into the report.
ASSEM_TYPES = data.Image(
    "Assembly Types",
    "The axial block and enrichment distributions of assemblies in the core at "
    "beginning of life. The percentage represents the block enrichment (U-235 or B-10), where as "
    "the additional character represents the cross section id of the block. "
    "The number of fine-mesh subdivisions are provided on the secondary y-axis.",
)
FACE_MAP = data.Image("Reactor Face Map", "The surface map of the reactor.")
FLUX_PLOT = data.Image("Plot of flux", "flux plot")
KEFF_PLOT = data.Image("Plot of K-Effective vs. Time", "k-eff vs. time")
MOVES_PLOT = data.Image("Plot of Moves vs. Time", "moves vs. time")
TIME_PLOT = data.Image("Plot of Value vs. Time", "value vs. time")
TIMELINE = data.Image("Timeline", "Time occupied by certain method invocations in run")
XS_PLOT = data.Image("Plot of Xs vs. Time", "xs vs. time")

# --------------------------------------------
# REPORT DEFINITIONS
# --------------------------------------------
ALL = data.Report(
    "Comprehensive Core Report",
    "Every piece of reported information about the ARMI run.",
)
DESIGN = data.Report("Core Design Report", "Information related to the core design parameters")

# --------------------------------------------
# FURTHER STYLIZATION
# --------------------------------------------
# have every report render these in the following order if present
data.Report.groupsOrderFirst = [
    FACE_MAP,
    RUN_META,
    CASE_PARAMETERS,
    CASE_CONTROLS,
    ASSEM_TYPES,
]

# This a grouping of components which span the entire html page rather than being sectioned into
# smaller columns.
data.Report.componentWellGroups = [
    FACE_MAP,
    ASSEM_TYPES,
    CLAD_DIMS,
    WIRE_DIMS,
    DUCT_DIMS,
    COOLANT_DIMS,
    INTERCOOLANT_DIMS,
    FUEL_DIMS,
    BOND_DIMS,
]
================================================
FILE: armi/bookkeeping/report/data.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data formats for reports."""
import collections
import copy
import re
from armi import runLog
class Report:
    """Storage for data separated out for a particular kind of user."""

    # stubs for "further stylization" in the report package init
    groupsOrderFirst = []
    componentWellGroups = []

    def __init__(self, title, description):
        self.title = title
        self.description = description
        # maps the globally-defined Group instance to this report's local copy
        self.groups = {}

    @property
    def _groupRenderOrder(self):
        """Helper method to the rendering methods on this class for rendering order of contained info."""
        firstGroups = [g for g in self.groupsOrderFirst if g in self.groups]
        remaining = [g for g in self.groups.keys() if g not in firstGroups]
        # component-well groups are rendered separately, so exclude them here
        return [g for g in firstGroups + remaining if g not in self.componentWellGroups]

    def __str__(self):
        rendered = "\n{} - (REPORT) {}\n".format(self.title, self.description)
        for localGroup in self.groups.values():
            # render with the base Group formatting, never a subclass override
            rendered += re.sub("\n", "\n\t", "{}".format(Group.__str__(localGroup)))
        return rendered

    def addToReport(self, group, name, value):
        """Inserts the datum into the correct group of the report."""
        if group not in self.groups:
            self.groups[group] = copy.deepcopy(group)
        self.groups[group][name] = value

    def __getitem__(self, group):
        try:
            return self.groups[group]
        except KeyError:
            runLog.warning("Cannot locate group {} in report {}".format(group.title, self.title))
            return None
class Group:
    """Abstract class, when extended is used for storage for data within a report.

    Only accepts things wrapped in the ReportDatum class.
    """

    def __init__(self, title, description=""):
        self.title = title
        self.description = description
        self.data = collections.OrderedDict()
        # inline CSS used when this group is rendered to HTML
        self.descStyle = "font-weight: normal; font-style: italic; font-size: 14px; padding-left: 5px;"
        self.titleStyle = "font-weight: bold; padding-top: 20px;"

    def __str__(self):
        pieces = ["\n{} - (GROUP) {}\n".format(self.title, self.description)]
        pieces.extend("\t{:<30} {}\n".format(key, val) for key, val in self.data.items())
        return "".join(pieces)

    def __getitem__(self, name):
        if name in self.data:
            return self.data[name]
        runLog.warning("Given name {} not present in report group {}".format(name, self.title))
        return None

    def __setitem__(self, name, value):
        self.data[name] = value
class Table(Group):
    """A report group rendered as a fixed-width text table."""

    def __init__(self, title, description="", header=None):
        Group.__init__(self, title, description=description)
        # optional list of column titles printed above the body
        self.header = header

    def __str__(self):
        """Truer to content representation."""
        if not len(self.data):
            # nothing to render
            return ""

        # column count = first row's cells plus the row-name column
        firstRow = list(self.data.values())[0]
        numCols = len(firstRow) + 1
        divider = "-" * (numCols * 31) + "\n"

        out = divider
        out += "{} - {}\n".format(self.title, self.description)
        if self.header:
            for columnTitle in self.header:
                out += "{:<30} ".format(columnTitle)
            out += "\n"
        out += divider

        # rows sorted case-insensitively by row name
        for rowName, rowValues in sorted(self.data.items(), key=self._lowerCaseSortForTuples):
            out += "{:<30} ".format(rowName)
            for cell in rowValues:
                out += "{:<30} ".format(cell)
            out += "\n"
        return out

    @staticmethod
    def _lowerCaseSortForTuples(nameValPair):
        """Force the key in a key-value pair to lower case."""
        return nameValPair[0].lower()

    def __setitem__(self, name, value):
        # every row is stored as a list so __str__ can iterate cells uniformly
        if not isinstance(value, list):
            value = [value]
        Group.__setitem__(self, name, value)
class Image(Group):
    """A report group that renders as an image with a caption."""

    def __init__(self, title, description=""):
        Group.__init__(self, title, description=description)
        # compact identifier suitable for HTML anchors/file names
        self._shortformTitle = title.replace(" ", "").lower()
================================================
FILE: armi/bookkeeping/report/reportInterface.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This interface serves the reporting needs of ARMI.
If there is any information that a user desires to show in PDF form to
others this is the place to do it.
"""
import re
from armi import interfaces, runLog
from armi.bookkeeping import report
from armi.bookkeeping.report import reportingUtils
from armi.physics import neutronics
from armi.physics.neutronics.settings import CONF_NEUTRONICS_TYPE
from armi.reactor.flags import Flags
from armi.utils import reportPlotting, units
ORDER = interfaces.STACK_ORDER.BEFORE + interfaces.STACK_ORDER.BOOKKEEPING
def describeInterfaces(cs):
    """Function for exposing interface(s) to other code."""
    # only register the report interface when report generation is enabled
    if not cs["genReports"]:
        return None
    return (ReportInterface, {})
class ReportInterface(interfaces.Interface):
    """An interface to manage the use of the report system."""

    name = "report"

    # class-level registry of every Report object that has received data
    reports = set()

    def __init__(self, r, cs):
        interfaces.Interface.__init__(self, r, cs)
        # beginning-of-cycle fissile inventory, filled in at interactBOC
        self.fuelCycleSummary = {"bocFissile": 0.0}

    def distributable(self):
        """Disables distributing of this report by broadcast MPI."""
        return self.Distribute.SKIP

    def interactBOL(self):
        """Write beginning-of-life reports: core maps, mass summary, pin design."""
        interfaces.Interface.interactBOL(self)
        runLog.important("Beginning of BOL Reports")
        reportingUtils.makeCoreAndAssemblyMaps(self.r, self.cs)
        reportingUtils.writeAssemblyMassSummary(self.r)
        if self.cs["summarizeAssemDesign"]:
            reportingUtils.summarizePinDesign(self.r.core)
        runLog.info(report.ALL[report.RUN_META])

    def interactEveryNode(self, cycle, node):
        """Log a keff/power/burnup summary (and optional flux plot) at each time node."""
        self.r.core.calcBlockMaxes()
        reportingUtils.summarizePowerPeaking(self.r.core)
        runLog.important("Cycle {}, node {} Summary: ".format(cycle, node))
        runLog.important(
            " time= {0:8.2f} years, keff= {1:.12f} maxPD= {2:-8.2f} MW/m^2, maxBuI= {3:-8.4f} maxBuF= {4:8.4f}".format(
                self.r.p.time,
                self.r.core.p.keff,
                self.r.core.p.maxPD,
                self.r.core.p.maxBuI,
                self.r.core.p.maxBuF,
            )
        )
        if self.cs["plots"]:
            adjoint = self.cs[CONF_NEUTRONICS_TYPE] == neutronics.ADJREAL_CALC
            figName = self.cs.caseTitle + "_{0}_{1}".format(cycle, node) + ".mgFlux." + self.cs["outputFileExtension"]
            if self.r.core.getFirstBlock(Flags.FUEL).p.mgFlux is not None:
                # deferred import to avoid a circular dependency at module load
                from armi.reactor import blocks

                blocks.Block.plotFlux(self.r.core, fName=figName, peak=True, adjoint=adjoint)
            else:
                runLog.warning("No mgFlux to plot in reports")

    def interactBOC(self, cycle=None):
        # record the beginning-of-cycle fissile inventory for cycle summaries
        self.fuelCycleSummary["bocFissile"] = self.r.core.getTotalBlockParam("kgFis")

    def interactEOC(self, cycle=None):
        # end-of-cycle summary plus a timing report
        reportingUtils.writeCycleSummary(self.r.core)
        runLog.info(self.o.timer.report(inclusionCutoff=0.001))

    def generateDesignReport(self, generateFullCoreMap, showBlockAxMesh):
        """Generate the core design report, core/assembly maps, and block design report."""
        reportingUtils.makeCoreDesignReport(self.r.core, self.cs)
        reportingUtils.makeCoreAndAssemblyMaps(self.r, self.cs, generateFullCoreMap, showBlockAxMesh)
        reportingUtils.makeBlockDesignReport(self.r)

    def interactEOL(self):
        """Adds the data to the report, and generates it."""
        b = self.r.core.getFirstBlock(Flags.FUEL)
        b.setAreaFractionsReport()

        dbi = self.o.getInterface("database")
        buGroups = self.cs["buGroups"]
        history = self.o.getInterface("history")
        reportPlotting.plotReactorPerformance(
            self.r,
            dbi,
            buGroups,
            extension=self.cs["outputFileExtension"],
            history=history,
        )

        reportingUtils.setNeutronBalancesReport(self.r.core)
        self.writeRunSummary()
        self.o.timer.stopAll()  # consider the run done
        runLog.info(self.o.timer.report(inclusionCutoff=0.001, totalTime=True))
        # generated for its side effect (writes the timeline figure); value unused
        _timelinePlot = self.o.timer.timeline(self.cs.caseTitle, 0.03, totalTime=True)
        runLog.info(self.printReports())

    def printReports(self):
        """Report Interface Specific."""
        str_ = ""
        for report_ in self.reports:
            str_ += re.sub("\n", "\n\t", "{}".format(report_))

        return "---------- REPORTS BEGIN ----------\n" + str_ + "\n----------- REPORTS END -----------"

    def writeRunSummary(self):
        """Make a summary of the run."""
        # spent fuel pool report
        if self.r.excore.get("sfp") is not None:
            self.reportSFP(self.r.excore["sfp"])
            self.countAssembliesSFP(self.r.excore["sfp"])

    @staticmethod
    def reportSFP(sfp):
        """A high-level summary of the Spent Fuel Pool."""
        title = "SpentFuelPool Report"
        runLog.important("-" * len(title))
        runLog.important(title)
        runLog.important("-" * len(title))

        totFis = 0.0
        for a in sfp:
            runLog.important(
                "{assembly:15s} discharged at t={dTime:10f} after {residence:10f} yrs. It entered at cycle: {cycle}. "
                "It has {fiss:10f} kg (x {mult}) fissile and peak BU={bu:.2f} %.".format(
                    assembly=a,
                    dTime=a.p.dischargeTime,
                    residence=(a.p.dischargeTime - a.p.chargeTime),
                    cycle=a.p.chargeCycle,
                    fiss=a.getFissileMass(),
                    bu=a.getMaxParam("percentBu"),
                    mult=a.p.multiplicity,
                )
            )
            totFis += a.getFissileMass() * a.p.multiplicity / 1000  # convert to kg
        runLog.important("Total SFP fissile inventory of {0} is {1:.4E} MT".format(sfp, totFis / 1000.0))

    @staticmethod
    def countAssembliesSFP(sfp):
        """Report on the count of assemblies in the SFP at each timestep."""
        if not len(sfp):
            return

        runLog.important("Count:")
        totCount = 0
        thisTimeCount = 0
        a = sfp[0]
        lastTime = a.getAge() / units.DAYS_PER_YEAR + a.p.chargeTime

        for a in sfp:
            thisTime = a.getAge() / units.DAYS_PER_YEAR + a.p.chargeTime
            if thisTime != lastTime:
                # time changed: flush the count accumulated for the previous time
                runLog.important(
                    "Number of assemblies moved at t={0:6.2f}: {1:04d}. Cumulative: {2:04d}".format(
                        lastTime, thisTimeCount, totCount
                    )
                )
                lastTime = thisTime
                thisTimeCount = 0
            totCount += 1  # noqa: SIM113
            thisTimeCount += 1
================================================
FILE: armi/bookkeeping/report/reportingUtils.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A collection of miscellaneous functions used by ReportInterface to generate various reports."""
import collections
import os
import pathlib
import re
import subprocess
import sys
import textwrap
import time
from copy import copy
import numpy as np
from armi import context, interfaces, runLog
from armi.bookkeeping import report
from armi.operators import RunTypes
from armi.reactor.components import ComponentType
from armi.reactor.flags import Flags
from armi.utils import (
getFileSHA1Hash,
iterables,
plotting,
tabulate,
textProcessors,
units,
)
# Set to prevent the image and text from being too small to read.
MAX_ASSEMS_PER_ASSEM_PLOT = 6

# String constants
# Row labels used by the welcome-header tables built in writeWelcomeHeaders().
Operator_CaseTitle = "Case Title:"
Operator_TypeOfRun = "Run Type:"
Operator_NumProcessors = "Number of Processors:"
Operator_WorkingDirectory = "Working Directory:"
Operator_CurrentUser = "Current User:"
Operator_PythonInterperter = "Python Interpreter:"
Operator_PythonExecutable = "Python Executable:"
Operator_ArmiCodebase = "ARMI Location:"
Operator_MasterMachine = "Master Machine:"
Operator_Date = "Date and Time:"
Operator_CaseDescription = "Case Description:"
def writeWelcomeHeaders(o, cs):
    """Write welcome information using the Operator and the Case Settings.

    Prints (on the root MPI rank only) four tables: case information, input
    file information with SHA-1 hashes, machine/rank information, and reactor
    cycle information.

    Parameters
    ----------
    o : Operator
        The operator whose run is being described.
    cs : Settings
        The case settings for this run.
    """

    def _writeCaseInformation(o, cs):
        """Create a table that contains basic case information."""
        caseInfo = [
            (Operator_CaseTitle, cs.caseTitle),
            (
                Operator_CaseDescription,
                "{0}".format(textwrap.fill(cs["comment"], break_long_words=False)),
            ),
            (
                Operator_TypeOfRun,
                "{} - {}".format(cs["runType"], o.__class__.__name__),
            ),
            (Operator_CurrentUser, context.USER),
            (Operator_ArmiCodebase, context.ROOT),
            (Operator_WorkingDirectory, os.getcwd()),
            (Operator_PythonInterperter, sys.version),
            (Operator_PythonExecutable, sys.executable),
            (Operator_MasterMachine, getNodeName()),
            (Operator_NumProcessors, context.MPI_SIZE),
            (Operator_Date, context.START_TIME),
        ]
        runLog.header("=========== Case Information ===========")
        runLog.info(tabulate.tabulate(caseInfo, tableFmt="armi"))

    def _listInputFiles(cs):
        """
        Gathers information about the input files of this case.

        Returns
        -------
        inputInfo : list
            (label, fileName, shaHash) tuples
        """
        # deferred import to avoid a circular dependency at module load
        from armi.physics.neutronics.settings import CONF_LOADING_FILE

        pathToLoading = pathlib.Path(cs.inputDirectory) / cs[CONF_LOADING_FILE]

        if pathToLoading.is_file():
            if pathToLoading.suffix.lower() in (".h5", ".hdf5"):
                # The blueprints are in a database, there aren't multiple included files
                includedBlueprints = [pathToLoading]
            else:
                includedBlueprints = [inclusion[0] for inclusion in textProcessors.findYamlInclusions(pathToLoading)]
        else:
            includedBlueprints = []

        inputInfo = []
        inputFiles = [
            (
                "Case Settings",
                os.path.basename(cs.path) if cs.path else cs.caseTitle + ".yaml",
            ),  # This could be a YAML or an h5.
            ("Blueprints", cs[CONF_LOADING_FILE]),
        ] + [("Included blueprints", inclBp) for inclBp in includedBlueprints]

        # also gather files declared as inputs by every enabled interface
        activeInterfaces = interfaces.getActiveInterfaceInfo(cs)
        for klass, kwargs in activeInterfaces:
            if not kwargs.get("enabled", True):
                # Don't consider disabled interfaces
                continue
            interfaceFileNames = klass.specifyInputs(cs)
            for label, fileNames in interfaceFileNames.items():
                for fName in fileNames:
                    inputFiles.append((label, fName))

        if cs["reloadDBName"] and cs["runType"] == RunTypes.SNAPSHOTS:
            inputFiles.append(("Database", cs["reloadDBName"]))

        for label, fName in inputFiles:
            # files that cannot be located are recorded with a "MISSING" hash
            shaHash = "MISSING" if (not fName or not os.path.exists(fName)) else getFileSHA1Hash(fName, digits=10)
            inputInfo.append((label, fName, shaHash))

        # bonus: grab the files stored in the crossSectionControl section
        for xsID, xsSetting in cs["crossSectionControl"].items():
            fNames = []

            # Users shouldn't ever have both of these defined, but this is not the place
            # for code to fail if they do. Allow for both to not be None.
            if xsSetting.xsFileLocation is not None:
                # possibly a list of files
                if isinstance(xsSetting.xsFileLocation, list):
                    fNames.extend(xsSetting.xsFileLocation)
                else:
                    fNames.append(xsSetting.xsFileLocation)
            if xsSetting.fluxFileLocation is not None:
                # single file
                fNames.append(xsSetting.fluxFileLocation)

            for fName in fNames:
                label = f"crossSectionControl-{xsID}"
                if fName and os.path.exists(fName):
                    shaHash = getFileSHA1Hash(os.path.abspath(fName), digits=10)
                    inputInfo.append((label, fName, shaHash))

        return inputInfo

    def _writeInputFileInformation(cs):
        """Create a table that contains basic input file information."""
        inputFileData = []
        for label, fileName, shaHash in _listInputFiles(cs):
            inputFileData.append((label, fileName, shaHash))

        runLog.header("=========== Input File Information ===========")
        runLog.info(
            tabulate.tabulate(
                inputFileData,
                headers=["Input Type", "Path", "SHA-1 Hash"],
                tableFmt="armi",
            )
        )

    def _writeMachineInformation():
        """Create a table that contains basic machine and rank information."""
        processorNames = context.MPI_NODENAMES
        uniqueNames = set(processorNames)
        nodeMappingData = []
        sysInfo = ""
        for uniqueName in uniqueNames:
            matchingProcs = [str(rank) for rank, procName in enumerate(processorNames) if procName == uniqueName]
            numProcessors = str(len(matchingProcs))
            nodeMappingData.append((uniqueName, numProcessors, ", ".join(matchingProcs)))
            # NOTE(review): getSystemInfo() describes the local machine only, yet
            # it is appended once per unique node name — on multi-node runs this
            # repeats the head node's info. Looks like it should be called once
            # outside the loop; confirm intended behavior.
            sysInfo += getSystemInfo()

        runLog.header("=========== Machine Information ===========")
        runLog.info(
            tabulate.tabulate(
                nodeMappingData,
                headers=["Machine", "Number of Processors", "Ranks"],
                tableFmt="armi",
            )
        )
        if sysInfo:
            runLog.header("=========== System Information ===========")
            runLog.info(sysInfo)

    def _writeReactorCycleInformation(o, cs):
        """Verify that all the operating parameters are defined for the same number of cycles."""
        operatingData = [
            ("Reactor Thermal Power (MW):", cs["power"] / units.WATTS_PER_MW),
            ("Number of Cycles:", cs["nCycles"]),
        ]
        operatingParams = {
            "Cycle Lengths:": o.cycleLengths,
            "Availability Factors:": o.availabilityFactors,
            "Power Fractions:": o.powerFractions,
            "Step Lengths (days):": o.stepLengths,
        }

        for name, param in operatingParams.items():
            paramStr = [str(p) for p in param]
            operatingData.append((name, textwrap.fill(", ".join(paramStr))))
        runLog.header("=========== Reactor Cycle Information ===========")
        runLog.info(tabulate.tabulate(operatingData, tableFmt="armi"))

    if context.MPI_RANK > 0:
        return  # prevent the worker nodes from printing the same thing

    _writeCaseInformation(o, cs)
    _writeInputFileInformation(cs)
    _writeMachineInformation()
    _writeReactorCycleInformation(o, cs)
def getNodeName():
    """Get the name of this compute node.

    First, look in context.py. Then try various Linux tools. Then try Windows commands.

    Returns
    -------
    str
        Compute node name.
    """
    # candidates in priority order; the subprocess calls are cheap and only
    # consulted when the context values are empty/default
    candidates = [
        context.MPI_NODENAME,
        context.MPI_NODENAMES[0],
        subprocess.run("hostname", capture_output=True, text=True, shell=True).stdout,
        subprocess.run("uname -n", capture_output=True, text=True, shell=True).stdout,
        os.environ.get("COMPUTERNAME", context.LOCAL),
    ]
    for candidate in candidates:
        # skip empty results and the placeholder "local" name
        if candidate and candidate != context.LOCAL:
            return candidate
    return context.LOCAL
def _getSystemInfoWindows():
"""Get system information, assuming the system is Windows.
Returns
-------
str
Basic system information: OS name, OS version, basic processor information
Examples
--------
Example results:
OS Name: Microsoft Windows 10 Enterprise
OS Version: 10.0.19041 N/A Build 19041
Processor(s): 1 Processor(s) Installed.
[01]: Intel64 Family 6 Model 142 Stepping 12 GenuineIntel ~801 Mhz
"""
cmd = (
'systeminfo | findstr /B /C:"OS Name" /B /C:"OS Version" /B /C:"Processor" && systeminfo | findstr /E /C:"Mhz"'
)
return subprocess.run(cmd, capture_output=True, text=True, shell=True).stdout
def _getSystemInfoMac():
    """Get system information, assuming the system is MacOS.

    Returns
    -------
    str
        Basic system information: OS name, OS version, basic processor information

    Examples
    --------
    Example results:

        System Software Overview:
            System Version: macOS 12.1 (21C52)
            Kernel Version: Darwin 21.2.0
            ...
        Hardware Overview:
            Model Name: MacBook Pro
            ...
    """
    profilerCommand = "system_profiler SPSoftwareDataType SPHardwareDataType"
    rawOutput = subprocess.check_output(profilerCommand, shell=True)
    return rawOutput.decode("utf-8")
def _getSystemInfoLinux():
    """Get system information, assuming the system is Linux.

    This method uses multiple, redundant variations on common Linux command utilities to get the
    information necessary. While it is not possible to guarantee what programs or files will be
    available on "all Linux operating system", this collection of tools is widely supported and
    should provide a reasonably broad-distribution coverage.

    Returns
    -------
    str
        Basic system information: OS name, OS version, basic processor information

    Examples
    --------
    Example results:

        OS Info:  Ubuntu 22.04.3 LTS
        Processor(s):
            processor   : 0
            vendor_id   : GenuineIntel
            cpu family  : 6
            ...
    """
    # get OS name / version: try each command until one yields output
    osCommands = [
        'cat /etc/os-release | grep "^PRETTY_NAME=" | cut -d = -f 2',
        "uname -a",
        "lsb_release -d | cut -d : -f 2",
        'hostnamectl | grep "Operating System" | cut -d : -f 2',
    ]
    osInfo = ""
    for command in osCommands:
        osInfo = subprocess.run(command, capture_output=True, text=True, shell=True).stdout.strip()
        if osInfo:
            break

    if not osInfo:
        runLog.warning("Linux OS information not found.")
        return ""

    # get processor information: again, first tool that produces output wins
    procCommands = ["lscpu", "cat /proc/cpuinfo", "lshw -class CPU"]
    procInfo = ""
    for command in procCommands:
        procInfo = subprocess.run(command, capture_output=True, text=True, shell=True).stdout
        if procInfo:
            break

    if not procInfo:
        runLog.warning("Linux processor information not found.")
        return ""

    # build output string
    return "OS Info: " + osInfo.strip() + "\nProcessor(s):\n " + procInfo.strip().replace("\n", "\n ") + "\n"
def getSystemInfo():
    """Get system information on Linux, MacOS, or Windows.

    Notes
    -----
    The format of the system information will be different on Linux, MacOS, and Windows.

    Returns
    -------
    str
        Basic system information: OS name, OS version, basic processor information
    """
    # Dispatch on a substring of sys.platform. "darwin" must be tested before
    # "win" because the string "darwin" itself contains "win".
    platformHandlers = (
        ("darwin", _getSystemInfoMac),
        ("win", _getSystemInfoWindows),
        ("linux", _getSystemInfoLinux),
    )
    for key, handler in platformHandlers:
        if key in sys.platform:
            return handler()

    runLog.warning(
        f"Cannot get system information for {sys.platform} because ARMI only "
        + "supports Linux, MacOS, and Windows."
    )
    return ""
def getInterfaceStackSummary(o):
    """Return a formatted table summarizing the operator's interface stack.

    Parameters
    ----------
    o : armi.operators.Operator
        Operator whose ``interfaces`` list is summarized, in stack order.

    Returns
    -------
    str
        Table with one row per interface: index, type, name, purpose,
        enabled state, EOL ordering, and BOL-force state.
    """
    # one row per interface; fixes the original's dead `text = text` statement
    data = [
        (
            "{:02d}".format(ii),
            i.__class__.__name__.replace("Interface", ""),
            i.name,
            i.purpose,
            "Yes" if i.enabled() else "No",
            "Reversed" if i.reverseAtEOL else "Normal",
            "Yes" if i.bolForce() else "No",
        )
        for ii, i in enumerate(o.interfaces, start=1)
    ]
    return tabulate.tabulate(
        data,
        headers=(
            "Index",
            "Type",
            "Name",
            "Purpose",
            "Enabled",
            "EOL order",
            "BOL forced",
        ),
        tableFmt="armi",
    )
def writeTightCouplingConvergenceSummary(convergenceSummary):
    """Log a table summarizing the tight-coupling convergence history."""
    runLog.info("Tight Coupling Convergence Summary")
    summaryTable = tabulate.tabulate(convergenceSummary, headers="keys", showIndex=True, tableFmt="armi")
    runLog.info(summaryTable)
def writeAssemblyMassSummary(r):
    """Print out things like Assembly weights to the runLog.

    For each assembly design in the blueprints this sums block masses (in kg,
    skipping grid plates), counts matching assemblies in the core (scaled for
    core symmetry), finds dominant materials, and logs two summary tables.

    Parameters
    ----------
    r : armi.reactor.reactors.Reactor
    """
    massSum = []
    for a in r.blueprints.assemblies.values():
        # per-assembly mass accumulators, in kg
        mass = 0.0
        hmMass = 0.0
        fissileMass = 0.0
        coolantMass = 0.0  # to calculate wet vs. dry weight.
        types = []  # ordered, de-duplicated block types seen in this assembly
        for b in a:
            # get masses in kg
            # skip stationary blocks (grid plate doesn't count)
            if b.hasFlags(Flags.GRID_PLATE):
                continue

            mass += b.getMass() / 1000.0
            hmMass += b.getHMMass() / 1000.0
            fissileMass += b.getFissileMass() / 1000.0
            coolants = b.getComponents(Flags.COOLANT, exact=True) + b.getComponents(Flags.INTERCOOLANT, exact=True)
            coolantMass += sum(coolant.getMass() for coolant in coolants) / 1000.0

            blockType = b.getType()
            if blockType not in types:
                types.append(blockType)

        # If the BOL fuel assem is in the center of the core, its area is 1/3 of the full area b/c
        # its a sliced assem.
        # count assemblies
        core = r.core
        thisTypeList = core.getChildrenOfType(a.getType())
        count = 0
        for t in thisTypeList:
            ring, _pos = t.spatialLocator.getRingPos()
            if ring == 1:
                # only count center location once.
                count += 1
            else:
                # add 3 if it's 1/3 core, etc.
                count += core.powerMultiplier

        # Get the dominant materials; fall back to the overall dominant material
        # (with a different dict key) when there is no fuel/control material
        pinMaterialKey = "pinMaterial"
        pinMaterialObj = a.getDominantMaterial([Flags.FUEL, Flags.CONTROL])
        if pinMaterialObj is None:
            pinMaterialObj = a.getDominantMaterial()
            pinMaterialKey = "dominantMaterial"
            pinMaterial = pinMaterialObj.name
        else:
            pinMaterial = pinMaterialObj.name

        struct = a.getDominantMaterial([Flags.CLAD, Flags.DUCT, Flags.SHIELD])
        if struct:
            structuralMaterial = struct.name
        else:
            structuralMaterial = "[None]"

        cool = a.getDominantMaterial([Flags.COOLANT])
        if cool:
            coolantMaterial = cool.name
        else:
            coolantMaterial = "[None]"

        # Get pins per assembly: first candidate block type with a nonzero pin count wins
        pinsPerAssembly = 0
        for candidate in (Flags.FUEL, Flags.CONTROL, Flags.SHIELD):
            b = a.getFirstBlock(candidate)
            if b:
                pinsPerAssembly = b.getNumPins()
            if pinsPerAssembly:
                break

        massSum.append(
            {
                "type": a.getType(),
                "wetMass": mass,
                "hmMass": hmMass,
                "fissileMass": fissileMass,
                "dryMass": mass - coolantMass,
                "count": count,
                "components": types,
                pinMaterialKey: pinMaterial,
                "structuralMaterial": structuralMaterial,
                "coolantMaterial": coolantMaterial,
                "pinsPerAssembly": pinsPerAssembly,
            }
        )

    runLog.important(_makeBOLAssemblyMassSummary(massSum))
    runLog.important(_makeTotalAssemblyMassSummary(massSum))
def _makeBOLAssemblyMassSummary(massSum):
str_ = ["--- BOL Assembly Mass Summary (kg) ---"]
dataLabels = ["wetMass", "dryMass", "fissileMass", "hmMass", "count"]
# print header for the printout of each assembly type
str_.append(" " * 12 + "".join(["{0:25s}".format(s["type"]) for s in massSum]))
for val in dataLabels:
line = ""
for s in massSum:
line += "{0:<25.3f}".format(s[val])
str_.append("{0:12s}{1}".format(val, line))
# print blocks in this assembly up to 10
for i in range(10):
line = " " * 12
for s in massSum:
try:
line += "{0:25s}".format(s["components"][i])
except IndexError:
line += " " * 25
if re.search(r"\S", line): # \S matches any non-whitespace character.
str_.append(line)
return "\n".join(str_)
def _makeTotalAssemblyMassSummary(massSum):
massLabels = ["wetMass", "dryMass", "fissileMass", "hmMass"]
totals = {}
count = 0
str_ = ["--Totals--"]
for label in massLabels:
totals[label] = 0.0
for assemSum in massSum:
totals[label] += assemSum[label] * assemSum["count"]
count += assemSum["count"]
str_.append("{0:12s} {1:.2f} MT".format(label, totals[label] / 1000.0))
str_.append("Total assembly count: {0}".format(count // len(massLabels)))
return "\n".join(str_)
def writeCycleSummary(core):
    """Print an end-of-cycle summary (average burnup and timestamp) to the runLog.

    Parameters
    ----------
    core : armi.reactor.reactors.Core
    """
    cycle = core.r.p.cycle
    runLog.important("Cycle {0} Summary:".format(cycle))
    avgBu = core.calcAvgParam("percentBu", typeSpec=Flags.FUEL, generationNum=2)
    summaryLines = [
        "Core Average Burnup: {0}".format(avgBu),
        "End of Cycle {0:02d}. Timestamp: {1} ".format(cycle, time.ctime()),
    ]
    runLog.info("\n".join(summaryLines))
def setNeutronBalancesReport(core):
    """Determines the various neutron balances over the full core.

    Collects capture, absorption, fission, and production (fission and n,2n)
    rates from block parameters, derives leakage from the production/absorption
    balance, and records formatted entries in the production/loss report groups.

    Parameters
    ----------
    core : armi.reactor.reactors.Core
    """
    if not core.getFirstBlock().p.rateCap:
        # rate parameters are only present once a physics solver has populated them
        runLog.warning(
            "No rate information (rateCap, rateAbs, etc.) available on the blocks. Skipping balance summary."
        )
        return

    # NOTE(review): calcAvgParam with volumeAveraged=False is assumed to yield
    # core-integrated totals here -- confirm against Core.calcAvgParam.
    cap = core.calcAvgParam("rateCap", volumeAveraged=False, generationNum=2)
    absorb = core.calcAvgParam("rateAbs", volumeAveraged=False, generationNum=2)
    fis = core.calcAvgParam("rateFis", volumeAveraged=False, generationNum=2)
    n2nProd = core.calcAvgParam("rateProdN2n", volumeAveraged=False, generationNum=2)
    fisProd = core.calcAvgParam("rateProdFis", volumeAveraged=False, generationNum=2)

    # whatever is produced but not absorbed is attributed to leakage
    leak = n2nProd + fisProd - absorb

    # production-side entries: fractions are relative to total production
    report.setData(
        "Fission",
        "{0:.5e} ({1:.2%})".format(fisProd, fisProd / (fisProd + n2nProd)),
        report.NEUT_PROD,
    )
    report.setData(
        "n, 2n",
        "{0:.5e} ({1:.2%})".format(n2nProd, n2nProd / (fisProd + n2nProd)),
        report.NEUT_PROD,
    )
    # loss-side entries: fractions are relative to absorption + leakage
    report.setData(
        "Capture",
        "{0:.5e} ({1:.2%})".format(cap, cap / (absorb + leak)),
        report.NEUT_LOSS,
    )
    report.setData(
        "Fission",
        "{0:.5e} ({1:.2%})".format(fis, fis / (absorb + leak)),
        report.NEUT_LOSS,
    )
    report.setData(
        "Absorption",
        "{0:.5e} ({1:.2%})".format(absorb, absorb / (absorb + leak)),
        report.NEUT_LOSS,
    )
    report.setData(
        "Leakage",
        "{0:.5e} ({1:.2%})".format(leak, leak / (absorb + leak)),
        report.NEUT_LOSS,
    )

    runLog.info(report.ALL[report.NEUT_PROD])
    runLog.info(report.ALL[report.NEUT_LOSS])
def summarizePinDesign(core):
    """Prints out some information about the pin assembly/duct design.

    Handles multiple types of dimensions simplistically by taking the average.

    Parameters
    ----------
    core : armi.reactor.reactors.Core
    """
    designInfo = collections.defaultdict(list)

    try:
        # gather per-block dimensions (converted from cm to mm) over all fuel blocks
        for b in core.iterBlocks(Flags.FUEL):
            fuel = b.getComponent(Flags.FUEL)
            duct = b.getComponent(Flags.DUCT)
            clad = b.getComponent(Flags.CLAD)
            wire = b.getComponent(Flags.WIRE)
            designInfo["hot sd"].append(b.getSmearDensity(cold=False))
            designInfo["sd"].append(b.getSmearDensity())
            designInfo["ductThick"].append(
                (duct.getDimension("op") - duct.getDimension("ip")) * 5.0
            )  # convert to mm and divide by 2
            designInfo["cladThick"].append((clad.getDimension("od") - clad.getDimension("id")) * 5.0)
            pinOD = clad.getDimension("od") * 10.0
            wireOD = wire.getDimension("od") * 10.0
            pitch = pinOD + wireOD  # pitch has half a wire on each side.
            assemPitch = b.getPitch() * 10  # convert cm to mm.
            designInfo["pinOD"].append(pinOD)
            designInfo["wireOD"].append(wireOD)
            designInfo["pin pitch"].append(pitch)
            pinToDuctGap = b.getPinToDuctGap()
            if pinToDuctGap is not None:
                designInfo["pinToDuct"].append(b.getPinToDuctGap() * 10.0)
            designInfo["assemPitch"].append(assemPitch)
            designInfo["duct gap"].append(assemPitch - duct.getDimension("op") * 10.0)
            designInfo["nPins"].append(b.p.nPins)
            designInfo["zrFrac"].append(fuel.getMassFrac("ZR"))

        # assumption made that all lists contain only numerical data
        designInfo = {key: np.average(data) for key, data in designInfo.items()}

        # these averaged quantities carry no mm unit in the report
        dimensionless = {"sd", "hot sd", "zrFrac", "nPins"}
        for key, average_value in designInfo.items():
            dim = "{0:10s}".format(key)
            val = "{0:.4f}".format(average_value)
            if key not in dimensionless:
                val += " mm"
            report.setData(dim, val, report.PIN_ASSEM_DESIGN)

        a = core.refAssem
        report.setData(
            "Fuel Height (cm):",
            "{0:.2f}".format(a.getHeight(Flags.FUEL)),
            report.PIN_ASSEM_DESIGN,
        )
        report.setData(
            "Plenum Height (cm):",
            "{0:.2f}".format(a.getHeight(Flags.PLENUM)),
            report.PIN_ASSEM_DESIGN,
        )
        runLog.info(report.ALL[report.PIN_ASSEM_DESIGN])

        first_fuel_block = core.getFirstBlock(Flags.FUEL)
        runLog.info("Design & component information for first fuel block {}".format(first_fuel_block))

        runLog.info(first_fuel_block.setAreaFractionsReport())

        for component_ in sorted(first_fuel_block):
            runLog.info(component_.setDimensionReport())

    except Exception as error:
        # deliberately broad: this summary is best-effort and must not kill the run
        runLog.warning("Pin summarization failed to work")
        runLog.warning(error)
def summarizePowerPeaking(core):
    """Prints reactor Fz, Fxy, Fq.

    Parameters
    ----------
    core : armi.reactor.reactors.Core
    """
    # Fz is the axial peaking of the highest power assembly
    _maxPow, maxPowBlock = core.getMaxParam("power", returnObj=True, generationNum=2)
    maxPowAssem = maxPowBlock.parent
    avgPDens = maxPowAssem.calcAvgParam("pdens")
    peakPDens = maxPowAssem.getMaxParam("pdens")
    if not avgPDens:
        # protect against divide-by-zero. Peaking doesn't make sense if there is no power
        return

    axPeakF = peakPDens / avgPDens

    # Fxy is the radial peaking factor, looking at ALL assemblies with axially integrated powers.
    power = 0.0
    n = 0  # stays 0 if the core is empty, keeping the divisor below valid
    for n, a in enumerate(core):
        power += a.calcTotalParam("power", typeSpec=Flags.FUEL)
    avgPow = power / (n + 1)  # n is the last enumerate index, so n + 1 is the assembly count
    radPeakF = maxPowAssem.calcTotalParam("power", typeSpec=Flags.FUEL) / avgPow
    runLog.important(
        "Power Peaking: Fz= {0:.3f} Fxy= {1:.3f} Fq= {2:.3f}".format(axPeakF, radPeakF, axPeakF * radPeakF)
    )
def makeCoreDesignReport(core, cs):
    """Builds report to summarize core design inputs.

    Parameters
    ----------
    core: armi.reactor.reactors.Core
    cs: armi.settings.caseSettings.Settings
    """
    tableTitle = "SUMMARY OF CORE: {}".format(cs.caseTitle.upper())
    coreDesignTable = report.data.Table(tableTitle)
    coreDesignTable.header = ["", "Input Parameter"]

    # Change the ordering of the core design table in the report relative to the other data
    for reportGroup in (report.data.Report.groupsOrderFirst, report.data.Report.componentWellGroups):
        reportGroup.insert(0, coreDesignTable)

    _setGeneralCoreDesignData(cs, coreDesignTable)
    _setGeneralCoreParametersData(core, cs, coreDesignTable)
    _setGeneralSimulationData(core, cs, coreDesignTable)
def _setGeneralCoreDesignData(cs, coreDesignTable):
    """Record case-identification and input-file settings in the core design table."""
    from armi.physics.fuelCycle.settings import CONF_SHUFFLE_LOGIC
    from armi.physics.neutronics.settings import CONF_LOADING_FILE

    rows = [
        ("Case Title", cs.caseTitle),
        ("Run Type", cs["runType"]),
        ("Loading File", cs[CONF_LOADING_FILE]),
        ("Fuel Shuffling Logic File", cs[CONF_SHUFFLE_LOGIC]),
        ("Reactor State Loading", cs["loadStyle"]),
    ]
    if cs["loadStyle"] == "fromDB":
        # database-restart runs also record where/when the restart begins
        rows.append(("Database File", cs["reloadDBName"]))
        rows.append(("Starting Cycle", cs["startCycle"]))
        rows.append(("Starting Node", cs["startNode"]))

    for label, value in rows:
        report.setData(label, "{}".format(value), coreDesignTable, report.DESIGN)
def _setGeneralCoreParametersData(core, cs, coreDesignTable):
    """Record core-wide masses, volume, power/cycle settings, and assembly counts.

    Parameters
    ----------
    core : armi.reactor.reactors.Core
    cs : armi.settings.caseSettings.Settings
    coreDesignTable : armi.bookkeeping.report.data.Table
    """
    blocks = core.getBlocks()
    totalMass = sum(b.getMass() for b in blocks)
    fissileMass = sum(b.getFissileMass() for b in blocks)
    heavyMetalMass = sum(b.getHMMass() for b in blocks)
    totalVolume = sum(b.getVolume() for b in blocks)

    # blank spacer row between report sections
    report.setData(" ", "", coreDesignTable, report.DESIGN)
    report.setData(
        "Core Power",
        "{:.2f} MWth".format(cs["power"] / units.WATTS_PER_MW),
        coreDesignTable,
        report.DESIGN,
    )
    report.setData(
        "Base Capacity Factor",
        "{}".format(cs["availabilityFactor"]),
        coreDesignTable,
        report.DESIGN,
    )  # note this doesn't consider availabilityFactors
    report.setData(
        "Cycle Length",
        "{} days".format(cs["cycleLength"]),
        coreDesignTable,
        report.DESIGN,
    )  # note this doesn't consider cycleLengths
    report.setData("Burnup Cycles", "{}".format(cs["nCycles"]), coreDesignTable, report.DESIGN)
    report.setData(
        "Burnup Steps per Cycle",
        "{}".format(cs["burnSteps"]),
        coreDesignTable,
        report.DESIGN,
    )  # note this doesn't consider the detailed cycle input option

    # scale from the (possibly partial-symmetry) model to the full core
    corePowerMult = int(core.powerMultiplier)
    report.setData(
        "Core Total Volume",
        "{:.2f} cc".format(totalVolume * corePowerMult),
        coreDesignTable,
        report.DESIGN,
    )
    report.setData(
        "Core Fissile Mass",
        "{:.2f} kg".format(fissileMass / units.G_PER_KG * corePowerMult),
        coreDesignTable,
        report.DESIGN,
    )
    report.setData(
        "Core Heavy Metal Mass",
        "{:.2f} kg".format(heavyMetalMass / units.G_PER_KG * corePowerMult),
        coreDesignTable,
        report.DESIGN,
    )
    report.setData(
        "Core Total Mass",
        "{:.2f} kg".format(totalMass / units.G_PER_KG * corePowerMult),
        coreDesignTable,
        report.DESIGN,
    )
    report.setData(
        "Number of Assembly Rings",
        "{}".format(core.getNumRings()),
        coreDesignTable,
        report.DESIGN,
    )

    # Assembly counts by type. The original computed len(assemblyList * corePowerMult),
    # which replicates the list in memory just to count it; len(...) * corePowerMult is
    # equivalent and avoids the copies.
    assemblyCountRows = [
        ("Number of Assemblies", None),
        ("Number of Fuel Assemblies", Flags.FUEL),
        ("Number of Control Assemblies", Flags.CONTROL),
        ("Number of Reflector Assemblies", Flags.REFLECTOR),
        ("Number of Shield Assemblies", Flags.SHIELD),
    ]
    for label, flag in assemblyCountRows:
        assemblies = core.getAssemblies() if flag is None else core.getAssemblies(flag)
        report.setData(
            label,
            "{}".format(len(assemblies) * corePowerMult),
            coreDesignTable,
            report.DESIGN,
        )
def _setGeneralSimulationData(core, cs, coreDesignTable):
    """Record high-level simulation options in the core design table."""
    from armi.physics.neutronics.settings import CONF_GEN_XS, CONF_GLOBAL_FLUX_ACTIVE

    rows = [
        (" ", ""),  # blank spacer row
        ("Full Core Model", "{}".format(core.isFullCore)),
        ("Tight Physics Coupling Enabled", "{}".format(bool(cs["tightCoupling"]))),
        ("Lattice Physics Enabled for", "{}".format(cs[CONF_GEN_XS])),
        ("Neutronics Enabled for", "{}".format(cs[CONF_GLOBAL_FLUX_ACTIVE])),
    ]
    for label, value in rows:
        report.setData(label, value, coreDesignTable, report.DESIGN)
def makeBlockDesignReport(r):
    """Summarize the block designs from the loading file.

    Parameters
    ----------
    r : armi.reactor.reactors.Reactor
    """
    for blockDesign in r.blueprints.blockDesigns:
        blockTable = report.data.Table("SUMMARY OF BLOCK: {}".format(blockDesign.name))
        blockTable.header = ["", "Input Parameter"]
        # Change the ordering of the loading file table in the report relative to the other data
        report.data.Report.groupsOrderFirst.append(blockTable)
        report.data.Report.componentWellGroups.append(blockTable)

        report.setData("Number of Components", [len(blockDesign)], blockTable, report.DESIGN)

        for componentIndex, componentDesign in enumerate(blockDesign):
            componentName = componentDesign.name
            # a unique whitespace-only key acts as a visual splitter row per component
            splitterKey = (componentIndex + 1) * " " + "\n"
            report.setData(splitterKey, [""], blockTable, report.DESIGN)

            for label, (value, unit) in _getComponentInputDimensions(componentDesign).items():
                report.setData(
                    "{} {}".format(componentName, label),
                    "{} {}".format(value, unit),
                    blockTable,
                    report.DESIGN,
                )
def _getComponentInputDimensions(cDesign):
    """Get the input dimensions of a component and place them in a dictionary with labels and units.

    Parameters
    ----------
    cDesign : component blueprint design
        Exposes ``shape``, ``material``, ``Tinput``, ``Thot``, ``isotopics``, and one
        attribute per dimension name of its shape type.

    Returns
    -------
    collections.OrderedDict
        Maps label -> (value, unit); the unit is "" for non-dimension entries.
    """
    dims = collections.OrderedDict()
    dims["Shape"] = (cDesign.shape, "")
    dims["Material"] = (cDesign.material, "")
    dims["Cold Temperature"] = (cDesign.Tinput, "C")
    dims["Hot Temperature"] = (cDesign.Thot, "C")
    if cDesign.isotopics is not None:
        dims["Custom Isotopics"] = (cDesign.isotopics, "")
    for dimName in ComponentType.TYPES[cDesign.shape.lower()].DIMENSION_NAMES:
        value = getattr(cDesign, dimName)
        if value is not None:
            # if not default, add it to the report (reuse `value` instead of the
            # original's redundant second getattr call)
            dims[dimName] = (value.value, "cm")
    return dims
def makeCoreAndAssemblyMaps(r, cs, generateFullCoreMap=False, showBlockAxMesh=True):
    """Create core and assembly design plots.

    Builds one or more assembly-type figures (batched to MAX_ASSEMS_PER_ASSEM_PLOT
    assemblies per figure), then a radial core map colored by assembly type.

    Parameters
    ----------
    r : armi.reactor.reactors.Reactor
    cs: armi.settings.caseSettings.Settings
    generateFullCoreMap : bool, default False
        If True, call ``core.growToFullCore(cs)`` before plotting the radial map.
    showBlockAxMesh : bool, default True
        Passed through to the assembly-type plotter.
    """
    assems = []
    blueprints = r.blueprints
    for aKey in blueprints.assemDesigns.keys():
        a = blueprints.constructAssem(cs, name=aKey)
        # since we will be plotting cold input heights, we need to make sure that
        # that these new assemblies have access to a blueprints somewhere up the
        # composite chain. normally this would happen through an assembly's parent
        # reactor, but because these newly created assemblies are in the load queue,
        # they will not have a parent reactor. to get around this, we just attach
        # the blueprints to the assembly directly.
        a.blueprints = blueprints
        assems.append(a)

    core = r.core
    for plotNum, assemBatch in enumerate(iterables.chunk(assems, MAX_ASSEMS_PER_ASSEM_PLOT), start=1):
        assemPlotImage = copy(report.ASSEM_TYPES)
        assemPlotImage.title = assemPlotImage.title + " ({})".format(plotNum)
        # insert before the last entry so the plots keep a fixed relative position
        report.data.Report.groupsOrderFirst.insert(-1, assemPlotImage)
        report.data.Report.componentWellGroups.insert(-1, assemPlotImage)
        assemPlotName = os.path.abspath(f"{core.name}AssemblyTypes{plotNum}.png")
        plotting.plotAssemblyTypes(
            assemBatch,
            assemPlotName,
            maxAssems=MAX_ASSEMS_PER_ASSEM_PLOT,
            showBlockAxMesh=showBlockAxMesh,
            hot=False,  # plot cold input heights
        )

    # Create radial core map
    if generateFullCoreMap:
        core.growToFullCore(cs)
    counts = {
        assemDesign.name: len(core.getChildrenOfType(assemDesign.name)) for assemDesign in r.blueprints.assemDesigns
    }
    # assemDesigns.keys is ordered based on input, assemOrder only contains types that are in the core
    assemOrder = [aType for aType in r.blueprints.assemDesigns.keys() if counts[aType] > 0]
    data = [assemOrder.index(a.p.type) for a in core]
    labels = [r.blueprints.assemDesigns[a.p.type].specifier for a in core]
    legendMap = [
        (
            ai,
            assemDesign.specifier,
            "{} ({})".format(assemDesign.name, counts[assemDesign.name]),
        )
        for ai, assemDesign in enumerate(r.blueprints.assemDesigns)
        if counts[assemDesign.name] > 0
    ]
    fName = "".join([cs.caseTitle, "RadialCoreMap.", cs["outputFileExtension"]])
    plotting.plotFaceMap(
        core,
        title="{} Radial Core Map".format(cs.caseTitle),
        fName=fName,
        cmapName="RdYlBu",
        data=data,
        labels=labels,
        legendMap=legendMap,
        axisEqual=True,
        bare=True,
        titleSize=10,
        fontSize=8,
    )
    report.setData("Radial Core Map", os.path.abspath(fName), report.FACE_MAP, report.DESIGN)
COMPONENT_INFO = "Component Information"
================================================
FILE: armi/bookkeeping/report/tests/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
================================================
FILE: armi/bookkeeping/report/tests/test_report.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Really basic tests of the report Utils."""
import logging
import os
import subprocess
import sys
import unittest
from glob import glob
from unittest.mock import patch
from armi import runLog, settings
from armi.bookkeeping import report
from armi.bookkeeping.report import data, reportInterface
from armi.bookkeeping.report.reportingUtils import (
_getSystemInfoLinux,
_getSystemInfoMac,
_getSystemInfoWindows,
getNodeName,
getSystemInfo,
makeBlockDesignReport,
makeCoreDesignReport,
setNeutronBalancesReport,
summarizePinDesign,
summarizePowerPeaking,
writeAssemblyMassSummary,
writeCycleSummary,
writeWelcomeHeaders,
)
from armi.testing import loadTestReactor
from armi.tests import mockRunLogs
from armi.utils.directoryChangers import TemporaryDirectoryChanger
class _MockReturnResult:
    """Mocking the subprocess.run() return object."""

    def __init__(self, stdout):
        # mimics CompletedProcess.stdout; str or bytes depending on the test
        self.stdout = stdout
class TestReportingUtils(unittest.TestCase):
    """Tests of the platform-specific system-information helpers in reportingUtils."""

    def test_getSystemInfoLinux(self):
        """Test _getSystemInfoLinux() on any operating system, by mocking the system calls."""
        osInfo = '"Ubuntu 22.04.3 LTS"'
        procInfo = """processor : 0
vendor_id : GenuineIntel
cpu family : 6
model : 126
model name : Intel(R) Core(TM) i5-1035G1 CPU @ 1.00GHz
...
"""
        correctResult = """OS Info: "Ubuntu 22.04.3 LTS"
Processor(s):
processor : 0
vendor_id : GenuineIntel
cpu family : 6
model : 126
model name : Intel(R) Core(TM) i5-1035G1 CPU @ 1.00GHz
..."""

        def __mockSubprocessRun(*args, **kwargs):
            # the os-release command is the OS-info query; everything else is
            # treated as a processor-info query
            if "os-release" in args[0]:
                return _MockReturnResult(osInfo)
            else:
                return _MockReturnResult(procInfo)

        with patch.object(subprocess, "run", side_effect=__mockSubprocessRun):
            out = _getSystemInfoLinux()
            self.assertEqual(out.strip(), correctResult)

    @patch("subprocess.run")
    def test_getSystemInfoWindows(self, mockSubprocess):
        """Test _getSystemInfoWindows() on any operating system, by mocking the system call."""
        windowsResult = """OS Name: Microsoft Windows 10 Enterprise
OS Version: 10.0.19041 N/A Build 19041
Processor(s): 1 Processor(s) Installed.
[01]: Intel64 Family 6 Model 142 Stepping 12 GenuineIntel ~801 Mhz"""

        mockSubprocess.return_value = _MockReturnResult(windowsResult)

        out = _getSystemInfoWindows()
        self.assertEqual(out, windowsResult)

    @patch("subprocess.run")
    def test_getSystemInfoMac(self, mockSubprocess):
        """Test _getSystemInfoMac() on any operating system, by mocking the system call."""
        macResult = b"""System Software Overview:
System Version: macOS 12.1 (21C52)
Kernel Version: Darwin 21.2.0
...
Hardware Overview:
Model Name: MacBook Pro
..."""

        mockSubprocess.return_value = _MockReturnResult(macResult)

        out = _getSystemInfoMac()
        # the Mac helper decodes the raw bytes it gets from the system call
        self.assertEqual(out, macResult.decode("utf-8"))

    def test_getSystemInfo(self):
        """Basic sanity check of getSystemInfo() running in the wild.

        This test should pass if it is run on Windows or mainstream Linux distros. But we expect
        this to fail if the test is run on some other OS.
        """
        if "darwin" in sys.platform:
            # too complicated to test MacOS in this method
            return

        out = getSystemInfo()
        substrings = ["OS ", "Processor(s):"]

        for sstr in substrings:
            self.assertIn(sstr, out)

        # the output should be meaningfully longer than just the header strings
        self.assertGreater(len(out), sum(len(sstr) + 5 for sstr in substrings))

    def test_getNodeName(self):
        """Test that the getNodeName() method returns a non-empty string.

        It is hard to know what string SHOULD be returned here, and it would depend on how the OS
        is set up on your machine or cluster. But this simple test needs to pass as-is on Windows
        and Linux.
        """
        self.assertGreater(len(getNodeName()), 0)
class TestReport(unittest.TestCase):
    """Tests of the report data containers and the reactor-specific reporting utilities."""

    def setUp(self):
        # fresh table group per test
        self.test_group = data.Table(settings.Settings(), "banana")

    def test_setData(self):
        report.setData("banana_1", ["sundae", "plain"])
        report.setData("banana_2", ["sundae", "vanilla"], self.test_group)
        report.setData("banana_3", ["sundae", "chocolate"], self.test_group, [report.ALL])

        # invalid group / report arguments should raise
        with self.assertRaises(AttributeError):
            report.setData("banana_4", ["sundae", "strawberry"], "no_workie", [report.ALL])
        with self.assertRaises(AttributeError):
            report.setData("banana_5", ["sundae", "peanut_butter"], self.test_group, "no_workie")

        ungroup_instance = report.ALL[report.UNGROUPED]
        self.assertEqual(ungroup_instance["banana_1"], ["sundae", "plain"])

        filled_instance = report.ALL[self.test_group]
        self.assertEqual(filled_instance["banana_2"], ["sundae", "vanilla"])
        self.assertEqual(filled_instance["banana_3"], ["sundae", "chocolate"])

    def test_getData(self):
        # test the null case
        self.assertIsNone(self.test_group["fake"])

        # insert some data
        self.test_group["banana_1"] = ["sundae", "plain"]

        # validate we can pull that data back out again
        data = self.test_group["banana_1"]
        self.assertEqual(len(data), 2)
        self.assertIn("sundae", data)
        self.assertIn("plain", data)

    def test_reactorSpecificReporting(self):
        """Test a number of reporting utils that require reactor/core information."""
        o, r = loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml")

        # make sure makeCoreDesignReport() doesn't fail, though it won't generate an output here
        makeCoreDesignReport(r.core, o.cs)
        self.assertEqual(len(glob("*.html")), 0)

        with mockRunLogs.BufferLog() as mock:
            # we should start with a clean slate
            self.assertEqual("", mock.getStdout())
            runLog.LOG.startLog("test_reactorSpecificReporting")
            runLog.LOG.setVerbosity(logging.INFO)

            writeAssemblyMassSummary(r)
            self.assertIn("BOL Assembly Mass Summary", mock.getStdout())
            self.assertIn("igniter fuel", mock.getStdout())
            mock.emptyStdout()

            # without rate params, the balance report should warn and bail
            setNeutronBalancesReport(r.core)
            self.assertIn("No rate information", mock.getStdout())
            mock.emptyStdout()

            # provide block-level rate params so the balance report has data
            r.core.getFirstBlock().p.rateCap = 1.0
            r.core.getFirstBlock().p.rateProdFis = 1.02
            r.core.getFirstBlock().p.rateFis = 1.01
            r.core.getFirstBlock().p.rateAbs = 1.0
            setNeutronBalancesReport(r.core)
            self.assertIn("Fission", mock.getStdout())
            self.assertIn("Capture", mock.getStdout())
            self.assertIn("Absorption", mock.getStdout())
            self.assertIn("Leakage", mock.getStdout())
            mock.emptyStdout()

            summarizePinDesign(r.core)
            self.assertIn("Assembly Design Summary", mock.getStdout())
            self.assertIn("Design & component information", mock.getStdout())
            self.assertIn("Multiplicity", mock.getStdout())
            mock.emptyStdout()

            writeCycleSummary(r.core)
            self.assertIn("Core Average", mock.getStdout())
            self.assertIn("End of Cycle", mock.getStdout())
            mock.emptyStdout()

            # this report won't do much for the test reactor - improve test reactor
            makeBlockDesignReport(r)
            self.assertEqual(len(mock.getStdout()), 0)
            mock.emptyStdout()

            # this report won't do much for the test reactor - improve test reactor
            summarizePowerPeaking(r.core)
            self.assertEqual(len(mock.getStdout()), 0)

    def test_writeWelcomeHeaders(self):
        o, r = loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml")

        # grab this file path
        randoFile = os.path.abspath(__file__)

        # pass that random file into the settings
        o.cs["crossSectionControl"]["DA"].xsFileLocation = randoFile
        o.cs["crossSectionControl"]["DA"].fluxFileLocation = randoFile

        with mockRunLogs.BufferLog() as mock:
            # we should start with a clean slate
            self.assertEqual("", mock.getStdout())
            runLog.LOG.startLog("test_writeWelcomeHeaders")
            runLog.LOG.setVerbosity(logging.INFO)

            writeWelcomeHeaders(o, o.cs)

            # assert our random file (and a lot of other stuff) is in the welcome
            self.assertIn("Case Info", mock.getStdout())
            self.assertIn("Input File Info", mock.getStdout())
            self.assertIn("crossSectionControl-DA", mock.getStdout())
            self.assertIn("Python Executable", mock.getStdout())
            self.assertIn(randoFile, mock.getStdout())
class TestReportInterface(unittest.TestCase):
    """Tests of the ReportInterface hooks at the standard interaction points."""

    @classmethod
    def setUpClass(cls):
        # run everything in a throwaway directory so report artifacts don't pollute the repo
        cls.td = TemporaryDirectoryChanger()
        cls.td.__enter__()

    @classmethod
    def tearDownClass(cls):
        cls.td.__exit__(None, None, None)

    def test_printReports(self):
        """Testing printReports method."""
        repInt = reportInterface.ReportInterface(None, None)
        rep = repInt.printReports()

        self.assertIn("REPORTS BEGIN", rep)
        self.assertIn("REPORTS END", rep)

    def test_distributableReportInt(self):
        repInt = reportInterface.ReportInterface(None, None)
        self.assertEqual(repInt.distributable(), 4)

    def test_interactBOLReportInt(self):
        o, r = loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml")
        repInt = reportInterface.ReportInterface(r, o.cs)

        with mockRunLogs.BufferLog() as mock:
            repInt.interactBOL()
            self.assertIn("Writing assem layout", mock.getStdout())
            self.assertIn("BOL Assembly", mock.getStdout())
            self.assertIn("wetMass", mock.getStdout())

    def test_interactEveryNode(self):
        o, r = loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml")
        repInt = reportInterface.ReportInterface(r, o.cs)

        with mockRunLogs.BufferLog() as mock:
            repInt.interactEveryNode(0, 0)
            self.assertIn("Cycle 0", mock.getStdout())
            self.assertIn("node 0", mock.getStdout())
            self.assertIn("keff=", mock.getStdout())

    def test_interactBOC(self):
        o, r = loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml")
        repInt = reportInterface.ReportInterface(r, o.cs)

        # BOC fissile inventory starts at zero and is populated by the BOC hook
        self.assertEqual(repInt.fuelCycleSummary["bocFissile"], 0.0)
        repInt.interactBOC(1)
        self.assertAlmostEqual(repInt.fuelCycleSummary["bocFissile"], 4.290603409612653)

    def test_interactEOC(self):
        o, r = loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml")
        repInt = reportInterface.ReportInterface(r, o.cs)

        with mockRunLogs.BufferLog() as mock:
            repInt.interactEOC(0)
            self.assertIn("Cycle 0", mock.getStdout())
            self.assertIn("TIMER REPORTS", mock.getStdout())

    def test_interactEOL(self):
        o, r = loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml")
        repInt = reportInterface.ReportInterface(r, o.cs)

        with mockRunLogs.BufferLog() as mock:
            repInt.interactEOL()
            self.assertIn("Comprehensive Core Report", mock.getStdout())
            self.assertIn("Assembly Area Fractions", mock.getStdout())
================================================
FILE: armi/bookkeeping/snapshotInterface.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Controls points during a calculation where snapshots will be triggered, signaling more detailed treatments.
Snapshots are user-defined cycle/timenode points where something special is to be done.
What in particular is done is dependent on the case settings and the collection of active plugins
* At the very basic level,
third-party code input files are dumped out and stored in special snapshot folders at these times.
This can be useful when you are sharing third-party input files with another party (e.g. for review or
collaboration).
* You may want to run extra long-running physics simulations only at a few time points (e.g. BOL, EOL). This
is useful for detailed transient analysis, or other follow-on analysis.
Snapshots can be requested through the settings: ``dumpSnapshot`` and/or ``defaultSnapshots``.
"""
from armi import interfaces, operators, runLog
from armi.utils import getStepLengths
ORDER = interfaces.STACK_ORDER.POSTPROCESSING
def describeInterfaces(cs):
    """Expose the snapshot interface (with no extra setup kwargs) to the framework."""
    return SnapshotInterface, {}
class SnapshotInterface(interfaces.Interface):
    """
    Snapshot managerial interface.

    .. impl:: Save extra data to be saved from a run, at specified time nodes.
        :id: I_ARMI_SNAPSHOT0
        :implements: R_ARMI_SNAPSHOT

        This is a special :py:class:`Interface <armi.interfaces.Interface>` that is
        designed to run alongside all the other Interfaces during a simulation, to
        save off important or helpful data. By default, it is driven by the
        ``defaultSnapshots`` and ``dumpSnapshot`` settings, which let users control
        whether snapshot data will be recorded during their run. Broadly, this
        class works by invoking the Operator method
        :py:meth:`snapshotRequest <armi.operators.operator.Operator.snapshotRequest>`.
    """

    name = "snapshot"

    def interactBOL(self):
        """Activate the default snapshots at BOL, if the setting requests them."""
        super().interactBOL()
        if self.cs["defaultSnapshots"]:
            self.activateDefaultSnapshots()

    def interactEveryNode(self, cycle, node):
        """Trigger a snapshot request at this node if its CCCNNN stamp was requested."""
        requested = self.cs["dumpSnapshot"]
        if requested and getCycleNodeStamp(cycle, node) in requested:
            self.o.snapshotRequest(cycle, node)

    def interactCoupled(self, iteration):
        """Trigger a snapshot request for a coupled iteration, if requested."""
        requested = self.cs["dumpSnapshot"]
        if requested and getCycleNodeStamp(self.r.p.cycle, self.r.p.timeNode) in requested:
            self.o.snapshotRequest(self.r.p.cycle, self.r.p.timeNode, iteration)

    def activateDefaultSnapshots(self):
        """Figure out and assign some default snapshots (BOL, MOL, EOL)."""
        if self.cs["runType"] == operators.RunTypes.EQUILIBRIUM:
            cycleNodePairs = self._getSnapTimesEquilibrium()
        else:
            cycleNodePairs = self._getSnapTimesNormal()

        # append any new stamps to the dumpSnapshot setting
        for cycle, node in cycleNodePairs:
            stamp = getCycleNodeStamp(cycle, node)
            if stamp not in self.cs["dumpSnapshot"]:
                runLog.info(f"Adding default snapshot {stamp} to snapshot queue.")
                # rebuild the list rather than mutating the setting value in place
                self.cs["dumpSnapshot"] = self.cs["dumpSnapshot"] + [stamp]

    def _getSnapTimesEquilibrium(self):
        """Set BOEC, MOEC, EOEC snapshots (all within the single equilibrium cycle)."""
        # NOTE(review): this raises when the setting is *falsy*, but the message says
        # "active" — confirm which of the two is intended.
        if not self.cs["eqToDatabaseOnlyWhenConverged"]:
            raise ValueError("Cannot create default snapshots when `eqToDatabaseOnlyWhenConverged` setting is active")
        return [(0, 0), (0, self.cs["burnSteps"] // 2), (0, self.cs["burnSteps"])]

    def _getSnapTimesNormal(self):
        """Return BOL/MOL/EOL (cycle, node) pairs that are not already in the past."""
        try:
            currentCycle = self.r.p.cycle
        except AttributeError:
            # no reactor attached (r is None); assume we are at the beginning
            currentCycle = 0

        lastCycle = self.cs["nCycles"] - 1
        midCycle = lastCycle // 2

        pairs = []
        if currentCycle <= 0:
            pairs.append([0, 0])
        if midCycle >= currentCycle:
            pairs.append([midCycle, 0])
        if lastCycle >= currentCycle:
            # the final node index equals the number of steps in the last cycle
            pairs.append([lastCycle, len(getStepLengths(self.cs)[-1])])
        return pairs
def extractCycleNodeFromStamp(stamp):
    """
    Return the (cycle, node) integers encoded in a CCCNNN stamp.

    See Also
    --------
    getCycleNodeStamp : the opposite
    """
    return int(stamp[:3]), int(stamp[3:])
def getCycleNodeStamp(cycle, node):
    """
    Return a zero-padded CCCNNN stamp for this cycle and node.

    Useful for comparing the current cycle/node with the snapshot
    requests held in the settings.

    See Also
    --------
    isRequestedDetailPoint : compares a cycle,node to the dumpSnapshot list.
    extractCycleNodeFromStamp : does the opposite
    """
    return f"{cycle:03d}{node:03d}"
================================================
FILE: armi/bookkeeping/tests/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Bookkeeping test package.
This may seem a little bit over-engineered, but the jupyter notebooks that get run by
the test_historyTracker are also used in the documentation system, so providing a list
of related files from this package is useful. Also, these are organized like this to
prevent having to import the world just to get something like a list of strings.
"""
from armi.bookkeeping.tests._constants import * # noqa: F403
================================================
FILE: armi/bookkeeping/tests/_constants.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Plain old data for the bookkeeping tests.
These are stored here so that they can be accessed from within this test package, but
also re-exported by `__init__.py`, so that other things (like the documentation system)
can use it without having to import the rest of ARMI.
"""
import os
from armi.testing import TESTING_ROOT
from armi.tests import TEST_ROOT
# These files are needed to run the data_model ipython notebook, which is done in
# test_historyTracker, and when building the docs.
_AFCI_177_DIR = os.path.join(TESTING_ROOT, "reactors", "anl-afci-177")
TUTORIAL_FILES = [
    os.path.join(_AFCI_177_DIR, fileName)
    for fileName in (
        "anl-afci-177-blueprints.yaml",
        "anl-afci-177-coreMap.yaml",
        "anl-afci-177-fuelManagement.py",
        "anl-afci-177.yaml",
    )
] + [os.path.join(TEST_ROOT, "tutorials", "data_model.ipynb")]
================================================
FILE: armi/bookkeeping/tests/test_historyTracker.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for the history tracker interface.
These tests actually run a jupyter notebook that is in the documentation to build a valid HDF5 file to load from as a
test fixtures. Thus they take a little longer than usual.
"""
import os
import shutil
import numpy as np
from armi import init as armi_init
from armi import settings, utils
from armi.reactor.flags import Flags
from armi.tests import TEST_ROOT, ArmiTestHelper
from armi.utils.directoryChangers import TemporaryDirectoryChanger
THIS_DIR = os.path.dirname(__file__)  # because tests do not run in this folder
# input file for the smallest test reactor, used to build the fixture DB
TEST_FILE = os.path.join(TEST_ROOT, "smallestTestReactor", "armiRunSmallest.yaml")
class TestHistoryTracker(ArmiTestHelper):
    """History tracker tests that require a Reactor Model."""

    @classmethod
    def setUpClass(cls):
        # Run the whole class inside a temporary directory so the DB and
        # history files written here do not pollute the source tree.
        cls.dirChanger = TemporaryDirectoryChanger()
        cls.dirChanger.__enter__()

        # modify the input settings for our tests
        dbPath = os.path.join(cls.dirChanger.destination, "armiRunSmallest.h5")
        reloadPath = os.path.join(cls.dirChanger.destination, "armiRunSmallestReload.h5")
        cs = settings.Settings(TEST_FILE)
        newSettings = {}
        newSettings["db"] = True
        newSettings["nCycles"] = 1
        newSettings["detailAssemLocationsBOL"] = ["001-001"]
        newSettings["loadStyle"] = "fromDB"
        newSettings["reloadDBName"] = reloadPath
        newSettings["startNode"] = 1
        newSettings["verbosity"] = "error"
        cs = cs.modified(newSettings=newSettings)

        # build the ARMI operator (and Reactor)
        o = armi_init(fName=TEST_FILE, cs=cs)

        def _setFakePower(core):
            # Give every block deterministic, non-zero power/flux parameters so
            # the values recorded in the DB are known to the tests below.
            peakPower = 1e6
            mgFluxBase = np.arange(5)
            for a in core:
                for b in a:
                    vol = b.getVolume()
                    # fuel blocks get 10x the power of non-fuel blocks
                    fuelFlag = 10 if b.isFuel() else 1.0
                    b.p.power = peakPower * fuelFlag
                    b.p.pdens = b.p.power / vol
                    b.p.mgFlux = mgFluxBase * b.p.pdens

        # put some test power values on the Reactor object
        _setFakePower(o.r.core)

        # write some data to the DB, at two consecutive time nodes
        dbi = o.getInterface("database")
        dbi.initDB(fName=dbPath)
        dbi.database.writeToDB(o.r)
        o.r.p.timeNode += 1
        dbi.database.writeToDB(o.r)

        cls.o = o
        cls.r = o.r

    @classmethod
    def tearDownClass(cls):
        cls.dirChanger.__exit__(None, None, None)
        try:
            cls.o.getInterface("database").database.close()
        except FileNotFoundError:
            # the temporary directory (and DB file) may already be gone
            pass
        cls.r = None
        cls.o = None

    def test_calcMGFluence(self):
        """
        This test confirms that mg flux has many groups when loaded with the history tracker.

        .. test:: Demonstrate that a parameter stored at differing time nodes can be recovered.
            :id: T_ARMI_HIST_TRACK0
            :tests: R_ARMI_HIST_TRACK
        """
        o = self.o
        b = o.r.core.childrenByLocator[o.r.core.spatialGrid[0, 0, 0]].getFirstBlock(Flags.FUEL)
        bVolume = b.getVolume()
        bName = b.name

        # duration is None in this DB
        hti = o.getInterface("history")
        timesInYears = [duration or 1.0 for duration in hti.getTimeSteps()]
        timeStepsToRead = [utils.getCycleNodeFromCumulativeNode(i, self.o.cs) for i in range(len(timesInYears))]
        hti.preloadBlockHistoryVals([bName], ["mgFlux"], timeStepsToRead)

        # accumulate fluence (flux * time) over every recorded time step
        mgFluence = None
        for ts, years in enumerate(timesInYears):
            cycle, node = utils.getCycleNodeFromCumulativeNode(ts, self.o.cs)
            mgFlux = hti.getBlockHistoryVal(bName, "mgFlux", (cycle, node))
            mgFlux /= bVolume
            timeInSec = years * 365 * 24 * 3600
            if mgFluence is None:
                mgFluence = timeInSec * mgFlux
            else:
                mgFluence += timeInSec * mgFlux

        self.assertGreater(len(mgFluence), 1, "mgFluence should have more than 1 group")

        # test that unloadBlockHistoryVals() is working
        self.assertIsNotNone(hti._preloadedBlockHistory)
        hti.unloadBlockHistoryVals()
        self.assertIsNone(hti._preloadedBlockHistory)

    def test_historyParameters(self):
        """Retrieve various parameters from the history.

        .. test:: Demonstrate that various parameters stored at differing time nodes can be recovered.
            :id: T_ARMI_HIST_TRACK1
            :tests: R_ARMI_HIST_TRACK
        """
        o = self.o
        b = o.r.core.childrenByLocator[o.r.core.spatialGrid[0, 0, 0]].getFirstBlock(Flags.FUEL)
        # NOTE(review): return value unused — looks like a leftover from test_calcMGFluence
        b.getVolume()
        bName = b.name

        # duration is None in this DB
        hti = o.getInterface("history")
        timesInYears = [duration or 1.0 for duration in hti.getTimeSteps()]
        timeStepsToRead = [utils.getCycleNodeFromCumulativeNode(i, self.o.cs) for i in range(len(timesInYears))]
        hti.preloadBlockHistoryVals([bName], ["power"], timeStepsToRead)

        # read some parameters
        params = {}
        for param in ["height", "pdens", "power"]:
            params[param] = []
            for ts, years in enumerate(timesInYears):
                cycle, node = utils.getCycleNodeFromCumulativeNode(ts, self.o.cs)
                params[param].append(hti.getBlockHistoryVal(bName, param, (cycle, node)))

        # verify the height parameter doesn't change over time
        self.assertGreater(params["height"][0], 0)
        self.assertEqual(params["height"][0], params["height"][1])

        # verify the power parameter is retrievable from the history
        refPower = 1000000.0
        self.assertEqual(o.cs["power"], refPower)
        # fuel blocks were given 10x the peak power in setUpClass
        self.assertAlmostEqual(params["power"][0], refPower * 10.0, delta=0.1)

        # verify the power density parameter is retrievable from the history
        refDens = 1636.4803548458785
        self.assertAlmostEqual(params["pdens"][0], refDens, delta=0.001)
        self.assertAlmostEqual(params["pdens"][0], params["pdens"][1])

        # test that unloadBlockHistoryVals() is working
        self.assertIsNotNone(hti._preloadedBlockHistory)
        hti.unloadBlockHistoryVals()
        self.assertIsNone(hti._preloadedBlockHistory)

    def test_historyReport(self):
        """
        Test generation of history report.

        This does a swap for 5 timesteps::

            |  TS  0      1       2       3      4
            |LOC  (1,1)  (2,1)   (3,1)   (4,1)  SFP
        """
        history = self.o.getInterface("history")
        history.interactBOL()
        history.interactEOL()
        testLoc = self.o.r.core.spatialGrid[0, 0, 0]
        testAssem = self.o.r.core.childrenByLocator[testLoc]
        # compare the generated history file against a committed reference file
        fileName = history._getAssemHistoryFileName(testAssem)
        actualFilePath = os.path.join(THIS_DIR, fileName)
        expectedFileName = os.path.join(THIS_DIR, fileName.replace(".txt", "-ref.txt"))
        # copy from fast path so the file is retrievable.
        shutil.move(fileName, os.path.join(THIS_DIR, fileName))

        self.compareFilesLineByLine(expectedFileName, actualFilePath)

        # test that detailAssemblyNames() is working
        self.assertEqual(len(history.detailAssemblyNames), 1)
        history.addAllDetailedAssems()
        self.assertEqual(len(history.detailAssemblyNames), 1)

    def test_getAssemHistories(self):
        """Get the histories for all blocks in detailed assemblies."""
        history = self.o.getInterface("history")
        history.interactBOL()
        assemList = history.getDetailAssemblies()
        params = history.getTrackedParams()
        assemHistories = history.getAssemHistories(assemList)
        # every non-stationary block should have an entry for every tracked param
        for a in assemList:
            for b in history.nonStationaryBlocks(a):
                self.assertIn(b, assemHistories)
                for param in params:
                    self.assertIn(param, assemHistories[b])

    def test_getBlockInAssembly(self):
        """A fuel block is found in a fuel assembly; a fuel-less assembly raises."""
        history = self.o.getInterface("history")
        aFuel = self.o.r.core.getFirstAssembly(Flags.FUEL)
        b = history._getBlockInAssembly(aFuel)
        self.assertGreater(b.p.height, 1.0)
        self.assertEqual(b.getType(), "fuel")

        with self.assertRaises(AttributeError):
            aShield = self.o.r.core.getFirstAssembly(Flags.SHIELD)
            history._getBlockInAssembly(aShield)
================================================
FILE: armi/bookkeeping/tests/test_memoryProfiler.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for memoryProfiler."""
import logging
import unittest
from unittest.mock import MagicMock, patch
from armi import runLog
from armi.bookkeeping import memoryProfiler
from armi.bookkeeping.memoryProfiler import (
getCurrentMemoryUsage,
getTotalJobMemory,
)
from armi.reactor.tests import test_reactors
from armi.tests import TEST_ROOT, mockRunLogs
class TestMemoryProfiler(unittest.TestCase):
    """Tests for the MemoryProfiler interface, run against the smallest test reactor."""

    def setUp(self):
        # build an operator/reactor pair with memory debugging enabled, so that
        # the "memoryProfiler" interface is attached
        self.o, self.r = test_reactors.loadTestReactor(
            TEST_ROOT,
            {"debugMem": True},
            inputFileName="smallestTestReactor/armiRunSmallest.yaml",
        )
        self.memPro: memoryProfiler.MemoryProfiler = self.o.getInterface("memoryProfiler")

    def tearDown(self):
        self.o.removeInterface(self.memPro)

    def test_fullBreakdown(self):
        """A full breakdown without sizes should still report instance counts."""
        with mockRunLogs.BufferLog() as mock:
            # we should start with a clean slate
            self.assertEqual("", mock.getStdout())
            runLog.LOG.startLog("test_fullBreakdown")
            runLog.LOG.setVerbosity(logging.INFO)

            # we should start at info level, and that should be working correctly
            self.assertEqual(runLog.LOG.getVerbosity(), logging.INFO)

            self.memPro._printFullMemoryBreakdown(reportSize=False)

            # do some basic testing
            self.assertTrue(mock.getStdout().count("UNIQUE_INSTANCE_COUNT") > 10)
            self.assertIn("garbage", mock.getStdout())

    def test_displayMemoryUsage(self):
        """The memory usage report should be emitted to the log."""
        with mockRunLogs.BufferLog() as mock:
            # we should start with a clean slate
            self.assertEqual("", mock.getStdout())
            runLog.LOG.startLog("test_displayMemUsage")
            runLog.LOG.setVerbosity(logging.INFO)

            # we should start at info level, and that should be working correctly
            self.assertEqual(runLog.LOG.getVerbosity(), logging.INFO)

            self.memPro.displayMemoryUsage(1)

            # do some basic testing
            self.assertIn("End Memory Usage Report", mock.getStdout())

    def test_printFullMemoryBreakdown(self):
        """A full breakdown with sizes should report counts and MB figures."""
        with mockRunLogs.BufferLog() as mock:
            # we should start with a clean slate
            self.assertEqual("", mock.getStdout())
            runLog.LOG.startLog("test_displayMemUsage")
            runLog.LOG.setVerbosity(logging.INFO)

            # we should start at info level, and that should be working correctly
            self.assertEqual(runLog.LOG.getVerbosity(), logging.INFO)

            self.memPro._printFullMemoryBreakdown(reportSize=True)

            # do some basic testing
            self.assertIn("UNIQUE_INSTANCE_COUNT", mock.getStdout())
            self.assertIn(" MB", mock.getStdout())

    def test_getReferrers(self):
        """getReferrers should log a bounded number of referrer entries at DEBUG level."""
        with mockRunLogs.BufferLog() as mock:
            # we should start with a clean slate
            self.assertEqual("", mock.getStdout())
            testName = "test_getReferrers"
            runLog.LOG.startLog(testName)
            runLog.LOG.setVerbosity(logging.DEBUG)

            # grab the referrers
            self.memPro.getReferrers(self.r)
            memLog = mock.getStdout()

        # test the results: some referrers logged, but not unbounded
        self.assertGreater(memLog.count("ref for"), 10)
        self.assertLess(memLog.count("ref for"), 50)
        self.assertIn(testName, memLog)
        self.assertIn("Reactor", memLog)
        self.assertIn("core", memLog)

    def test_checkForDuplicateObjectsOnArmiModel(self):
        """Duplicate `.cs` objects on the model should be detected and raise."""
        with mockRunLogs.BufferLog() as mock:
            # we should start with a clean slate
            self.assertEqual("", mock.getStdout())
            testName = "test_checkForDuplicateObjectsOnArmiModel"
            runLog.LOG.startLog(testName)
            runLog.LOG.setVerbosity(logging.IMPORTANT)

            # check for duplicates
            with self.assertRaises(RuntimeError):
                self.memPro.checkForDuplicateObjectsOnArmiModel("cs", self.r.core)

            # validate the outputs are as we expect
            self.assertIn("There are 2 unique objects stored as `.cs`", mock.getStdout())
            self.assertIn("Expected id", mock.getStdout())
            self.assertIn("Expected object", mock.getStdout())
            self.assertIn("These types of objects", mock.getStdout())
            self.assertIn("MemoryProfiler", mock.getStdout())
            self.assertIn("MainInterface", mock.getStdout())

    def test_profileMemoryUsageAction(self):
        """The action should store the time description it is constructed with."""
        pmua = memoryProfiler.ProfileMemoryUsageAction("timeDesc")
        self.assertEqual(pmua.timeDescription, "timeDesc")

    @patch("psutil.virtual_memory")
    @patch("armi.bookkeeping.memoryProfiler.cpu_count")
    def test_getTotalJobMemory(self, mockCpuCount, mockVMem):
        """Use an example node with 50 GB of total physical memory and 10 CPUs."""
        mockCpuCount.return_value = 10
        vMem = MagicMock()
        vMem.total = (1024**3) * 50
        mockVMem.return_value = vMem

        # (nTasks, cpusPerTask) -> expected job memory in GB
        expectedArrangement = {
            (10, 1): 50,
            (1, 10): 50,
            (2, 5): 50,
            (3, 3): 45,
            (4, 1): 20,
            (2, 4): 40,
            (5, 2): 50,
        }
        for compReq, jobMemory in expectedArrangement.items():
            # compReq[0] is nTasks and compReq[1] is cpusPerTask
            self.assertEqual(getTotalJobMemory(compReq[0], compReq[1]), jobMemory)

    @patch("armi.bookkeeping.memoryProfiler.PrintSystemMemoryUsageAction")
    @patch("armi.bookkeeping.memoryProfiler.SystemAndProcessMemoryUsage")
    def test_getCurrentMemoryUsage(self, mockSysAndProcMemUse, mockPrintSysMemUseAction):
        """Mock the memory usage across 3 different processes and that the total usage is as expected (6 MB)."""
        self._setMemUseMock(mockPrintSysMemUseAction)
        # 1 + 2 + 3 GB of mocked process memory = 6 GB (in MB units)
        self.assertAlmostEqual(getCurrentMemoryUsage(), 6 * 1024)

    @patch("armi.bookkeeping.memoryProfiler.PrintSystemMemoryUsageAction")
    @patch("armi.bookkeeping.memoryProfiler.SystemAndProcessMemoryUsage")
    @patch("psutil.virtual_memory")
    @patch("armi.bookkeeping.memoryProfiler.cpu_count")
    def test_printCurrentMemoryState(self, mockCpuCount, mockVMem, mock1, mockPrintSysMemUseAction):
        """Use an example node with 50 GB of total physical memory and 10 CPUs while using 6 GB."""
        mockCpuCount.return_value = 10
        vMem = MagicMock()
        vMem.total = (1024**3) * 50
        mockVMem.return_value = vMem
        self._setMemUseMock(mockPrintSysMemUseAction)

        with mockRunLogs.BufferLog() as mockLogs:
            # full allocation: 10 tasks x 1 cpu = the whole 50 GB node
            self.memPro.cs = {"cpusPerTask": 1, "nTasks": 10}
            self.memPro.printCurrentMemoryState()
            stdOut = mockLogs.getStdout()
            self.assertIn("Currently using 6.0 GB of memory.", stdOut)
            self.assertIn("There is 44.0 GB of memory left.", stdOut)
            self.assertIn("There is a total allocation of 50.0 GB", stdOut)

            # Try another for funzies where we only use half the available resources on the node
            mockLogs.emptyStdout()
            self.memPro.cs = {"cpusPerTask": 5, "nTasks": 1}
            self.memPro.printCurrentMemoryState()
            stdOut = mockLogs.getStdout()
            self.assertIn("Currently using 6.0 GB of memory.", stdOut)
            self.assertIn("There is 19.0 GB of memory left.", stdOut)
            self.assertIn("There is a total allocation of 25.0 GB", stdOut)

    def test_printCurrentMemoryState_noSetting(self):
        """Test that the try/except works as it should."""
        expectedStr = (
            "To view memory consumed, remaining available, and total allocated for a case, "
            "add the setting 'cpusPerTask' to your application."
        )
        with mockRunLogs.BufferLog() as mockLogs:
            self.memPro.printCurrentMemoryState()
            self.assertIn(expectedStr, mockLogs.getStdout())

    def _setMemUseMock(self, mockPrintSysMemUseAction):
        # wire the mocked gather() to return three processes using 1, 2, and 3 GB
        class mockMemUse:
            def __init__(self, mem: float):
                self.processVirtualMemoryInMB = mem

        instance = mockPrintSysMemUseAction.return_value
        instance.gather.return_value = [
            mockMemUse(1 * 1024),
            mockMemUse(2 * 1024),
            mockMemUse(3 * 1024),
        ]
class KlassCounterTests(unittest.TestCase):
    """Exercise the KlassCounter object-counting helper."""

    def get_containers(self):
        """Build a nested dict/tuple/list structure with known object counts."""
        innerList = [1, 2, 3, 4, 5, 6, 7, 2.0]
        innerTuple = ("a", "b", innerList, None)
        return {
            "yo": innerTuple,
            "yo1": innerList,
            ("t1", "t2"): True,
            "yeah": [],
            "nope": {},
        }

    def test_expandContainer(self):
        """Counting a nested container yields per-type and total counts."""
        counter = memoryProfiler.KlassCounter(False)
        counter.countObjects(self.get_containers())

        self.assertEqual(counter.count, 24)
        for klass, expected in ((list, 2), (dict, 2), (tuple, 2), (int, 7)):
            self.assertEqual(counter[klass].count, expected)

    def test_countHandlesRecursion(self):
        """A self-referencing list must not inflate the counts or loop forever."""
        container = self.get_containers()
        innerList = container["yo1"]
        innerList.append(innerList)

        counter = memoryProfiler.KlassCounter(False)
        counter.countObjects(container)

        # despite it now being recursive ... we get the same counts
        self.assertEqual(counter.count, 24)
        for klass, expected in ((list, 2), (dict, 2), (tuple, 2), (int, 7)):
            self.assertEqual(counter[klass].count, expected)
================================================
FILE: armi/bookkeeping/tests/test_snapshot.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Snapshots."""
import unittest
from unittest.mock import patch
from armi import settings
from armi.bookkeeping import snapshotInterface
from armi.operators.operator import Operator
class MockReactorParams:
    """Stand-in for reactor parameters, fixed at cycle 0 and time node 1."""

    def __init__(self):
        self.cycle, self.timeNode = 0, 1
class MockReactor:
    """Minimal reactor stand-in: a real Operator paired with fake params."""

    def __init__(self, cs):
        self.o = Operator(cs)
        self.p = MockReactorParams()
class TestSnapshotInterface(unittest.TestCase):
    """Tests for the SnapshotInterface and its default-snapshot logic."""

    @classmethod
    def setUpClass(cls):
        # Fix: this classmethod previously named its parameter `self`, which is
        # misleading — it receives the class. One Settings object is shared by
        # the whole class; each test reverts it to defaults in setUp().
        cls.cs = settings.Settings()

    def setUp(self):
        # start each test from default settings with a fresh interface
        self.cs.revertToDefaults()
        self.si = snapshotInterface.SnapshotInterface(MockReactor(self.cs), self.cs)

    @patch("armi.operators.operator.Operator.snapshotRequest")
    def test_interactEveryNode(self, mockSnapshotRequest):
        """A node whose CCCNNN stamp is in dumpSnapshot triggers a snapshot request."""
        newSettings = {}
        newSettings["dumpSnapshot"] = ["000001"]
        self.si.cs = self.si.cs.modified(newSettings=newSettings)

        self.si.interactEveryNode(0, 1)
        self.assertTrue(mockSnapshotRequest.called)

    @patch("armi.operators.operator.Operator.snapshotRequest")
    def test_interactCoupled(self, mockSnapshotRequest):
        """A coupled iteration at a requested stamp triggers a snapshot request."""
        newSettings = {}
        newSettings["dumpSnapshot"] = ["000001"]
        self.si.cs = self.si.cs.modified(newSettings=newSettings)

        self.si.interactCoupled(2)
        self.assertTrue(mockSnapshotRequest.called)

    def test_activateDefSnapshots_30cyc2burns(self):
        """
        Test snapshots for 30 cycles and 2 burnsteps, checking the dumpSnapshot setting.

        .. test:: Allow extra data to be saved from a run, at specified time nodes.
            :id: T_ARMI_SNAPSHOT0
            :tests: R_ARMI_SNAPSHOT
        """
        self.assertEqual([], self.cs["dumpSnapshot"])

        newSettings = {}
        newSettings["nCycles"] = 30
        newSettings["burnSteps"] = 2
        newSettings["cycleLength"] = 365
        self.si.cs = self.si.cs.modified(newSettings=newSettings)
        self.cs = self.si.cs

        self.si.activateDefaultSnapshots()
        # BOL, MOL (cycle 14), and EOL (last node of cycle 29)
        self.assertEqual(["000000", "014000", "029002"], self.si.cs["dumpSnapshot"])

    def test_activateDeftSnapshots_17cyc5surns(self):
        """
        Test snapshots for 17 cycles and 5 burnsteps, checking the dumpSnapshot setting.

        .. test:: Allow extra data to be saved from a run, at specified time nodes.
            :id: T_ARMI_SNAPSHOT1
            :tests: R_ARMI_SNAPSHOT
        """
        self.assertEqual([], self.cs["dumpSnapshot"])

        newSettings = {}
        newSettings["nCycles"] = 17
        newSettings["burnSteps"] = 5
        newSettings["cycleLength"] = 365
        self.si.cs = self.si.cs.modified(newSettings=newSettings)
        self.cs = self.si.cs

        self.si.activateDefaultSnapshots()
        # BOL, MOL (cycle 8), and EOL (last node of cycle 16)
        self.assertEqual(["000000", "008000", "016005"], self.si.cs["dumpSnapshot"])
================================================
FILE: armi/bookkeeping/visualization/__init__.py
================================================
# Copyright 2020 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Visualization package contains functionality and entry points for producing files
amenable to visualization of ARMI run results.
This could theoretically support all sorts of visualization file formats, but for now,
only VTK files are supported. VTK was selected because it has wide support from vis
tools, while being a simple-enough format that quality pure-Python libraries exist to
produce them. Other formats (e.g., SILO) tend to require more system-dependent binary
dependencies, so optional support for them may be added later.
"""
from armi import plugins # noqa: F401
from armi.bookkeeping.visualization.entryPoint import VisFileEntryPoint # noqa: F401
================================================
FILE: armi/bookkeeping/visualization/dumper.py
================================================
# Copyright 2020 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract base class for visualization file dumpers."""
from abc import ABC, abstractmethod
from armi.reactor import reactors
class VisFileDumper(ABC):
    """
    Interface for visualization-file dumpers (e.g., the VTK and XDMF dumpers).

    Concrete dumpers are used as context managers: entering initializes the
    output, reactor states are written via ``dumpState``, and exiting
    finalizes the output.
    """

    @abstractmethod
    def dumpState(self, r: reactors.Reactor):
        """Dump a single reactor state to the vis file."""

    @abstractmethod
    def __enter__(self):
        """Initialize the output when entering the context manager."""

    @abstractmethod
    def __exit__(self, type, value, traceback):
        """Finalize the output when exiting the context manager."""
================================================
FILE: armi/bookkeeping/visualization/entryPoint.py
================================================
# Copyright 2020 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entry point for producing visualization files."""
import pathlib
import re
import sys
from armi import runLog
from armi.cli import entryPoint
class VisFileEntryPoint(entryPoint.EntryPoint):
    """Create visualization files from database files."""

    name = "vis-file"
    description = "Convert ARMI databases in to visualization files"

    _FORMAT_VTK = "vtk"
    _FORMAT_XDMF = "xdmf"
    _SUPPORTED_FORMATS = {_FORMAT_VTK, _FORMAT_XDMF}

    # matches a single "(cycle,node)" pair, capturing both integers
    _CYCLE_NODE_PATTERN = r"\((\d+),(\d+)\)"

    def __init__(self):
        entryPoint.EntryPoint.__init__(self)

    def addOptions(self):
        """Register the command-line options for this entry point."""
        self.parser.add_argument("h5db", help="Input database path", type=str)
        self.parser.add_argument(
            "--output-name",
            "-o",
            help="Base name for output file(s). File extensions will be added as appropriate",
            type=str,
            default=None,
        )
        self.parser.add_argument(
            "--format",
            "-f",
            help="Output format. Supported formats: `vtk` and `xdmf`",
            default="vtk",
        )
        self.parser.add_argument(
            "--nodes",
            help="An optional list of time nodes to include. Should look like `(1,0)(1,1)(1,2)`, etc",
            type=str,
            default=None,
        )
        self.parser.add_argument(
            "--max-node",
            help="An optional (cycle,timeNode) tuple to specify the latest time step that should be included",
            type=str,
            default=None,
        )
        self.parser.add_argument(
            "--min-node",
            help="An optional (cycle,timeNode) tuple to specify the earliest time step that should be included",
            type=str,
            default=None,
        )

    def _parseSingleNode(self, spec, optionName):
        """
        Parse a single "(c,n)" string into an (int, int) tuple.

        Exits the process with an error message if *spec* does not contain
        exactly one (cycle,node) pair. Shared by --max-node and --min-node,
        which previously duplicated this logic.
        """
        nodes = re.findall(self._CYCLE_NODE_PATTERN, spec)
        if len(nodes) != 1:
            runLog.error("Bad {}: `{}`. Should look like (c,n).".format(optionName, spec))
            sys.exit(1)
        cycle, node = nodes[0]
        return (int(cycle), int(node))

    def parse(self, args):
        """
        Process user input.

        Strings are parsed against some regular expressions and saved back to their
        original locations in the ``self.args`` namespace for later use.
        """
        entryPoint.EntryPoint.parse(self, args)

        if self.args.nodes is not None:
            self.args.nodes = [
                (int(cycle), int(node)) for cycle, node in re.findall(self._CYCLE_NODE_PATTERN, self.args.nodes)
            ]

        if self.args.max_node is not None:
            self.args.max_node = self._parseSingleNode(self.args.max_node, "--max-node")

        if self.args.min_node is not None:
            self.args.min_node = self._parseSingleNode(self.args.min_node, "--min-node")

        if self.args.format not in self._SUPPORTED_FORMATS:
            runLog.error(
                "Requested format `{}` not among the supported options: {}".format(
                    self.args.format, self._SUPPORTED_FORMATS
                )
            )
            sys.exit(1)

        if self.args.output_name is None:
            # infer name from input
            inp = pathlib.Path(self.args.h5db)
            self.args.output_name = inp.stem

    def invoke(self):
        """Read the requested time steps from the database and dump them to the vis file."""
        # late imports so that we dont have to import the world to do anything
        from armi.bookkeeping.db import databaseFactory
        from armi.bookkeeping.visualization import vtk, xdmf

        # a little baroque, but easy to extend with future formats
        formatMap = {
            self._FORMAT_VTK: vtk.VtkDumper,
            self._FORMAT_XDMF: xdmf.XdmfDumper,
        }
        dumper = formatMap[self.args.format](self.args.output_name, self.args.h5db)

        nodes = self.args.nodes
        db = databaseFactory(self.args.h5db, "r")
        with db:
            dbNodes = list(db.genTimeSteps())

            # fail early if an explicitly-requested node is absent
            if nodes is not None and any(node not in dbNodes for node in nodes):
                raise RuntimeError(
                    "Some of the requested nodes are not in the source database.\nRequested: {}\nPresent: {}".format(
                        nodes, dbNodes
                    )
                )

            with dumper:
                for cycle, node in dbNodes:
                    # skip nodes outside the explicit list and min/max bounds
                    if nodes is not None and (cycle, node) not in nodes:
                        continue
                    if self.args.min_node is not None and (cycle, node) < self.args.min_node:
                        continue
                    if self.args.max_node is not None and (cycle, node) > self.args.max_node:
                        continue
                    runLog.info("Creating visualization file for cycle {}, time node {}...".format(cycle, node))
                    r = db.load(cycle, node)
                    dumper.dumpState(r)
================================================
FILE: armi/bookkeeping/visualization/tests/__init__.py
================================================
# Copyright 2020 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
================================================
FILE: armi/bookkeeping/visualization/tests/test_vis.py
================================================
# Copyright 2020 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test report visualization."""
import unittest
import numpy as np
from pyevtk.vtk import VtkTetra
from armi import settings
from armi.bookkeeping.db import Database
from armi.bookkeeping.visualization import utils, vtk, xdmf
from armi.reactor import blocks, components
from armi.reactor.tests import test_reactors
from armi.utils.directoryChangers import TemporaryDirectoryChanger
class TestVtkMesh(unittest.TestCase):
    """Exercise the VtkMesh container class directly."""

    def test_testVtkMesh(self):
        mesh = utils.VtkMesh.empty()

        # a freshly-created empty mesh has no geometry at all
        self.assertEqual(mesh.vertices.size, 0)
        self.assertEqual(mesh.vertices.shape, (0, 3))
        self.assertEqual(mesh.connectivity.size, 0)
        self.assertEqual(mesh.offsets.size, 0)
        self.assertEqual(mesh.cellTypes.size, 0)

        # a single tetrahedron to append twice
        tetVerts = np.array(
            [
                [0.0, 0.0, 0.0],
                [0.0, 1.0, 0.0],
                [1.0, 0.0, 0.0],
                [0.25, 0.25, 0.5],
            ]
        )
        tet = utils.VtkMesh(tetVerts, np.array([0, 1, 2, 3]), np.array([4]), np.array([VtkTetra.tid]))

        for _ in range(2):
            mesh.append(tet)

        # two tets: 8 vertices of 3 coordinates, 8 connectivity entries, 2 cells
        self.assertEqual(mesh.vertices.size, 3 * 8)
        self.assertEqual(mesh.offsets.size, 2)
        self.assertEqual(mesh.connectivity.size, 8)
        self.assertEqual(mesh.cellTypes.size, 2)

        # the second cell's indices should be shifted past the first cell's
        self.assertEqual(mesh.offsets[-1], 8)
        self.assertEqual(mesh.connectivity[-1], 7)
class TestVisDump(unittest.TestCase):
    """Test dumping a whole reactor and some specific block types."""

    @classmethod
    def setUpClass(cls):
        # Build shared fixtures once per class: a small test reactor, one of its
        # hex blocks, and a standalone Cartesian block with two components.
        caseSetting = settings.Settings()
        _, cls.r = test_reactors.loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml")
        cls.hexBlock = next(cls.r.core.iterBlocks())

        cls.cartesianBlock = blocks.CartesianBlock("TestCartesianBlock", caseSetting)
        cartesianComponent = components.HoledSquare(
            "duct",
            "UZr",
            Tinput=273.0,
            Thot=273.0,
            holeOD=68.0,
            widthOuter=12.5,
            mult=1.0,
        )
        cls.cartesianBlock.add(cartesianComponent)
        cls.cartesianBlock.add(components.Circle("clad", "HT9", Tinput=273.0, Thot=273.0, od=68.0, mult=169.0))

    def test_dumpReactorVtk(self):
        # This does a lot, and is hard to verify. at least make sure it doesn't crash
        with TemporaryDirectoryChanger(dumpOnException=False):
            dumper = vtk.VtkDumper("testVtk", inputName=None)
            with dumper:
                dumper.dumpState(self.r)

    def test_dumpReactorXdmf(self):
        # This does a lot, and is hard to verify. at least make sure it doesn't crash
        with TemporaryDirectoryChanger(dumpOnException=False):
            # the XDMF dumper references data out of a database, so write one first
            db = Database("testDatabase.h5", "w")
            with db:
                db.writeToDB(self.r)
            dumper = xdmf.XdmfDumper("testVtk", inputName="testDatabase.h5")
            with dumper:
                dumper.dumpState(self.r)

    def test_hexMesh(self):
        mesh = utils.createBlockMesh(self.hexBlock)

        # hex prism: 6 vertices on each of the bottom/top faces, 3 coords each
        self.assertEqual(mesh.vertices.size, 12 * 3)
        # 16 is the VTK hexagonal-prism cell type ID (see utils._HEX_PRISM_TID)
        self.assertEqual(mesh.cellTypes[0], 16)

    def test_cartesianMesh(self):
        mesh = utils.createBlockMesh(self.cartesianBlock)

        # box: 8 corners, 3 coords each
        self.assertEqual(mesh.vertices.size, 8 * 3)
        # 12 is the VTK hexahedron cell type ID
        self.assertEqual(mesh.cellTypes[0], 12)
================================================
FILE: armi/bookkeeping/visualization/tests/test_xdmf.py
================================================
# Copyright 2020 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from armi.bookkeeping.visualization import xdmf
class TestXdmf(unittest.TestCase):
    """
    Test XDMF-specific functionality.

    This is for testing XDMF functions that can reasonably be tested in a vacuum. The
    main dump methods are hard to test without resorting to checking whole files, which
    isn't particularly useful. Those tests can be found in test_vis.
    """

    def test_dedupTimes(self):
        # pairs of (input, expected) for the duplicate-resolution logic
        cases = [
            # no duplicates
            ([1.0 * t for t in range(10)], [1.0 * t for t in range(10)]),
            # ends in duplicates
            (
                [0.0, 1.0, 2.0, 2.0, 3.0, 4.0, 4.0, 4.0],
                [0.0, 1.0, 2.0, 2.000000002, 3.0, 4.0, 4.000000004, 4.000000008],
            ),
            # ends in unique
            (
                [0.0, 1.0, 2.0, 2.0, 3.0, 4.0, 4.0, 4.0, 5.0],
                [0.0, 1.0, 2.0, 2.000000002, 3.0, 4.0, 4.000000004, 4.000000008, 5.0],
            ),
            # all duplicates
            ([0.0] * 5, [0.0, 1e-09, 2e-09, 3.0000000000000004e-09, 4e-09]),
            # single value
            ([1.0], [1.0]),
            # empty list
            ([], []),
        ]
        for given, expected in cases:
            self.assertEqual(xdmf.XdmfDumper._dedupTimes(given), expected)

        with self.assertRaises(AssertionError):
            # input should be sorted
            xdmf.XdmfDumper._dedupTimes([float(t) for t in reversed(range(10))])
================================================
FILE: armi/bookkeeping/visualization/utils.py
================================================
# Copyright 2020 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utility classes/functions for visualization.
Most of these are derived from the VTK format, which tends to be general enough to
support other formats. Most of the work goes into figuring out where the vertices should
be for a given block/assembly shape. If this coupling becomes problematic, abstractions
for primitive shapes should be created.
"""
import math
import numpy as np
from pyevtk.hl import unstructuredGridToVTK
from pyevtk.vtk import VtkHexahedron, VtkQuadraticHexahedron
from armi.reactor import assemblies, blocks, reactors
from armi.utils import hexagon
# The hex prism cell type is not very well-documented, and so is not described in
# pyevtk. Digging into the header reveals that `16` does the trick.
_HEX_PRISM_TID = 16
class VtkMesh:
    """
    Container for VTK unstructured mesh data.

    Holds the four arrays a VTK unstructured grid needs (vertex locations,
    connectivity, offsets, cell types) and supports concatenating another mesh
    onto this one, taking care of the index shifts that requires.

    While the layout follows VTK conventions, "a pile of vertices plus their
    connectivity" is general enough that other formats can reuse this class.
    """

    def __init__(self, vertices, connectivity, offsets, cellTypes):
        """
        Parameters
        ----------
        vertices : np.ndarray
            An Nx3 numpy array with one row per (x,y,z) vertex
        connectivity : np.ndarray
            A 1-D array containing the vertex indices belonging to each cell
        offsets : np.ndarray
            A 1-D array containing the index of the first vertex for the next cell
        cellTypes : np.ndarray
            A 1-D array containing the cell type ID for each cell
        """
        self.vertices = vertices
        self.connectivity = connectivity
        self.offsets = offsets
        self.cellTypes = cellTypes

    @staticmethod
    def empty():
        """Return a mesh with no vertices and no cells."""
        noCells = [np.array([], dtype=np.int32) for _ in range(3)]
        return VtkMesh(np.empty((0, 3), dtype=np.float64), *noCells)

    @property
    def x(self):
        # copy so the result is unit-stride, as pyevtk requires
        return self.vertices[:, 0].copy()

    @property
    def y(self):
        return self.vertices[:, 1].copy()

    @property
    def z(self):
        return self.vertices[:, 2].copy()

    def append(self, other):
        """Add more cells to the mesh."""
        # indices in `other` must be shifted past everything already stored
        vertexShift = self.vertices.shape[0]
        endOffset = 0 if self.offsets.size == 0 else self.offsets[-1]

        self.vertices = np.vstack((self.vertices, other.vertices))
        self.connectivity = np.concatenate((self.connectivity, other.connectivity + vertexShift))
        self.offsets = np.concatenate((self.offsets, other.offsets + endOffset))
        self.cellTypes = np.concatenate((self.cellTypes, other.cellTypes))

    def write(self, path, data) -> str:
        """
        Write this mesh and the passed data to a VTK file. Returns the base path, plus
        relevant extension.
        """
        return unstructuredGridToVTK(
            path,
            self.x,
            self.y,
            self.z,
            connectivity=self.connectivity,
            offsets=self.offsets,
            cell_types=self.cellTypes,
            cellData=data,
        )
def createReactorBlockMesh(r: reactors.Reactor) -> VtkMesh:
    """Build one combined mesh containing every block in the reactor."""
    mesh = VtkMesh.empty()
    for b in r.getChildren(deep=True, predicate=lambda o: isinstance(o, blocks.Block)):
        mesh.append(createBlockMesh(b))

    return mesh
def createReactorAssemMesh(r: reactors.Reactor) -> VtkMesh:
    """Build one combined mesh containing every assembly in the reactor."""
    mesh = VtkMesh.empty()
    for a in r.getChildren(deep=True, predicate=lambda o: isinstance(o, assemblies.Assembly)):
        mesh.append(createAssemMesh(a))

    return mesh
def createBlockMesh(b: blocks.Block) -> VtkMesh:
    """Dispatch to the mesh builder matching the block's concrete geometry type."""
    for blockType, builder in (
        (blocks.HexBlock, _createHexBlockMesh),
        (blocks.CartesianBlock, _createCartesianBlockMesh),
        (blocks.ThRZBlock, _createTRZBlockMesh),
    ):
        if isinstance(b, blockType):
            return builder(b)

    raise TypeError(
        "Unsupported block type `{}`. Supported types are: {}".format(
            type(b).__name__,
            {t.__name__ for t in {blocks.CartesianBlock, blocks.HexBlock, blocks.ThRZBlock}},
        )
    )
def createAssemMesh(a: assemblies.Assembly) -> VtkMesh:
    """
    Create a single-cell mesh spanning a whole assembly.

    Kind of hacky, but since all blocks in an assembly are the same type, let's just
    use the block mesh functions and change their z coordinates to match the size of
    the whole assem 🤯
    """
    mesh = createBlockMesh(a[0])

    # we should only have a single VTK mesh primitive per block
    assert len(mesh.cellTypes) == 1

    # stretch the block's vertices to the assembly's full axial extent
    zMin = a.spatialGrid._bounds[2][0]
    zMax = a.spatialGrid._bounds[2][-1]

    # Bug fix: cellTypes holds integer type IDs, so compare against
    # VtkHexahedron.tid (as the quadratic branch below already does). The old
    # comparison against the VtkHexahedron object itself was always False, so
    # hexahedral (Cartesian) assembly meshes never got stretched.
    if mesh.cellTypes[0] == VtkHexahedron.tid:
        mesh.vertices[0:4, 2] = zMin
        mesh.vertices[4:8, 2] = zMax
    elif mesh.cellTypes[0] == _HEX_PRISM_TID:
        mesh.vertices[0:6, 2] = zMin
        mesh.vertices[6:12, 2] = zMax
    elif mesh.cellTypes[0] == VtkQuadraticHexahedron.tid:
        # again, quadratic hexahedra are a pain: corner nodes come in two
        # groups of four per face, interleaved with the edge midpoints
        mesh.vertices[0:4, 2] = zMin
        mesh.vertices[8:12, 2] = zMin
        mesh.vertices[4:8, 2] = zMax
        mesh.vertices[12:16, 2] = zMax

    return mesh
def _createHexBlockMesh(b: blocks.HexBlock) -> VtkMesh:
    """Make a hexagonal-prism mesh for a single hex block."""
    assert b.spatialLocator is not None

    zBottom = b.p.zbottom
    zTop = b.p.ztop

    # hexagon corner (x, y) coordinates, scaled to the pitch and shifted to the
    # block's global position
    corners2d = np.array(hexagon.corners(rotation=0)) * b.getPitch()
    corners2d += np.tile(b.spatialLocator.getGlobalCoordinates()[:2], (6, 1))

    # we need a top and bottom hex; fold in z locations for 3d coordinates
    zColumn = np.array([[zBottom] * 6 + [zTop] * 6]).transpose()
    hexVerts = np.hstack((np.vstack((corners2d, corners2d)), zColumn))

    return VtkMesh(
        hexVerts,
        np.array(list(range(12))),
        np.array([12]),
        np.array([_HEX_PRISM_TID]),
    )
def _createCartesianBlockMesh(b: blocks.CartesianBlock) -> VtkMesh:
    """Make a hexahedral (box) mesh for a single Cartesian block."""
    assert b.spatialLocator is not None

    zMin = b.p.zbottom
    zMax = b.p.ztop
    gridOffset = b.spatialLocator.getGlobalCoordinates()[:2]
    gridOffset = np.tile(gridOffset, (4, 1))

    pitch = b.getPitch()
    halfPitchX = pitch[0] * 0.5
    # Bug fix: use the y-component of the pitch here. The old code used
    # pitch[0] for both directions, which is only correct for square pitches.
    halfPitchY = pitch[1] * 0.5
    rectVerts = np.array(
        [
            [halfPitchX, halfPitchY],
            [-halfPitchX, halfPitchY],
            [-halfPitchX, -halfPitchY],
            [halfPitchX, -halfPitchY],
        ]
    )
    rectVerts += gridOffset

    # make top/bottom rectangles
    boxVerts = np.vstack((rectVerts, rectVerts))

    # fold in z coordinates
    boxVerts = np.hstack((boxVerts, np.array([[zMin] * 4 + [zMax] * 4]).transpose()))

    return VtkMesh(
        boxVerts,
        np.array(list(range(8))),
        np.array([8]),
        np.array([VtkHexahedron.tid]),
    )
def _createTRZBlockMesh(b: blocks.ThRZBlock) -> VtkMesh:
    """Make a 20-node quadratic hexahedron for a single theta-R-Z block."""
    # This could be improved.
    rIn, rOut = b.radialInner(), b.radialOuter()
    thIn, thOut = b.thetaInner(), b.thetaOuter()
    zIn, zOut = b.p.zbottom, b.p.ztop

    # midpoints for the quadratic edge nodes
    rMid = (rIn + rOut) * 0.5
    thMid = (thIn + thOut) * 0.5
    zMid = (zIn + zOut) * 0.5

    # corner nodes (bottom 4, then top 4), followed by edge midpoints in the
    # same order the original listed them
    vertsRTZ = [
        (rIn, thOut, zIn),
        (rIn, thIn, zIn),
        (rOut, thIn, zIn),
        (rOut, thOut, zIn),
        (rIn, thOut, zOut),
        (rIn, thIn, zOut),
        (rOut, thIn, zOut),
        (rOut, thOut, zOut),
        (rIn, thMid, zIn),
        (rMid, thIn, zIn),
        (rOut, thMid, zIn),
        (rMid, thOut, zIn),
        (rIn, thMid, zOut),
        (rMid, thIn, zOut),
        (rOut, thMid, zOut),
        (rMid, thOut, zOut),
        (rIn, thOut, zMid),
        (rIn, thIn, zMid),
        (rOut, thIn, zMid),
        (rOut, thOut, zMid),
    ]

    # convert cylindrical (r, theta, z) to Cartesian (x, y, z)
    vertsXYZ = np.array([[rad * math.cos(theta), rad * math.sin(theta), zz] for rad, theta, zz in vertsRTZ])

    return VtkMesh(
        vertsXYZ,
        np.array(list(range(20))),
        np.array([20]),
        np.array([VtkQuadraticHexahedron.tid]),
    )
================================================
FILE: armi/bookkeeping/visualization/vtk.py
================================================
# Copyright 2020 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Visualization implementation for VTK files.
Limitations
-----------
This version of the VTK file writer comes with a number of limitations and/or aspects
that can be improved upon. For instance:
* Only the Block and Assembly meshes and related parameters are exported to the VTK
file. Adding Core data is totally doable, and will be the product of future work.
With more considerable effort, arbitrary components may be visualizable!
* No efforts are made to de-duplicate the vertices in the mesh, so there are more
vertices than needed. Some fancy canned algorithms probably exist to do this, and it
wouldn't be too difficult to do here either. Also future work, but probably not super
important unless dealing with really big meshes.
"""
from typing import Any, Dict, List, Optional, Set, Tuple
import numpy as np
from pyevtk.vtk import VtkGroup
from armi import runLog
from armi.bookkeeping.db import database
from armi.bookkeeping.visualization import dumper, utils
from armi.reactor import assemblies, blocks, composites, parameters, reactors
class VtkDumper(dumper.VisFileDumper):
    """
    Dumper for VTK data.

    Writes unstructured block and assembly meshes, along with their parameter
    data, to VTK files — one pair of files per time node. The context manager
    records each file written and, on exit, ties multi-timestep runs together
    with group/collection files.
    """

    def __init__(self, baseName: str, inputName: str):
        self._baseName = baseName
        self._assemFiles: List[Tuple[str, float]] = []
        self._blockFiles: List[Tuple[str, float]] = []

    def dumpState(
        self,
        r: reactors.Reactor,
        includeParams: Optional[Set[str]] = None,
        excludeParams: Optional[Set[str]] = None,
    ):
        """
        Dump a reactor to a VTK file.

        Parameters
        ----------
        r : reactors.Reactor
            The reactor state to visualize
        includeParams : list of str, optional
            A list of parameter names to include in the viz file. Defaults to all
            params.
        excludeParams : list of str, optional
            A list of parameter names to exclude from the output. Defaults to no params.
        """
        cycle = r.p.cycle
        timeNode = r.p.timeNode

        # you never know...
        assert cycle < 1000
        assert timeNode < 1000

        # We avoid using cXnY, since VisIt doesn't support .pvd files, but *does* know
        # to lump data with similar file names and integers at the end.
        stamp = "{:0>3}{:0>3}".format(cycle, timeNode)
        blockPath = "{}_blk_{}".format(self._baseName, stamp)
        assemPath = "{}_asy_{}".format(self._baseName, stamp)

        # include and exclude params are mutually exclusive
        if includeParams is not None and excludeParams is not None:
            raise ValueError("includeParams and excludeParams can not both be used at the same time")

        blockObjs = r.getChildren(deep=True, predicate=lambda o: isinstance(o, blocks.Block))
        assemObjs = r.getChildren(deep=True, predicate=lambda o: isinstance(o, assemblies.Assembly))

        blockMesh = utils.createReactorBlockMesh(r)
        assemMesh = utils.createReactorAssemMesh(r)

        # collect param data
        blockData = _collectObjectData(blockObjs, includeParams, excludeParams)
        assemData = _collectObjectData(assemObjs, includeParams, excludeParams)

        # block number densities are special, since they aren't stored as params.
        # we need to copy the number density vectors to guarantee unit stride, which
        # pyevtk requires. Kinda seems like something it could do for us, but oh well.
        for key, value in database.collectBlockNumberDensities(blockObjs).items():
            blockData[key] = np.array(value)

        self._blockFiles.append((blockMesh.write(blockPath, blockData), r.p.time))
        self._assemFiles.append((assemMesh.write(assemPath, assemData), r.p.time))

    def __enter__(self):
        # start with fresh file bookkeeping for this run
        self._assemFiles = []
        self._blockFiles = []

    def __exit__(self, type, value, traceback):
        # one assembly file should have been written per block file
        assert len(self._assemFiles) == len(self._blockFiles)

        if len(self._assemFiles) > 1:
            # multiple files need to be wrapped up into groups. VTK does not like having
            # multiple meshes in the same group, so we write out separate Collection
            # files for them
            for suffix, files in (("_asm", self._assemFiles), ("_blk", self._blockFiles)):
                group = VtkGroup(f"{self._baseName}{suffix}")
                for path, time in files:
                    group.addFile(filepath=path, sim_time=time)
                group.save()
def _collectObjectData(
    objs: List[composites.ArmiObject],
    includeParams: Optional[Set[str]] = None,
    excludeParams: Optional[Set[str]] = None,
) -> Dict[str, Any]:
    """
    Gather parameter values from like-typed objects into flat per-param arrays.

    Returns a dict mapping parameter name to a 1-D numpy array with one entry
    per object. Parameters that cannot be represented as a flat numeric array
    (strings, all-None, jagged/object data, vector-valued params) are skipped.
    The parameter scan is driven by the first object's type, so all objects are
    assumed to share a type.

    Parameters
    ----------
    objs : list of ArmiObject
        Objects to pull parameter data from (must be non-empty).
    includeParams : set of str, optional
        If given, only these parameter names are collected.
    excludeParams : set of str, optional
        If given, these parameter names are skipped.
    """
    allData = dict()
    for pDef in type(objs[0]).pDefs.toWriteToDB(parameters.SINCE_ANYTHING):
        # honor the include/exclude filters first
        if includeParams is not None and pDef.name not in includeParams:
            continue
        if excludeParams is not None and pDef.name in excludeParams:
            continue
        # collect this param's value from every object
        data = []
        for obj in objs:
            val = obj.p[pDef.name]
            data.append(val)
        data = np.array(data)
        if data.dtype.kind == "S" or data.dtype.kind == "U":
            # no string support!
            continue
        if data.dtype.kind == "O":
            # datatype is "object", usually because it's jagged, or has Nones. We are
            # willing to try handling the Nones, but jagged also isn't visualizable.
            nones = np.where([d is None for d in data])[0]
            if len(nones) == data.shape[0]:
                # all Nones, so give up
                continue
            if len(nones) == 0:
                # looks like Nones had nothing to do with it. bail
                continue
            try:
                data = database.replaceNonesWithNonsense(data, pDef.name, nones=nones)
            except (ValueError, TypeError):
                # Looks like we have some weird data. We might be able to handle it
                # with more massaging, but probably not visualizable anyhow
                continue
        if data.dtype.kind == "O":
            # Didn't work
            runLog.warning(
                "The parameter data for `{}` could not be coerced into a native type for output; skipping.".format(
                    pDef.name
                )
            )
            continue
        if len(data.shape) != 1:
            # We aren't interested in vector data on each block
            continue
        allData[pDef.name] = data
    return allData
================================================
FILE: armi/bookkeeping/visualization/xdmf.py
================================================
# Copyright 2020 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Support for dumping XDMF files.
`XDMF <https://www.xdmf.org/>`_ is a data interchange format that
allows for separate representation of the data itself and a description of how those
data are to be interpreted. The data description ("light" data) lives in an XML file,
while the actual data (in our case, data to be plotted), as well as the data describing
the mesh ("hard" data) can be stored in HDF5 files, binary files, or embedded directly
into the XML file. In most cases, this allows for visualizing data directly out of an
ARMI database file. Using the ``XdmfDumper`` will produce an XML file (with an ``.xdmf``
extension) containing the description of data, as well as an HDF5 file containing the
mesh. Together with the input database, the ``.xdmf`` file can be opened in a
visualization tool that supports XDMF.
.. note::
Paraview seems to have rather good support for XDMF, while VisIt does not. The main
issue seems to be that VisIt does not properly render the general polyhedra that
XDMF supports. Unfortunately, we __need__ to use this to show hexagonal geometries,
since it's the only way to get a hexagonal prism without splitting up the mesh into
wedges. To do that would require splitting the parameter data, which would defeat
the main benefit of using XMDF in the first place (to be able to plot out of the
original Database file). Cartesian and R-X-Theta geometries in VisIt seem to work
fine.
"""
import io
import math
import pathlib
import xml.dom.minidom
import xml.etree.ElementTree as ET
from typing import Dict, List, Optional, Set, Tuple
import h5py
import numpy as np
from armi import runLog
from armi.bookkeeping.db import database
from armi.bookkeeping.visualization import dumper, utils
from armi.reactor import assemblies, blocks, composites, reactors
# Maps VTK cell type IDs to their XDMF topology-type counterparts. Only the
# polyhedron (used for hex prisms) is mapped so far; both schemes use 16 for it.
_VTK_TO_XDMF_CELLS = {16: 16}

# XDMF topology type IDs for the cell shapes this module emits
_POLYHEDRON = 16
_HEXAHEDRON = 9
_QUADRATIC_HEXAHEDRON = 48

# The topology of a hexagonal prism, represented as a general polyhedron. To get this in
# proper XDMF, these need to be offset to the proper vertex indices in the full mesh,
# and have the number of face vertices inserted into the proper locations (notice the
# [0] placeholders).
_HEX_PRISM_TOPO = np.array(
    [0]
    + list(range(6))  # bottom hexagonal face
    + [0]
    + list(range(6, 12))  # top hexagonal face
    + [0]
    + [0, 1, 7, 6]  # the six rectangular side faces follow
    + [0]
    + [1, 2, 8, 7]
    + [0]
    + [2, 3, 9, 8]
    + [0]
    + [3, 4, 10, 9]
    + [0]
    + [4, 5, 11, 10]
    + [0]
    + [5, 0, 6, 11]
)

# The indices of the placeholder zeros from _HEX_PRISM_TOPO array above
_HEX_PRISM_FACE_SIZE_IDX = np.array([0, 7, 14, 19, 24, 29, 34, 39])

# The number of vertices for each face
_HEX_PRISM_FACE_SIZES = np.array([6, 6, 4, 4, 4, 4, 4, 4])
def _getAttributesFromDataset(d: h5py.Dataset) -> Dict[str, str]:
    """
    Build the XML attribute dict describing an HDF5 dataset to XDMF.

    Raises KeyError for dtypes other than int32/int64/float32/float64; callers
    catch that to skip unsupported data.
    """
    # (DataType, Precision) per supported numpy dtype
    dataType, precision = {
        np.dtype("int32"): ("Int", "4"),
        np.dtype("int64"): ("Int", "8"),
        np.dtype("float32"): ("Float", "4"),
        np.dtype("float64"): ("Float", "8"),
    }[d.dtype]

    return {
        "Dimensions": " ".join(str(i) for i in d.shape),
        "DataType": dataType,
        "Precision": precision,
        "Format": "HDF",
    }
class XdmfDumper(dumper.VisFileDumper):
"""
VisFileDumper implementation for XDMF format.
The general strategy of this dumper is to create a new HDF5 file that contains just
the necessary mesh information for each dumped time step. The XML that
describes/points to these data is stored internally as ``ElementTree`` objects until
the end. When all time steps have been processed, these elements have time
information added to them, and are collected into a "TemporalCollection" Grid and
written to an ``.xdmf`` file.
"""
def __init__(self, baseName: str, inputName: Optional[str] = None):
self._baseName = baseName
if inputName is None:
runLog.warning("No input database name was given, so only an XMDF mesh will be created")
self._inputName = inputName
# Check that the inputName is a relative path. XDMF doesn't seem to like
# absolute paths; at least on windows with ParaView
if pathlib.Path(inputName).is_absolute():
raise ValueError(
"XDMF tools tend not to like absolute paths; provide a relative path to the input database."
)
self._meshH5 = None
self._inputDb = None
self._times = []
self._blockGrids = []
self._assemGrids = []
    def __enter__(self):
        """
        Prepare to write states.

        The dumper keeps track of ``<Grid>`` tags that need to be written into a
        Collection at the end. This also opens an auxiliary HDF5 file for writing meshes
        at each time step.
        """
        # auxiliary file that will hold the mesh vertices/topology per time step
        self._meshH5 = h5py.File(self._baseName + "_mesh.h5", "w")

        if self._inputName is None:
            # we could handle the case where the database wasn't passed by pumping state
            # into a new h5 file, but why?
            raise ValueError("Input database needed to generate XDMF output!")

        self._inputDb = database.Database(self._inputName, "r")

        # only format version 3 lays data out the way the generated XML points at it
        with self._inputDb as db:
            dbVersion = db.version

        if math.floor(float(dbVersion)) != 3:
            raise ValueError("XDMF output requires Database version 3. Got version `{}`".format(dbVersion))

        # reset the per-run accumulators of times and per-time-step grid elements
        self._times = []
        self._blockGrids = []
        self._assemGrids = []
    def __exit__(self, type, value, traceback):
        """
        Finalize file writing.

        This writes all of the ``<Grid>`` tags into a Collection for all time steps, and
        closes the input database and mesh-bearing HDF5 file.
        """
        self._meshH5.close()
        self._meshH5 = None

        if self._inputDb is not None:
            self._inputDb.close()
            self._inputDb = None

        timeCollectionBlk = ET.Element("Grid", attrib={"GridType": "Collection", "CollectionType": "Temporal"})
        timeCollectionAsm = ET.Element("Grid", attrib={"GridType": "Collection", "CollectionType": "Temporal"})

        # make sure all times are unique. Paraview will crash if they are not
        times = self._dedupTimes(self._times)

        for aGrid, bGrid, time in zip(self._assemGrids, self._blockGrids, times):
            # the same <Time> element is attached to both grids; ElementTree
            # permits an element to appear under multiple parents
            timeElement = ET.Element("Time", attrib={"TimeType": "Single", "Value": str(time)})
            bGrid.append(timeElement)
            timeCollectionBlk.append(bGrid)
            aGrid.append(timeElement)
            timeCollectionAsm.append(aGrid)

        # one .xdmf file per mesh type (block and assembly)
        for collection, typ in [
            (timeCollectionBlk, "_blk"),
            (timeCollectionAsm, "_asm"),
        ]:
            xdmf = ET.Element("Xdmf", attrib={"Version": "3.0"})
            domain = ET.Element("Domain", attrib={"Name": "Reactor"})
            domain.append(collection)
            xdmf.append(domain)

            # Write to an internal buffer so that we can print more fancy below
            tree = ET.ElementTree(element=xdmf)
            buf = io.StringIO()
            tree.write(buf, encoding="unicode")
            buf.seek(0)

            # Round-trip through minidom to do the pretty print
            dom = xml.dom.minidom.parse(buf)
            with open(self._baseName + typ + ".xdmf", "w") as f:
                f.write(dom.toprettyxml())
@staticmethod
def _dedupTimes(times: List[float]) -> List[float]:
"""
Make sure that no two times are the same.
Duplicates will be resolved by bumping each subsequent duplicate time forward by
some epsilon, cascading following duplicates by the same amount until no
duplicates remain. This will fail in the case where there are already times that
are within Ndup*epsilon of each other. In such cases, this function probably
isn't valid anyways.
"""
assert all(a <= b for a, b in zip(times, times[1:])), "Input list must be sorted"
# This should be used as a multiplicative epsilon, to avoid precision issues
# with large times
_EPS = 1.0e-9
# ...except when close enough to 0. Floating-point is a pain
mapZeroToOne = lambda x: x if x > _EPS else 1.0
dups = [0] * len(times)
# We iterate in reverse so that each entry in dups will contain the number of
# duplicate entries that **precede** it
for i in reversed(range(len(times))):
ti = times[i]
nDup = 0
for j in range(i - 1, -1, -1):
if times[j] == ti:
nDup += 1
else:
break
dups[i] = nDup
return [t + dups * _EPS * mapZeroToOne(t) for dups, t in zip(dups, times)]
    def dumpState(
        self,
        r: reactors.Reactor,
        includeParams: Optional[Set[str]] = None,
        excludeParams: Optional[Set[str]] = None,
    ):
        """
        Produce a ``<Grid>`` for a single timestep, as well as supporting HDF5 datasets.

        .. note::
            ``includeParams``/``excludeParams`` are accepted for interface
            compatibility but are not referenced in this implementation.
        """
        cycle = r.p.cycle
        node = r.p.timeNode
        timeGroupName = database.getH5GroupName(cycle, node)

        # careful here! we are trying to use the database datasets as the source of hard
        # data without copying, so the order that we make the mesh needs to be the same
        # order as the data in the database. There is no guarantee that the way a loaded
        # reactor is ordered is the same way that it was ordered in the database (though
        # perhaps we should do some work to specify that better). We need to look at the
        # layout in the input database to re-order the objects.
        with self._inputDb as db:
            layout = db.getLayout(cycle, node)

        # serial number -> position of that object's data in the database arrays
        snToIdx = {sn: i for i, sn in zip(layout.indexInData, layout.serialNum)}

        blks = r.getChildren(deep=True, predicate=lambda o: isinstance(o, blocks.Block))
        blks = sorted(blks, key=lambda b: snToIdx[b.p.serialNum])

        assems = r.getChildren(deep=True, predicate=lambda o: isinstance(o, assemblies.Assembly))
        assems = sorted(assems, key=lambda a: snToIdx[a.p.serialNum])

        # build the meshes, then attach <Attribute> elements pointing at the
        # plottable datasets in the input database
        blockGrid = self._makeBlockMesh(r, snToIdx)
        self._collectObjectData(blks, timeGroupName, blockGrid)

        assemGrid = self._makeAssemblyMesh(r, snToIdx)
        self._collectObjectData(assems, timeGroupName, assemGrid)

        # saved until __exit__, when the temporal Collections are assembled
        self._blockGrids.append(blockGrid)
        self._assemGrids.append(assemGrid)
        self._times.append(r.p.time)
    def _collectObjectData(self, objs: List[composites.ArmiObject], timeGroupName, node: ET.Element):
        """
        Scan for things that look plottable in the input database.

        "Plottable" things are anything that have int or float data, and the same number
        of elements as there are objects.

        .. warning::
            This makes some assumptions as to the structure of the database.
        """
        if self._inputDb is None:
            # If we weren't given a database to draw data from, we will just skip this
            # for now. Most of the time, a dumper should have an input database.
            # Otherwise, this **could** extract from the reactor state.
            return

        typeNames = {type(o).__name__ for o in objs}
        if len(typeNames) != 1:
            raise ValueError("Currently only supporting homogeneous block types")
        typeName = next(iter(typeNames))

        # e.g. "c0n1/HexBlock" — the group holding this object type's datasets
        dataGroupName = "/".join((timeGroupName, typeName))

        with self._inputDb as db:
            for key, val in db.h5db[dataGroupName].items():
                # only 1-D datasets with exactly one entry per object are plottable
                if val.shape != (len(objs),):
                    continue

                try:
                    dataItem = ET.Element("DataItem", attrib=_getAttributesFromDataset(val))
                except KeyError:
                    # dtype not supported by _getAttributesFromDataset; skip it
                    continue

                # point the XML at the dataset inside the input database file
                dataItem.text = ":".join((db.fileName, val.name))

                attrib = ET.Element(
                    "Attribute",
                    attrib={"Name": key, "Center": "Cell", "AttributeType": "Scalar"},
                )
                attrib.append(dataItem)
                node.append(attrib)
def _makeBlockMesh(self, r: reactors.Reactor, indexMap) -> ET.Element:
    """
    Build the block-level mesh for the current state and store it in the mesh HDF5 file.

    Parameters
    ----------
    r : reactors.Reactor
        Reactor whose blocks are meshed.
    indexMap : dict
        Maps block serial number to its position in the database data arrays,
        so blocks are ordered consistently with the stored parameter data.

    Returns
    -------
    ET.Element
        A ``Grid`` element referencing the vertex/topology datasets written to
        the mesh HDF5 file.
    """
    cycle = r.p.cycle
    node = r.p.timeNode
    blks = r.getChildren(deep=True, predicate=lambda o: isinstance(o, blocks.Block))
    blks = sorted(blks, key=lambda b: indexMap[b.p.serialNum])
    groupName = "c{}n{}".format(cycle, node)
    # VTK stuff turns out to be pretty flexible
    blockMesh = utils.VtkMesh.empty()
    for b in blks:
        blockMesh.append(utils.createBlockMesh(b))
    verts = blockMesh.vertices
    verticesInH5 = groupName + "/blk_vertices"
    self._meshH5[verticesInH5] = verts
    # Collect per-block topology chunks and concatenate them once at the end;
    # calling np.append() per block copies the whole array every iteration,
    # making the loop O(n^2).
    topoParts = [np.array([], dtype=np.int32)]
    offset = 0
    for b in blks:
        nVerts, cellTopo = _getTopologyFromShape(b, offset)
        topoParts.append(np.asarray(cellTopo))
        offset += nVerts
    topoValues = np.concatenate(topoParts)
    topoInH5 = groupName + "/blk_topology"
    self._meshH5[topoInH5] = topoValues
    return self._makeGenericMesh("Blocks", len(blks), self._meshH5[verticesInH5], self._meshH5[topoInH5])
def _makeAssemblyMesh(self, r: reactors.Reactor, indexMap) -> ET.Element:
    """
    Build the assembly-level mesh for the current state and store it in the mesh HDF5 file.

    Parameters
    ----------
    r : reactors.Reactor
        Reactor whose assemblies are meshed.
    indexMap : dict
        Maps assembly serial number to its position in the database data
        arrays, so assemblies are ordered consistently with the stored data.

    Returns
    -------
    ET.Element
        A ``Grid`` element referencing the vertex/topology datasets written to
        the mesh HDF5 file.
    """
    cycle = r.p.cycle
    node = r.p.timeNode
    asys = r.getChildren(deep=True, predicate=lambda o: isinstance(o, assemblies.Assembly))
    asys = sorted(asys, key=lambda a: indexMap[a.p.serialNum])
    groupName = "c{}n{}".format(cycle, node)
    # VTK stuff turns out to be pretty flexible
    assemMesh = utils.VtkMesh.empty()
    for assem in asys:
        assemMesh.append(utils.createAssemMesh(assem))
    verts = assemMesh.vertices
    verticesInH5 = groupName + "/asy_vertices"
    self._meshH5[verticesInH5] = verts
    # Collect per-assembly topology chunks and concatenate once; np.append()
    # inside the loop would re-copy the growing array each time (O(n^2)).
    # The topology shape is derived from the assembly's first block.
    topoParts = [np.array([], dtype=np.int32)]
    offset = 0
    for a in asys:
        nVerts, cellTopo = _getTopologyFromShape(a[0], offset)
        topoParts.append(np.asarray(cellTopo))
        offset += nVerts
    topoValues = np.concatenate(topoParts)
    topoInH5 = groupName + "/asy_topology"
    self._meshH5[topoInH5] = topoValues
    return self._makeGenericMesh("Assemblies", len(asys), self._meshH5[verticesInH5], self._meshH5[topoInH5])
@staticmethod
def _makeGenericMesh(name: str, nCells: int, vertexData: h5py.Dataset, topologyData: h5py.Dataset) -> ET.Element:
    """
    Assemble an XDMF ``Grid`` element pointing at vertex and topology data in HDF5.

    Parameters
    ----------
    name : str
        Name for the grid (e.g. "Blocks" or "Assemblies").
    nCells : int
        Number of cells in the mesh.
    vertexData : h5py.Dataset
        Dataset holding the XYZ vertex coordinates.
    topologyData : h5py.Dataset
        Dataset holding the mixed-topology cell description.
    """
    grid = ET.Element("Grid", attrib={"GridType": "Uniform", "Name": name})
    # Geometry: XYZ coordinates, read straight from the HDF5 dataset
    geometryNode = ET.SubElement(grid, "Geometry", attrib={"GeometryType": "XYZ"})
    vertexItem = ET.SubElement(
        geometryNode,
        "DataItem",
        attrib={
            "Dimensions": "{} {}".format(*vertexData.shape),
            "NumberType": "Float",
            "Format": "HDF",
        },
    )
    vertexItem.text = ":".join((vertexData.file.filename, vertexData.name))
    # Topology: mixed cell types, one entry stream for all cells
    topologyNode = ET.SubElement(
        grid,
        "Topology",
        attrib={"TopologyType": "Mixed", "NumberOfElements": str(nCells)},
    )
    topologyItem = ET.SubElement(
        topologyNode,
        "DataItem",
        attrib={
            "Dimensions": "{}".format(topologyData.size),
            "NumberType": "Int",
            "Format": "HDF",
        },
    )
    topologyItem.text = ":".join((topologyData.file.filename, topologyData.name))
    return grid
def _getTopologyFromShape(b: blocks.Block, offset: int) -> Tuple[int, List[int]]:
    """
    Return the number of vertices used to make the shape, plus XDMF topology values.

    The size of the XDMF topology values cannot be used directly in computing the next
    offset because it sometimes contains vertex indices __and__ sizing information.
    """
    if isinstance(b, blocks.HexBlock):
        # hex prism: a polyhedron with 8 faces
        shifted = _HEX_PRISM_TOPO + offset
        # the face-size slots were shifted along with the vertex indices;
        # restore their original values
        shifted[_HEX_PRISM_FACE_SIZE_IDX] = _HEX_PRISM_FACE_SIZES
        return 12, np.append([_POLYHEDRON, 8], shifted)
    if isinstance(b, blocks.CartesianBlock):
        # plain hexahedron: 8 sequential vertex indices
        return 8, [_HEXAHEDRON] + list(range(offset, offset + 8))
    if isinstance(b, blocks.ThRZBlock):
        # quadratic hexahedron: 20 sequential vertex indices
        return 20, [_QUADRATIC_HEXAHEDRON] + list(range(offset, offset + 20))
    raise TypeError("Unsupported block type `{}`".format(type(b)))
================================================
FILE: armi/cases/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Case and CaseSuite objects for running and analyzing ARMI cases.
A ``Case`` is a collection of inputs that represents one particular run. Cases have special knowledge about dependencies
and can perform useful operations like compare, clone, and run.
A ``CaseSuite`` is a set of (often related) Cases. These are fundamental to parameter sweeps and test suites.
See Also
--------
armi.cli : Entry points that build Cases and/or CaseSuites and send them off to do work
armi.operators : Operations that ARMI will perform on a reactor model.
Generally these are made by an individual Case.
Examples
--------
Create a Case and run it::
case = Case(settings.Settings("path-to-settings.yaml"))
case.run()
# do something with output database
Create a case suite from existing files, and run the suite::
cs = settings.Settings()  # default settings
suite = CaseSuite(cs)
suite.discover("my-cases*.yaml", recursive=True)
suite.run()
.. warning:: Suite running may not work yet if the cases have interdependencies.
Create a ``burnStep`` sensitivity study from some base CS::
baseCase = Case(settings.Settings("base-settings.yaml")) # default settings
suite = CaseSuite(baseCase.cs) # basically just sets armiLocation
for numSteps in range(3, 11):
with ForcedCreationDirectoryChanger("{}steps".format(numSteps)):
case = baseCase.clone(title=baseCase.title + f"-with{numSteps}steps", settings={"burnSteps": numSteps})
suite.add(case)
suite.writeInputs()
Then submit the inputs to your HPC cluster.
"""
from armi.cases.case import Case # noqa: F401
from armi.cases.suite import CaseSuite # noqa: F401
================================================
FILE: armi/cases/case.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The ``Case`` object is responsible for running and executing a set of user inputs. Many entry
points redirect into ``Case`` methods, such as ``clone``, ``compare``, and ``run``.
The ``Case`` object provides an abstraction around ARMI inputs to allow for manipulation and
collection of cases.
See Also
--------
armi.cases.suite : A collection of Cases
"""
import ast
import cProfile
import glob
import io
import os
import pathlib
import pstats
import re
import sys
import textwrap
import time
import trace
from typing import Dict, Optional, Sequence, Set, Union
import coverage
from armi import context, getPluginManager, interfaces, operators, runLog, settings
from armi.bookkeeping.db import compareDatabases
from armi.nucDirectory import nuclideBases
from armi.physics.neutronics.settings import CONF_LOADING_FILE
from armi.reactor import blueprints, reactors
from armi.utils import pathTools, tabulate, textProcessors
from armi.utils.customExceptions import NonexistentSetting
from armi.utils.directoryChangers import (
DirectoryChanger,
ForcedCreationDirectoryChanger,
)
# Change from default .coverage to help with Windows dotfile issues.
# Must correspond with data_file entry in `pyproject.toml`!
COVERAGE_RESULTS_FILE = "coverage_results.cov"
class Case:
"""
An ARMI Case that can be used for suite set up and post-analysis.
A Case is capable of loading inputs, checking that they are valid, and initializing a reactor
model. Cases can also compare against other cases and be collected into multiple
:py:class:`armi.cases.suite.CaseSuite`.
"""
def __init__(self, cs, caseSuite=None, bp=None):
"""
Initialize a Case from user input.
Parameters
----------
cs : Settings
Settings for this Case
caseSuite : CaseSuite, optional
CaseSuite this particular case belongs. Passing this in allows dependency tracking
across the other cases (e.g. if one case uses the output of another as input, as happens
in in-use testing for reactivity coefficient snapshot testing or more complex analysis
sequences).
bp : Blueprints, optional
:py:class:`armi.reactor.blueprints.Blueprints` object containing the assembly
definitions and other information. If not supplied, it will be loaded from the ``cs`` as
needed.
"""
self._startTime = time.time()
self._caseSuite = caseSuite
self._tasks = []
self._dependencies: Set[Case] = set()
self.enabled = True
# set the signal if the user passes in a blueprint object, instead of a file
if bp is not None:
cs.filelessBP = True
# NOTE: in order to prevent slow submission times for loading massively large blueprints
# (e.g. certain computer-generated input files), self.bp can be None.
self.cs = cs
self._bp = bp
# this is used in parameter sweeps
self._independentVariables = {}
@property
def independentVariables(self):
"""
Get dictionary of independent variables and their values.
This unpacks independent variables from the cs object's independentVariables setting the
first time it is run. This is used in parameter sweeps.
See Also
--------
writeInputs : writes the ``independentVariabls`` setting
"""
if not self._independentVariables:
for indepStr in self.cs["independentVariables"]:
indepName, value = ast.literal_eval(indepStr)
self._independentVariables[indepName] = value
return self._independentVariables
def __repr__(self):
return "".format(self.cs.path)
@property
def bp(self):
"""
Blueprint object for this case.
Notes
-----
This property allows lazy loading.
"""
if self._bp is None:
self._bp = blueprints.loadFromCs(self.cs, roundTrip=True)
return self._bp
@bp.setter
def bp(self, bp):
self._bp = bp
@property
def dependencies(self):
"""
Get a list of parent Case objects.
Notes
-----
This is performed on demand so that if someone changes the underlying Settings, the case
will reflect the correct dependencies. As a result, if this is being done iteratively,
you may want to cache it somehow (in a dict?).
Ideally, this should not be the responsibility of the Case, but rather the suite!
"""
dependencies = set()
if self._caseSuite is not None:
pm = getPluginManager()
if pm is not None:
for pluginDependencies in pm.hook.defineCaseDependencies(case=self, suite=self._caseSuite):
dependencies.update(pluginDependencies)
# the ([^\/]) capture basically gets the file name portion and excludes any
# directory separator
dependencies.update(
self.getPotentialParentFromSettingValue(
self.cs["explicitRepeatShuffles"],
r"^(?P.*[\/\\])?(?P[^\/\\]+)-SHUFFLES\.txt$",
)
)
# ensure that a case doesn't appear to be its own dependency
dependencies.update(self._dependencies)
dependencies.discard(self)
return dependencies
def addExplicitDependency(self, case):
"""
Register an explicit dependency.
When evaluating the ``dependency`` property, dynamic dependencies are probed
using the current case settings and plugin hooks. Sometimes, it is necessary to
impose dependencies that are not expressed through settings and hooks. This
method stores another case as an explicit dependency, which will be included
with the other, implicitly discovered, dependencies.
"""
if case in self._dependencies:
runLog.warning("The case {} is already explicitly specified as a dependency of {}".format(case, self))
self._dependencies.add(case)
def getPotentialParentFromSettingValue(self, settingValue, filePattern):
"""
Get a parent case based on a setting value and a pattern.
This is a convenient way for a plugin to express a dependency. It uses the
``match.groupdict`` functionality to pull the directory and case name out of a
specific setting value an regular expression.
Parameters
----------
settingValue : str
A particular setting value that might contain a reference to an input that
is produced by a dependency.
filePattern : str
A regular expression for extracting the location and name of the dependency.
If the ``settingValue`` matches the passed pattern, this function will
attempt to extract the ``dirName`` and ``title`` groups to find the dependency.
"""
m = re.match(filePattern, settingValue, re.IGNORECASE)
deps = self._getPotentialDependencies(**m.groupdict()) if m else set()
if len(deps) > 1:
raise KeyError("Found more than one case matching {}".format(settingValue))
return deps
def _getPotentialDependencies(self, dirName, title):
"""Get a parent case based on a directory and case title."""
if dirName is None:
dirName = self.directory
elif not os.path.isabs(dirName):
dirName = os.path.join(self.directory, dirName)
def caseMatches(case):
if os.path.normcase(case.title) != os.path.normcase(title):
return False
return os.path.normcase(os.path.abspath(case.directory)) == os.path.normcase(os.path.abspath(dirName))
return {case for case in self._caseSuite if caseMatches(case)}
@property
def title(self):
"""The case title."""
return self.cs.caseTitle
@title.setter
def title(self, name):
self.cs.caseTitle = name
@property
def dbName(self):
"""The case output database name."""
return os.path.splitext(self.cs.path)[0] + ".h5"
@property
def directory(self):
"""The working directory of the case."""
return self.cs.inputDirectory
def __eq__(self, that):
"""
Compares two cases to determine if they are equivalent by looking at the ``title`` and
``directory``.
Notes
-----
No other attributes except those stated above are used for the comparison; the above stated
attributes can be considered the "primary key" for a Case object and identify it as being
unique. Both of these comparisons are simple string comparisons, so a reference and an
absolute path to the same case would be considered different.
"""
return self.title == that.title and self.directory == that.directory
def __hash__(self):
"""Computes the hash of a Case object.
This is required when __eq__ is been defined. Take the hash of the tuple of the "primary key".
"""
return hash((self.title, self.directory))
def setUpTaskDependence(self):
"""
Set the task dependence based on the :code:`dependencies`.
This accounts for whether or not the dependency is enabled.
"""
if not self.enabled:
return
for dependency in self.dependencies:
if dependency.enabled:
self._tasks[0].add_parent(dependency._tasks[-1])
def run(self):
    """
    Run an ARMI case.

    .. impl:: The case class allows for a generic ARMI simulation.
        :id: I_ARMI_CASE
        :implements: R_ARMI_CASE

        This method is responsible for "running" the ARMI simulation instigated by the inputted
        settings. This initializes an :py:class:`~armi.operators.operator.Operator`, a
        :py:class:`~armi.reactor.reactors.Reactor` and invokes
        :py:meth:`Operator.operate `. It also
        activates supervisory things like code coverage checking, profiling, or tracing, if
        requested by users during debugging.

    Notes
    -----
    Room for improvement: The coverage, profiling, etc. stuff can probably be moved out of here
    to a more elegant place (like a context manager?).
    """
    # Start the log here so that the verbosities for the head and workers can be configured
    # based on the user settings for the rest of the run.
    runLog.LOG.startLog(self.cs.caseTitle)
    if context.MPI_RANK == 0:
        runLog.setVerbosity(self.cs["verbosity"])
    else:
        # worker ranks get their own (usually quieter) verbosity
        runLog.setVerbosity(self.cs["branchVerbosity"])
    # if in the settings, start the coverage and profiling
    cov = self._startCoverage()
    profiler = self._startProfiling()
    self.checkInputs()
    o = self.initializeOperator()
    # the operator is a context manager; entering/exiting brackets the run
    with o:
        if self.cs["trace"] and context.MPI_RANK == 0:
            # only trace primary node.
            tracer = trace.Trace(ignoredirs=[sys.prefix, sys.exec_prefix], trace=1)
            tracer.runctx("o.operate()", globals(), locals())
        else:
            o.operate()
    # if in the settings, report the coverage and profiling
    Case._endCoverage(self.cs["coverageConfigFile"], cov)
    Case._endProfiling(profiler)
def _startCoverage(self):
    """Helper to the Case.run: spin up the code coverage tooling, if the Settings file says to.

    Returns
    -------
    coverage.Coverage
        Coverage object for pytest or unittest; None when the ``coverage``
        setting is disabled.
    """
    cov = None
    if self.cs["coverage"]:
        cov = coverage.Coverage(
            config_file=Case._getCoverageRcFile(userCovFile=self.cs["coverageConfigFile"], makeCopy=True),
            debug=["dataio"],
        )
        if context.MPI_SIZE > 1:
            # interestingly, you cannot set the parallel flag in the constructor without
            # auto-specifying the data suffix. This should enable parallel coverage with
            # auto-generated data file suffixes and combinations.
            cov.config.parallel = True
        cov.start()
    return cov
@staticmethod
def _endCoverage(userCovFile, cov=None):
    """Helper to the Case.run(): stop and report code coverage, if the Settings file says to.

    Parameters
    ----------
    userCovFile : str
        File path to user-supplied coverage configuration file (default setting is empty string)
    cov : coverage.Coverage (optional)
        Hopefully, a valid and non-empty set of coverage data.
    """
    if cov is None:
        # coverage was never started (setting disabled); nothing to report
        return
    cov.stop()
    cov.save()
    if context.MPI_SIZE > 1:
        context.MPI_COMM.barrier()  # force waiting for everyone to finish
    if context.MPI_RANK == 0 and context.MPI_SIZE > 1:
        # combine all the parallel coverage data files into one and make the XML and HTML
        # reports for the whole run.
        combinedCoverage = coverage.Coverage(config_file=Case._getCoverageRcFile(userCovFile), debug=["dataio"])
        combinedCoverage.config.parallel = True
        # combine does delete the files it merges
        combinedCoverage.combine()
        combinedCoverage.save()
        combinedCoverage.html_report()
        combinedCoverage.xml_report()
@staticmethod
def _getCoverageRcFile(userCovFile, makeCopy=False):
"""Helper to provide the coverage configuration file according to the OS. A user-supplied
file will take precedence, and is not checked for a dot-filename.
Notes
-----
ARMI replaced the ".coveragerc" file has been replaced by "pyproject.toml".
Parameters
----------
userCovFile : str
File path to user-supplied coverage configuration file (default setting is empty string)
makeCopy : bool (optional)
Whether or not to copy the coverage config file to an alternate file path
Returns
-------
covFile : str
path of pyprojec.toml file
"""
# User-defined file takes precedence.
if userCovFile:
return os.path.abspath(userCovFile)
covRcDir = os.path.abspath(context.PROJECT_ROOT)
return os.path.join(covRcDir, "pyproject.toml")
def _startProfiling(self):
"""Helper to the Case.run(): start the Python profiling, if the Settings file says to.
Returns
-------
cProfile.Profile
Standard Python profiling object
"""
profiler = None
if self.cs["profile"]:
profiler = cProfile.Profile()
profiler.enable(subcalls=True, builtins=True)
return profiler
@staticmethod
def _endProfiling(profiler=None):
    """Helper to the Case.run(): stop and report python profiling,
    if the Settings file says to.

    Parameters
    ----------
    profiler : cProfile.Profile (optional)
        Hopefully, a valid and non-empty set of profiling data.
    """
    if profiler is None:
        # profiling was never started (setting disabled); nothing to report
        return
    profiler.disable()
    # one stats dump file per MPI rank
    profiler.dump_stats("profiler.{:0>3}.stats".format(context.MPI_RANK))
    statsStream = io.StringIO()
    summary = pstats.Stats(profiler, stream=statsStream).sort_stats("cumulative")
    summary.print_stats()
    if context.MPI_SIZE > 0 and context.MPI_COMM is not None:
        # gather every rank's formatted stats on the primary node for printing
        allStats = context.MPI_COMM.gather(statsStream.getvalue(), root=0)
        if context.MPI_RANK == 0:
            for rank, statsString in enumerate(allStats):
                # using print statements because the logger has been turned off
                print("=" * 100)
                print("{:^100}".format(" Profiler statistics for RANK={} ".format(rank)))
                print(statsString)
                print("=" * 100)
    else:
        print(statsStream.getvalue())
def initializeOperator(self, r=None):
    """Creates and returns an Operator.

    Parameters
    ----------
    r : Reactor, optional
        Reactor to attach to the new operator. When not provided, one is
        constructed from this case's settings and blueprints.

    Returns
    -------
    Operator
        The operator, with its interfaces initialized against ``r``.
    """
    with DirectoryChanger(self.cs.inputDirectory, dumpOnException=False):
        self._initBurnChain()
        o = operators.factory(self.cs)
        if r is None:
            r = reactors.factory(self.cs, self.bp)
        o.initializeInterfaces(r)
        # Set this here to make sure the full duration of initialization is properly captured.
        # Cannot be done in reactors since the above self.bp call implicitly initializes blueprints.
        r.core.timeOfStart = self._startTime
        return o
def _initBurnChain(self):
    """
    Apply the burn chain setting to the nucDir.

    Raises
    ------
    ValueError
        If the ``burnChainFileName`` setting points to a nonexistent file.

    Notes
    -----
    This is admittedly an odd place for this but the burn chain info must be applied sometime
    after user-input has been loaded (for custom burn chains) but not long after (because nucDir
    is framework-level and expected to be up-to-date by lots of modules).
    """
    if not self.cs["initializeBurnChain"]:
        # the user explicitly opted out of burn-chain initialization
        runLog.info("Skipping burn-chain initialization since `initializeBurnChain` setting is disabled.")
        return
    if not os.path.exists(self.cs["burnChainFileName"]):
        raise ValueError(
            f"The burn-chain file {self.cs['burnChainFileName']} does not exist. The "
            "data cannot be loaded. Fix this path or disable burn-chain initialization using "
            "the `initializeBurnChain` setting."
        )
    with open(self.cs["burnChainFileName"]) as burnChainStream:
        nuclideBases.imposeBurnChain(burnChainStream)
def checkInputs(self):
    """
    Checks ARMI inputs for consistency.

    .. impl:: Perform validity checks on case inputs.
        :id: I_ARMI_CASE_CHECK
        :implements: R_ARMI_CASE_CHECK

        This method checks the validity of the current settings. It relies on an
        :py:class:`~armi.settings.settingsValidation.Inspector` object from the
        :py:class:`~armi.operators.operator.Operator` to generate a list of
        :py:class:`~armi.settings.settingsValidation.Query` objects that represent potential
        issues in the settings. After gathering the queries, this method prints a table of query
        "statements" and "questions" to the console. If running in an interactive mode, the user
        then has the opportunity to address the questions posed by the queries by either
        addressing the potential issue or ignoring it.

    Returns
    -------
    bool
        True if the inputs are all good, False otherwise
    """
    runLog.header("=========== Settings Validation Checks ===========")
    with DirectoryChanger(self.cs.inputDirectory, dumpOnException=False):
        operatorClass = operators.getOperatorClassFromSettings(self.cs)
        inspector = operatorClass.inspector(self.cs)
        # keep only the queries that flagged a potential issue (truthy queries)
        inspectorIssues = [query for query in inspector.queries if query]
        # Write out the settings validation issues that will be prompted for resolution if in an
        # interactive session or forced to be resolved otherwise.
        queryData = []
        for i, query in enumerate(inspectorIssues, start=1):
            queryData.append(
                (
                    i,
                    textwrap.fill(query.statement, width=50, break_long_words=False),
                    textwrap.fill(query.question, width=50, break_long_words=False),
                )
            )
        if queryData and context.MPI_RANK == 0:
            # only the primary MPI rank prints the table
            runLog.info(
                tabulate.tabulate(
                    queryData,
                    headers=["Number", "Statement", "Question"],
                    tableFmt="armi",
                )
            )
        if context.CURRENT_MODE == context.Mode.INTERACTIVE:
            # if interactive, ask user to deal with settings issues
            inspector.run()
        return not any(inspectorIssues)
def clone(
    self,
    additionalFiles=None,
    title=None,
    modifiedSettings=None,
    writeStyle="short",
):
    """
    Clone existing ARMI inputs to current directory with optional settings modifications.

    Since each case depends on multiple inputs, this is a safer way to move cases around without
    having to wonder if you copied all the files appropriately.

    Parameters
    ----------
    additionalFiles : list (optional)
        additional file paths to copy to cloned case
    title : str (optional)
        title of new case
    modifiedSettings : dict (optional)
        settings to set/modify before creating the cloned case
    writeStyle : str (optional)
        Writing style for which settings get written back to the settings files
        (short, medium, or full).

    Returns
    -------
    Case
        The new, cloned case.

    Raises
    ------
    RuntimeError
        If the source and destination are the same
    """
    cloneCS = self.cs.duplicate()
    if modifiedSettings is not None:
        cloneCS = cloneCS.modified(newSettings=modifiedSettings)
    clone = Case(cloneCS)
    clone.cs.path = pathTools.armiAbsPath(title or self.title) + ".yaml"
    if pathTools.armiAbsPath(clone.cs.path) == pathTools.armiAbsPath(self.cs.path):
        raise RuntimeError(
            "The source file and destination file are the same: {}\nCannot use armi-clone to "
            "modify armi settings file.".format(pathTools.armiAbsPath(clone.cs.path))
        )
    # copy plugin/interface-owned input files and record the updated settings
    newSettings = copyInterfaceInputs(self.cs, clone.cs.inputDirectory)
    newCs = clone.cs.modified(newSettings=newSettings)
    clone.cs = newCs
    runLog.important(f"writing settings file {clone.cs.path}")
    clone.cs.writeToYamlFile(clone.cs.path, style=writeStyle, fromFile=self.cs.path)
    runLog.important(f"finished writing {clone.cs}")
    # helper to resolve a file name against this (source) case's input dir
    fromPath = lambda f: pathTools.armiAbsPath(self.cs.inputDirectory, f)
    fileName = self.cs[CONF_LOADING_FILE]
    if fileName:
        pathTools.copyOrWarn(
            CONF_LOADING_FILE,
            fromPath(fileName),
            os.path.join(clone.cs.inputDirectory, fileName),
        )
    else:
        runLog.warning(f"skipping {CONF_LOADING_FILE}, there is no file specified")
    # NOTE(review): this open() runs even when CONF_LOADING_FILE is empty (the
    # "skipping" branch above), which would raise — presumably a loading file
    # is always set in practice; confirm, or guard this block on ``fileName``.
    with open(self.cs[CONF_LOADING_FILE], "r") as f:
        # The root for handling YAML includes is relative to the YAML file, not the
        # settings file
        root = pathlib.Path(self.cs.inputDirectory) / pathlib.Path(self.cs[CONF_LOADING_FILE]).parent
        cloneRoot = pathlib.Path(clone.cs.inputDirectory) / pathlib.Path(clone.cs[CONF_LOADING_FILE]).parent
        for includePath, mark in textProcessors.findYamlInclusions(f, root=root):
            if not includePath.is_absolute():
                includeSrc = root / includePath
                includeDest = cloneRoot / includePath
            else:
                # don't bother copying absolute files
                continue
            if not includeSrc.exists():
                raise OSError("The input file file `{}` referenced at {} does not exist.".format(includeSrc, mark))
            pathTools.copyOrWarn(
                "auxiliary input file `{}` referenced at {}".format(includeSrc, mark),
                includeSrc,
                includeDest,
            )
    for fileName in additionalFiles or []:
        pathTools.copyOrWarn("additional file", fromPath(fileName), clone.cs.inputDirectory)
    return clone
def compare(
    self,
    that,
    exclusion: Optional[Sequence[str]] = None,
    tolerance=0.01,
    timestepCompare=None,
) -> int:
    """
    Compare this case's output database against another's; return the number of differences.

    This is useful both for in-use testing and engineering analysis.
    """
    runLog.info("Comparing the following databases:\nREF: {}\nSRC: {}".format(self.dbName, that.dbName))
    diffResults = compareDatabases(
        self.dbName,
        that.dbName,
        tolerance=tolerance,
        exclusions=exclusion,
        timestepCompare=timestepCompare,
    )
    if diffResults is None:
        # comparison could not be completed; report failure with code 1
        runLog.important("Cases are {}.".format("different"))
        return 1
    nDiffs = diffResults.nDiffs()
    runLog.important("Cases are {}.".format("different" if nDiffs > 0 else "the same"))
    return nDiffs
def writeInputs(self, sourceDir: Optional[str] = None, writeStyle: Optional[str] = "short"):
    """
    Write the inputs to disk.

    This allows input objects that have been modified in memory (e.g. for a parameter sweep or
    migration) to be written out as input for a forthcoming case.

    Parameters
    ----------
    sourceDir : str (optional)
        The path to copy inputs from (if different from the cs.path). Needed
        in SuiteBuilder cases to find the baseline inputs from plugins (e.g. shuffleLogic)
    writeStyle : str (optional)
        Writing style for which settings get written back to the settings files
        (short, medium, or full).

    Notes
    -----
    This will rename the ``loadingFile`` to ``title-blueprints + '.yaml'``.

    See Also
    --------
    independentVariables
        parses/reads the independentVariables setting
    clone
        Similar to this but doesn't let you write out new/modified blueprints objects
    """
    with ForcedCreationDirectoryChanger(self.cs.inputDirectory, dumpOnException=False):
        # These seemingly no-ops load the bp via properties if they are not yet initialized.
        self.bp
        newSettings = {}
        newSettings[CONF_LOADING_FILE] = self.title + "-blueprints.yaml"
        if self.independentVariables:
            # persist the sweep variables so the written case can recover them
            newSettings["independentVariables"] = [
                f"({repr(varName)}, {repr(val)})" for varName, val in self.independentVariables.items()
            ]
        with open(newSettings[CONF_LOADING_FILE], "w") as loadingFile:
            blueprints.Blueprints.dump(self.bp, loadingFile)
        # copy input files from other modules/plugins
        interfaceSettings = copyInterfaceInputs(self.cs, ".", sourceDir)
        for settingName, value in interfaceSettings.items():
            newSettings[settingName] = value
        self.cs = self.cs.modified(newSettings=newSettings)
        if sourceDir:
            fromPath = os.path.join(sourceDir, self.title + ".yaml")
        else:
            fromPath = self.cs.path
        self.cs.writeToYamlFile(f"{self.title}.yaml", style=writeStyle, fromFile=fromPath)
def _copyInputsHelper(fileDescription: str, sourcePath: str, destPath: str, origFile: str) -> str:
    """
    Helper for copyInterfaceInputs: copy one input file into the destination directory.

    On success, returns the base name of the copied file (so the settings file
    is robust against future directory-structure changes). If the copy fails or
    raises, returns ``origFile`` so the original settings value is preserved.

    Parameters
    ----------
    fileDescription : str
        A file description for the copyOrWarn method
    sourcePath : str
        The absolute file path of the file to copy
    destPath : str
        The target directory to copy input files to
    origFile : str
        File path as defined in the original settings file

    Returns
    -------
    destFilePath (or origFile) : str
    """
    destFilePath = os.path.join(destPath, pathlib.Path(sourcePath).name)
    try:
        pathTools.copyOrWarn(fileDescription, sourcePath, destFilePath)
        if not pathlib.Path(destFilePath).exists():
            # copy silently failed; keep original filepath in the settings file
            return origFile
        return os.path.basename(destFilePath)
    except Exception:
        # best-effort copy: fall back to the original settings value
        return origFile
def copyInterfaceInputs(cs, destination: str, sourceDir: Optional[str] = None) -> Dict[str, Union[str, list]]:
    """
    Ping active interfaces to determine which files are considered "input". This enables developers
    to add new inputs in a plugin-dependent/ modular way.

    This function should now be able to handle the updating of:

    - a single file (relative or absolute)
    - a list of files (relative or absolute)
    - a file entry that has a wildcard processing into multiple files. Glob is used to offer
      support for wildcards.
    - a directory and its contents

    If the file paths are absolute, do nothing. The case will be able to find the file.

    In case suites or parameter sweeps, these files often have a sourceDir associated with them that
    is different from the cs.inputDirectory. So, if relative or wildcard, update the file paths to
    be absolute in the case settings and copy the file to the destination directory.

    Parameters
    ----------
    cs : Settings
        The source case settings to find input files
    destination : str
        The target directory to copy input files to
    sourceDir : str, optional
        The directory from which to copy files. Defaults to cs.inputDirectory

    Returns
    -------
    dict
        Keys are setting labels; values are either a file path (str), a list of file
        paths, or the original file path if absolute paths could not be resolved.

    Notes
    -----
    Regarding the handling of relative file paths: In the future this could be simplified by adding
    a concept for a suite root directory, below which it is safe to copy files without needing to
    update settings that point with a relative path to files that are below it.
    """
    activeInterfaces = interfaces.getActiveInterfaceInfo(cs)
    sourceDir = sourceDir or cs.inputDirectory
    sourceDirPath = pathlib.Path(sourceDir)
    assert pathlib.Path(destination).is_dir()

    newSettings = {}
    for klass, _ in activeInterfaces:
        interfaceFileNames = klass.specifyInputs(cs)
        for key, files in interfaceFileNames.items():
            if not isinstance(key, settings.Setting):
                try:
                    key = cs.getSetting(key)
                    label = key.name
                    isSetting = True
                except NonexistentSetting:
                    # BUGFIX: this previously read `except NonexistentSetting(key):`, which
                    # evaluates to an exception *instance*. Python only permits exception
                    # classes (or tuples of classes) in an except clause, so a raised
                    # NonexistentSetting produced a TypeError instead of being caught here.
                    runLog.debug(f"{key} is not a valid setting; continuing on anyway.")
                    label = key
                    isSetting = False
            else:
                isSetting = True
                label = key.name

            newFiles = []
            for f in files:
                wildcard = "*" in f
                # beware: pathlib.Path("") returns "." which can be bad news, so we handle empty
                # strings as their own category
                empty = not f
                path = pathlib.Path(f)
                absolute = not empty and path.is_absolute()

                # Attempt to construct an absolute file path
                srcFullPath = os.path.join(sourceDirPath, f)
                destFilePath = None
                if wildcard:
                    # BUGFIX: glob results already include the directory prefix baked into the
                    # pattern (srcFullPath); re-joining sourceDirPath would duplicate the prefix
                    # whenever sourceDir is a relative path (it was a no-op for absolute dirs).
                    globFilePaths = [pathlib.Path(g) for g in glob.glob(srcFullPath)]
                    if not globFilePaths:
                        destFilePath = f
                        newFiles.append(str(destFilePath))
                    else:
                        for gFile in globFilePaths:
                            destFilePath = _copyInputsHelper(label, gFile, destination, f)
                            newFiles.append(str(destFilePath))
                elif empty:
                    pass
                elif absolute:
                    if path.exists():
                        # Path is absolute; no settings modification or file copy needed.
                        # str() keeps the returned values homogeneous per the annotation
                        # (previously a pathlib.Path leaked into the result here).
                        newFiles.append(str(path))
                else:
                    # treat as a relative path
                    destFilePath = _copyInputsHelper(label, srcFullPath, destination, f)
                    newFiles.append(str(destFilePath))

                if destFilePath == f:
                    runLog.debug(
                        f"No input files for `{label}` could be resolved with the following path: "
                        f"`{srcFullPath}`. Will not update `{label}`."
                    )

            # Some settings are a single filename. Others are lists of files. Make
            # sure we are returning what the setting expects
            if isSetting and len(newFiles):
                if len(files) == 1 and not wildcard and key.name in cs and not isinstance(cs[key.name], list):
                    newSettings[label] = newFiles[0]
                else:
                    newSettings[label] = newFiles

    return newSettings
================================================
FILE: armi/cases/inputModifiers/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Code that changes input files and writes them back out. Useful for parameter sweeps.
See Also
--------
armi.reactor.converters
    Code that changes reactor objects at runtime. Those conversions often take longer
    to run than the input modifiers here, but they can be used in the middle of ARMI analyses.
"""
================================================
FILE: armi/cases/inputModifiers/inputModifiers.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modifies inputs."""
class InputModifier:
    """
    Object that modifies input definitions in some well-defined way.

    .. impl:: A generic tool to modify user inputs on multiple cases.
        :id: I_ARMI_CASE_MOD1
        :implements: R_ARMI_CASE_MOD

        This class serves as an abstract base class for modifying the inputs of a case, typically
        case settings. Child classes must implement a ``__call__`` method accepting a
        :py:class:`~armi.settings.caseSettings.Settings` and
        :py:class:`~armi.reactor.blueprints.Blueprints` and return the appropriately modified
        version of these objects. The class attribute ``FAIL_IF_AFTER`` should be a tuple defining
        what, if any, modifications this should fail if performed after. For example, one should not
        adjust the smear density (a function of Cladding ID) before adjusting the Cladding ID. Some
        generic child classes are provided in this module, but it is expected that design-specific
        modifiers are built individually.
    """

    # Tuple of modifier classes that must NOT have run before this one (ordering guard).
    FAIL_IF_AFTER = ()

    def __init__(self, independentVariable=None):
        """
        Constructor.

        Parameters
        ----------
        independentVariable : dict or None, optional
            Name/value pairs to associate with the independent variable being modified by this
            object. Will be analyzed and plotted against other modifiers with the same name.
        """
        # Default to a fresh dict per instance (never a shared mutable default).
        self.independentVariable = {} if independentVariable is None else independentVariable

    def __call__(self, cs, bp):
        """Perform the desired modifications to input objects."""
        raise NotImplementedError
class SamplingInputModifier(InputModifier):
    """
    Input modifier that describes one sampled parameter of a design space.

    (This class is abstract.)

    Subclasses must implement a ``__call__`` method accepting a ``Settings``,
    ``Blueprints``, and ``SystemLayoutInput``.

    Compared to the plain :class:`InputModifier`, this variant adds the structure that
    quasi-random sampling algorithms need: whether the parameter is continuous or
    discrete, and the bounds over which it may be sampled.
    """

    def __init__(self, name: str, paramType: str, bounds: list, independentVariable=None):
        """
        Constructor for the Sampling input modifier.

        Parameters
        ----------
        name : str
            Name of the input modifier.
        paramType : str
            Specify if the parameter is 'continuous' or 'discrete'.
        bounds : list
            If continuous, floating points ``[a, b]`` specifying the inclusive bounds.
            If discrete, a list of potential values ``[a, b, c, ...]``.
        independentVariable : dict or None, optional
            Name/value pairs to associate with the independent variable being modified
            by this object. Will be analyzed and plotted against other modifiers with
            the same name, by default None.
        """
        super().__init__(independentVariable=independentVariable)
        self.name = name
        self.paramType = paramType
        self.bounds = bounds

    def __call__(self, cs, blueprints):
        """Perform the desired modifications to input objects."""
        raise NotImplementedError
class FullCoreModifier(InputModifier):
    """
    Grow the SystemLayoutInput from a symmetric core to a full core.

    Notes
    -----
    Besides the Core, other grids may also be of interest for expansion, like a grid that defines
    fuel management. However, the expansion of a fuel management schedule to full core is less
    trivial than just expanding the core itself. Thus, this modifier currently does not attempt to
    update fuel management grids, but an expanded implementation could do so in the future if
    needed. For now, users must expand fuel management grids to full core themself.
    """

    def __call__(self, cs, bp):
        """Expand the ``core`` grid design in place; settings pass through untouched."""
        bp.gridDesigns["core"].expandToFull()
        return cs, bp
class SettingsModifier(InputModifier):
    """Adjust a single setting to a specified value."""

    def __init__(self, settingName, value):
        """
        Parameters
        ----------
        settingName : str
            Name of the setting to adjust.
        value : object
            Value the setting should take; also recorded as the independent variable.
        """
        super().__init__(independentVariable={settingName: value})
        self.settingName = settingName
        self.value = value

    def __call__(self, cs, bp):
        """Return a modified settings object; blueprints are returned unchanged."""
        return cs.modified(newSettings={self.settingName: self.value}), bp
class MultiSettingModifier(InputModifier):
    """
    Adjust multiple settings to specified values.

    Examples
    --------
    >>> inputModifiers.MultiSettingModifier({CONF_NEUTRONICS_TYPE: "both", CONF_COARSE_MESH_REBALANCE: -1})
    """

    def __init__(self, settingVals: dict):
        """
        Parameters
        ----------
        settingVals : dict
            Mapping of setting names to the values they should take.
        """
        InputModifier.__init__(self, independentVariable=settingVals)
        self.settings = settingVals

    def __call__(self, cs, bp):
        """Apply all name/value pairs to the settings in a single ``modified`` call."""
        # A shallow dict copy replaces the old key-by-key loop; behavior is identical
        # while protecting the caller's dict from downstream mutation.
        cs = cs.modified(newSettings=dict(self.settings))
        return cs, bp
class BluePrintBlockModifier(InputModifier):
    """Adjust blueprint block->component->dimension to specified value."""

    def __init__(self, block, component, dimension, value):
        """
        Parameters
        ----------
        block : str
            Name of the block design to modify.
        component : str
            Name of the component design within that block.
        dimension : str
            Name of the dimension attribute to set.
        value : object
            New value for the dimension; also recorded as the independent variable.
        """
        super().__init__(independentVariable={dimension: value})
        self.block = block
        self.component = component
        self.dimension = dimension
        self.value = value

    def __call__(self, cs, bp):
        """Set the dimension on matching component(s) of the first matching block design."""
        targetBlock = next((bDesign for bDesign in bp.blockDesigns if bDesign.name == self.block), None)
        if targetBlock is not None:
            # update every component in this block whose name matches
            for compDesign in targetBlock:
                if compDesign.name == self.component:
                    setattr(compDesign, self.dimension, self.value)
        return cs, bp
================================================
FILE: armi/cases/inputModifiers/neutronicsModifiers.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Modifies inputs related to neutronics controls.
Notes
-----
This may make more sense in the neutronics physics plugin.
"""
from armi.cases.inputModifiers import inputModifiers
from armi.physics.neutronics.settings import (
CONF_EPS_EIG,
CONF_EPS_FSAVG,
CONF_EPS_FSPOINT,
)
class NeutronicConvergenceModifier(inputModifiers.InputModifier):
    """
    Adjust the neutronics convergence parameters ``CONF_EPS_EIG``, ``CONF_EPS_FSAVG``, and
    ``CONF_EPS_FSPOINT``.

    The supplied value is applied directly to ``CONF_EPS_EIG``, while ``CONF_EPS_FSAVG`` and
    ``CONF_EPS_FSPOINT`` are set to 100 times that value.

    This can be used to perform sensitivity studies on convergence criteria.
    """

    def __init__(self, value):
        """
        Parameters
        ----------
        value : float
            Eigenvalue convergence criterion; must be in ``(0, 1e-2]``.
        """
        super().__init__({self.__class__.__name__: value})
        if value > 1e-2 or value <= 0.0:
            raise ValueError(
                f"Neutronic convergence modifier value must be greater than 0 and less than 1e-2 (got {value})"
            )
        self.value = value

    def __call__(self, cs, bp):
        """Return settings with the three convergence criteria updated; blueprints untouched."""
        eps = self.value
        cs = cs.modified(
            newSettings={
                CONF_EPS_FSAVG: eps * 100,
                CONF_EPS_FSPOINT: eps * 100,
                CONF_EPS_EIG: eps,
            }
        )
        return cs, bp
class NeutronicMeshsSizeModifier(inputModifiers.InputModifier):
    """
    Adjust the neutronics mesh in all assemblies by a multiplication factor.

    This can be useful when switching between nodal and finite difference approximations, or when
    doing mesh convergence sensitivity studies.

    Attributes
    ----------
    multFactor : int
        Factor to multiply the number of axial mesh points per block by.
    """

    def __init__(self, multFactor):
        """
        Parameters
        ----------
        multFactor : int
            Integer factor applied to every assembly's axial mesh point counts.
        """
        super().__init__({self.__class__.__name__: multFactor})
        if not isinstance(multFactor, int):
            raise TypeError("multFactor must be an integer, but got {}".format(multFactor))
        self.multFactor = multFactor

    def __call__(self, cs, bp):
        """Scale ``axialMeshPoints`` on each assembly design; settings pass through untouched."""
        factor = self.multFactor
        for assemDesign in bp.assemDesigns:
            assemDesign.axialMeshPoints = [factor * nPoints for nPoints in assemDesign.axialMeshPoints]
        return cs, bp
================================================
FILE: armi/cases/inputModifiers/pinTypeInputModifiers.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from armi.cases.inputModifiers import inputModifiers
from armi.reactor import flags
from armi.reactor.components import component
from armi.reactor.converters import pinTypeBlockConverters
class _PinTypeAssemblyModifier(inputModifiers.InputModifier):
    """
    Abstract class for modifying something about a pin, within a block.

    This will construct blocks, determine if the block should be modified by checking
    the ``_getBlockTypesToModify``, and then run ``_adjustBlock(b)``. The ``Blueprints``
    are then updated based on the modification assuming that dimension names match
    exactly to ComponentBlueprint attributes (which is true, because ComponentBlueprint
    attributes are programmatically derived from Component constructors).
    """

    def __init__(self, value):
        """
        Parameters
        ----------
        value : object
            Target value handed to the ``_adjustBlock`` hook (e.g. a smear density or
            cladding thickness in concrete subclasses).
        """
        inputModifiers.InputModifier.__init__(self, {self.__class__.__name__: value})
        self.value = value

    def __call__(self, cs, bp):
        """Construct each block design, adjust it, then write changed dimensions back to the blueprints."""
        for bDesign in bp.blockDesigns:
            # bDesign construct requires lots of arguments, many of which have no impact.
            # The following can safely be defaulted to meaningless inputs:
            # axialIndex: a block can be reused at any axial index, modifications made
            #     dependent on will not translate back to the input in a meaningful
            #     fashion
            # axialMeshPoints: similar to above, this is specified by the assembly, and
            #     a block can be within any section of an assembly.
            # height: similar to above. a block can have any height specified by an
            #     assembly. if height-specific modifications are required, then a new
            #     block definition should be created in the input
            # xsType: similar to above. a block can have any xsType specified through
            #     the assembly definition assembly. if xsType-specific modifications are
            #     required, then a new block definition should be created in the input
            # materialInput: this is the materialModifications from the assembly
            #     definition. if material modifications are required on a block-specific
            #     basis, they should be edited directly
            b = bDesign.construct(
                cs,
                bp,
                axialIndex=1,
                axialMeshPoints=1,
                height=1,
                xsType="A",
                materialInput={},
            )
            if not b.hasFlags(self._getBlockTypesToModify()):
                continue
            self._adjustBlock(b)
            # Write any dimension changes made by _adjustBlock back onto the blueprint
            # component designs so the modification survives into the written inputs.
            for cDesign, c in zip(bDesign, b):
                for dimName in c.DIMENSION_NAMES:
                    inpDim = getattr(cDesign, dimName)
                    newDim = getattr(c.p, dimName)
                    if isinstance(newDim, tuple):
                        # map linked component dimension
                        link = component._DimensionLink(newDim)
                        newDim = str(link)
                    if inpDim != newDim:
                        setattr(cDesign, dimName, newDim)
        return cs, bp

    def _getBlockTypesToModify(self):
        """Hook method to determine blocks that should be modified."""
        raise NotImplementedError

    def _adjustBlock(self, b):
        """Hook method for `__call__` template method."""
        raise NotImplementedError
class SmearDensityModifier(_PinTypeAssemblyModifier):
    """
    Adjust the smeared density to the specified value.

    This is effectively how much of the space inside the cladding tube is occupied by
    fuel at fabrication.
    """

    def _getBlockTypesToModify(self):
        """Hook method to determine blocks that should be modified."""
        # Only fuel blocks carry a smear density worth adjusting.
        return flags.Flags.FUEL

    def _adjustBlock(self, b):
        """Hook method for `__call__` template method."""
        pinTypeBlockConverters.adjustSmearDensity(b, self.value)
class CladThicknessByODModifier(_PinTypeAssemblyModifier):
    """Adjust the cladding thickness by adjusting the inner diameter of all cladding components."""

    # Smear density is a function of the cladding ID, so this must not run after
    # SmearDensityModifier (see InputModifier.FAIL_IF_AFTER).
    FAIL_IF_AFTER = (SmearDensityModifier,)

    def _getBlockTypesToModify(self):
        """Hook method to determine blocks that should be modified."""
        # Empty flag spec — presumably matches all blocks; confirm against hasFlags semantics.
        return ""

    def _adjustBlock(self, b):
        """Delegate to the converter that holds OD fixed and moves the cladding ID."""
        pinTypeBlockConverters.adjustCladThicknessByOD(b, self.value)
class CladThicknessByIDModifier(_PinTypeAssemblyModifier):
    """Adjust the cladding thickness by adjusting the outer diameter of the cladding component."""

    # Smear density is a function of the cladding ID, so this must not run after
    # SmearDensityModifier (see InputModifier.FAIL_IF_AFTER).
    FAIL_IF_AFTER = (SmearDensityModifier,)

    def _getBlockTypesToModify(self):
        """Hook method to determine blocks that should be modified."""
        # Empty flag spec — presumably matches all blocks; confirm against hasFlags semantics.
        return ""

    def _adjustBlock(self, b):
        """Delegate to the converter that holds ID fixed and moves the cladding OD."""
        pinTypeBlockConverters.adjustCladThicknessByID(b, self.value)
================================================
FILE: armi/cases/inputModifiers/tests/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
================================================
FILE: armi/cases/inputModifiers/tests/test_inputModifiers.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for input modifiers."""
import os
import unittest
from ruamel import yaml
from armi import cases, settings
from armi.cases import suiteBuilder
from armi.cases.inputModifiers import (
inputModifiers,
neutronicsModifiers,
pinTypeInputModifiers,
)
from armi.physics.neutronics.fissionProductModel.fissionProductModelSettings import (
CONF_FP_MODEL,
)
from armi.physics.neutronics.settings import (
CONF_EPS_EIG,
CONF_EPS_FSAVG,
CONF_EPS_FSPOINT,
)
from armi.reactor import blueprints
from armi.reactor.tests import test_reactors
from armi.utils import directoryChangers
FLAGS_INPUT = """nuclide flags:
U: {burn: false, xs: true}
ZR: {burn: false, xs: true}
MN: {burn: false, xs: true}
FE: {burn: false, xs: true}
SI: {burn: false, xs: true}
C: {burn: false, xs: true}
CR: {burn: false, xs: true}
MO: {burn: false, xs: true}
NI: {burn: false, xs: true}
V: {burn: false, xs: true}
W: {burn: false, xs: true}"""
CLAD = """clad: &fuel_1_clad
Tinput: 350.0
Thot: 350.0
shape: circle
id: 1.0
od: 1.1
material: HT9"""
CLAD_LINKED = """clad: &fuel_1_clad
Tinput: 350.0
Thot: 350.0
shape: circle
id: fuel.od
od: 1.1
material: HT9"""
BLOCKS_INPUT = """blocks:
fuel 1: &fuel_1
fuel: &fuel_1_fuel
Tinput: 350.0
Thot: 350.0
shape: circle
id: 0.0
od: 0.5
material: UZr
{clad}
hex: &fuel_1_hex
Tinput: 350.0
Thot: 350.0
shape: hexagon
ip: 1.0
op: 10.0
material: HT9
fuel 2: *fuel_1
block 3: *fuel_1 # non-fuel blocks
block 4: {{<<: *fuel_1}} # non-fuel blocks
block 5: {{fuel: *fuel_1_fuel, clad: *fuel_1_clad, hex: *fuel_1_hex}} # non-fuel blocks"""
BLOCKS_INPUT_1 = BLOCKS_INPUT.format(clad=CLAD)
BLOCKS_INPUT_2 = BLOCKS_INPUT.format(clad=CLAD_LINKED)
BLUEPRINT_INPUT = f"""
{FLAGS_INPUT}
{BLOCKS_INPUT_1}
assemblies: {{}}
"""
BLUEPRINT_INPUT_LINKS = f"""
{FLAGS_INPUT}
{BLOCKS_INPUT_2}
assemblies: {{}}
"""
CORE_INPUT = """
systems:
core:
grid name: core
origin:
x: 0.0
y: 0.0
z: 0.0
grids:
core:
geom: hex
symmetry: third core periodic
grid contents:
[0, 0]: A1
[1, 0]: A2
[1, 1]: A3
[2, -2]: A4
[2, -1]: A5
[2, 0]: A6
[2, 1]: A7
[2, 2]: A8
"""
class TestsuiteBuilderIntegrations(unittest.TestCase):
    """Integration tests for building and writing case suites from input modifiers."""

    @classmethod
    def setUpClass(cls):
        """Build a base case from the dimension-linked blueprints plus the test core grid."""
        bp = blueprints.Blueprints.load(BLUEPRINT_INPUT_LINKS + CORE_INPUT)
        cs = settings.Settings()
        bp._prepConstruction(cs)
        cls.baseCase = cases.Case(cs=cs, bp=bp)

    def test_smearDensityFail(self):
        """Modifier ordering guard: smear density must not be adjusted before cladding ID."""
        builder = suiteBuilder.FullFactorialSuiteBuilder(self.baseCase)
        builder.addDegreeOfFreedom(pinTypeInputModifiers.SmearDensityModifier(v) for v in (0.5, 0.6))
        builder.addDegreeOfFreedom(pinTypeInputModifiers.CladThicknessByIDModifier(v) for v in (0.05, 0.01))
        self.assertEqual(4, len(builder))

        with self.assertRaisesRegex(RuntimeError, "before .*SmearDensityModifier"):
            builder.buildSuite()

    def test_settingsModifier(self):
        """A separate-effects suite of settings modifiers builds and writes inputs."""
        builder = suiteBuilder.SeparateEffectsSuiteBuilder(self.baseCase)
        builder.addDegreeOfFreedom(
            inputModifiers.SettingsModifier(CONF_FP_MODEL, v) for v in ("noFissionProducts", "infinitelyDilute", "MO99")
        )
        builder.addDegreeOfFreedom(inputModifiers.SettingsModifier("detailedAxialExpansion", v) for v in (True,))
        builder.addDegreeOfFreedom(
            inputModifiers.SettingsModifier("buGroups", v)
            for v in (
                [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 20, 100],
                [3, 5, 7, 9, 10, 20, 100],
                [3, 5, 10, 15, 20, 100],
            )
        )
        builder.addDegreeOfFreedom((inputModifiers.FullCoreModifier(),))

        with directoryChangers.TemporaryDirectoryChanger():
            suite = builder.buildSuite()
            for c in suite:
                c.writeInputs()

            self.assertTrue(os.path.exists("case-suite"))

    def test_bluePrintBlockModifier(self):
        """Test BluePrintBlockModifier with build suite naming function argument."""
        case_nbr = 1
        builder = suiteBuilder.FullFactorialSuiteBuilder(self.baseCase)
        builder.addDegreeOfFreedom(
            [inputModifiers.BluePrintBlockModifier("fuel 1", "clad", "od", float("{:.2f}".format(22 / 7)))]
        )
        builder.addDegreeOfFreedom([inputModifiers.BluePrintBlockModifier("block 5", "clad", "od", 3.14159)])

        def SuiteNaming(index, _case, _mods):
            # zero-padded per-case directory/file naming, offset by case_nbr
            uniquePart = "{:0>4}".format(index + case_nbr)
            return os.path.join(
                ".",
                "case-suite-testBPBM",
                uniquePart,
                self.baseCase.title + "-" + uniquePart,
            )

        with directoryChangers.TemporaryDirectoryChanger():
            suite = builder.buildSuite(namingFunc=SuiteNaming)
            suite.writeInputs()
            self.assertTrue(os.path.exists("case-suite-testBPBM"))

            # BUGFIX: use a context manager so the file handle is released even when an
            # assertion below fails (the old explicit close() was skipped on failure).
            with open(
                f"case-suite-testBPBM/000{case_nbr}/armi-000{case_nbr}-blueprints.yaml",
                "r",
            ) as yamlfile:
                bp_dict = yaml.YAML().load(yamlfile)

            self.assertEqual(bp_dict["blocks"]["fuel 1"]["clad"]["od"], 3.14)
            self.assertEqual(bp_dict["blocks"]["block 5"]["clad"]["od"], 3.14159)
class TestSettingsModifiers(unittest.TestCase):
    """Unit tests for the neutronics convergence settings modifier."""

    def test_NeutronicConvergenceModifier(self):
        """Out-of-range values raise; an in-range value updates all three criteria."""
        cs = settings.Settings()

        for badValue in (0.0, 1e-2 + 1e-15):
            with self.assertRaises(ValueError):
                neutronicsModifiers.NeutronicConvergenceModifier(badValue)

        cs, _ = neutronicsModifiers.NeutronicConvergenceModifier(1e-2)(cs, None)
        self.assertAlmostEqual(cs[CONF_EPS_EIG], 1e-2)
        self.assertAlmostEqual(cs[CONF_EPS_FSAVG], 1.0)
        self.assertAlmostEqual(cs[CONF_EPS_FSPOINT], 1.0)
class NeutronicsKernelOpts(inputModifiers.InputModifier):
    """Test helper modifier that applies a dict of neutronics kernel settings."""

    def __init__(self, neutronicsKernelOpts):
        inputModifiers.InputModifier.__init__(self)
        self.neutronicsKernelOpts = neutronicsKernelOpts

    def __call__(self, cs, bp):
        # Pass the dict by keyword: every other modifier in this module calls
        # cs.modified(newSettings=...); positionally the dict would bind to the
        # first parameter of Settings.modified rather than newSettings.
        cs = cs.modified(newSettings=self.neutronicsKernelOpts)
        return cs, bp
class TestFullCoreModifier(unittest.TestCase):
    """Ensure full core conversion works."""

    def test_fullCoreConversion(self):
        """Convert a third-core case to full core and verify the symmetry flips."""
        cs = settings.Settings(os.path.join(test_reactors.TEST_ROOT, "armiRun.yaml"))
        case = cases.Case(cs=cs)
        mod = inputModifiers.FullCoreModifier()
        self.assertEqual(case.bp.gridDesigns["core"].symmetry, "third periodic")
        # NOTE(review): `case` (not `case.cs`) is passed as the cs argument; this only works
        # because FullCoreModifier.__call__ never touches cs — confirm this is intentional.
        case, case.bp = mod(case, case.bp)
        self.assertEqual(case.bp.gridDesigns["core"].symmetry, "full")

    def test_fullCoreConversionWithOrientation(self):
        """Tests modifying a reactor to full core that includes beginning of life orientations."""
        cs = settings.Settings(os.path.join(test_reactors.TEST_ROOT, "armiRun.yaml"))
        case = cases.Case(cs=cs)
        mod = inputModifiers.FullCoreModifier()
        self.assertEqual(case.bp.gridDesigns["core"].symmetry, "third periodic")
        # Add beginning of life orientations
        case.bp.gridDesigns["core"].orientationBOL = {(2, 1): 30.0}
        # Modify to full core
        # NOTE(review): same `case` vs `case.cs` pattern as the test above.
        case, case.bp = mod(case, case.bp)
        # Check results: the (2, 1) entry is mirrored into the other symmetry sectors
        # with correspondingly rotated orientations.
        self.assertEqual(case.bp.gridDesigns["core"].symmetry, "full")
        self.assertIn((2, 3), case.bp.gridDesigns["core"].orientationBOL)
        self.assertEqual(150.0, case.bp.gridDesigns["core"].orientationBOL[(2, 3)])
        self.assertIn((2, 5), case.bp.gridDesigns["core"].orientationBOL)
        self.assertEqual(270.0, case.bp.gridDesigns["core"].orientationBOL[(2, 5)])
================================================
FILE: armi/cases/inputModifiers/tests/test_pinTypeInputModifiers.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for input modifiers."""
import math
import unittest
from armi import settings
from armi.cases.inputModifiers import pinTypeInputModifiers
from armi.cases.inputModifiers.tests.test_inputModifiers import BLUEPRINT_INPUT
from armi.reactor import blueprints
class TestBlueprintModifiers(unittest.TestCase):
    """Unit tests for the pin-type blueprint modifiers."""

    def setUp(self):
        """Load and construct a fresh set of blueprints for each test."""
        self.bp = blueprints.Blueprints.load(BLUEPRINT_INPUT)
        self.bp._prepConstruction(settings.Settings())

    def test_AdjustSmearDensity(self):
        r"""
        Compute the smear density where clad.id is 1.0.

        .. math::

            areaFuel = smearDensity * innerCladArea
            fuelOD^2 / 4 = 0.5 * cladID^2 / 4
            fuelOD = \sqrt{0.5}

        Notes
        -----
        The area of fuel is 0.5 * inner area of clad.
        """
        bp = self.bp
        self.assertEqual(1.0, bp.blockDesigns["fuel 1"]["clad"].id)
        self.assertEqual(0.5, bp.blockDesigns["fuel 1"]["fuel"].od)
        self.assertEqual(0.5, bp.blockDesigns["fuel 2"]["fuel"].od)
        self.assertEqual(0.5, bp.blockDesigns["block 3"]["fuel"].od)
        self.assertEqual(0.5, bp.blockDesigns["block 4"]["fuel"].od)
        self.assertEqual(0.5, bp.blockDesigns["block 5"]["fuel"].od)
        pinTypeInputModifiers.SmearDensityModifier(0.5)(settings.Settings(), bp)
        self.assertEqual(math.sqrt(0.5), bp.blockDesigns["fuel 1"]["fuel"].od)
        self.assertEqual(math.sqrt(0.5), bp.blockDesigns["fuel 2"]["fuel"].od)
        self.assertEqual(math.sqrt(0.5), bp.blockDesigns["block 3"]["fuel"].od)
        self.assertEqual(math.sqrt(0.5), bp.blockDesigns["block 4"]["fuel"].od)
        self.assertEqual(0.5, bp.blockDesigns["block 5"]["fuel"].od)  # unique instance

    def test_CladThickenessByODModifier(self):
        """
        Adjust the clad thickness by outer diameter.

        .. math::

            cladThickness = (clad.od - clad.id) / 2
            clad.od = 2 * cladThickness + clad.id

        when ``clad.id = 1.0`` and ``cladThickness = 0.12``,

        .. math::

            clad.od = 2 * 0.12 + 1.0
            clad.od = 1.24
        """
        bp = self.bp
        self.assertEqual(1.1, bp.blockDesigns["fuel 1"]["clad"].od)
        self.assertEqual(1.1, bp.blockDesigns["fuel 2"]["clad"].od)
        self.assertEqual(1.1, bp.blockDesigns["block 3"]["clad"].od)
        self.assertEqual(1.1, bp.blockDesigns["block 4"]["clad"].od)
        self.assertEqual(1.1, bp.blockDesigns["block 5"]["clad"].od)
        pinTypeInputModifiers.CladThicknessByODModifier(0.12)(settings.Settings(), bp)
        self.assertEqual(1.24, bp.blockDesigns["fuel 1"]["clad"].od)
        self.assertEqual(1.24, bp.blockDesigns["fuel 2"]["clad"].od)
        self.assertEqual(1.24, bp.blockDesigns["block 3"]["clad"].od)
        self.assertEqual(1.24, bp.blockDesigns["block 4"]["clad"].od)
        self.assertEqual(1.24, bp.blockDesigns["block 5"]["clad"].od)  # modifies all blocks

    def test_CladThickenessByIDModifier(self):
        """
        Adjust the clad thickness by inner diameter.

        .. math::

            cladThickness = (clad.od - clad.id) / 2
            clad.id = clad.od - 2 * cladThickness

        when ``clad.od = 1.1`` and ``cladThickness = 0.025``,

        .. math::

            clad.id = 1.1 - 2 * 0.025
            clad.id = 1.05
        """
        bp = self.bp
        self.assertEqual(1.0, bp.blockDesigns["fuel 1"]["clad"].id)
        self.assertEqual(1.0, bp.blockDesigns["fuel 2"]["clad"].id)
        self.assertEqual(1.0, bp.blockDesigns["block 3"]["clad"].id)
        self.assertEqual(1.0, bp.blockDesigns["block 4"]["clad"].id)
        self.assertEqual(1.0, bp.blockDesigns["block 5"]["clad"].id)
        pinTypeInputModifiers.CladThicknessByIDModifier(0.025)(settings.Settings(), bp)
        self.assertEqual(1.05, bp.blockDesigns["fuel 1"]["clad"].id)
        self.assertEqual(1.05, bp.blockDesigns["fuel 2"]["clad"].id)
        self.assertEqual(1.05, bp.blockDesigns["block 3"]["clad"].id)
        self.assertEqual(1.05, bp.blockDesigns["block 4"]["clad"].id)
        self.assertEqual(1.05, bp.blockDesigns["block 5"]["clad"].id)  # modifies all blocks
================================================
FILE: armi/cases/suite.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
The ``CaseSuite`` object is responsible for running, and executing a set of user inputs. Many
entry points redirect into ``CaseSuite`` methods, such as ``clone``, ``compare``, and ``submit``.
Used in conjunction with the :py:class:`~armi.cases.case.Case` object, ``CaseSuite`` can be used to
collect a series of cases and submit them to a cluster for execution. Furthermore, a ``CaseSuite``
can be used to gather executed cases for post-analysis.
``CaseSuite``\ s should allow ``Cases`` to be added from totally separate directories. This is
useful for plugin-informed testing as well as other things.
See Also
--------
armi.cases.case : An individual item of a case suite.
"""
import os
import traceback
from typing import Optional, Sequence
from armi import runLog, settings
from armi.cases import case as armicase
from armi.utils import directoryChangers, tabulate
class CaseSuite:
    """
    A CaseSuite is a collection of possibly related Case objects.

    .. impl:: CaseSuite allows for one case to start after another completes.
        :id: I_ARMI_CASE_SUITE
        :implements: R_ARMI_CASE_SUITE

        The CaseSuite object allows multiple, often related,
        :py:class:`~armi.cases.case.Case` objects to be run sequentially. A CaseSuite
        is intended to be both a pre-processing or a post-processing tool to facilitate
        case generation and analysis. Under most circumstances one may wish to subclass
        a CaseSuite to meet the needs of a specific calculation. A CaseSuite is a
        collection that is keyed off Case titles.
    """

    def __init__(self, cs):
        """
        Parameters
        ----------
        cs : Settings
            Suite-level settings, used e.g. for environment echoing and as the
            basis for cloned suites.
        """
        self._cases = list()
        self.cs = cs

    def add(self, case):
        """
        Add a Case object to the CaseSuite.

        Case objects within a CaseSuite must be unique (as determined by Case
        equality, which is based on title); a ``ValueError`` is raised when an
        equivalent case is already present.
        """
        existing = next((c for c in self if case == c), None)
        if existing is not None:
            raise ValueError(
                "CaseSuite already contains case with title `{}`\nFirst case: {}\nSecond case: {}".format(
                    case.title, existing, case
                )
            )
        self._cases.append(case)
        # back-reference so a case can find its siblings (e.g. for dependencies)
        case._caseSuite = self

    def remove(self, case):
        """Remove a case from a suite."""
        self._cases.remove(case)
        case._caseSuite = None

    def __iter__(self):
        return iter(self._cases)

    def __len__(self):
        return len(self._cases)

    def discover(
        self,
        rootDir=None,
        patterns=None,
        ignorePatterns=None,
        recursive=True,
        skipInspection=False,
    ):
        """
        Finds case objects by searching for a pattern of file paths, and adds them to
        the suite.

        This searches for Settings input files and loads them to create Case objects.

        Parameters
        ----------
        rootDir : str, optional
            root directory to search for settings files; defaults to the current
            working directory
        patterns : list of str, optional
            file pattern to use to filter file names; defaults to ``["*.yaml"]``
        ignorePatterns : list of str, optional
            file patterns to exclude matching file names
        recursive : bool, optional
            if True, recursively search for settings files
        skipInspection : bool, optional
            if True, skip running the check inputs
        """
        csFiles = settings.recursivelyLoadSettingsFiles(
            rootDir or os.path.abspath(os.getcwd()),
            patterns or ["*.yaml"],
            recursive=recursive,
            ignorePatterns=ignorePatterns,
            handleInvalids=False,
        )
        for cs in csFiles:
            case = armicase.Case(cs=cs, caseSuite=self)
            if not skipInspection:
                case.checkInputs()
            self.add(case)

    def echoConfiguration(self):
        """
        Print information about this suite to the run log.

        Notes
        -----
        Some of these printouts won't make sense for all users, and may make sense to
        be delegated to the plugins/app.
        """
        for setting in self.cs.environmentSettings:
            runLog.important("{}: {}".format(self.cs.getSetting(setting).label, self.cs[setting]))
        runLog.important("Test inputs will be taken from test case results when they have finished")
        # summarize the suite contents: title, enabled flag, and dependency titles
        runLog.important(
            tabulate.tabulate(
                [
                    (
                        c.title,
                        "T" if c.enabled else "F",
                        ",".join(d.title for d in c.dependencies),
                    )
                    for c in self
                ],
                headers=["Title", "Enabled", "Dependencies"],
                tableFmt="armi",
            )
        )

    def clone(self, oldRoot=None, writeStyle="short"):
        """
        Clone a CaseSuite to a new place.

        Creates a clone for each case within a CaseSuite. If ``oldRoot`` is not
        specified, then each case clone is made in a directory with the title of the
        case. If ``oldRoot`` is specified, then a relative path from ``oldRoot`` will
        be used to determine a new relative path to the current directory ``oldRoot``.

        Parameters
        ----------
        oldRoot : str (optional)
            root directory of original case suite used to help filter when a suite
            contains one or more cases with the same case title.
        writeStyle : str (optional)
            Writing style for which settings get written back to the settings files
            (short, medium, or full).

        Returns
        -------
        CaseSuite
            A new suite containing the cloned cases.

        Notes
        -----
        By design, a CaseSuite has no location dependence; this allows any set of cases
        to compose a CaseSuite. The thought is that the post-analysis capabilities
        without restricting a root directory could be beneficial. For example, this
        allows one to perform analysis on cases analyzed by Person A and Person B, even
        if the analyses were performed in completely different locations. As a
        consequence, when you want to clone, we need to infer a "root" of the original
        cases to attempt to mirror whatever existing directory structure there may have
        been.
        """
        clone = CaseSuite(self.cs.duplicate())
        # carry only the user-modified (off-default) settings into the clones
        modifiedSettings = {ss.name: ss.value for ss in self.cs.values() if ss.offDefault}
        for case in self:
            if oldRoot:
                newDir = os.path.dirname(os.path.relpath(case.cs.path, oldRoot))
            else:
                newDir = case.title
            with directoryChangers.ForcedCreationDirectoryChanger(newDir, dumpOnException=False):
                clone.add(case.clone(modifiedSettings=modifiedSettings, writeStyle=writeStyle))
        return clone

    def run(self):
        """
        Run each case, one after the other.

        Warning
        -------
        Suite running may not work yet if the cases have interdependencies. We typically run on a
        HPC but are still working on a platform independent way of handling HPCs.
        """
        for ci, case in enumerate(self):
            runLog.important(f"Running case {ci + 1}/{len(self)}: {case}")
            with directoryChangers.DirectoryChanger(case.directory):
                try:
                    case.run()
                except Exception:
                    # allow all errors and continue to next run
                    runLog.error(f"{case} failed during execution.")
                    traceback.print_exc()

    def compare(
        self,
        that,
        exclusion: Optional[Sequence[str]] = None,
        weights=None,
        tolerance=0.01,
        timestepCompare=None,
    ) -> int:
        """
        Compare one case suite with another.

        Cases are matched between the two suites by title; a case that exists in
        only one suite, or that did not produce a database, counts as one issue.

        Returns
        -------
        int
            The number of problem differences encountered.
        """
        runLog.important("Comparing case suites.")
        nIssues = 0
        refTitles = set(c.title for c in self)
        cmpTitles = set(c.title for c in that)
        suiteHasMissingFiles = False
        tableResults = {}
        for caseTitle in refTitles.union(cmpTitles):
            refCase = next((c for c in self if c.title == caseTitle), None)
            cmpCase = next((c for c in that if c.title == caseTitle), None)
            caseStatus = []
            for case in (refCase, cmpCase):
                status = "Found"
                if case is None or not os.path.exists(case.dbName):
                    status = "Missing"
                caseStatus.append(status)
            refFile, userFile = caseStatus
            if any(stat != "Found" for stat in caseStatus):
                # Case was not run, or failed to produce a database.
                # In either case, this is an issue.
                # It could possibly be a new test, but there is no way to tell this
                # versus a reference file being missing so when a new test is made
                # it will be an issue. After the first push with the new tests the files
                # will be copied over and future tests will be fine.
                caseIssues = 1
                # Fix: this previously assigned False, which made the
                # UNMISSABLE_FAILURE warning below unreachable.
                suiteHasMissingFiles = True
            else:
                caseIssues = refCase.compare(
                    cmpCase,
                    exclusion=exclusion,
                    tolerance=tolerance,
                    timestepCompare=timestepCompare,
                )
            nIssues += caseIssues
            tableResults[caseTitle] = (userFile, refFile, caseIssues)
        self.writeTable(tableResults)
        if suiteHasMissingFiles:
            runLog.warning(UNMISSABLE_FAILURE.format(", ".join(t for t in refTitles - cmpTitles)))
        return nIssues

    def writeInputs(self, writeStyle="short"):
        """
        Write inputs for all cases in the suite.

        Parameters
        ----------
        writeStyle : str (optional)
            Writing style for which settings get written back to the settings files
            (short, medium, or full).

        See Also
        --------
        clone
            Similar to this but doesn't let you write out new geometry or blueprints objects.
        """
        for case in self:
            case.writeInputs(sourceDir=self.cs.inputDirectory, writeStyle=writeStyle)

    @staticmethod
    def writeTable(tableResults):
        """Write a table summarizing the test differences."""
        fmt = "psql"
        print(
            (
                tabulate.tabulate(
                    [["Integration test directory: {}".format(os.getcwd())]],
                    ["SUMMARIZED INTEGRATION TEST DIFFERENCES:"],
                    tableFmt=fmt,
                )
            )
        )
        header = ["Test", "User File", "Reference File", "# Problem Diff Lines"]
        totalDiffs = 0
        data = []
        for testName in sorted(tableResults.keys()):
            userFile, refFile, caseIssues = tableResults[testName]
            data.append((testName, userFile, refFile, caseIssues))
            totalDiffs += caseIssues
        print(tabulate.tabulate(data, header, tableFmt=fmt))
        print(tabulate.tabulate([["Total number of differences: {}".format(totalDiffs)]], tableFmt=fmt))
UNMISSABLE_FAILURE = '''
!! THESE TESTS HAVE UNEXPECTED ABSENT RESULTS !!
uuuuuuu
uu$$$$$$$$$$$uu
uu$$$$$$$$$$$$$$$$$uu
u$$$$$$$$$$$$$$$$$$$$$u
u$$$$$$$$$$$$$$$$$$$$$$$u
u$$$$$$$$$$$$$$$$$$$$$$$$$u
u$$$$$$$$$$$$$$$$$$$$$$$$$u
u$$$$$$" "$$$" "$$$$$$u
"$$$$" u$u $$$$"
$$$u u$u u$$$
$$$u u$$$u u$$$
"$$$$uu$$$ $$$uu$$$$"
"$$$$$$$" "$$$$$$$"
u$$$$$$$u$$$$$$$u
u$"$"$"$"$"$"$u
uuu $$u$ $ $ $ $u$$ uuu
u$$$$ $$$$$u$u$u$$$ u$$$$
$$$$$uu "$$$$$$$$$" uu$$$$$$
u$$$$$$$$$$$uu """"" uuuu$$$$$$$$$$
$$$$"""$$$$$$$$$$uuu uu$$$$$$$$$"""$$$"
""" ""$$$$$$$$$$$uu ""$"""
uuuu ""$$$$$$$$$$uuu
u$$$uuu$$$$$$$$$uu ""$$$$$$$$$$$uuu$$$
$$$$$$$$$$"""" ""$$$$$$$$$$$"
"$$$$$" ""$$$$""
$$$" $$$$"
Comparison suite is missing the following case titles: {}
'''
================================================
FILE: armi/cases/suiteBuilder.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Contains classes that build case suites from perturbing inputs.
The general use case is to create a :py:class:`~SuiteBuilder` with a base
:py:class:`~armi.cases.case.Case`, use :py:meth:`~SuiteBuilder.addDegreeOfFreedom` to
adjust inputs according to the supplied arguments, and finally use ``.buildSuite`` to
generate inputs. The case suite can then be discovered, submitted, and analyzed using
the standard ``CaseSuite`` objects.
This module contains a variety of ``InputModifier`` objects as well, which are examples
of how you can modify inputs for parameter sweeping. Power-users will generally make
their own ``Modifier``\ s that are design-specific.
"""
import copy
import os
import random
from typing import List
from armi.cases import suite
def getInputModifiers(cls):
    """Return every subclass of ``cls``, at any depth, direct subclasses first."""
    direct = cls.__subclasses__()
    nested = []
    for subclass in direct:
        nested.extend(getInputModifiers(subclass))
    return direct + nested
class SuiteBuilder:
    """
    Class for constructing a CaseSuite from combinations of modifications on base inputs.

    .. impl:: A generic tool to modify user inputs on multiple cases.
        :id: I_ARMI_CASE_MOD0
        :implements: R_ARMI_CASE_MOD

        This class provides the capability to create a :py:class:`~armi.cases.suite.CaseSuite` based
        on programmatic perturbations/modifications to case settings. It works by being constructed
        with a base or nominal :py:class:`~armi.cases.case.Case` object. Children classes then
        append the ``self.modifierSets`` member. Each entry in ``self.modifierSets`` is a
        :py:class:`~armi.cases.inputModifiers.inputModifiers.InputModifier` representing a case to
        add to the suite by specifying modifications to the settings of the base case.
        :py:meth:`SuiteBuilder.buildSuite` is then invoked, returning an instance of the
        :py:class:`~armi.cases.suite.CaseSuite` containing all the cases with modified settings.

    Attributes
    ----------
    baseCase : armi.cases.case.Case
        A Case object to perturb
    modifierSets : list(tuple(InputModifier))
        Contains a list of tuples of ``InputModifier`` instances. A single case is constructed by
        running a series (the tuple) of InputModifiers on the case.

    Notes
    -----
    This is public such that someone could pop an item out of the list if it is known to not work,
    or be unnecessary.
    """

    def __init__(self, baseCase):
        self.baseCase = baseCase
        self.modifierSets = []

        from armi.cases.inputModifiers import inputModifiers

        # use an instance variable instead of global lookup. this could allow someone to add their own
        # modifiers, and also prevents it memory usage / discovery from simply loading the module.
        self._modifierLookup = {k.__name__: k for k in getInputModifiers(inputModifiers.InputModifier)}

    def __len__(self):
        return len(self.modifierSets)

    def __repr__(self):
        # Fix: this previously returned "".format(...), i.e. always the empty
        # string; restore the informative template.
        return "<SuiteBuilder len:{} baseCase:{}>".format(len(self), self.baseCase)

    def addDegreeOfFreedom(self, inputModifiers):
        """
        Add a degree of freedom to the SweepBuilder.

        The exact application of this is dependent on a subclass.

        Parameters
        ----------
        inputModifiers : list(callable(Settings, Blueprints, SystemLayoutInput))
            A list of callable objects with the signature
            ``(Settings, Blueprints, SystemLayoutInput)``. When these objects are called they should
            perturb the settings or blueprints by some amount determined by their construction.

        Raises
        ------
        NotImplementedError
            Always; subclasses define how a degree of freedom expands the suite.
        """
        raise NotImplementedError

    def addModifierSet(self, inputModifierSet: List):
        """
        Add a single input modifier set to the suite.

        Used to add modifications that are not necessarily another degree of freedom.
        """
        self.modifierSets.append(inputModifierSet)

    def buildSuite(self, namingFunc=None):
        """
        Builds a ``CaseSuite`` based on the modifierSets contained in the SuiteBuilder.

        For each sequence of modifications, this creates a new ``Case`` from the ``baseCase``, and
        runs the sequence of modifications on the new ``Case``'s inputs. The modified ``Case`` is
        then added to a ``CaseSuite``. The resulting ``CaseSuite`` is returned.

        Parameters
        ----------
        namingFunc : callable(index, case, tuple(InputModifier)), (optional)
            Function used to name each case. It is supplied with the index (int), the case (Case),
            and a tuple of InputModifiers used to edit the case. This should be enough information
            for someone to derive a meaningful name.

            The function should return a string specifying the path of the ``Settings``, this
            allows the user to specify the directories where each case will be run.

            If not supplied the path will be ``./case-suite/<0000>/-<0000>``, where
            ``<0000>`` is the four-digit case index, and ```` is the ``baseCase.title``.

        Raises
        ------
        RuntimeError
            When order of modifications is deemed to be invalid.

        Returns
        -------
        caseSuite : CaseSuite
            Derived from the ``baseCase`` and modifications.
        """
        caseSuite = suite.CaseSuite(self.baseCase.cs)
        if namingFunc is None:

            def namingFunc(index, _case, _mods):
                uniquePart = "{:0>4}".format(index)
                return os.path.join(
                    ".",
                    "case-suite",
                    uniquePart,
                    self.baseCase.title + "-" + uniquePart,
                )

        for index, modList in enumerate(self.modifierSets):
            case = copy.deepcopy(self.baseCase)
            previousMods = []
            # fully construct blueprints so modifiers can act on realized objects
            case.bp._prepConstruction(case.cs)
            for mod in modList:
                # it may seem late to figure this out, but since we are doing it now, someone could
                # filter these conditions out before the buildSuite. optionally, we could have a
                # flag for "skipInvalidModificationCombos=False"
                shouldHaveBeenBefore = [fail for fail in getattr(mod, "FAIL_IF_AFTER", ()) if fail in previousMods]
                if any(shouldHaveBeenBefore):
                    raise RuntimeError(
                        "{} must occur before {}".format(mod, ",".join(repr(m) for m in shouldHaveBeenBefore))
                    )
                previousMods.append(type(mod))
                case.cs, case.bp = mod(case.cs, case.bp)
                case.independentVariables.update(mod.independentVariable)
            case.cs.path = namingFunc(index, case, modList)
            caseSuite.add(case)
        return caseSuite
class FullFactorialSuiteBuilder(SuiteBuilder):
    """Builds a suite that has every combination of each modifier."""

    def __init__(self, baseCase):
        SuiteBuilder.__init__(self, baseCase)
        # Seed with one empty tuple so the cross-product below always has
        # something to extend.
        self.modifierSets.append(())

    def addDegreeOfFreedom(self, inputModifiers):
        """
        Add a degree of freedom to the SuiteBuilder.

        Creates the Cartesian product of the ``inputModifiers`` supplied and those already applied.

        For example::

            class SettingModifier(InputModifier):
                def __init__(self, settingName, value):
                    self.settingName = settingName
                    self.value = value

                def __call__(self, cs, bp):
                    cs = cs.modified(newSettings={self.settingName: self.value})
                    return cs, bp

            builder = FullFactorialSuiteBuilder(someCase)
            builder.addDegreeOfFreedom(SettingModifier("settingName1", value) for value in (1, 2))
            builder.addDegreeOfFreedom(SettingModifier("settingName2", value) for value in (3, 4, 5))

        would result in 6 cases:

        +-------+------------------+------------------+
        | Index | ``settingName1`` | ``settingName2`` |
        +=======+==================+==================+
        | 0     | 1                | 3                |
        +-------+------------------+------------------+
        | 1     | 2                | 3                |
        +-------+------------------+------------------+
        | 2     | 1                | 4                |
        +-------+------------------+------------------+
        | 3     | 2                | 4                |
        +-------+------------------+------------------+
        | 4     | 1                | 5                |
        +-------+------------------+------------------+
        | 5     | 2                | 5                |
        +-------+------------------+------------------+

        See Also
        --------
        SuiteBuilder.addDegreeOfFreedom
        """
        # Cartesian product: append each new modifier onto every previously
        # defined chain, keeping the new modifier as the outer (slowest) axis.
        crossProduct = []
        for addition in inputModifiers:
            for existingChain in self.modifierSets:
                crossProduct.append(existingChain + (addition,))
        self.modifierSets[:] = crossProduct
class FullFactorialSuiteBuilderNoisy(FullFactorialSuiteBuilder):
    """
    Adds a bit of noise to each independent variable to avoid duplicates.

    This can be useful in some statistical postprocessors.

    .. warning:: Use with caution. This is part of ongoing research.
    """

    def __init__(self, baseCase, noiseFraction):
        FullFactorialSuiteBuilder.__init__(self, baseCase)
        self.noiseFraction = noiseFraction

    def addDegreeOfFreedom(self, inputModifiers):
        """Cross-product like the parent, but deep-copy every modifier and jitter its variables."""
        expanded = []
        for freshMod in inputModifiers:
            for priorChain in self.modifierSets:
                noisyChain = copy.deepcopy(priorChain)
                for member in noisyChain:
                    self._perturb(member)
                noisyNew = copy.deepcopy(freshMod)
                self._perturb(noisyNew)
                expanded.append(noisyChain + (noisyNew,))
        self.modifierSets[:] = expanded

    def _perturb(self, mod):
        # Randomly shift each independent variable within +/- noiseFraction of
        # its value (uniform distribution).
        mod.independentVariable = {
            name: value + value * self.noiseFraction * (2 * random.random() - 1)
            for name, value in mod.independentVariable.items()
        }
class SeparateEffectsSuiteBuilder(SuiteBuilder):
    """Varies each degree of freedom in isolation."""

    def addDegreeOfFreedom(self, inputModifiers):
        """
        Add a degree of freedom to the SuiteBuilder.

        Adds a case for each modifier supplied; modifiers from different degrees
        of freedom are never combined.

        For example::

            class SettingModifier(InputModifier):
                def __init__(self, settingName, value):
                    self.settingName = settingName
                    self.value = value

                def __call__(self, cs, bp):
                    cs = cs.modified(newSettings={self.settingName: self.value})
                    return cs, bp

            builder = SeparateEffectsSuiteBuilder(someCase)
            builder.addDegreeOfFreedom(SettingModifier("settingName1", value) for value in (1, 2))
            builder.addDegreeOfFreedom(SettingModifier("settingName2", value) for value in (3, 4, 5))

        would result in 5 cases:

        +-------+------------------+------------------+
        | Index | ``settingName1`` | ``settingName2`` |
        +=======+==================+==================+
        | 0     | 1                | default          |
        +-------+------------------+------------------+
        | 1     | 2                | default          |
        +-------+------------------+------------------+
        | 2     | default          | 3                |
        +-------+------------------+------------------+
        | 3     | default          | 4                |
        +-------+------------------+------------------+
        | 4     | default          | 5                |
        +-------+------------------+------------------+

        See Also
        --------
        SuiteBuilder.addDegreeOfFreedom
        """
        for modifier in inputModifiers:
            self.modifierSets.append((modifier,))
================================================
FILE: armi/cases/tests/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
================================================
FILE: armi/cases/tests/test_cases.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for Case and CaseSuite objects."""
import copy
import cProfile
import logging
import os
import platform
import unittest
import h5py
from armi import cases, context, getApp, interfaces, plugins, runLog, settings
from armi.bookkeeping.db.databaseInterface import DatabaseInterface
from armi.physics.fuelCycle.settings import CONF_SHUFFLE_LOGIC
from armi.reactor import blueprints
from armi.reactor.tests import test_reactors
from armi.testing import TESTING_ROOT
from armi.tests import ARMI_RUN_PATH, TEST_ROOT, mockRunLogs
from armi.utils import directoryChangers
BLUEPRINT_INPUT = """
nuclide flags:
U: {burn: false, xs: true}
ZR: {burn: false, xs: true}
MN: {burn: false, xs: true}
FE: {burn: false, xs: true}
SI: {burn: false, xs: true}
C: {burn: false, xs: true}
CR: {burn: false, xs: true}
MO: {burn: false, xs: true}
NI: {burn: false, xs: true}
blocks:
fuel 1: &fuel_1
fuel: &fuel_1_fuel
Tinput: 350.0
Thot: 350.0
shape: circle
id: 0.0
od: 0.5
material: UZr
clad: &fuel_1_clad
Tinput: 350.0
Thot: 350.0
shape: circle
id: 1.0
od: 1.1
material: SS316
fuel 2: *fuel_1
block 3: *fuel_1 # non-fuel blocks
block 4: {<<: *fuel_1} # non-fuel blocks
block 5: {fuel: *fuel_1_fuel, clad: *fuel_1_clad} # non-fuel blocks
assemblies: {}
systems:
core:
grid name: core
origin:
x: 0.0
y: 0.0
z: 0.0
grids:
core:
geom: hex
symmetry: third core periodic
grid contents:
[0, 0]: A1
[1, 0]: A2
[1, 1]: A3
"""
class TestArmiCase(unittest.TestCase):
    """Class to tests armi.cases.Case methods."""

    def test_independentVariables(self):
        """Ensure that independentVariables added to a case move with it."""
        bp = blueprints.Blueprints.load(BLUEPRINT_INPUT)
        cs = settings.Settings(ARMI_RUN_PATH)
        cs = cs.modified(newSettings={"verbosity": "important"})
        baseCase = cases.Case(cs, bp=bp)
        with directoryChangers.TemporaryDirectoryChanger():
            vals = {"cladThickness": 1, "control strat": "good", "enrich": 0.9}
            case = baseCase.clone()
            case._independentVariables = vals
            case.writeInputs()
            # round-trip: reloading the written settings should recover the variables
            newCs = settings.Settings(fName=case.title + ".yaml")
            newCase = cases.Case(newCs)
            for name, val in vals.items():
                self.assertEqual(newCase.independentVariables[name], val)

    def test_setUpTaskDependence(self):
        """Disabled and dependency-free cases accumulate no tasks or dependencies."""
        case = cases.Case(settings.Settings())
        case.enabled = False
        case.setUpTaskDependence()
        case.enabled = True
        case.setUpTaskDependence()
        self.assertTrue(case.enabled)
        self.assertEqual(len(case._tasks), 0)
        self.assertEqual(len(case.dependencies), 0)

    def test_getCoverageRcFile(self):
        """Coverage config defaults to the project pyproject.toml unless a user file is given."""
        case = cases.Case(settings.Settings())
        covRcDir = os.path.abspath(context.PROJECT_ROOT)
        # Don't actually copy the file, just check the file paths match
        covRcFile = case._getCoverageRcFile(userCovFile="", makeCopy=False)
        self.assertEqual(covRcFile, os.path.join(covRcDir, "pyproject.toml"))
        userFile = "UserCovRc"
        covRcFile = case._getCoverageRcFile(userCovFile=userFile, makeCopy=False)
        self.assertEqual(covRcFile, os.path.abspath(userFile))

    def test_startCoverage(self):
        """With the coverage setting off, no coverage object should be created."""
        with directoryChangers.TemporaryDirectoryChanger():
            cs = settings.Settings(ARMI_RUN_PATH)
            # Test the null case
            cs = cs.modified(newSettings={"coverage": False})
            case = cases.Case(cs)
            cov = case._startCoverage()
            self.assertIsNone(cov)
            # NOTE: We can't test coverage=True, because it breaks coverage on CI

    def test_endCoverage(self):
        """Ending coverage that was never started should produce no output file."""
        with directoryChangers.TemporaryDirectoryChanger():
            cs = settings.Settings(ARMI_RUN_PATH)
            cs = cs.modified(newSettings={"coverage": False})
            case = cases.Case(cs)
            # NOTE: We can't test coverage=True, because it breaks coverage on CI
            outFile = "coverage_results.cov"
            prof = case._startCoverage()
            self.assertFalse(os.path.exists(outFile))
            case._endCoverage(userCovFile="", cov=prof)
            self.assertFalse(os.path.exists(outFile))

    @unittest.skipUnless(context.MPI_RANK == 0, "test only on root node")
    def test_startProfiling(self):
        """Profiling returns None when disabled and a cProfile.Profile when enabled."""
        with directoryChangers.TemporaryDirectoryChanger():
            cs = settings.Settings(ARMI_RUN_PATH)
            # Test the null case
            cs = cs.modified(newSettings={"profile": False})
            case = cases.Case(cs)
            prof = case._startProfiling()
            self.assertIsNone(prof)
            # Test when we start coverage correctly
            cs = cs.modified(newSettings={"profile": True})
            case = cases.Case(cs)
            prof = case._startProfiling()
            self.assertTrue(isinstance(prof, cProfile.Profile))

    @unittest.skipUnless(context.MPI_RANK == 0, "test only on root node")
    def test_endProfiling(self):
        """A started profiler can be stopped without error."""
        with directoryChangers.TemporaryDirectoryChanger():
            cs = settings.Settings(ARMI_RUN_PATH)
            cs = cs.modified(newSettings={"profile": True})
            case = cases.Case(cs)
            # run the profiler
            prof = case._startProfiling()
            case._endProfiling(prof)
            self.assertTrue(isinstance(prof, cProfile.Profile))

    def test_run(self):
        """
        Test running a case.

        .. test:: There is a generic mechanism to allow simulation runs.
            :id: T_ARMI_CASE
            :tests: R_ARMI_CASE

        .. test:: Test case settings object is created, settings can be edited, and case can run.
            :id: T_ARMI_SETTING
            :tests: R_ARMI_SETTING
        """
        with directoryChangers.TemporaryDirectoryChanger():
            cs = settings.Settings(ARMI_RUN_PATH)
            newSettings = {
                "branchVerbosity": "important",
                "coverage": False,
                "nCycles": 2,
                "profile": False,
                "trace": False,
                "verbosity": "important",
            }
            cs = cs.modified(newSettings=newSettings)
            case = cases.Case(cs)
            with mockRunLogs.BufferLog() as mock:
                # start with a clean slate
                self.assertEqual("", mock.getStdout())
                runLog.LOG.startLog("test_run")
                runLog.LOG.setVerbosity(logging.INFO)
                case.run()
                stdOut = mock.getStdout()
                self.assertIn("Triggering BOL Event", stdOut)
                self.assertIn("xsGroups", stdOut)
                self.assertIn("Completed EveryNode - timestep: cycle 0, node 0, year 0.00 Event", stdOut)

    def test_clone(self):
        """Cloning should carry extra files, honor retitling, and respect write styles."""
        testTitle = "CLONE_TEST"
        # test the short write style
        with directoryChangers.TemporaryDirectoryChanger():
            cs = settings.Settings(ARMI_RUN_PATH)
            case = cases.Case(cs)
            shortCase = case.clone(
                additionalFiles=["ISOAA"],
                title=testTitle,
                modifiedSettings={"verbosity": "important"},
            )
            # Check additional files made it
            self.assertTrue(os.path.exists("ISOAA"))
            # Check title change made it
            clonedYaml = testTitle + ".yaml"
            self.assertTrue(os.path.exists(clonedYaml))
            # Fix: was assertTrue(shortCase.title, testTitle), which passes for
            # any truthy title (second arg is just the failure message).
            self.assertEqual(shortCase.title, testTitle)
            # Check on some expected settings
            # Availability factor is in the original settings file but since it is a
            # default value, gets removed for the write-out
            with open(clonedYaml, "r") as f:
                txt = f.read()
            self.assertNotIn("availabilityFactor", txt)
            self.assertIn("verbosity: important", txt)
        # test the medium write style
        with directoryChangers.TemporaryDirectoryChanger():
            cs = settings.Settings(ARMI_RUN_PATH)
            case = cases.Case(cs)
            case.clone(writeStyle="medium")
            clonedYaml = "armiRun.yaml"
            self.assertTrue(os.path.exists(clonedYaml))
            # Availability factor is in the original settings file and it is a default
            # value. While "short" (default writing style) removes, "medium" should not
            with open(clonedYaml, "r") as f:
                txt = f.read()
            self.assertIn("availabilityFactor", txt)
class TestCaseSuiteDependencies(unittest.TestCase):
    """CaseSuite tests."""

    def setUp(self):
        # Build a two-case suite; each case gets its own settings path so that
        # dependency detection between the two can be exercised.
        self.suite = cases.CaseSuite(settings.Settings())
        bp = blueprints.Blueprints.load(BLUEPRINT_INPUT)
        self.c1 = cases.Case(cs=settings.Settings(), bp=bp)
        self.c1.cs.path = "c1.yaml"
        self.suite.add(self.c1)
        self.c2 = cases.Case(cs=settings.Settings(), bp=bp)
        self.c2.cs.path = "c2.yaml"
        self.suite.add(self.c2)

    def test_clone(self):
        """If you pass an invalid path, the clone can't happen, but it won't do any damage either."""
        with self.assertRaises(RuntimeError):
            _clone = self.suite.clone("test_clone")

    def test_checkInputs(self):
        """
        Test the checkInputs() method on a couple of cases.

        .. test:: Check the ARMI inputs for consistency and validity.
            :id: T_ARMI_CASE_CHECK
            :tests: R_ARMI_CASE_CHECK
        """
        self.c1.checkInputs()
        self.c2.checkInputs()

    def test_dependenciesWithObscurePaths(self):
        """Test directory dependence for strangely-written file paths (escape characters)."""
        # Each entry: (c1 settings path, c2 settings path, c2 reload-DB path,
        # expected: is c1 detected as a dependency of c2?)
        checks = [
            ("c1.yaml", "c2.yaml", "c1.h5", True),
            (r"\\case\1\c1.yaml", r"\\case\2\c2.yaml", "c1.h5", False),
            (r"\\case\1\c1.yaml", r"\\case\2\c2.yaml", r"..\1\c1.h5", False),
        ]
        if platform.system() == "Windows":
            # windows-specific case insensitivity
            checks.extend(
                [
                    ("c1.yaml", "c2.yaml", "C1.H5", True),
                    (
                        r"\\cas\es\1\c1.yaml",
                        r"\\cas\es\2\c2.yaml",
                        r"..\..\1\c1.h5",
                        True,
                    ),
                    (
                        r"c1.yaml",
                        r"c2.yaml",
                        r".\c1.h5",
                        True,
                    ),
                    (
                        r"\\cas\es\1\c1.yaml",
                        r"\\cas\es\2\c2.yaml",
                        r"../..\1\c1.h5",
                        True,
                    ),
                    (
                        r"\\cas\es\1\c1.yaml",
                        r"\\cas\es\2\c2.yaml",
                        r"../../1\c1.h5",
                        True,
                    ),
                    (
                        r"\\cas\es\1\c1.yaml",
                        r"\\cas\es\2\c2.yaml",
                        r"..\../1\c1.h5",
                        True,
                    ),
                    (
                        r"\\cas\es\1\c1.yaml",
                        r"\\cas\es\2\c2.yaml",
                        r"\\cas\es\1\c1.h5",
                        True,
                    ),
                    # below False because getcwd() != \\case\es\2
                    (
                        r"..\..\1\c1.yaml",
                        r"\\cas\es\2\c2.yaml",
                        r"\\cas\es\1\c1.h5",
                        False,
                    ),
                    (
                        r"\\cas\es\1\c1.yaml",
                        r"\\cas\es\2\c2.yaml",
                        r"..\..\2\c1.h5",
                        False,
                    ),
                ]
            )
        for p1, p2, dbPath, isIn in checks:
            self.c1.cs.path = p1
            self.c2.cs.path = p2
            newSettings = {}
            newSettings["loadStyle"] = "fromDB"
            newSettings["reloadDBName"] = dbPath
            self.c2.cs = self.c2.cs.modified(newSettings=newSettings)
            # note that case.dependencies is a property and
            # will actually reflect these changes
            self.assertEqual(
                isIn,
                self.c1 in self.c2.dependencies,
                "where p1: {} p2: {} dbPath: {}".format(p1, p2, dbPath),
            )

    def test_dependencyFromDBName(self):
        """A reload DB name only creates a dependency once loadStyle is 'fromDB'."""
        # no effect -> need to specify loadStyle, 'fromDB'
        newSettings = {"reloadDBName": "c1.h5"}
        self.c2.cs = self.c2.cs.modified(newSettings=newSettings)
        self.assertEqual(0, len(self.c2.dependencies))
        newSettings = {"loadStyle": "fromDB"}
        self.c2.cs = self.c2.cs.modified(newSettings=newSettings)
        self.assertIn(self.c1, self.c2.dependencies)
        # the .h5 extension is optional
        newSettings = {"reloadDBName": "c1"}
        self.c2.cs = self.c2.cs.modified(newSettings=newSettings)
        self.assertIn(self.c1, self.c2.dependencies)

    def test_dependencyFromExplictRepeatShuffles(self):
        """Pointing c2's repeat-shuffle file at c1's output makes c1 a dependency."""
        self.assertEqual(0, len(self.c2.dependencies))
        newSettings = {"explicitRepeatShuffles": "c1-SHUFFLES.txt"}
        self.c2.cs = self.c2.cs.modified(newSettings=newSettings)
        self.assertIn(self.c1, self.c2.dependencies)

    def test_explicitDependency(self):
        """
        Test dependencies for case suites.

        .. test:: Dependence allows for one case to start after the completion of another.
            :id: T_ARMI_CASE_SUITE
            :tests: R_ARMI_CASE_SUITE
        """
        self.c1.addExplicitDependency(self.c2)
        self.assertIn(self.c2, self.c1.dependencies)

    def test_titleSetterGetter(self):
        """Case title is derived from the settings path and is writable."""
        self.assertEqual(self.c1.title, "c1")
        self.c1.title = "new_bob"
        self.assertEqual(self.c1.title, "new_bob")
class TestCaseSuiteComparison(unittest.TestCase):
    """CaseSuite.compare() tests."""

    def setUp(self):
        # run every test inside a throwaway working directory
        self.td = directoryChangers.TemporaryDirectoryChanger()
        self.td.__enter__()

    def tearDown(self):
        self.td.__exit__(None, None, None)

    def test_compareNoDiffs(self):
        """As a baseline, this test should always reveal zero diffs."""
        # build two super-simple H5 files for testing
        o, r = test_reactors.loadTestReactor(
            TEST_ROOT,
            customSettings={"reloadDBName": "reloadingDB.h5"},
            inputFileName="smallestTestReactor/armiRunSmallest.yaml",
        )
        suites = []
        for _i in range(2):
            # Build the cases
            suite = cases.CaseSuite(settings.Settings())
            bp = blueprints.Blueprints.load(BLUEPRINT_INPUT)
            c1 = cases.Case(cs=settings.Settings(), bp=bp)
            c1.cs.path = "c1.yaml"
            suite.add(c1)
            c2 = cases.Case(cs=settings.Settings(), bp=bp)
            c2.cs.path = "c2.yaml"
            suite.add(c2)
            suites.append(suite)
        # create two DBs, identical but for file names
        tmpDir = os.getcwd()
        dbs = []
        for i in range(1, 3):
            # create the tests DB
            dbi = DatabaseInterface(r, o.cs)
            dbi.initDB(fName=f"{tmpDir}/c{i}.h5")
            db = dbi.database
            # validate the file exists, and force it to be readable again
            b = h5py.File(db._fullPath, "r")
            self.assertEqual(list(b.keys()), ["inputs"])
            self.assertEqual(sorted(b["inputs"].keys()), ["blueprints", "settings"])
            b.close()
            # append to lists
            dbs.append(db)
        # do a comparison that should have no diffs (both directions)
        diff = c1.compare(c2)
        self.assertEqual(diff, 0)
        diff = suites[0].compare(suites[1])
        self.assertEqual(diff, 0)
        diff = suites[1].compare(suites[0])
        self.assertEqual(diff, 0)
class TestExtraInputWriting(unittest.TestCase):
    """Make sure extra inputs from interfaces are written."""

    def test_writeInput(self):
        """Clone a case into a temp directory and inspect the written settings file.

        Checks that the shuffle-logic file is copied alongside the settings and
        that default-valued settings are dropped by the "short" write style but
        retained by "medium".
        """
        fName = os.path.join(TEST_ROOT, "armiRun.yaml")
        cs = settings.Settings(fName)
        baseCase = cases.Case(cs)

        with directoryChangers.TemporaryDirectoryChanger():
            case = baseCase.clone()
            case.writeInputs()
            self.assertTrue(os.path.exists(cs[CONF_SHUFFLE_LOGIC]))
            # Availability factor is in the original settings file but since it is a default value,
            # gets removed for the write-out.
            # Use a context manager: the previous bare open(...).read() leaked the handle.
            with open("armiRun.yaml", "r") as f:
                txt = f.read()
            self.assertNotIn("availabilityFactor", txt)
            self.assertIn("armiRun-blueprints.yaml", txt)

        with directoryChangers.TemporaryDirectoryChanger():
            case = baseCase.clone(writeStyle="medium")
            case.writeInputs(writeStyle="medium")
            # Availability factor is in the original settings file and it is a default value. While
            # "short" (default writing style) removes, "medium" should not
            with open("armiRun.yaml", "r") as f:
                txt = f.read()
            self.assertIn("availabilityFactor", txt)
class MultiFilesInterfaces(interfaces.Interface):
    """
    A little test interface that adds a setting that we need to test copyInterfaceInputs with
    multiple files.
    """

    name = "MultiFilesInterfaces"

    @staticmethod
    def specifyInputs(cs):
        """Declare the multi-file setting so the case machinery knows to copy its files."""
        return {"multipleFilesSetting": cs["multipleFilesSetting"]}
class TestPluginWithDuplicateSetting(plugins.ArmiPlugin):
    """Plugin that redefines an existing setting name, used to prove collisions are rejected."""

    @staticmethod
    @plugins.HOOKIMPL
    def defineSettings():
        """Define a duplicate setting."""
        duplicatePower = settings.setting.Setting(
            "power",
            default=123,
            label="power",
            description="duplicate power",
        )
        return [duplicatePower]
class TestPluginCopyInterfaceFiles(plugins.ArmiPlugin):
    """Plugin that exposes ``MultiFilesInterfaces`` plus the setting that interface reads."""

    @staticmethod
    @plugins.HOOKIMPL
    def defineSettings():
        """Define settings for the plugin."""
        multiFiles = settings.setting.Setting(
            "multipleFilesSetting",
            default=[],
            label="multiple files",
            description="testing stuff",
        )
        return [multiFiles]

    @staticmethod
    @plugins.HOOKIMPL
    def exposeInterfaces(cs):
        """A plugin is mostly just a vehicle to add Interfaces to an Application."""
        info = interfaces.InterfaceInfo(
            interfaces.STACK_ORDER.PREPROCESSING,
            MultiFilesInterfaces,
            {"enabled": True},
        )
        return [info]
class TestCopyInterfaceInputs(unittest.TestCase):
    """Ensure file path is found and updated properly."""

    def setUp(self):
        """
        Manipulate the standard App. We can't just configure our own, since the
        pytest environment bleeds between tests.
        """
        self._backupApp = copy.deepcopy(getApp())

    def tearDown(self):
        """Restore the App to its original state."""
        import armi

        armi._app = self._backupApp
        context.APP_NAME = "armi"

    def test_copyInputsHelper(self):
        """Test the helper function for copyInterfaceInputs."""
        testSetting = CONF_SHUFFLE_LOGIC
        cs = settings.Settings(ARMI_RUN_PATH)
        shuffleFile = cs[testSetting]

        # test it passes
        sourceFullPath = os.path.join(TEST_ROOT, shuffleFile)
        # ensure we are not in TEST_ROOT
        with directoryChangers.TemporaryDirectoryChanger() as newDir:
            destFilePath = cases.case._copyInputsHelper(
                testSetting,
                sourcePath=sourceFullPath,
                destPath=newDir.destination,
                origFile=shuffleFile,
            )
            newFilePath = os.path.join(newDir.destination, shuffleFile)
            self.assertTrue(os.path.exists(newFilePath))
            # helper returns just the base name of the copied file
            self.assertEqual(destFilePath, os.path.basename(newFilePath))

        # test with bad file path, should return original file ensure we are not in TEST_ROOT
        with directoryChangers.TemporaryDirectoryChanger() as newDir:
            destFilePath = cases.case._copyInputsHelper(
                testSetting,
                sourcePath=sourceFullPath,
                destPath="fakeDest",
                origFile=shuffleFile,
            )
            self.assertFalse(os.path.exists(destFilePath))
            self.assertEqual(destFilePath, shuffleFile)

    def test_copyInterfaceInputsSingleFile(self):
        """A single-file setting is copied and rewritten to its base name."""
        testSetting = CONF_SHUFFLE_LOGIC
        cs = settings.Settings(ARMI_RUN_PATH)
        shuffleFile = cs[testSetting]

        # ensure we are not in TEST_ROOT
        with directoryChangers.TemporaryDirectoryChanger() as newDir:
            newSettings = cases.case.copyInterfaceInputs(cs, destination=newDir.destination)
            newFilePath = os.path.join(newDir.destination, shuffleFile)
            self.assertTrue(os.path.exists(newFilePath))
            self.assertEqual(newSettings[testSetting], os.path.basename(newFilePath))

    def test_copyInterfaceInputsNonFilePath(self):
        """A setting pointing at a non-existent file is passed through unchanged."""
        testSetting = CONF_SHUFFLE_LOGIC
        cs = settings.Settings(ARMI_RUN_PATH)
        fakeShuffle = "fakeFile.py"
        cs = cs.modified(newSettings={testSetting: fakeShuffle})

        # ensure we are not in TEST_ROOT
        with directoryChangers.TemporaryDirectoryChanger() as newDir:
            newSettings = cases.case.copyInterfaceInputs(cs, destination=newDir.destination)
            self.assertFalse(os.path.exists(newSettings[testSetting]))
            self.assertEqual(newSettings[testSetting], fakeShuffle)

    def test_copyInterfaceInputs_emptyFilePath(self):
        """An empty file-path setting is skipped entirely (not present in the result)."""
        testSetting = CONF_SHUFFLE_LOGIC
        cs = settings.Settings(ARMI_RUN_PATH)
        fakeShuffle = ""
        cs = cs.modified(newSettings={testSetting: fakeShuffle})

        # ensure we are not in TEST_ROOT
        with directoryChangers.TemporaryDirectoryChanger() as newDir:
            newSettings = cases.case.copyInterfaceInputs(cs, destination=newDir.destination)
            with self.assertRaises(KeyError):
                # shouldn't process this setting as anything to worry about, so it won't be added to the dict
                _shuffleLogic = newSettings[testSetting]

    def test_failOnDuplicateSetting(self):
        """That that if a plugin attempts to add a duplicate setting, it raises an error."""
        # register the new Plugin
        app = getApp()
        app.pluginManager.register(TestPluginWithDuplicateSetting)

        with self.assertRaises(ValueError):
            _ = settings.Settings(ARMI_RUN_PATH)

    def test_copyInterfaceInputsMultipleFiles(self):
        """A list-valued file setting has each real file copied; values are preserved."""
        # register the new Plugin
        app = getApp()
        app.pluginManager.register(TestPluginCopyInterfaceFiles)

        pluginPath = "armi.cases.tests.test_cases.TestPluginCopyInterfaceFiles"
        settingFiles = [str(os.path.join(TESTING_ROOT, "resources", "COMPXS.ascii")), "ISOAA"]
        testName = "test_copyInterfaceInputs_multipleFiles"
        testSetting = "multipleFilesSetting"

        cs = settings.Settings(ARMI_RUN_PATH)
        # NOTE(review): the plugin path is registered under a setting keyed by the test
        # name itself — presumably intended to exercise plugin loading; confirm the key.
        cs = cs.modified(
            caseTitle=testName,
            newSettings={testName: [pluginPath]},
        )
        cs = cs.modified(newSettings={testSetting: settingFiles})

        # ensure we are not in TEST_ROOT
        with directoryChangers.TemporaryDirectoryChanger() as newDir:
            newSettings = cases.case.copyInterfaceInputs(cs, destination=newDir.destination)
            newFilePaths = [os.path.join(newDir.destination, f) for f in settingFiles]
            for newFilePath in newFilePaths:
                self.assertTrue(os.path.exists(newFilePath))
            self.assertEqual([str(s) for s in newSettings[testSetting]], [str(s) for s in settingFiles])

    def test_copyInterfaceInputsOneFile(self):
        """Same as the multiple-files test, but with a single-element list."""
        # register the new Plugin
        app = getApp()
        app.pluginManager.register(TestPluginCopyInterfaceFiles)

        pluginPath = "armi.cases.tests.test_cases.TestPluginCopyInterfaceFiles"
        settingFiles = [str(os.path.join(TESTING_ROOT, "resources", "COMPXS.ascii"))]
        testName = "test_copyInterfaceInputsOneFile"
        testSetting = "multipleFilesSetting"

        cs = settings.Settings(ARMI_RUN_PATH)
        cs = cs.modified(
            caseTitle=testName,
            newSettings={testName: [pluginPath]},
        )
        cs = cs.modified(newSettings={testSetting: settingFiles})

        # ensure we are not in TEST_ROOT
        with directoryChangers.TemporaryDirectoryChanger() as newDir:
            newSettings = cases.case.copyInterfaceInputs(cs, destination=newDir.destination)
            newFilePaths = [os.path.join(newDir.destination, f) for f in settingFiles]
            for newFilePath in newFilePaths:
                self.assertTrue(os.path.exists(newFilePath))
            self.assertEqual([str(s) for s in newSettings[testSetting]], [str(s) for s in settingFiles])

    def test_copyInterfaceInputsWildcardFile(self):
        """A glob-pattern setting expands to the matching files; no match passes through."""
        testSetting = CONF_SHUFFLE_LOGIC
        cs = settings.Settings(ARMI_RUN_PATH)
        # Use something that isn't the shuffle logic file in the case settings
        wcFile = "ISO*"
        cs = cs.modified(newSettings={testSetting: wcFile})

        # ensure we are not in TEST_ROOT
        with directoryChangers.TemporaryDirectoryChanger() as newDir:
            newSettings = cases.case.copyInterfaceInputs(cs, destination=newDir.destination)
            newFilePath = [os.path.join(newDir.destination, "ISOAA")]
            self.assertTrue(os.path.exists(newFilePath[0]))
            self.assertEqual(newSettings[testSetting], [os.path.basename(newFilePath[0])])

        # Check on a file that doesn't exist (so globFilePaths len is 0)
        wcFile = "fakeFile*"
        cs = cs.modified(newSettings={testSetting: wcFile})
        with directoryChangers.TemporaryDirectoryChanger() as newDir:
            newSettings = cases.case.copyInterfaceInputs(cs, destination=newDir.destination)
            self.assertFalse(os.path.exists(newSettings[testSetting][0]))
            self.assertEqual(newSettings[testSetting], [wcFile])

    def test_copyInterfaceInputsRelPath(self):
        """A relative-path setting is resolved, copied, and rewritten to its base name."""
        testSetting = CONF_SHUFFLE_LOGIC
        cs = settings.Settings(ARMI_RUN_PATH)
        shuffleFile = cs[testSetting]
        relFile = "../tests/" + shuffleFile
        cs = cs.modified(newSettings={testSetting: relFile})

        # ensure we are not in TEST_ROOT
        with directoryChangers.TemporaryDirectoryChanger() as newDir:
            newSettings = cases.case.copyInterfaceInputs(cs, destination=newDir.destination)
            newFilePath = os.path.join(newDir.destination, shuffleFile)
            self.assertTrue(os.path.exists(newFilePath))
            self.assertEqual(newSettings[testSetting], os.path.basename(newFilePath))

    def test_copyInterfaceInputsAbsPath(self):
        """An absolute-path setting is kept as-is and the file is NOT copied."""
        testSetting = CONF_SHUFFLE_LOGIC
        cs = settings.Settings(ARMI_RUN_PATH)
        shuffleFile = cs[testSetting]
        absFile = os.path.dirname(os.path.abspath(ARMI_RUN_PATH))
        absFile = str(os.path.join(absFile, os.path.basename(shuffleFile)))
        cs = cs.modified(newSettings={testSetting: absFile})

        with directoryChangers.TemporaryDirectoryChanger() as newDir:
            newSettings = cases.case.copyInterfaceInputs(cs, destination=newDir.destination)
            # file exists
            self.assertTrue(os.path.exists(newSettings[testSetting]))
            # but not copied to this dir
            self.assertFalse(os.path.exists(os.path.basename(newSettings[testSetting])))
            self.assertEqual(str(newSettings[testSetting]), absFile)
================================================
FILE: armi/cases/tests/test_suiteBuilder.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the SuiteBuilder."""
import os
import unittest
from armi import cases, settings
from armi.cases.inputModifiers.inputModifiers import InputModifier
from armi.cases.suiteBuilder import FullFactorialSuiteBuilder, SeparateEffectsSuiteBuilder
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
AFCI_PATH = os.path.join(THIS_DIR, "..", "..", "testing", "reactors", "anl-afci-177", "anl-afci-177.yaml")
class SettingModifier(InputModifier):
    """Input modifier that overwrites a single named setting with a fixed value."""

    def __init__(self, settingName, value):
        self.settingName = settingName
        self.value = value

    def __call__(self, cs, bp):
        """Return a modified settings object (blueprints pass through untouched)."""
        modifiedCs = cs.modified(newSettings={self.settingName: self.value})
        return modifiedCs, bp
class TestFullFactorialSuiteBuilder(unittest.TestCase):
    """Class to test FullFactorialSuiteBuilder."""

    def test_buildSuite(self):
        """Initialize a full factorial suite of cases.

        .. test:: A generic mechanism to allow users to modify user inputs in cases.
            :id: T_ARMI_CASE_MOD1
            :tests: R_ARMI_CASE_MOD
        """
        cs = settings.Settings(AFCI_PATH)
        case = cases.Case(cs)

        builder = FullFactorialSuiteBuilder(case)
        builder.addDegreeOfFreedom(SettingModifier("settingName1", value) for value in (1, 2))
        builder.addDegreeOfFreedom(SettingModifier("settingName2", value) for value in (3, 4, 5))

        # Cartesian product with the first degree of freedom varying fastest.
        expectedPairs = [(1, 3), (2, 3), (1, 4), (2, 4), (1, 5), (2, 5)]
        for setIndex, (firstValue, secondValue) in enumerate(expectedPairs):
            self.assertEqual(builder.modifierSets[setIndex][0].value, firstValue)
            self.assertEqual(builder.modifierSets[setIndex][1].value, secondValue)
        self.assertEqual(len(builder.modifierSets), len(expectedPairs))
class TestSeparateEffectsBuilder(unittest.TestCase):
    """Class to test separate effects builder."""

    def test_buildSuite(self):
        """Initialize a full factorial suite of cases.

        .. test:: A generic mechanism to allow users to modify user inputs in cases.
            :id: T_ARMI_CASE_MOD2
            :tests: R_ARMI_CASE_MOD
        """
        cs = settings.Settings(AFCI_PATH)
        case = cases.Case(cs)

        builder = SeparateEffectsSuiteBuilder(case)
        builder.addDegreeOfFreedom(SettingModifier("settingName1", value) for value in (1, 2))
        builder.addDegreeOfFreedom(SettingModifier("settingName2", value) for value in (3, 4, 5))

        # One modifier per case: each setting value is perturbed on its own.
        expected = [
            ("settingName1", 1),
            ("settingName1", 2),
            ("settingName2", 3),
            ("settingName2", 4),
            ("settingName2", 5),
        ]
        for setIndex, (name, value) in enumerate(expected):
            self.assertEqual(builder.modifierSets[setIndex][0].value, value)
            self.assertEqual(builder.modifierSets[setIndex][0].settingName, name)
        self.assertEqual(len(builder.modifierSets), len(expected))
================================================
FILE: armi/cli/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This package provides various operations users can ask ARMI to do with their inputs.
An Entry Point might run a simulation, migrate inputs, build a suite of related inputs
and submit them in a parameter sweep, validate inputs, open the GUI, run a test suite,
or other similar things. There are built-in entry points, and additional ones may
be specified by custom plugins.
The full :doc:`docs for entry points are here </developer/entrypoints>`.
See Also
--------
armi.cases : Individual collections of tasks that may run one or more entry points.
These allow one entry point to create a sequence of events that may call one
or more additional entry points. For example, the ``submitSuite`` entry point builds
a case suite with many related cases that will all call the ``run`` entry point from
a HPC cluster.
armi.operators : Operations that ARMI will perform on a reactor model.
These may be created by ``Case`` objects created by certain entry points (e.g. ``run``).
armi : Fundamental entry point that calls this package.
"""
# importing each module causes the any EntryPoints defined in the module that
# are decorated with @armi.command to be added to the collection of registered
# classes
import argparse
import re
import textwrap
from typing import Optional
from armi import context, meta, plugins, runLog
class EntryPointsPlugin(plugins.ArmiPlugin):
    """Plugin that exposes the built-in ARMI command-line entry points."""

    @staticmethod
    @plugins.HOOKIMPL
    def defineEntryPoints():
        """Return the EntryPoint subclasses provided by the core framework."""
        from armi.cli import (
            checkInputs,
            # testing
            cleanTemps,
            clone,
            compareCases,
            gridGui,
            migrateInputs,
            modify,
            reportsEntryPoint,
            run,
            runSuite,
        )

        return [
            checkInputs.CheckInputEntryPoint,
            checkInputs.ExpandBlueprints,
            clone.CloneArmiRunCommandBatch,
            clone.CloneArmiRunCommandInteractive,
            clone.CloneSuiteCommand,
            compareCases.CompareCases,
            compareCases.CompareSuites,
            migrateInputs.MigrateInputs,
            modify.ModifyCaseSettingsCommand,
            run.RunEntryPoint,
            runSuite.RunSuiteCommand,
            gridGui.GridGuiEntryPoint,
            # testing
            cleanTemps.CleanTemps,
            reportsEntryPoint.ReportsEntryPoint,
        ]
class ArmiParser(argparse.ArgumentParser):
    """Subclass of default ArgumentParser to better handle application splash text."""

    def print_help(self, file=None):
        # Show the application's splash text before the standard argparse help.
        splash()
        super().print_help(file)
class ArmiCLI:
    """
    ARMI CLI -- The main entry point into ARMI. There are various commands available. To get help
    for the individual commands, run again with ` --help`. Typically, the CLI implements
    functions that already exist within ARMI.

    .. impl:: The basic ARMI CLI, for running a simulation.
        :id: I_ARMI_CLI_CS
        :implements: R_ARMI_CLI_CS

        Provides a basic command-line interface (CLI) for running an ARMI simulation. Available
        commands can be listed with ``-l``. Information on individual commands can be obtained by
        running with `` --help``.
    """

    def __init__(self):
        from armi import getPluginManager

        # Collect entry points from all plugins, rejecting name collisions so
        # one plugin cannot silently shadow another plugin's command.
        self._entryPoints = dict()
        for pluginEntryPoints in getPluginManager().hook.defineEntryPoints():
            for entryPoint in pluginEntryPoints:
                if entryPoint.name in self._entryPoints:
                    raise KeyError(
                        "Duplicate entry points defined for `{}`: {} and {}".format(
                            entryPoint.name,
                            self._entryPoints[entryPoint.name],
                            entryPoint,
                        )
                    )
                self._entryPoints[entryPoint.name] = entryPoint

        parser = ArmiParser(
            prog=context.APP_NAME,
            # only the prose part of the class docstring; the ".. impl" block is doc metadata
            description=self.__doc__.split(".. impl")[0],
            usage="%(prog)s [-h] [-l | command [args]]",
        )

        group = parser.add_mutually_exclusive_group()
        group.add_argument("-v", "--version", action="store_true", help="display the version")
        group.add_argument("-l", "--list-commands", action="store_true", help="list commands")
        group.add_argument("command", nargs="?", default="help", help=argparse.SUPPRESS)
        parser.add_argument("args", nargs=argparse.REMAINDER, help=argparse.SUPPRESS)
        self.parser = parser

    @staticmethod
    def showVersion():
        """Print the App name and version on the command line."""
        from armi import getApp

        prog = context.APP_NAME
        app = getApp()
        # plain "armi" reports the framework version; registered apps report their own
        if app is None or prog == "armi":
            print("{0} {1}".format(prog, meta.__version__))
        else:
            print("{0} {1}".format(prog, app.version))

    def listCommands(self):
        """List commands with a short description."""
        splash()
        indent = 22
        initial_indent = " "
        subsequent_indent = initial_indent + " " * indent
        wrapper = textwrap.TextWrapper(initial_indent=initial_indent, subsequent_indent=subsequent_indent, width=79)
        sub = re.compile(r"\s+").sub

        def condense(s):
            """Condense all runs of white space in ``s`` into single spaces."""
            return sub(" ", s.strip())

        commands = self._entryPoints.values()
        formatter = "{name:<{width}}{desc}".format
        print("\ncommands:")
        for cmd in sorted(commands, key=lambda cmd: cmd.name):
            # Each command can optionally define a class attribute `description` as
            # documentation. If description is not defined (default=None since it should
            # inherit from EntryPoint), then the docstring is used. If the docstring is
            # also None, then fall back to an empty string.
            # (This was previously a bare triple-quoted string inside the loop — a no-op
            # expression statement evaluated on every iteration.)
            desc = condense(cmd.description or cmd.__doc__ or "")
            print(wrapper.fill(formatter(width=indent, name=cmd.name, desc=desc)))

    def run(self) -> Optional[int]:
        """Parse top-level arguments and dispatch to the requested command.

        Returns
        -------
        int or None
            ``0`` for the informational flags (-v/-l/help); otherwise whatever
            the invoked command returns.
        """
        args = self.parser.parse_args()
        if args.list_commands:
            self.listCommands()
            return 0
        elif args.version:
            ArmiCLI.showVersion()
            return 0
        elif args.command == "help":
            self.parser.print_help()
            return 0
        return self.executeCommand(args.command, args.args)

    def executeCommand(self, command, args) -> Optional[int]:
        """Execute `command` with arguments `args`, return optional exit code."""
        command = command.lower()
        if command not in self._entryPoints:
            print('Unrecognized command "{}". Valid commands are listed below.'.format(command))
            self.listCommands()
            return 1

        commandClass = self._entryPoints[command]
        cmd = commandClass()
        if cmd.splash:
            splash()

        # parse the arguments... command can have their own
        cmd.parse(args)
        if cmd.args.batch:
            context.Mode.setMode(context.Mode.BATCH)
        elif cmd.mode is not None:
            context.Mode.setMode(cmd.mode)

        # do whatever there is to be done!
        return cmd.invoke()
def splash():
    """Write the active App's splash text to the run log on the primary MPI node."""
    from armi import getApp

    activeApp = getApp()
    assert activeApp is not None
    if context.MPI_RANK != 0:
        return
    runLog.raw(activeApp.splashText)
================================================
FILE: armi/cli/checkInputs.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entry point into ARMI to check inputs of a case or a whole folder of cases."""
import pathlib
import sys
from armi import runLog
from armi.cli.entryPoint import EntryPoint
from armi.utils.textProcessors import resolveMarkupInclusions
class ExpandBlueprints(EntryPoint):
    """
    Perform expansion of !include directives in a blueprint file.

    This is useful for testing inputs that make heavy use of !include directives.
    """

    name = "expand-bp"
    splash = False

    def addOptions(self):
        self.parser.add_argument("blueprints", type=str, help="Path to root blueprints file")

    def invoke(self):
        """Expand the blueprints to stdout; return 1 if the input file is missing."""
        blueprintPath = pathlib.Path(self.args.blueprints)
        if not blueprintPath.exists():
            runLog.error("Blueprints file `{}` does not exist".format(str(blueprintPath)))
            return 1
        expanded = resolveMarkupInclusions(blueprintPath)
        sys.stdout.write(expanded.read())
        return None
class CheckInputEntryPoint(EntryPoint):
    """
    Check ARMI inputs for errors, inconsistencies, and the ability to initialize a reactor.

    Also has functionality to generate a summary report of the input design. This can be run on
    multiple cases and creates a table detailing the results of the input check.
    """

    name = "check-input"

    def addOptions(self):
        """Register the discovery and reporting options for the input checker."""
        self.parser.add_argument(
            "--recursive",
            "-r",
            action="store_true",
            default=False,
            help="Recursively check directory structure for valid settings files",
        )
        self.parser.add_argument(
            "--skip-checks",
            "-C",
            action="store_true",
            default=False,
            help="Skip checking inputs (might be useful if you only want to generate a report).",
        )
        self.parser.add_argument(
            "patterns",
            type=str,
            nargs="*",
            default=["*.yaml"],
            help="File names or patterns",
        )

    def invoke(self):
        """Discover cases matching the patterns, check each, and print a summary table."""
        from armi import cases
        from armi.utils import tabulate

        suite = cases.CaseSuite(self.cs)
        suite.discover(patterns=self.args.patterns, recursive=self.args.recursive)

        table = []  # tuples (path, title, canStart, hasIssues)
        for case in suite:
            hasIssues = "UNKNOWN"
            if not self.args.skip_checks:
                hasIssues = "PASSED" if case.checkInputs() else "HAS ISSUES"
            # start-ability is not evaluated here, so it is always reported UNKNOWN
            canStart = "UNKNOWN"
            table.append((case.cs.path, case.title, canStart, hasIssues))

        runLog.important(
            tabulate.tabulate(
                table,
                # BUG FIX: rows have four columns; the header list was missing
                # "title", which shifted every column label in the report.
                headers=["case", "title", "can start", "input is self consistent"],
                tableFmt="armi",
            )
        )
        if any(t[3] == "HAS ISSUES" for t in table):
            runLog.error("The case is not self consistent")
        if any(t[2] == "FAILED" for t in table):
            runLog.error("The case can not start")
================================================
FILE: armi/cli/cleanTemps.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from armi import context
from armi.cli.entryPoint import EntryPoint
class CleanTemps(EntryPoint):
    """
    Delete all temp directories created by any ARMI run.

    Useful for occasionally cleaning temporary dirs from crashed runs.

    .. warning:: This will break any ongoing runs.
    """

    name = "clean-temps"

    def invoke(self):
        """Delegate to the context module's fast-path cleanup routine."""
        context.cleanFastPathAfterSimulation()
================================================
FILE: armi/cli/clone.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from armi.cli.entryPoint import EntryPoint
class CloneArmiRunCommandBatch(EntryPoint):
    """
    Clone existing ARMI settings input, and associated files, to the current
    directory and modify it according to the supplied settings (on the
    command line).
    """

    name = "clone-batch"
    settingsArgument = "required"

    def addOptions(self):
        """Register the clone options, then one hidden option per known setting."""
        parser = self.parser
        parser.add_argument(
            "--additional-files",
            nargs="*",
            default=[],
            help="Additional files from the source directory to copy into the target directory",
        )
        parser.add_argument(
            "--settingsWriteStyle",
            type=str,
            default="short",
            help="Writing style for which settings get written back to the settings files.",
            choices=["short", "medium", "full"],
        )
        # somehow running `armi clone-batch -h` on the command line requires this to
        # not be first?
        for settingName in self.cs.keys():
            self.createOptionFromSetting(settingName, suppressHelp=True)

    def invoke(self):
        # get the case title.
        from armi import cases

        sourceCase = cases.Case(cs=self.cs)
        sourceCase.clone(
            additionalFiles=self.args.additional_files,
            writeStyle=self.args.settingsWriteStyle,
        )
class CloneArmiRunCommandInteractive(CloneArmiRunCommandBatch):
    """
    Interactively clone existing ARMI settings input, and associated files, to the current
    directory and modify it according to the supplied settings (on the command line).
    """

    # Same behavior as clone-batch; only the CLI command name differs.
    name = "clone"
    settingsArgument = "required"
class CloneSuiteCommand(EntryPoint):
    """Clone existing ARMI cases as a new suite."""

    name = "clone-suite"

    def addOptions(self):
        """Register environment-setting options plus suite-discovery arguments."""
        for envSetting in self.cs.environmentSettings:
            self.createOptionFromSetting(envSetting)

        parser = self.parser
        parser.add_argument(
            "--directory",
            "-d",
            type=str,
            default=os.getcwd(),
            help="Root directory to search for cases",
        )
        parser.add_argument(
            "patterns",
            nargs="*",
            type=str,
            default=["*.yaml"],
            help="Pattern to use while searching for ARMI settings files.",
        )
        parser.add_argument(
            "--ignore",
            "-i",
            nargs="+",
            type=str,
            default=[],
            help="Pattern to search for inputs to ignore.",
        )
        parser.add_argument(
            "--list",
            "-l",
            action="store_true",
            default=False,
            help="Just list the settings files found, don't actually submit them.",
        )
        parser.add_argument(
            "--settingsWriteStyle",
            type=str,
            default="short",
            help="Writing style for which settings get written back to the settings files.",
            choices=["short", "medium", "full"],
        )

    def invoke(self):
        """Discover cases under ``--directory`` and clone them into the current directory."""
        from armi import cases

        suite = cases.CaseSuite(self.cs)
        suite.discover(
            patterns=self.args.patterns,
            rootDir=self.args.directory,
            ignorePatterns=self.args.ignore,
        )
        suite.clone(oldRoot=self.args.directory, writeStyle=self.args.settingsWriteStyle)
================================================
FILE: armi/cli/compareCases.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from armi import runLog
from armi.cli.entryPoint import EntryPoint
# Params that are well-known to vary from run to run. In the future we should probably
# derive this from a parameter category so that it is extensible.
# Each entry is a regex matched against the full parameter path in the database.
DEFAULT_EXCLUSIONS = (
    "^.*/minutesSinceStart$",
    "^.*/maxProcessMemoryInMB$",
    "^.*/minProcessMemoryInMB$",
)

# Parameters that under normal circumstances would be the same, but may not be
# faithfully represented by an old database format.
CONVERTED_EXCLUSIONS = DEFAULT_EXCLUSIONS + (
    "^.*/serialNum$",
    "^.*/temperatureInC$",
    "^.*/volume$",
    "^.*/layout/temperatures$",
)
class CompareCases(EntryPoint):
    """Compare the databases from two ARMI cases."""

    name = "compare"

    def _addComparisonOptions(self):
        """Register the options shared by the case and suite comparison commands."""
        parser = self.parser
        parser.add_argument(
            "--tolerance",
            default=0.01,
            action="store",
            type=float,
            help=(
                "If a test database entry differs by more than this percent "
                "from the reference database, then it will be marked "
                "as a difference between the two databases."
            ),
        )
        parser.add_argument(
            "--weights",
            nargs="*",
            action="store",
            help="Period separated key/value pairs for database table weights",
        )
        parser.add_argument(
            "--exclude",
            default=CONVERTED_EXCLUSIONS,
            action="store",
            nargs="+",
            help=("Patterns for parameters to ignore in comparisons"),
        )
        parser.add_argument(
            "--timestepCompare",
            default=None,
            action="store",
            nargs="+",
            help=(
                "List of timesteps to compare. Note that any timestep not listed will "
                "not be compared. Format the cycle and node separated by a period. E.g. "
                "0.0 0.1 1.2 3.3 will compare c0n0, c0n1, c1n2, c3n3 and skip all others"
            ),
        )

    def addOptions(self):
        self._addComparisonOptions()
        parser = self.parser
        parser.add_argument(
            "refDB",
            type=str,
            help="The database to be used as the reference, baseline case.",
        )
        parser.add_argument(
            "cmpDB",
            type=str,
            help="The database to be used as the comparison, evaluated case.",
        )
        parser.add_argument("--output", "-o", type=str, default="", help="Output file name.")

    def parse(self, args):
        """Parse CLI arguments, then coerce raw string options into structured values."""
        EntryPoint.parse(self, args)

        if self.args.timestepCompare:
            # "cycle.node" strings become (cycle, node) int tuples
            self.args.timestepCompare = [
                tuple(int(part) for part in step.split(".")) for step in self.args.timestepCompare
            ]

        if self.args.weights:
            # "table.weight" strings become a {table: weight} mapping
            self.args.weights = dict(pair.split(".") for pair in self.args.weights)

    def invoke(self):
        """Run the database comparison; the number of diffs is the exit code."""
        from armi.bookkeeping.db import compareDatabases

        diffs = compareDatabases(
            self.args.refDB,
            self.args.cmpDB,
            tolerance=self.args.tolerance,
            exclusions=self.args.exclude,
            timestepCompare=self.args.timestepCompare,
        )
        return diffs.nDiffs()
class CompareSuites(CompareCases):
    """Do a case-by-case comparison between two CaseSuites."""

    name = "compare-suites"

    def addOptions(self):
        """Register the shared comparison options plus the suite root directories."""
        self._addComparisonOptions()

        parser = self.parser
        parser.add_argument(
            "reference",
            type=str,
            help="The root directory of the reference, or baseline, suite.",
        )
        parser.add_argument(
            "comparison",
            type=str,
            help="The root directory of the comparison, or evaluated, suite.",
        )
        parser.add_argument(
            "--patterns",
            "-p",
            nargs="*",
            type=str,
            default=["*.yaml"],
            help="Pattern to use while searching for ARMI settings files.",
        )
        parser.add_argument(
            "--additional_comparisons",
            nargs="*",
            type=str,
            default=[],
            help="Pattern tests that were not run but should appear in table.",
        )
        parser.add_argument(
            "--ignore",
            "-i",
            nargs="*",
            type=str,
            default=[],
            help="Pattern to search for inputs to ignore.",
        )
        parser.add_argument(
            "--skip-inspection",
            "-I",
            action="store_true",
            default=False,
            help="Skip inspection. By default, setting files are checked for integrity and consistency. These "
            "checks result in needing to manually resolve a number of differences. Using this option will "
            "suppress the inspection step.",
        )

    def invoke(self):
        """Discover both suites and compare them; exits non-zero on any issue."""
        from armi import cases

        if not os.path.exists(self.args.reference):
            runLog.error("Could not find reference directory {}".format(self.args.reference))
            sys.exit(1)

        if not os.path.exists(self.args.comparison):
            runLog.error("Could not find comparison directory {}".format(self.args.comparison))
            sys.exit(1)

        refSuite = cases.CaseSuite(self.cs)

        # contains all tests that user had access to
        allTests = list(self.args.patterns) + list(self.args.additional_comparisons)
        refSuite.discover(
            rootDir=self.args.reference,
            patterns=allTests,
            ignorePatterns=self.args.ignore,
            skipInspection=self.args.skip_inspection,
        )

        cmpSuite = cases.CaseSuite(self.cs)
        cmpSuite.discover(
            rootDir=self.args.comparison,
            patterns=self.args.patterns,
            ignorePatterns=self.args.ignore,
            skipInspection=self.args.skip_inspection,
        )

        issueCount = refSuite.compare(
            cmpSuite,
            weights=self.args.weights,
            tolerance=self.args.tolerance,
            exclusion=self.args.exclude,
            timestepCompare=self.args.timestepCompare,
        )
        if issueCount > 0:
            sys.exit(1)
================================================
FILE: armi/cli/database.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entry point into ARMI for manipulating output databases."""
import os
import pathlib
from armi import context, runLog
from armi.cli.entryPoint import EntryPoint
from armi.utils.textProcessors import resolveMarkupInclusions
class ExtractInputs(EntryPoint):
    """
    Recover input files from a database file.

    This can come in handy when input files need to be hand-migrated to facilitate loading or
    migration of the database file itself, or when attempting to re-run a slightly-modified version
    of a case.
    """

    name = "extract-inputs"
    mode = context.Mode.BATCH

    def addOptions(self):
        """Define CLI arguments: the source database and an optional output base name."""
        self.parser.add_argument("h5db", help="Path to input database", type=str)
        self.parser.add_argument(
            "--output-base",
            "-o",
            help="Base name for extracted inputs. If not provided, base name is implied from the database name.",
            type=str,
            default=None,
        )

    def parse_args(self, args):
        """Parse arguments, defaulting the output base to the database name without extension."""
        EntryPoint.parse_args(self, args)

        if self.args.output_base is None:
            self.args.output_base = os.path.splitext(self.args.h5db)[0]

    def invoke(self):
        """Read settings/blueprints out of the database and write them to YAML files.

        Aborts without writing anything if either target file already exists.
        """
        from armi.bookkeeping.db.database import Database

        db = Database(self.args.h5db, "r")
        with db:
            settings, bp = db.readInputsFromDB()

        settingsPath = self.args.output_base + "_settings.yaml"
        bpPath = self.args.output_base + "_blueprints.yaml"

        bail = False
        for path in [settingsPath, bpPath]:
            # BUGFIX: this previously tested `settingsPath` on every iteration, so a
            # pre-existing blueprints file would be silently overwritten instead of
            # aborting the extraction.
            if os.path.exists(path):
                runLog.error("`{}` already exists. Aborting.".format(path))
                bail = True

        if bail:
            return

        for path, data, inp in [
            (settingsPath, settings, "settings"),
            (bpPath, bp, "blueprints"),
        ]:
            if path is None:
                continue

            runLog.info("Writing {} to `{}`".format(inp, path))
            # Database contents may come back as bytes; decode before writing text.
            if isinstance(data, bytes):
                data = data.decode()
            with open(path, "w") as f:
                f.write(data)
class InjectInputs(EntryPoint):
    """
    Insert new inputs into a database file, overwriting any existing inputs.

    This is useful for performing hand migrations of inputs to facilitate database migrations.
    """

    name = "inject-inputs"
    mode = context.Mode.BATCH

    def addOptions(self):
        """Define CLI arguments: the target database plus optional blueprints/settings files."""
        self.parser.add_argument("h5db", help="Path to affected database", type=str)
        self.parser.add_argument("--blueprints", help="Path to blueprints file", type=str, default=None)
        self.parser.add_argument("--settings", help="Path to settings file", type=str, default=None)

    def invoke(self):
        """Read the provided input files and store their contents into the database."""
        from armi.bookkeeping.db.database import Database

        bpArg = self.args.blueprints
        settingsArg = self.args.settings
        if bpArg is None and settingsArg is None:
            runLog.error("No settings, blueprints, or geometry files specified; nothing to do.")
            return

        # Resolve any !include markup so the stored text is self-contained.
        bp = resolveMarkupInclusions(pathlib.Path(bpArg)).read() if bpArg is not None else None
        settings = resolveMarkupInclusions(pathlib.Path(settingsArg)).read() if settingsArg is not None else None

        db = Database(self.args.h5db, "a")
        with db:
            # Not calling writeInputsToDb, since it makes too many assumptions about where the
            # inputs are coming from, and which ones we want to write. Instead, we assume that we
            # know where to store them, and do it ourselves.
            for data, key in ((bp, "blueprints"), (settings, "settings")):
                if data is None:
                    continue
                dataSetName = "inputs/" + key
                # Replace any existing dataset of the same name.
                if dataSetName in db.h5db:
                    del db.h5db[dataSetName]
                db.h5db[dataSetName] = data
================================================
FILE: armi/cli/entryPoint.py
================================================
"""
EntryPoint base classes.
See :doc:`/developer/entrypoints`.
"""
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from typing import Optional, Union
from armi import context, runLog, settings
class _EntryPointEnforcer(type):
"""
Simple metaclass used for the EntryPoint abstract base class to enforce class
attributes.
"""
def __new__(mcs, name, bases, attrs):
if "name" not in attrs:
raise AttributeError("Subclasses of EntryPoint must define a `name` class attribute.")
# basic input validation. Will throw a KeyError if argument is incorrect
clsSettings = {"optional": "optional", "required": "required", None: None}[attrs.get("settingsArgument", None)]
attrs["settingsArgument"] = clsSettings
return type.__new__(mcs, name, bases, attrs)
class EntryPoint(metaclass=_EntryPointEnforcer):
    """
    Generic command line entry point.

    A valid subclass must provide at least a ``name`` class attribute, and may also specify the
    other class attributes described below.

    .. impl:: Generic CLI base class for developers to use.
        :id: I_ARMI_CLI_GEN
        :implements: R_ARMI_CLI_GEN

        Provides a base class for plugin developers to use in creating application-specific CLIs.
        Valid subclasses must at least provide a ``name`` class attribute.

        Optional class attributes that a subclass may provide include ``description``, a string
        describing the command's actions, ``splash``, a boolean specifying whether to display a
        splash screen upon execution, and ``settingsArgument``. If ``settingsArgument`` is specified
        as ``required``, then a settings file is a required positional argument. If
        ``settingsArgument`` is set to ``optional``, then a settings file is an optional positional
        argument. If None is specified for the ``settingsArgument``, then no settings file argument
        is added.
    """

    #: The command name that is used to call the entry point from the command line
    name: Optional[str] = None

    description: Optional[str] = None
    """A string summarizing the command's actions. This is the summary that is printed when
    you run `python -m armi --list-commands` or `python -m armi
    --help`. If not provided, the docstring of the decorated class will be used
    instead. In general, the docstring is probably sufficient but this argument allows
    you to provide a short description of the command while retaining a long and
    detailed docstring."""

    settingsArgument: Union[str, None] = None
    """
    One of {'optional', 'required', None}, or unspecified.

    Specifies whether a settings file argument is to be added to the
    command's argument parser. If settingsArgument == 'required', then a settings
    file is a required positional argument. If settingsArgument == 'optional',
    then it is an optional positional argument. Finally, if settingsArgument is
    None, then no settings file argument is added."""

    splash = True
    """
    Whether running the entry point should produce a splash text upon executing.

    Setting this to ``False`` is useful for utility commands that produce standard
    output that would be needlessly cluttered by the splash text.
    """

    #: One of {armi.Mode.BATCH, armi.Mode.INTERACTIVE, armi.Mode.GUI}, optional.
    #: Specifies the ARMI mode in which the command is run. Default is armi.Mode.BATCH.
    mode: Optional[int] = None

    def __init__(self):
        """Build the argument parser and register the base options common to all entry points."""
        if self.name is None:
            raise AttributeError("Subclasses of EntryPoint must define a `name` class attribute")

        # Settings loaded here may subsequently be updated by command-line arguments.
        self.cs = self._initSettings()
        self.parser = argparse.ArgumentParser(
            prog="{} {}".format(context.APP_NAME, self.name),
            description=self.description or self.__doc__,
        )
        if self.settingsArgument is not None:
            if self.settingsArgument not in ["required", "optional"]:
                raise AttributeError(
                    "Subclasses of EntryPoint must specify if the a case settings file is `required` or `optional`"
                )
            if self.settingsArgument == "optional":
                self.parser.add_argument(
                    "settings_file",
                    nargs="?",
                    action=loadSettings(self.cs),
                    help="path to the settings file to load.",
                )
            elif self.settingsArgument == "required":
                self.parser.add_argument(
                    "settings_file",
                    action=loadSettings(self.cs),
                    help="path to the settings file to load.",
                )

        # optional arguments
        self.parser.add_argument(
            "--caseTitle",
            type=str,
            nargs=None,
            action=setCaseTitle(self.cs),
            help="update the case title of the run.",
        )
        self.parser.add_argument(
            "--batch",
            action="store_true",
            default=False,
            help="Run in batch mode even on TTY, silencing all queries.",
        )
        self.createOptionFromSetting("verbosity", "-v")
        self.createOptionFromSetting("branchVerbosity", "-V")

        # Populated by parse(); settingsProvidedOnCommandLine records which settings
        # came from CLI options (see the setSetting/storeBool actions).
        self.args = argparse.Namespace()
        self.settingsProvidedOnCommandLine = []

    @staticmethod
    def _initSettings():
        """
        Initialize settings for this entry point.

        Settings given on command line will update this data structure.

        Override to provide specific settings in the entry point.
        """
        return settings.Settings()

    def addOptions(self):
        """
        Add additional command line options.

        Values of options added to ``self.parser`` will be available
        on ``self.args``. Values added with ``createOptionFromSetting``
        will override the setting values in the settings input file.

        See Also
        --------
        createOptionFromSetting : A method often called from here to create CLI options from
            application settings.

        argparse.ArgumentParser.add_argument : Often called from here using
            ``self.parser.add_argument`` to add custom argparse arguments.
        """

    def parse_args(self, args):
        """Parse ``args`` into ``self.args`` and apply the resulting verbosity setting."""
        self.parser.parse_args(args, namespace=self.args)
        runLog.setVerbosity(self.cs["verbosity"])

    def parse(self, args):
        """Parse the command line arguments, with the command specific arguments."""
        self.addOptions()
        self.parse_args(args)

    def invoke(self) -> Optional[int]:
        """
        Body of the entry point.

        This is an abstract method, and must be overridden in sub-classes.

        Returns
        -------
        exitcode : int or None
            Implementations should return an exit code, or ``None``, which is interpreted the
            same as zero (successful completion).
        """
        raise NotImplementedError("Subclasses of EntryPoint must override the .invoke() method")

    def createOptionFromSetting(self, settingName: str, additionalAlias: str = None, suppressHelp: bool = False):
        """
        Create a CLI option from an ARMI setting.

        This will override whatever is in the settings file.

        Parameters
        ----------
        settingName : str
            the setting name

        additionalAlias : str
            additional alias for the command line option, be careful and make sure they are all distinct!

        suppressHelp : bool
            option to suppress the help message when using the command line :code:`--help` function. This is
            particularly beneficial when many options are being added as they can clutter the :code:`--help` to be
            almost unusable.
        """
        settingsInstance = self.cs.getSetting(settingName)

        if settings.isBoolSetting(settingsInstance):
            # Boolean settings get a --flag / --no-flag toggle pair instead of a valued option.
            helpMessage = argparse.SUPPRESS if suppressHelp else settingsInstance.description
            self._createToggleFromSetting(settingName, helpMessage, additionalAlias)
        else:
            choices = None
            if suppressHelp:
                helpMessage = argparse.SUPPRESS
            else:
                # Escape literal % so argparse's %-formatting of help text doesn't choke.
                helpMessage = settingsInstance.description.replace("%", "%%")

            aliases = ["--" + settingName]
            if additionalAlias is not None:
                aliases.append(additionalAlias)

            isListType = settingsInstance.underlyingType is list

            try:
                self.parser.add_argument(
                    *aliases,
                    type=str,  # types are properly converted by _SetSettingAction
                    nargs="*" if isListType else None,
                    action=setSetting(self),
                    default=settingsInstance.default,
                    choices=choices,
                    help=helpMessage,
                )
            # Capture an argument error here to prevent errors when duplicate options are attempting
            # to be added. This may also be captured by exploring the parser's `_actions` list as well
            # but this avoid accessing a private attribute.
            except argparse.ArgumentError:
                pass

    def _createToggleFromSetting(self, settingName, helpMessage, additionalAlias=None):
        """Register a mutually-exclusive --<name> / --no-<name> pair for a boolean setting."""
        aliases = ["--" + settingName]
        if additionalAlias is not None:
            aliases.append(additionalAlias)

        group = self.parser.add_mutually_exclusive_group()
        group.add_argument(*aliases, action=storeBool(True, self), help=helpMessage)

        # not really sure what to do about the help message here. Don't
        # want to suppress it since it won't show up at all, but can't
        # exactly "negate" the text automatically. Ideas?
        if helpMessage is not argparse.SUPPRESS:
            helpMessage = ""

        group.add_argument(
            "--no-" + settingName,
            action=storeBool(False, self),
            dest=settingName,
            help=helpMessage,
        )
        # ^^ overwrites settingName with False
def storeBool(boolDefault, ep):
    """Build an argparse action class that writes ``boolDefault`` into ``ep``'s settings."""

    class _StoreBoolAction(argparse.Action):
        def __init__(self, option_strings, dest, help=None):
            # Flag-style option: consumes no values, carries the fixed const.
            super().__init__(
                option_strings=option_strings,
                dest=dest,
                nargs=0,
                const=boolDefault,
                default=False,
                required=False,
                help=help,
            )

        def __call__(self, parser, namespace, values, option_string=None):
            dest = self.dest
            ep.cs[dest] = self.const
            ep.settingsProvidedOnCommandLine.append(dest)
            # Settings may no longer be loaded from file once CLI overrides are applied.
            ep.cs.failOnLoad()

    return _StoreBoolAction
def setSetting(ep):
    """Build an argparse action class that stores a CLI-supplied value as a setting on ``ep``."""

    class _SetSettingAction(argparse.Action):
        """This class loads the command line supplied setting values into the
        :py:data:`armi.settings.cs`.
        """

        def __call__(self, parser, namespace, values, option_string=None):
            dest = self.dest
            # Assignment on the settings object performs the type conversion.
            ep.cs[dest] = values
            ep.settingsProvidedOnCommandLine.append(dest)
            ep.cs.failOnLoad()

    return _SetSettingAction
# Q: Why does this require special treatment? Why not treat it like the other
# case settings and use setSetting action?
# A: Because caseTitle is no longer an actual cs setting. It's a instance attr.
def setCaseTitle(cs):
    """Build an argparse action class that assigns the supplied value to ``cs.caseTitle``."""

    class _SetCaseTitleAction(argparse.Action):
        """This class sets the case title to the supplied value of the
        :py:data:`armi.settings.cs`.
        """

        def __call__(self, parser, namespace, value, option_string=None):
            # caseTitle is an instance attribute on the settings object, not a setting.
            cs.caseTitle = value

    return _SetCaseTitleAction
# Careful, this is used by physicalProgramming
def loadSettings(cs):
    """Build an argparse action class that loads a settings file into ``cs`` when provided."""

    class LoadSettingsAction(argparse.Action):
        """This class loads the command line supplied settings file into the
        :py:data:`armi.settings.cs`.
        """

        def __call__(self, parser, namespace, values, option_string=None):
            # A positional argument may legitimately arrive as None (the default);
            # only load when a path was actually given.
            if values is None:
                return
            cs.loadFromInputFile(values)

    return LoadSettingsAction
================================================
FILE: armi/cli/gridGui.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Grid editor GUI entry point.
CLI entry point to spin up the GridEditor GUI.
"""
from armi.cli import entryPoint
class GridGuiEntryPoint(entryPoint.EntryPoint):
    """Load the grid editor GUI."""

    name = "grids"

    def addOptions(self):
        """Register the single optional positional argument: a blueprints file to open."""
        self.parser.add_argument(
            "blueprints",
            nargs="?",
            type=str,
            default=None,
            help="Optional path to a blueprint file to open",
        )

    def invoke(self):
        """Start the wx application, show the grid editor, and enter the GUI event loop.

        Blocks in ``app.MainLoop()`` until the window is closed.
        """
        # Import late since wxpython is kind of big and only needed when actually
        # invoking the entry point
        try:
            import wx
            from armi.utils import gridEditor
        except ImportError:
            raise RuntimeError(
                "wxPython is not installed in this "
                "environment, but is required for the Grid GUI. wxPython is not "
                "installed during the default ARMI installation process. Refer to "
                "installation instructions to install extras like wxPython."
            )

        app = wx.App()
        frame = wx.Frame(None, wx.ID_ANY, title="Grid Editor", size=(1000, 1000))
        gui = gridEditor.GridBlueprintControl(frame)
        frame.Show()
        # Load the blueprints after the frame is shown so the control can render it.
        if self.args.blueprints is not None:
            gui.loadFile(self.args.blueprints)
        app.MainLoop()
================================================
FILE: armi/cli/migrateInputs.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entry point into ARMI to migrate inputs to the latest version of ARMI."""
import os
from armi.cli.entryPoint import EntryPoint
from armi.migration import ACTIVE_MIGRATIONS, base
from armi.utils import directoryChangers
class MigrateInputs(EntryPoint):
    """Migrate ARMI Inputs and/or outputs to Latest ARMI Code Base."""

    name = "migrate-inputs"

    def addOptions(self):
        """Register the optional settings-file and database-file paths to migrate."""
        self.parser.add_argument(
            "--settings-path",
            "--cs",
            help="Migrate a case settings file to be compatible with the latest ARMI code base",
            type=str,
        )
        self.parser.add_argument(
            "--database-path",
            "--db",
            help="Migrate a database file to be compatible with the latest ARMI code base",
            type=str,
        )

    def invoke(self):
        """Run the entry point."""
        settingsPath = self.args.settings_path
        dbPath = self.args.database_path
        if not settingsPath:
            self._migrate(settingsPath, dbPath)
            return

        # Run from the settings file's directory so relative paths inside it resolve.
        workDir, _ = os.path.split(settingsPath)
        with directoryChangers.DirectoryChanger(workDir, dumpOnException=False):
            self._migrate(settingsPath, dbPath)

    @staticmethod
    def _migrate(settingsPath, dbPath):
        """
        Run all migrations.

        Notes
        -----
        Some migrations change the paths so we update them one by one.
        """
        for migration in ACTIVE_MIGRATIONS:
            touchesInputs = issubclass(migration, (base.SettingsMigration, base.BlueprintsMigration))
            if touchesInputs and settingsPath:
                applied = migration(path=settingsPath)
                applied.apply()
                if issubclass(migration, base.SettingsMigration):
                    # don't update on blueprints migration paths, that's not settings!
                    settingsPath = applied.path
            elif issubclass(migration, base.DatabaseMigration) and dbPath:
                applied = migration(path=dbPath)
                applied.apply()
                dbPath = applied.path
================================================
FILE: armi/cli/modify.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Search through a directory tree and modify ARMI settings in existing input
file(s). All valid settings may be used as keyword arguments.
"""
from armi import operators, runLog, settings
from armi.cli.entryPoint import EntryPoint
class ModifyCaseSettingsCommand(EntryPoint):
    """
    Search through a directory tree and modify ARMI settings in existing input file(s).

    All valid settings may be used as keyword arguments.

    Run the entry point like this::

        $ python -m armi modify --nTasks=3 *.yaml
    """

    name = "modify"

    def addOptions(self):
        """Register CLI options; every known ARMI setting also becomes a hidden option."""
        self.parser.add_argument(
            "--list-setting-files",
            "-l",
            action="store_true",
            help=("Just list the settings files found and the proposed changes to make. Don't actually modify them."),
        )
        self.parser.add_argument(
            "--skip-inspection",
            "-I",
            action="store_true",
            default=False,
            help="Skip inspection. By default, setting files are checked for integrity and consistency. These "
            "checks result in needing to manually resolve a number of differences. Using this option will "
            "suppress the inspection step.",
        )
        self.parser.add_argument(
            "--rootDir",
            type=str,
            default=".",
            help="A root directory in which to search for settings files, e.g., armi/tests.",
        )
        self.parser.add_argument(
            "--settingsWriteStyle",
            type=str,
            default="short",
            help="Writing style for which settings get written back to the settings files.",
            choices=["short", "medium", "full"],
        )
        self.parser.add_argument(
            "patterns",
            type=str,
            nargs="*",
            default=["*.yaml"],
            help="Pattern(s) to use to find match file names (e.g. *.yaml)",
        )
        # Expose every setting as a help-suppressed option so any of them can be
        # supplied on the command line; provided values are recorded on self.cs.
        for settingName in self.cs.keys():
            self.createOptionFromSetting(settingName, suppressHelp=True)

    def invoke(self):
        """Find settings files under rootDir, apply command-line overrides, and write them back."""
        csInstances = settings.recursivelyLoadSettingsFiles(self.args.rootDir, self.args.patterns)
        # Verb pair used by the per-file log line and the final summary, respectively.
        messages = ("found", "listing") if self.args.list_setting_files else ("writing", "modifying")
        for cs in csInstances:
            runLog.important("{} settings file {}".format(messages[0], cs.path))
            # Only settings explicitly given on the command line are applied.
            for settingName in self.settingsProvidedOnCommandLine:
                if cs[settingName] != self.cs[settingName]:
                    runLog.info(
                        " changing `{}` from : {}\n {} to -> {}".format(
                            settingName,
                            cs[settingName],
                            " " * (2 + len(settingName)),
                            self.cs[settingName],
                        )
                    )
                    cs[settingName] = self.cs[settingName]
            # if we are only listing setting files, don't write them; it is OK that we modified them in memory
            if not self.args.skip_inspection:
                inspector = operators.getOperatorClassFromSettings(cs).inspector(cs)
                inspector.run()
            if not self.args.list_setting_files:
                cs.writeToYamlFile(cs.path, style=self.args.settingsWriteStyle)

        runLog.important("Finished {} {} settings files.".format(messages[1], len(csInstances)))
================================================
FILE: armi/cli/reportsEntryPoint.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from armi.cli import entryPoint
class ReportsEntryPoint(entryPoint.EntryPoint):
    """
    Placeholder for an ARMI reports entry point.

    Subclass this if you want to parse the ARMI DB or Reactor data model to build your reports.
    """

    name = "report"
    settingsArgument = "optional"

    def __init__(self):
        # Nothing extra to configure; delegate to the base class.
        super().__init__()

    def invoke(self):
        """No-op; concrete report generation is supplied by subclasses."""
        pass
================================================
FILE: armi/cli/run.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run an ARMI case."""
from armi.cli.entryPoint import EntryPoint
class RunEntryPoint(EntryPoint):
    """Run an ARMI case."""

    name = "run"
    settingsArgument = "required"

    def invoke(self):
        """Build a Case from the loaded settings and run it."""
        # Deferred import: the cases package is heavy and not needed at parse time.
        from armi import cases

        case = cases.Case(cs=self.cs)
        case.run()
================================================
FILE: armi/cli/runSuite.py
================================================
# Copyright 2020 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run multiple ARMI cases one after the other on the local machine."""
import os
from armi import cases
from armi.cli.run import RunEntryPoint
from armi.utils import directoryChangers
class RunSuiteCommand(RunEntryPoint):
    """
    Recursively run all the cases in a suite one after the other on the local machine.

    Invoke with ``mpirun`` or ``mpiexec`` to activate parallelism within each individual case.
    """

    name = "run-suite"

    def addOptions(self):
        """Register suite-discovery options on top of the base run options."""
        RunEntryPoint.addOptions(self)
        self.parser.add_argument(
            "patterns",
            nargs="*",
            type=str,
            default=["*.yaml"],
            help="Pattern to use while searching for ARMI settings files.",
        )
        self.parser.add_argument(
            "--ignore",
            "-i",
            nargs="+",
            type=str,
            default=[],
            help="Pattern to search for inputs to ignore.",
        )
        self.parser.add_argument(
            "--list",
            "-l",
            action="store_true",
            default=False,
            help="Just list the settings files found, don't actually run them.",
        )
        self.parser.add_argument(
            "--suiteDir",
            type=str,
            default=os.getcwd(),
            help=("The path containing the case suite to run. Default current working directory."),
        )

    def invoke(self):
        """Discover the suite under ``--suiteDir`` and either list it or run it."""
        with directoryChangers.DirectoryChanger(self.args.suiteDir, dumpOnException=False):
            suite = cases.CaseSuite(self.cs)
            suite.discover(patterns=self.args.patterns, ignorePatterns=self.args.ignore)
            if self.args.list:
                suite.echoConfiguration()
            else:
                suite.run()
================================================
FILE: armi/cli/tests/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
================================================
FILE: armi/cli/tests/test_runEntryPoint.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for run cli entry point."""
import logging
import os
import sys
import unittest
from shutil import copyfile
from armi import runLog
from armi.__main__ import main
from armi.bookkeeping.db.databaseInterface import DatabaseInterface
from armi.bookkeeping.visualization.entryPoint import VisFileEntryPoint
from armi.cli.checkInputs import CheckInputEntryPoint, ExpandBlueprints
from armi.cli.clone import CloneArmiRunCommandBatch, CloneSuiteCommand
from armi.cli.compareCases import CompareCases, CompareSuites
from armi.cli.database import ExtractInputs, InjectInputs
from armi.cli.entryPoint import EntryPoint
from armi.cli.migrateInputs import MigrateInputs
from armi.cli.modify import ModifyCaseSettingsCommand
from armi.cli.reportsEntryPoint import ReportsEntryPoint
from armi.cli.run import RunEntryPoint
from armi.cli.runSuite import RunSuiteCommand
from armi.physics.neutronics.diffIsotxs import CompareIsotxsLibraries
from armi.testing import loadTestReactor
from armi.tests import ARMI_RUN_PATH, TEST_ROOT, mockRunLogs
from armi.utils.directoryChangers import TemporaryDirectoryChanger
from armi.utils.dynamicImporter import getEntireFamilyTree
def buildTestDB(fileName, numNodes=1, numCycles=1):
    """Build a minimal ARMI test database from the smallest test reactor.

    Notes
    -----
    This needs to be run inside a temp directory.

    Parameters
    ----------
    fileName : str
        The file name (not path) we want for the ARMI test DB.
    numNodes : int, optional
        The number of nodes we want in the DB, default 1.
    numCycles : int, optional
        The number of cycles we want in the DB, default 1.

    Returns
    -------
    str
        Database file name.
    """
    o, r = loadTestReactor(
        TEST_ROOT,
        inputFileName="smallestTestReactor/armiRunSmallest.yaml",
    )

    # create the tests DB
    dbi = DatabaseInterface(r, o.cs)
    dbi.initDB(fName=f"{fileName}.h5")
    db = dbi.database

    # write one state per (node, cycle) pair so the DB has something in it
    r.p.cycle = 0
    for nodeNum in range(abs(numNodes)):
        for cycleNum in range(abs(numCycles)):
            r.p.timeNode = nodeNum
            r.p.cycle = cycleNum
            r.p.cycleLength = 100
            db.writeToDB(r)
    db.close()

    return f"{fileName}.h5"
class TestInitializationEntryPoints(unittest.TestCase):
    def test_entryPointInitialization(self):
        """Tests the initialization of all subclasses of `EntryPoint`.

        .. test:: Test initialization of many basic CLIs.
            :id: T_ARMI_CLI_GEN0
            :tests: R_ARMI_CLI_GEN
        """
        entryPoints = getEntireFamilyTree(EntryPoint)

        # Compare against a minimum count, so newly added entry points don't break this test.
        self.assertGreater(len(entryPoints), 15)

        for cls in entryPoints:
            entryPoint = cls()
            entryPoint.addOptions()
            if entryPoint.settingsArgument is None:
                continue

            # Find the settings-file argument the parser should have registered.
            settingsArg = next(
                (action for action in entryPoint.parser._actions if "settings_file" in action.dest),
                None,
            )
            self.assertIsNotNone(
                settingsArg,
                msg=(
                    f"A settings file argument was expected for {entryPoint}, "
                    "but does not exist. This is a error in the EntryPoint "
                    "implementation."
                ),
            )
class TestCheckInputEntryPoint(unittest.TestCase):
    def test_checkInputEntryPointBasics(self):
        """Verify parsing of positional patterns and the -C (skip-checks) flag."""
        ep = CheckInputEntryPoint()
        ep.addOptions()
        ep.parse_args(["/path/to/fake.yaml", "-C"])

        self.assertEqual(ep.name, "check-input")
        self.assertEqual(ep.args.patterns, ["/path/to/fake.yaml"])
        self.assertEqual(ep.args.skip_checks, True)

    def test_checkInputEntryPointInvoke(self):
        """Test the "check inputs" entry point.

        .. test:: A working CLI child class, to validate inputs.
            :id: T_ARMI_CLI_GEN1
            :tests: R_ARMI_CLI_GEN
        """
        ep = CheckInputEntryPoint()
        ep.addOptions()
        ep.parse_args([ARMI_RUN_PATH])

        with mockRunLogs.BufferLog() as mock:
            runLog.LOG.startLog("test_checkInputEntryPointInvoke")
            runLog.LOG.setVerbosity(logging.INFO)
            # The log buffer starts empty; invoke() should fill it.
            self.assertEqual("", mock.getStdout())

            ep.invoke()

            self.assertIn(ARMI_RUN_PATH, mock.getStdout())
            self.assertIn("input is self consistent", mock.getStdout())
class TestCloneArmiRunCommandBatch(unittest.TestCase):
    def test_cloneArmiRunCommandBatchBasics(self):
        """Verify argument parsing for the clone-batch CLI."""
        ca = CloneArmiRunCommandBatch()
        ca.addOptions()
        ca.parse_args(
            [
                ARMI_RUN_PATH,
                "--additional-files",
                "test",
                "--settingsWriteStyle",
                "full",
            ]
        )

        self.assertEqual(ca.name, "clone-batch")
        self.assertEqual(ca.settingsArgument, "required")
        self.assertEqual(ca.args.additional_files, ["test"])
        self.assertEqual(ca.args.settingsWriteStyle, "full")

    def test_cloneArmiRunCommandBatchInvokeShort(self):
        """Invoke the clone-batch CLI with the default (short) write style."""
        ca = CloneArmiRunCommandBatch()
        ca.addOptions()
        ca.parse_args([ARMI_RUN_PATH])

        with TemporaryDirectoryChanger():
            ca.invoke()

            self.assertEqual(ca.settingsArgument, "required")
            self.assertEqual(ca.args.settingsWriteStyle, "short")
            clonedYaml = "armiRun.yaml"
            self.assertTrue(os.path.exists(clonedYaml))
            # use a context manager so the file handle is closed (it was previously leaked)
            with open(clonedYaml, "r") as f:
                txt = f.read()
            # validate a setting that has a default value was removed
            self.assertNotIn("availabilityFactor", txt)

    def test_cloneArmiRunCommandBatchInvokeMedium(self):
        """Test the "clone armi run" batch entry point, on medium detail.

        .. test:: A working CLI child class, to clone a run.
            :id: T_ARMI_CLI_GEN2
            :tests: R_ARMI_CLI_GEN
        """
        # Test medium write style
        ca = CloneArmiRunCommandBatch()
        ca.addOptions()
        ca.parse_args([ARMI_RUN_PATH, "--settingsWriteStyle", "medium"])

        with TemporaryDirectoryChanger():
            ca.invoke()

            self.assertEqual(ca.settingsArgument, "required")
            self.assertEqual(ca.args.settingsWriteStyle, "medium")
            clonedYaml = "armiRun.yaml"
            self.assertTrue(os.path.exists(clonedYaml))
            # use a context manager so the file handle is closed (it was previously leaked)
            with open(clonedYaml, "r") as f:
                txt = f.read()
            # validate a setting that has a default value is still there
            self.assertIn("availabilityFactor", txt)
class TestCloneSuiteCommand(unittest.TestCase):
    def test_cloneSuiteCommandBasics(self):
        """Verify argument parsing for the clone-suite CLI."""
        command = CloneSuiteCommand()
        command.addOptions()
        command.parse_args(["-d", "test", "--settingsWriteStyle", "medium"])

        self.assertEqual(command.args.directory, "test")
        self.assertEqual(command.args.settingsWriteStyle, "medium")
        self.assertEqual(command.name, "clone-suite")
class TestCompareCases(unittest.TestCase):
    def test_compareCasesBasics(self):
        """Verify argument defaults for the compare CLI; invoking on missing files must fail."""
        with TemporaryDirectoryChanger():
            cc = CompareCases()
            cc.addOptions()
            cc.parse_args(["/path/to/fake1.h5", "/path/to/fake2.h5"])

            self.assertEqual(cc.name, "compare")
            self.assertIsNone(cc.args.timestepCompare)
            self.assertIsNone(cc.args.weights)

            with self.assertRaises(ValueError):
                # The "fake" files don't exist, so this should fail.
                cc.invoke()
class TestCompareSuites(unittest.TestCase):
    def test_compareSuitesBasics(self):
        """Verify argument parsing for the compare-suites CLI."""
        with TemporaryDirectoryChanger():
            command = CompareSuites()
            command.addOptions()
            command.parse_args(["/path/to/fake1.h5", "/path/to/fake2.h5", "-I"])

            self.assertEqual(command.name, "compare-suites")
            self.assertTrue(command.args.skip_inspection)
            self.assertEqual(command.args.reference, "/path/to/fake1.h5")
            self.assertIsNone(command.args.weights)
class TestExpandBlueprints(unittest.TestCase):
    def test_expandBlueprintsBasics(self):
        """Verify parsing for the expand-bp CLI, and that a missing file is logged."""
        command = ExpandBlueprints()
        command.addOptions()
        command.parse_args(["/path/to/fake.yaml"])

        self.assertEqual(command.name, "expand-bp")
        self.assertEqual(command.args.blueprints, "/path/to/fake.yaml")

        # Since the file is fake, invoke() should exit early.
        with mockRunLogs.BufferLog() as mock:
            runLog.LOG.startLog("test_expandBlueprintsBasics")
            runLog.LOG.setVerbosity(logging.INFO)
            self.assertEqual("", mock.getStdout())
            command.invoke()
            self.assertIn("does not exist", mock.getStdout())
class TestExtractInputs(unittest.TestCase):
    def test_extractInputsBasics(self):
        """Build a small test database, then exercise the extract-inputs CLI against it."""
        with TemporaryDirectoryChanger() as newDir:
            # build test DB
            o, r = loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml")
            dbi = DatabaseInterface(r, o.cs)
            dbPath = os.path.join(newDir.destination, f"{self._testMethodName}.h5")
            dbi.initDB(fName=dbPath)
            db = dbi.database
            db.writeToDB(r)

            # init the CLI
            ei = ExtractInputs()
            ei.addOptions()
            ei.parse_args([dbPath])

            # test the CLI initialization
            self.assertEqual(ei.name, "extract-inputs")
            # output_base defaults to the DB path with the ".h5" suffix stripped
            self.assertEqual(ei.args.output_base, dbPath[:-3])

            # run the CLI on a test DB, verify it worked via logging
            with mockRunLogs.BufferLog() as mock:
                runLog.LOG.startLog("test_extractInputsBasics")
                runLog.LOG.setVerbosity(logging.INFO)
                self.assertEqual("", mock.getStdout())
                ei.invoke()
                self.assertIn("Writing settings to", mock.getStdout())
                self.assertIn("Writing blueprints to", mock.getStdout())

            # close the DB before the temporary directory is removed
            db.close()
class TestInjectInputs(unittest.TestCase):
    def test_injectInputsBasics(self):
        """Verify argument parsing for the inject-inputs CLI."""
        command = InjectInputs()
        command.addOptions()
        command.parse_args(["/path/to/fake.h5"])

        self.assertIsNone(command.args.blueprints)
        self.assertEqual(command.name, "inject-inputs")

    def test_injectInputsInvokeIgnore(self):
        """Invoking with nothing to inject should only log a message."""
        command = InjectInputs()
        command.addOptions()
        command.parse_args(["/path/to/fake.h5"])

        with mockRunLogs.BufferLog() as mock:
            runLog.LOG.startLog("test_injectInputsInvokeIgnore")
            runLog.LOG.setVerbosity(logging.INFO)
            self.assertEqual("", mock.getStdout())
            command.invoke()
            self.assertIn("No settings", mock.getStdout())

    def test_injectInputsInvokeNoData(self):
        """Invoking against a nonexistent database should raise."""
        with TemporaryDirectoryChanger():
            # init CLI
            command = InjectInputs()
            command.addOptions()
            blueprints = os.path.join(TEST_ROOT, "refSmallReactor.yaml")
            command.parse_args(["/path/to/fake.h5", "--blueprints", blueprints])

            # The "fake.h5" doesn't exist, so this should fail.
            with self.assertRaises(FileNotFoundError):
                command.invoke()
class TestMigrateInputs(unittest.TestCase):
    def test_migrateInputsBasics(self):
        """Verify argument parsing for the migrate-inputs CLI."""
        command = MigrateInputs()
        command.addOptions()
        command.parse_args(["--settings-path", "cs_path"])

        self.assertEqual(command.args.settings_path, "cs_path")
        self.assertEqual(command.name, "migrate-inputs")
class TestModifyCaseSettingsCommand(unittest.TestCase):
    def test_modifyCaseSettingsCommandBasics(self):
        """Verify argument parsing for the modify CLI."""
        mcs = ModifyCaseSettingsCommand()
        mcs.addOptions()
        mcs.parse_args(["--rootDir", "/path/to/", "--settingsWriteStyle", "medium", "fake.yaml"])

        self.assertEqual(mcs.name, "modify")
        self.assertEqual(mcs.args.rootDir, "/path/to/")
        self.assertEqual(mcs.args.settingsWriteStyle, "medium")
        self.assertEqual(mcs.args.patterns, ["fake.yaml"])

    def test_modifyCaseSettingsCommandInvoke(self):
        """Invoke the modify CLI and verify the requested setting change lands in the file."""
        mcs = ModifyCaseSettingsCommand()
        mcs.addOptions()

        with TemporaryDirectoryChanger():
            # copy over settings files
            for fileName in [
                "armiRun.yaml",
                "refSmallReactor.yaml",
                "refSmallReactorShuffleLogic.py",
            ]:
                copyfile(os.path.join(TEST_ROOT, fileName), fileName)

            # pass in --nTasks=333
            mcs.parse_args(["--nTasks=333", "--rootDir", ".", "armiRun.yaml"])

            # invoke the CLI
            mcs.invoke()

            # validate the change to nTasks was made
            # (use a context manager so the file handle is closed; it was previously leaked)
            with open("armiRun.yaml", "r") as f:
                txt = f.read()
            self.assertIn("nTasks: 333", txt)
class MockFakeReportsEntryPoint(ReportsEntryPoint):
    """Minimal ReportsEntryPoint subclass used to exercise base-class construction."""

    # entry point name used for CLI registration
    name = "MockFakeReport"

    def invoke(self):
        # return a sentinel instead of generating a real report
        return "mock fake"
class TestReportsEntryPoint(unittest.TestCase):
    def test_cleanArgs(self):
        """Ensure the mock reports entry point can be created and invoked."""
        entry = MockFakeReportsEntryPoint()
        self.assertEqual(entry.invoke(), "mock fake")
class TestCompareIsotxsLibsEntryPoint(unittest.TestCase):
    def test_compareIsotxsLibsBasics(self):
        """Verify parsing for the diff-isotxs CLI; invoking on missing files must fail."""
        command = CompareIsotxsLibraries()
        command.addOptions()
        command.parse_args(["--fluxFile", "/path/to/fluxfile.txt", "reference", "comparisonFiles"])

        self.assertIsNone(command.settingsArgument)
        self.assertEqual(command.name, "diff-isotxs")

        # The provided files don't exist, so this should fail.
        with self.assertRaises(FileNotFoundError):
            command.invoke()
class TestRunEntryPoint(unittest.TestCase):
    def test_runEntryPointBasics(self):
        """Verify argument parsing for the run CLI."""
        entry = RunEntryPoint()
        entry.addOptions()
        entry.parse_args([ARMI_RUN_PATH])

        self.assertEqual(entry.settingsArgument, "required")
        self.assertEqual(entry.name, "run")

    def test_runCommandHelp(self):
        """Ensure main entry point with no args completes."""
        # have to override the pytest args
        sys.argv = [""]
        with self.assertRaises(SystemExit) as excinfo:
            main()
        self.assertEqual(excinfo.exception.code, 0)

    def test_executeCommand(self):
        """Use executeCommand to call run.

        But we expect it to fail because we provide a fictional settings YAML.
        """
        # override the pytest args
        sys.argv = ["run", "path/to/fake.yaml"]
        with self.assertRaises(SystemExit) as excinfo:
            main()
        self.assertEqual(excinfo.exception.code, 1)
class TestRunSuiteCommand(unittest.TestCase):
    def test_runSuiteCommandBasics(self):
        """Verify parsing and invocation of the run-suite CLI."""
        command = RunSuiteCommand()
        command.addOptions()
        command.parse_args(["/path/to/fake.yaml", "-l"])

        self.assertIsNone(command.settingsArgument)
        self.assertEqual(command.name, "run-suite")

        # test the invoke method
        with mockRunLogs.BufferLog() as mock:
            runLog.LOG.startLog("test_runSuiteCommandBasics")
            runLog.LOG.setVerbosity(logging.INFO)
            self.assertEqual("", mock.getStdout())
            command.invoke()
            stdout = mock.getStdout()
            self.assertIn("Finding potential settings files", stdout)
            self.assertIn("Checking for valid settings", stdout)
            self.assertIn("Primary Log Verbosity", stdout)
class TestVisFileEntryPointCommand(unittest.TestCase):
    def test_visFileEntryPointBasics(self):
        """Build a small test database, then exercise the vis-file CLI against it."""
        with TemporaryDirectoryChanger() as newDir:
            # build test DB
            self.o, self.r = loadTestReactor(
                TEST_ROOT,
                customSettings={"reloadDBName": "reloadingDB.h5"},
                inputFileName="smallestTestReactor/armiRunSmallest.yaml",
            )
            self.dbi = DatabaseInterface(self.r, self.o.cs)
            dbPath = os.path.join(newDir.destination, f"{self._testMethodName}.h5")
            self.dbi.initDB(fName=dbPath)
            self.db = self.dbi.database
            self.db.writeToDB(self.r)

            # create Viz entry point
            vf = VisFileEntryPoint()
            vf.addOptions()
            vf.parse_args([dbPath])

            self.assertEqual(vf.name, "vis-file")
            self.assertIsNone(vf.settingsArgument)

            # test the invoke method
            with mockRunLogs.BufferLog() as mock:
                runLog.LOG.startLog("test_visFileEntryPointBasics")
                runLog.LOG.setVerbosity(logging.INFO)
                self.assertEqual("", mock.getStdout())

                vf.invoke()

                desired = "Creating visualization file for cycle 0, time node 0..."
                self.assertIn(desired, mock.getStdout())

            # test the parse method (using the same DB to save time)
            vf = VisFileEntryPoint()
            vf.parse([dbPath])
            # node-selection options default to None; output name defaults to the DB base name
            self.assertIsNone(vf.args.nodes)
            self.assertIsNone(vf.args.min_node)
            self.assertIsNone(vf.args.max_node)
            self.assertEqual(vf.args.output_name, "test_visFileEntryPointBasics")

            # close the DB before the temporary directory is removed
            self.db.close()
================================================
FILE: armi/cli/tests/test_runSuite.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for runsuite cli entry point."""
import io
import sys
import unittest
from unittest.mock import patch
from armi import meta
from armi.cli import ArmiCLI
class TestRunSuiteSuite(unittest.TestCase):
    @staticmethod
    def _captureStdout(callableObj):
        """Run ``callableObj`` while capturing anything written to stdout; return the text."""
        saved = sys.stdout
        try:
            buffer = io.StringIO()
            sys.stdout = buffer
            callableObj()
        finally:
            sys.stdout = saved
        return buffer.getvalue()

    def test_listCommand(self):
        """Ensure run-suite entry point is registered.

        .. test:: The ARMI CLI can be correctly initialized.
            :id: T_ARMI_CLI_CS0
            :tests: R_ARMI_CLI_CS
        """
        cli = ArmiCLI()
        captured = self._captureStdout(cli.listCommands)
        self.assertIn("run-suite", captured)

    def test_showVersion(self):
        """Test the ArmiCLI.showVersion method.

        .. test:: The ARMI CLI's basic "--version" functionality works.
            :id: T_ARMI_CLI_CS1
            :tests: R_ARMI_CLI_CS
        """
        captured = self._captureStdout(ArmiCLI.showVersion)
        self.assertIn("armi", captured)
        self.assertIn(meta.__version__, captured)

    @patch("armi.cli.ArmiCLI.executeCommand")
    def test_run(self, mockExeCmd):
        """Test the ArmiCLI.run method.

        .. test:: The ARMI CLI's import run() method works.
            :id: T_ARMI_CLI_CS2
            :tests: R_ARMI_CLI_CS
        """
        expected = 0
        mockExeCmd.return_value = expected
        self.assertEqual(ArmiCLI().run(), expected)
================================================
FILE: armi/conftest.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Per-directory pytest plugin configuration used only during development/testing.
This is a used to manipulate the environment under which pytest runs the unit tests. This can act as a one-stop-shop for
manipulating the sys.path, or the ARMI App used to run the tests.
Tests must be invoked via pytest for this to have any affect, for example::
$ pytest -n 6 armi
"""
import os
import matplotlib
from armi import apps, configure, context
from armi.settings import caseSettings
from armi.tests import TEST_ROOT
def pytest_sessionstart(session):
    """Pytest hook: install a generic ARMI app and bootstrap the test environment once per session."""
    print("Initializing generic ARMI Framework application")
    configure(apps.App())
    bootstrapArmiTestEnv()
def bootstrapArmiTestEnv():
    """
    Perform ARMI config appropriate for running unit tests.

    .. tip:: This can be imported and run from other ARMI applications for test support.
    """
    from armi.nucDirectory import nuclideBases

    # a default Settings object supplies the burn-chain file name used below
    cs = caseSettings.Settings()

    # tests must never prompt interactively
    context.Mode.setMode(context.Mode.BATCH)

    # Need to init burnChain. (See Reactor._initBurnChain)
    with open(cs["burnChainFileName"]) as burnChainStream:
        nuclideBases.imposeBurnChain(burnChainStream)

    # turn on a non-interactive mpl backend to minimize errors related to initializing Tcl in parallel tests
    matplotlib.use("agg")

    # Set and create a test-specific FAST_PATH for parallel unit testing. Not all unit tests have operators, and
    # operators are usually responsible for making FAST_PATH, so we make it here. It will be deleted by the atexit hook.
    context.activateLocalFastPath()
    if not os.path.exists(context.getFastPath()):
        os.makedirs(context.getFastPath())

    # some tests need to find the TEST_ROOT via an env variable when they're filling in templates with ``$ARMITESTBASE``
    # in them or opening input files use the variable in an `!include` tag. Thus we provide it here.
    os.environ["ARMITESTBASE"] = TEST_ROOT
================================================
FILE: armi/context.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module containing global constants that reflect the executing context of ARMI.
ARMI's global state information: operating system information, environment data, user data, memory
parallelism, temporary storage locations, and if operational mode (interactive, gui, or batch).
"""
import datetime
import enum
import gc
import getpass
import os
import sys
import time
from logging import DEBUG
# h5py needs to be imported here, so that the disconnectAllHdfDBs() call that gets bound to atexit
# below doesn't lead to a segfault on python exit.
#
# Minimal code to reproduce the issue:
#
# >>> import atexit
#
# >>> def willSegFault():
# >>> import h5py
#
# >>> atexit.register(willSegFault)
import h5py # noqa: F401
# Flags tracking whether (and in what context) the blueprints subsystem has been imported.
# NOTE(review): presumably toggled elsewhere at import time — confirm against the blueprints package.
BLUEPRINTS_IMPORTED = False
BLUEPRINTS_IMPORT_CONTEXT = ""

# App name is used when spawning new tasks that should invoke a specific ARMI application. Sometimes
# these tasks only use ARMI functionality, so running `python -m armi` is fine. Other times, the
# task is specific to an application, requiring something like: `python -m myArmiApp`
APP_NAME = "armi"
class Mode(enum.Enum):
    """
    Enumeration of the run types an ARMI case can execute under.

    Members are ``BATCH``, ``INTERACTIVE``, and ``GUI``. The active mode is generally
    auto-detected from the attached terminal, and each CLI entry point also exposes a
    ``--batch`` flag that forces ``BATCH`` mode.
    """

    BATCH = 1
    INTERACTIVE = 2
    GUI = 4

    @classmethod
    def setMode(cls, mode):
        """Record ``mode`` as the module-level ``CURRENT_MODE`` of the current ARMI case."""
        global CURRENT_MODE
        assert isinstance(mode, cls), "Invalid mode {}".format(mode)
        CURRENT_MODE = mode
# Key filesystem locations relative to the installed armi package.
ROOT = os.path.abspath(os.path.dirname(__file__))
PROJECT_ROOT = os.path.join(ROOT, "..")
RES = os.path.join(ROOT, "resources")
DOC = os.path.abspath(os.path.join(PROJECT_ROOT, "doc"))

# Who is running this process, and when it started.
USER = getpass.getuser()
START_TIME = time.ctime()

# Set batch mode if not a TTY, which means you're on a cluster writing to a stdout file. In this
# mode you cannot respond to prompts. (This does not work reliably for both Windows and Linux so an
# os-specific solution is applied.)
IS_WINDOWS = ("win" in sys.platform) and ("darwin" not in sys.platform)
isatty = sys.stdout.isatty() if IS_WINDOWS else sys.stdin.isatty()
CURRENT_MODE = Mode.INTERACTIVE if isatty else Mode.BATCH
Mode.setMode(CURRENT_MODE)

# Defaults used when MPI is unavailable (serial execution).
MPI_COMM = None
# MPI_RANK represents the index of the CPU that is running.
# 0 is typically the primary CPU, while 1+ are typically workers.
MPI_RANK = 0
# MPI_SIZE is the total number of CPUs.
MPI_SIZE = 1
LOCAL = "local"
MPI_NODENAME = LOCAL
MPI_NODENAMES = [LOCAL]

try:
    # Check for MPI
    from mpi4py import MPI
    from mpi4py.util import pkl5

    MPI_COMM = pkl5.Intracomm(MPI.COMM_WORLD)
    MPI_RANK = MPI_COMM.Get_rank()
    MPI_SIZE = MPI_COMM.Get_size()
    MPI_NODENAME = MPI.Get_processor_name()
    MPI_NODENAMES = MPI_COMM.allgather(MPI_NODENAME)
except ImportError:
    # stick with defaults
    pass
except RuntimeError:
    # likely from MPI not being on system, this is OK for many ARMI invocations
    # Note this exception was introduced upon upgrading to mpi4py 4.1.1
    pass

# Pick a writable app-data directory for this platform.
if sys.platform.startswith("win"):
    # trying a Windows approach
    APP_DATA = os.path.join(os.environ["APPDATA"], "armi")
    APP_DATA = APP_DATA.replace("/", "\\")
else:
    # non-Windows: /tmp/ if possible, if not home
    if os.access("/tmp/", os.W_OK):
        APP_DATA = "/tmp/.armi"
    else:
        APP_DATA = os.path.expanduser("~/.armi")

# Only the rank whose index equals the first occurrence of its node name creates the
# directory (at most one rank per node); other ranks wait at the barrier below.
if MPI_NODENAMES.index(MPI_NODENAME) == MPI_RANK:
    if not os.path.isdir(APP_DATA):
        try:
            os.makedirs(APP_DATA)
            os.chmod(APP_DATA, 0o0777)
        except OSError:
            # best-effort: existence is re-checked below and raises if creation failed
            pass
    if not os.path.isdir(APP_DATA):
        raise OSError("Directory doesn't exist {0}".format(APP_DATA))

if MPI_COMM is not None:
    # Make sure app data exists before workers proceed.
    MPI_COMM.barrier()

# True when running under MPI with more than one process.
MPI_DISTRIBUTABLE = MPI_SIZE > 1

# NOTE(review): os.path.join with a single argument is a no-op; this is just os.getcwd().
_FAST_PATH = os.path.join(os.getcwd())
"""
A directory available for high-performance I/O.

.. warning:: This is not a constant and can change at runtime.
"""

_FAST_PATH_IS_TEMPORARY = False
"""Flag indicating whether or not the FAST_PATH should be cleaned up on exit."""
def activateLocalFastPath() -> None:
    """
    Point the module-level fast path at a fresh, process-unique temporary directory.

    ``FAST_PATH`` is often a local hard drive on a cluster node — a high-performance
    scratch space. Different processors on the same node get different fast paths.

    Notes
    -----
    This path will be obliterated when the job ends.

    The directory itself is not created here; the Operator constructor creates it. Since
    the default path is chosen at import time, successive unit tests that build operators
    will find the path already exists on the second pass.
    """
    global _FAST_PATH, _FAST_PATH_IS_TEMPORARY, APP_DATA

    # Try to fix pathing issues in Windows.
    if os.name == "nt":
        APP_DATA = APP_DATA.replace("/", "\\")

    # build a unique subdirectory name: rank + xdist worker (for parallel tests) + timestamp
    workerId = os.environ.get("PYTEST_XDIST_WORKER", "")
    stamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S%f")
    _FAST_PATH = os.path.join(APP_DATA, "{}{}-{}".format(MPI_RANK, workerId, stamp))
    _FAST_PATH_IS_TEMPORARY = True
def getFastPath() -> str:
    """
    Return the current value of the module-level fast path.

    Notes
    -----
    Use this accessor instead of reading ``FAST_PATH`` directly: the value can be
    swapped out at runtime (e.g. by ``activateLocalFastPath``), so a value captured
    at import time may be stale.
    """
    return _FAST_PATH
def cleanFastPathAfterSimulation():
    """
    Clean up temporary files after a run.

    Some Windows HPC systems send a SIGBREAK signal when the user cancels a job, which is NOT
    handled by ``atexit``. Notably, SIGBREAK does not exist outside Windows. For the SIGBREAK signal
    to work with a Windows HPC, the ``TaskCancelGracePeriod`` option must be configured to be non-
    zero. This sets the period between SIGBREAK and SIGTERM/SIGINT. To do cleanups in this case, we
    must use the ``signal`` module. Actually, even then it does not work because MS ``mpiexec`` does
    not pass signals through.
    """
    # imported here to avoid import cycles at module load time
    from armi import runLog
    from armi.utils.pathTools import cleanPath

    disconnectAllHdfDBs()

    verbose = runLog.getVerbosity() <= DEBUG
    if not (_FAST_PATH_IS_TEMPORARY and os.path.exists(_FAST_PATH)):
        return

    if verbose:
        print(
            "Cleaning up temporary files in: {}".format(_FAST_PATH),
            file=sys.stdout,
        )
    try:
        cleanPath(_FAST_PATH, mpiRank=MPI_RANK)
    except Exception as error:
        # best-effort cleanup: report the failure on both streams but never raise at exit
        if verbose:
            for outputStream in (sys.stderr, sys.stdout):
                print(
                    "Failed to delete temporary files in: {}\n error: {}".format(_FAST_PATH, error),
                    file=outputStream,
                )
def disconnectAllHdfDBs() -> None:
    """
    Forcibly disconnect all instances of HdfDB objects.

    Notes
    -----
    This is a hack that helps ARMI exit gracefully when the garbage collector and h5py
    disagree about object destruction (root cause never identified). When several HDF5
    files are open in one run (e.g. repeated ``armi.init()`` calls from a post-processing
    script), closing the h5py File objects could make the garbage collector raise while
    repr'ing them. Walking the GC's live objects and closing every Database avoids that.
    """
    from armi.bookkeeping.db import Database

    for db in (obj for obj in gc.get_objects() if isinstance(obj, Database)):
        db.close()
================================================
FILE: armi/interfaces.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Interfaces are objects of code that interact with ARMI. They read information off the state, perform calculations (or\
run external codes), and then store the results back in the state.
Learn all about interfaces in :doc:`/developer/guide`
See Also
--------
armi.operators : Schedule calls to various interfaces
armi.plugins : Register various interfaces
"""
import copy
from typing import Dict, List, NamedTuple, Tuple, Union
import numpy as np
from numpy.linalg import norm
from armi import getPluginManagerOrFail, runLog, settings, utils
from armi.reactor import parameters
from armi.utils import textProcessors
class STACK_ORDER:  # noqa: N801
    """
    Constants that help determine the order of modules in the interface stack.

    Each module defines an ``ORDER`` constant that specifies where in this order it should be placed in the Interface
    Stack.

    .. impl:: Define an ordered list of interfaces.
        :id: I_ARMI_OPERATOR_INTERFACES0
        :implements: R_ARMI_OPERATOR_INTERFACES

        At each time node, an ordered collection of Interfaces (the "interface stack") is
        run, but ARMI does not force that ordering on the analyst. Each Interface instead
        declares its own position by choosing an order number, which may be an integer or
        a decimal. The constants below are conventional positions used across the ARMI
        ecosystem; the names are merely suggestions — only the numeric values matter.

    See Also
    --------
    armi.operators.operator.Operator.createInterfaces
    armi.physics.neutronics.globalFlux.globalFluxInterface.ORDER
    """

    # small offsets, presumably for slotting an interface just around another position
    BEFORE = -0.1
    AFTER = 0.1

    # conventional stack positions, spaced one unit apart (values match the original
    # chained ``X + 1`` definitions exactly)
    PREPROCESSING = 1.0
    FUEL_MANAGEMENT = 2.0
    DEPLETION = 3.0
    FUEL_PERFORMANCE = 4.0
    CROSS_SECTIONS = 5.0
    CRITICAL_CONTROL = 6.0
    FLUX = 7.0
    THERMAL_HYDRAULICS = 8.0
    REACTIVITY_COEFFS = 9.0
    TRANSIENT = 10.0
    BOOKKEEPING = 11.0
    POSTPROCESSING = 12.0
class TightCoupler:
    """
    Convergence tracker used by an Interface when ``interactAllCoupled`` runs tight coupling.

    .. impl:: The TightCoupler defines the convergence criteria for physics coupling.
        :id: I_ARMI_OPERATOR_PHYSICS0
        :implements: R_ARMI_OPERATOR_PHYSICS

        ARMI application developers frequently iterate a physics calculation until some
        quantity settles to within a small tolerance, resolving nonlinear feedback
        between different physical properties of the reactor (e.g. fuel performance).
        Which parameter is tightly coupled is configurable by the developer.

        This class decides whether a single parameter has converged. The user supplies
        the parameter name, a tolerance, and a maximum iteration count. If ``isConverged``
        finds the parameter unconverged, the iteration counter is incremented and the
        class waits, presuming another iteration is forthcoming.

    Parameters
    ----------
    param : str
        The name of a parameter defined in the ARMI Reactor model.
    tolerance : float
        Allowable error between the current and previous parameter values for the
        coupled parameter to count as converged.
    maxIters : int
        Maximum number of tight coupling iterations allowed.
    """

    # value types accepted by storePreviousIterationValue / isConverged
    _SUPPORTED_TYPES = [float, int, list, np.ndarray]

    def __init__(self, param, tolerance, maxIters):
        self.parameter = param
        self.tolerance = tolerance
        self.maxIters = maxIters
        # running iteration counter; reset on convergence or when maxIters is hit
        self._numIters = 0
        # last stored value; must be set via storePreviousIterationValue before isConverged
        self._previousIterationValue = None
        # most recent convergence measure
        self.eps = np.inf

    def __repr__(self):
        return (
            f"<{self.__class__.__name__}, Parameter: {self.parameter}, Convergence Criteria: "
            f"{self.tolerance}, Maximum Coupled Iterations: {self.maxIters}>"
        )

    def storePreviousIterationValue(self, val: _SUPPORTED_TYPES):
        """
        Record ``val`` as the previous-iteration value of the coupled parameter.

        Parameters
        ----------
        val : _SUPPORTED_TYPES
            The value to store; commonly equal to ``interface.getTightCouplingValue()``.

        Raises
        ------
        TypeError
            If ``type(val)`` is not one of ``_SUPPORTED_TYPES``.
        """
        if type(val) not in self._SUPPORTED_TYPES:
            raise TypeError(
                f"{val} supplied has type {type(val)} which is not supported in {self}. "
                f"Supported types: {self._SUPPORTED_TYPES}"
            )
        self._previousIterationValue = val

    def isConverged(self, val: _SUPPORTED_TYPES) -> bool:
        """
        Check whether ``val`` and the stored previous value satisfy the convergence criteria.

        Parameters
        ----------
        val : _SUPPORTED_TYPES
            The most recent value of the coupled parameter; commonly equal to
            ``interface.getTightCouplingValue()``.

        Returns
        -------
        bool
            True (False) if the interface is (not) converged.

        Notes
        -----
        - On convergence the internal iteration counter resets so that no stale state is
          retained. The counter is also cleared (with a warning) if ``maxIters`` is
          reached before the criteria are met.
        - Array comparisons support at most 2D values. 3D arrays would arise from
          component-level parameters, which are not supported at this time.

        Raises
        ------
        ValueError
            If no previous value was stored; call ``storePreviousIterationValue`` first.
        RuntimeError
            If asked to compute convergence for an array of more than 2 dimensions.
        """
        if self._previousIterationValue is None:
            raise ValueError(
                f"Cannot check convergence of {self} with no previous iteration value set. Set using "
                "`storePreviousIterationValue` first."
            )
        previous = self._previousIterationValue

        # compute self.eps, the scalar convergence measure between val and previous
        if isinstance(val, (int, float)):
            self.eps = abs(val - previous)
        else:
            dim = self.getListDimension(val)
            if dim == 1:
                # 1D array: L2 norm of the element-wise difference
                self.eps = norm(np.subtract(val, previous), ord=2)
            elif dim == 2:
                # 2D array: infinity norm over per-row L2 norms of the differences
                rowNorms = [norm(np.subtract(old, new), ord=2) for old, new in zip(previous, val)]
                self.eps = norm(rowNorms, ord=np.inf)
            else:
                raise RuntimeError("Currently only support up to 2D arrays for calculating convergence of arrays.")

        # Check if convergence is satisfied; on success (or after maxIters) reset the counter.
        converged = self.eps < self.tolerance
        if converged:
            self._numIters = 0
        else:
            self._numIters += 1
            if self._numIters == self.maxIters:
                runLog.warning(
                    f"Maximum number of iterations for {self.parameter} reached without convergence! Prescribed "
                    f"convergence criteria is {self.tolerance}."
                )
                self._numIters = 0
        return converged

    @staticmethod
    def getListDimension(listToCheck: list, dim: int = 1) -> int:
        """
        Return the nesting depth (dimension) of a python list.

        Parameters
        ----------
        listToCheck : list
            The supplied python list to have its dimension returned.
        dim : int, optional
            The dimension counted so far (used during recursion).

        Returns
        -------
        int
            The dimension of the list; typically 1, 2, or 3 but can be arbitrary order, N.
        """
        # Recurse into the first nested list encountered; sibling elements are not inspected.
        for item in listToCheck:
            if isinstance(item, list):
                return TightCoupler.getListDimension(item, dim + 1)
        return dim
class Interface:
    """
    The eponymous Interface between the ARMI reactor data model and the Plugins.

    .. impl:: The interface shall allow code execution at important operational points in time.
        :id: I_ARMI_INTERFACE
        :implements: R_ARMI_INTERFACE

        The Interface class defines a number of methods with names like ``interact***``.
        These methods are called in order at each time node. This allows for an
        individual Plugin defining multiple interfaces to insert code at the start
        or end of a particular time node or cycle during reactor simulation. In this
        fashion, the Plugins and thus the Operator control when their code is run.

        The end goal of all this work is to allow the Plugins to carefully tune
        when and how they interact with the reactor data model.

        Interface instances are gathered into an interface stack in
        :py:meth:`armi.operators.operator.Operator.createInterfaces`.
    """
# list containing interfaceClass
@classmethod
def getDependencies(cls, cs):
return []
@classmethod
def getInputFiles(cls, cs):
    """Return a MergeableDict of files that should be considered "input" for this interface."""
    return utils.MergeableDict()
name: Union[str, None] = None
"""
The name of the interface. This is undefined for the base class, and must be overridden by any
concrete class that extends this one.
"""
purpose = None
"""
The action performed by an Interface. This is not required be be defined by implementations of
Interface, but is used to form categories of interfaces.
"""
class Distribute:
    """Enum-like return flag for behavior on interface broadcasting with MPI.

    ``distributable()`` returns ``DUPLICATE`` by default; the values are distinct
    powers of two (1, 2, 4).
    """

    DUPLICATE = 1
    NEW = 2
    SKIP = 4
def __init__(self, r, cs):
"""
Construct an interface.
The ``r`` and ``cs`` arguments are required, but may be ``None``, where appropriate for the
specific ``Interface`` implementation.
Parameters
----------
r : Reactor
A reactor to attach to
cs : Settings
Settings object to use
Raises
------
RuntimeError
Interfaces derived from Interface must define their name
"""
if self.name is None:
raise RuntimeError(
"Interfaces derived from Interface must define their name ({}).".format(type(self).__name__)
)
self._enabled = True
self.reverseAtEOL = False
self._bolForce = False # override disabled flag in interactBOL if true.
self.cs = cs
self.r = r
self.o = r.o if r else None
self.coupler = _setTightCouplerByInterfaceFunction(self, cs)
def __repr__(self):
return "".format(self.name)
def _checkSettings(self):
"""Raises an exception if interface settings requirements are not met."""
pass
def nameContains(self, name):
return name in str(self.name)
def distributable(self):
"""
Return true if this can be MPI broadcast.
Notes
-----
Cases where this isn't possible include the database interface, where the SQL driver cannot
be distributed.
"""
return self.Distribute.DUPLICATE
def preDistributeState(self):
"""
Prepare for distribute state by returning all non-distributable attributes.
Examples
--------
>>> return {"neutronsPerFission", self.neutronsPerFission}
"""
return {}
def postDistributeState(self, toRestore):
"""Restore non-distributable attributes after a distributeState."""
pass
def attachReactor(self, o, r):
"""
Set this interfaces' reactor to the reactor passed in and sets default settings.
Parameters
----------
r : Reactor object
The reactor to attach
quiet : bool, optional
If true, don't print out the message while attaching
Notes
-----
This runs on all worker nodes as well as the primary.
"""
self.r = r
self.cs = o.cs
self.o = o
def detachReactor(self):
"""Delete the callbacks to reactor or operator. Useful when pickling, MPI sending, etc. to save memory."""
self.o = None
self.r = None
self.cs = None
def duplicate(self):
"""
Duplicate this interface without duplicating some of the large attributes (like the entire reactor).
Makes a copy of interface with detached reactor/operator/settings so that it can be attached to an operator at a
later point in time.
Returns
-------
Interface
The deepcopy of this interface with detached reactor/operator/settings
"""
# temporarily remove references to the interface. They will be reattached later.
o = self.o
self.o = None
r = self.r
self.r = None
cs = self.cs
self.cs = None
# a new sterile copy of the interface.
# With no record of operators, reactors, or cs, it can be added easily to a new operator
newI = copy.deepcopy(self)
# reattach current interface information
self.o = o
self.r = r
self.cs = cs
return newI
def getHistoryParams(self):
"""
Add these params to the history tracker for designated assemblies.
The assembly will get a print out of these params vs. time at EOL.
"""
return []
def getInterface(self, *args, **kwargs):
return self.o.getInterface(*args, **kwargs) if self.o else None
def interactInit(self):
"""
Interacts immediately after the interfaces are created.
Notes
-----
BOL interactions on other interfaces will not have occurred here.
"""
self._checkSettings()
def interactBOL(self):
"""Called at the Beginning-of-Life of a run, before any cycles start."""
if self._enabled:
self._initializeParams()
def _initializeParams(self):
"""
Assign the parameters for active interfaces so that they will be in the database.
Notes
-----
Parameters with defaults are not written to the database until they have been assigned
SINCE_ANYTHING. This is done to reduce database size, so that we don't write parameters to
the DB that are related to interfaces that are not not active.
"""
for paramDef in parameters.ALL_DEFINITIONS.inCategory(self.name):
if paramDef.default not in (None, parameters.NoDefault):
paramDef.assigned = parameters.SINCE_ANYTHING
def interactEOL(self):
"""Called at End-of-Life, after all cycles are complete."""
pass
def interactBOC(self, cycle=None):
"""Called at the beginning of each cycle."""
pass
def interactEOC(self, cycle=None):
"""Called at the end of each cycle."""
pass
def interactEveryNode(self, cycle, node):
"""Called at each time node/subcycle of every cycle."""
pass
def interactCoupled(self, iteration):
"""Called repeatedly at each time node/subcycle when tight physics coupling is active."""
pass
def getTightCouplingValue(self):
"""Abstract method to retrieve the value in which tight coupling will converge on."""
pass
def interactError(self):
"""Called if an error occurs."""
pass
def interactDistributeState(self):
"""Called after this interface is copied to a different (non-primary) MPI node."""
pass
def interactRestart(self, startNode: Tuple[int, int], previousNode: Tuple[int, int]):
"""Perform any actions prior to simulating a restart.
Interfaces may want to restore some state that would have existed at the start of ``startNode`` prior to calling
:meth:`interactBOL` for the desired start point. The database interface will be used prior to any interfaces
calling this method, so you can assume the reactor state has been correctly loaded from the database from the
``previousNode``. This helps ensure that interfaces restart at e.g., ``(cycle, node)=(4, 3)`` would see the same
data compared to the nominal simulation without a restart.
Parameters
----------
startNode
Pair of ``(cycle, node)`` for the requested restart point.
previousNode
Pair of ``(cycle, node)`` for the time node immediately preceeding ``startNode``.
"""
def isRequestedDetailPoint(self, cycle=None, node=None):
"""
Determine if this interface should interact at this reactor state (cycle/node).
Notes
-----
By default, detail points are either during the requested snapshots, if any exist, or all
cycles and nodes if none exist.
This is useful for peripheral interfaces (CR Worth, perturbation theory, transients) that
may or may not be requested during a standard run.
If both cycle and node are None, this returns True
Parameters
----------
cycle : int
The cycle number (or None to only consider node)
node : int
The timenode (BOC, MOC, EOC, etc.).
Returns
-------
bool
Whether or not this is a detail point.
"""
from armi.bookkeeping import snapshotInterface # avoid cyclic import
if cycle is None and node is None:
return True
if not self.cs["dumpSnapshot"]:
return True
for cnStamp in self.cs["dumpSnapshot"]:
ci, ni = snapshotInterface.extractCycleNodeFromStamp(cnStamp)
if cycle is None and ni == node:
# case where only node counts (like in equilibrium cases)
return True
if ci == cycle and ni == node:
return True
return False
def workerOperate(self, _cmd):
"""
Receive an MPI command and do MPI work on worker nodes.
Returns
-------
bool
True if this interface handled the incoming command. False otherwise.
"""
return False
def enabled(self, flag=None):
"""
Mechanism to allow interfaces to be attached but not running at the interaction points.
Must be implemented on the individual interface level hooks. If given no arguments, returns
status of enabled. If arguments, sets enabled to that flag. (True or False)
Notes
-----
These ``return`` statements are inconsistent, but not wrong.
"""
if flag is None:
return self._enabled
elif isinstance(flag, bool):
self._enabled = flag
else:
raise ValueError("Non-bool passed to assign {}.enable().".format(self))
def bolForce(self, flag=None):
"""
Run interactBOL even if this interface is disabled.
Parameters
----------
flag : boolean, optional
Will set the bolForce flag to this boolean
Returns
-------
bool
true if should run at BOL. No return if you pass an input.
Notes
-----
These ``return`` statements are inconsistent, but not wrong.
"""
if flag is None:
return self._bolForce
self._bolForce = flag
def writeInput(self, inName):
"""Write input file(s)."""
raise NotImplementedError()
def readOutput(self, outName):
"""Read output file(s)."""
raise NotImplementedError()
@staticmethod
def specifyInputs(cs) -> Dict[Union[str, settings.Setting], List[str]]:
"""
Return a collection of file names that are considered input files.
This is a static method (i.e. is not called on a particular instance of the class), since it
should not require an Interface to actually be constructed. This would require constructing
a reactor object, which is expensive.
The files returned by an implementation should be those that one would want copied to a
target location when cloning a Case or CaseSuite. These can be absolute paths, relative
paths, or glob patterns that will be interpolated relative to the input directory. Absolute
paths will not be copied anywhere.
The returned dictionary will enable the source Settings object to be updated to the new file
location. While the dictionary keys are recommended to be Setting objects, the name of the
setting as a string, e.g., "shuffleLogic", is still interpreted. If the string name does not
point to a valid setting then this will lead to a failure.
Note
----
This existed before the advent of ARMI plugins. Perhaps it can be better served as a plugin
hook. Potential future work.
See Also
--------
armi.cases.Case.clone() : Main user of this interface.
Parameters
----------
cs : Settings
The case settings for a particular Case
"""
return {}
def updatePhysicsCouplingControl(self):
"""Adjusts physics coupling settings depending on current state of run."""
pass
class InputWriter:
    """Use to write input files of external codes."""

    def __init__(self, r=None, externalCodeInterface=None, cs=None):
        # Keep both the long name and the historical short alias.
        self.externalCodeInterface = externalCodeInterface
        self.eci = externalCodeInterface
        self.r = r
        self.cs = cs

    def getInterface(self, name):
        """Get another interface by name, or None when no external code interface is attached."""
        eci = self.externalCodeInterface
        return eci.getInterface(name) if eci else None

    def write(self, fName):
        """Write the input file."""
        raise NotImplementedError
class OutputReader:
    """
    A generic representation of a particular module's output.

    Attributes
    ----------
    success : bool
        False by default, set to True if the run is considered to have completed without error.

    Notes
    -----
    Should ideally not require r, eci, and fname arguments and would rather just have an apply(reactor) method.
    """

    def __init__(self, r=None, externalCodeInterface=None, fName=None, cs=None):
        self.externalCodeInterface = externalCodeInterface
        self.eci = self.externalCodeInterface
        self.r = r
        self.cs = cs
        self.fName = fName
        # Only build a text processor when an output file name was actually supplied.
        self.output = textProcessors.TextProcessor(fName) if fName else None
        self.success = False

    def getInterface(self, name):
        """Get another interface by name, or None when no external code interface is attached."""
        eci = self.externalCodeInterface
        return eci.getInterface(name) if eci else None

    def read(self, fileName):
        """Read the output file."""
        raise NotImplementedError

    def apply(self, reactor):
        """
        Apply the output back to a reactor state.

        This provides a generic interface for the output data of anything to be applied to a reactor state. The
        application could involve reading text or binary output or simply parameters to appropriate values in some other
        data structure.
        """
        raise NotImplementedError()
def _setTightCouplerByInterfaceFunction(interfaceClass, cs):
    """
    Return an instance of a ``TightCoupler`` class or ``None``.

    Parameters
    ----------
    interfaceClass : Interface
        Interface class that a ``TightCoupler`` object will be added to.
    cs : Settings
        Case settings that are parsed to determine if tight coupling is enabled globally and if both a target parameter
        and convergence criteria defined.
    """
    purpose = interfaceClass.purpose
    # No tight coupling if there is no purpose for the Interface defined.
    if purpose is None:
        return None

    # Tight coupling must be enabled globally and configured for this purpose.
    if not cs["tightCoupling"]:
        return None
    if purpose not in cs["tightCouplingSettings"]:
        return None

    settingsForPurpose = cs["tightCouplingSettings"][purpose]
    return TightCoupler(
        settingsForPurpose["parameter"],
        settingsForPurpose["convergence"],
        cs["tightCouplingMaxNumIters"],
    )
def getActiveInterfaceInfo(cs):
    """
    Return a list containing information for all of the Interface classes that are present.

    This creates a list of tuples, each containing an Interface subclass and appropriate kwargs for adding them to an
    Operator stack, given case settings. There should be entries for all Interface classes that are returned from
    implementations of the describeInterfaces() function in modules present in the passed list of packages. The list is
    sorted by the ORDER specified by the module in which the specific Interfaces are described.

    Parameters
    ----------
    cs : Settings
        The case settings that activate relevant Interfaces
    """
    collected = []
    # Each plugin hook implementation returns its own list of interface info entries.
    for pluginResults in getPluginManagerOrFail().hook.exposeInterfaces(cs=cs):
        collected.extend(pluginResults)
    collected.sort(key=lambda info: info.order)
    return [(info.interfaceCls, info.kwargs) for info in collected]
def isInterfaceActive(klass, cs):
    """Return True if the Interface klass is active."""
    for candidate, _kwargs in getActiveInterfaceInfo(cs):
        if issubclass(candidate, klass):
            return True
    return False
class InterfaceInfo(NamedTuple):
    """
    Data structure with interface info.

    Notes
    -----
    If kwargs is an empty dictionary, defaults from ``armi.operators.operator.Operator.addInterface`` will be applied.

    See Also
    --------
    armi.operators.operator.Operator.createInterfaces : where these ultimately
        activate various interfaces.
    """

    # Position in the interface stack; lower values are sorted earlier.
    order: int
    # The Interface subclass to instantiate.
    interfaceCls: Interface
    # Keyword arguments forwarded when adding the interface to the Operator.
    kwargs: dict
================================================
FILE: armi/matProps/__init__.py
================================================
# Copyright 2026 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The package armi.matProps is a material library capable of representing and computing material properties.
The matProps package allows users to define materials in a custom YAML format. The format is simple, extensible, and
easy to use. Each material has a list of "properties" (like density, specific heat, vapor pressure, etc). Each of those
properties can be an arbitrary function of multiple independent variables, or a look up table of one or more variables. Each
of these properties can define their own set of references, to allow for trustworthy modeling. A major idea in matProps
is that we separate out materials as "data", rather than representing them directly in Python as "code".
This package does not include any material data files. The unit tests in this package have many example YAML files, and
ARMI comes packaged with more real world examples at: ``armi/resources/materials/``. The user may create their own data
files to use with ``matProps`` in a directory, and pass in that path via ``armi.matProps.loadAll(path)``.
Loading Data
============
In your Python code, you can load a full set of matProps materials into memory with just one or two lines of code. You
just have to provide a path to a directory filled with correctly-formatted YAML files:
.. code-block:: python
import armi.matProps
pathToMaterialYAMLs = "path/to/materialDir/"
armi.matProps.loadSafe(pathToMaterialYAMLs)
If you do not specify a directory for the YAML files, there is a default location in your virtual environment you can
store the data files (in a package named ``material_data``):
.. code-block:: python
import armi.matProps
armi.matProps.loadSafe()
Adding a Property
=================
matProps comes with a large set of common material properties. But it is quite easy to add another material property to
your simulation, if you need to.
.. code-block:: python
from armi.matProps.prop import defProp
defProp("fuzz", "fuzziness", "1/m^2")
defProp("goo", "gooiness", "m^2/s")
defProp("squish", "squishiness", "1/Pa")
armi.matProps.loadSafe("path/to/hilarious/materials/")
A Note on Design
================
At the high-level, the ``matProps`` API exposes the functions in this file (``loadAll``, ``loadSafe``,
``getMaterials``, etc). And these functions all work off three global data collections:
``armi.matProps.loadedRootDirs``, ``armi.matProps.materials``, and ``armi.matProps.prop.properties``.
It is worth noting that this design centers around global data. This could have a more object-oriented approach where
the functions below and these three data sets are all stored in a class, e.g. via a ``MaterialLibrary`` class. This
would be more Pythonic, and allow for multiple collections of materials, say for testing. So far, no one has ever needed
multiple collections of materials from matProps, because a single scientific model generally only needs one source of
truth for what materials are.
"""
import os
import sysconfig
import warnings
from glob import glob
from armi.matProps.material import Material
# Directories whose YAML files have been loaded via loadAll()/loadSafe().
loadedRootDirs = []
# Global registry mapping material name -> Material for all loaded materials.
materials = {}
def getPaths(rootDir: str) -> list:
    """Get the paths of all the YAML files in a given directory (searched recursively).

    Raises
    ------
    FileNotFoundError
        If ``rootDir`` does not exist.
    NotADirectoryError
        If ``rootDir`` exists but is not a directory.
    """
    if not os.path.exists(rootDir):
        raise FileNotFoundError(f"Directory {rootDir} not found")
    if not os.path.isdir(rootDir):
        raise NotADirectoryError(f"Input path {rootDir} is not a directory")

    matFiles = []
    # "**" with recursive=True matches rootDir itself and all subdirectories.
    for extension in ("*.yaml", "*.yml"):
        matFiles += glob(os.path.join(rootDir, "**", extension), recursive=True)
    return matFiles
def addMaterial(yamlPath: str, mat):
    """
    Add a Material object instance to the matProps.materials dict.

    Parameters
    ----------
    yamlPath: str
        Yaml file path whose information is being parsed.
    mat: Material
        Material object whose data will be saved.

    Raises
    ------
    KeyError
        If a material with the same name has already been registered.
    """
    global materials
    if mat.name in materials:
        raise KeyError(f"A material with the name `{mat.name}` as defined in ({yamlPath}) already exists.")
    materials[mat.name] = mat
    # Material.save() is invoked after registration -- see Material for its semantics.
    mat.save()
def loadAll(rootDir: str = None) -> None:
    """
    Load all material files from a particular directory. If a materials directory is not provided, this function will
    attempt to find materials in the default location in the virtual environment.

    Parameters
    ----------
    rootDir: str
        Directory whose YAML files will be loaded into matProps. The default is the materials_data location in the venv.

    Notes
    -----
    Hidden in here is a default directory which you can load your YAML files from. Inside your Python virtual
    environment, you can create a data directory named "materials_data", and store all your matProps formatted YAML
    files. This is optional, of course, you can just explicitly pass a directory path into this method.
    """
    global loadedRootDirs
    if rootDir is None:
        # Fall back to the optional "materials_data" package inside the active venv.
        rootDir = os.path.join(sysconfig.getPaths()["purelib"], "materials_data")
        if not os.path.exists(rootDir):
            raise OSError(f"No material directory provided, and default not found: {rootDir}")

    for yamlPath in getPaths(rootDir):
        mat = Material()
        try:
            mat.loadFile(yamlPath)
        except Exception as exc:
            # Wrap the parser error so the offending file path is visible.
            raise RuntimeError(f"Failed to load `{yamlPath}`.") from exc
        addMaterial(yamlPath, mat)

    loadedRootDirs.append(rootDir)
def clear() -> None:
    """Clears all loaded materials in matProps."""
    # Clear in place so any existing aliases to these module-level
    # collections keep pointing at the (now empty) objects.
    materials.clear()
    loadedRootDirs.clear()
def loadSafe(rootDir: str = None) -> None:
    """
    Safely load a single directory of matProps materials.

    Loading a materials directory via this function will first clear out any other materials that are loaded into
    matProps. If a materials directory is not provided, this function will attempt to find materials in the default
    location in the virtual environment. This is meant to be a helpful tool for testing.

    Parameters
    ----------
    rootDir: str
        Directory whose yaml files will be loaded into matProps.
        The default is the materials_data location in the venv.

    See Also
    --------
    loadAll : More flexible way to load materials into matProps.
    """
    # Start from a clean slate so previously loaded materials cannot collide.
    clear()
    loadAll(rootDir)
def getHashes() -> dict:
    """Return a mapping of material name to ``Material.hash()`` for every loaded material."""
    return {mat.name: mat.hash() for mat in materials.values()}
def getMaterial(name: str) -> Material:
    """
    Return the material object with the given name from matProps.materials.

    Parameters
    ----------
    name: str
        Name of material whose data user wishes to retrieve.

    Returns
    -------
    Material
        Material object returned from matProps.materials.

    Raises
    ------
    KeyError
        If no material with that name has been loaded.
    """
    global materials
    if name not in materials:
        # Raise a fresh KeyError with a helpful message (no chained context).
        raise KeyError(f"No material named `{name}` was loaded within loaded data.")
    return materials[name]
def loadMaterial(yamlPath: str, saveMaterial: bool = False) -> Material:
    """
    Load an individual material file.

    Parameters
    ----------
    yamlPath: str
        Path to YAML file that will be parsed into this object instance.
    saveMaterial: bool
        If True, Material object instance will be saved into matProps.materials.

    Returns
    -------
    Material
        Material object whose data is parsed from material file provided by yamlPath.
    """
    mat = Material()
    mat.loadFile(yamlPath)
    if saveMaterial:
        addMaterial(yamlPath, mat)
        return mat

    msg = f"Loading material {mat} {mat.hash()}"
    try:
        # If possible, keep matProps free of ARMI imports
        from armi import runLog

        runLog.info(msg)
    except ImportError:
        print(msg)
    return mat
def loadedMaterials() -> list:
    """
    Return all the Material objects that have been loaded into matProps.materials.

    Returns
    -------
    list of Material
        Loaded Material objects, in insertion order.
    """
    # A direct conversion replaces the previous manual append loop; the order
    # (dict insertion order) is identical.
    return list(materials.values())
def getLoadedRootDirs() -> list:
    """
    Return a list of all of the loaded root directories.

    Note that this returns the live module-level list, not a copy.

    Returns
    -------
    list of str
        Loaded root directories
    """
    global loadedRootDirs
    return loadedRootDirs
def load_all(rootDir: str = None) -> None:
    """Pass-through to temporarily support an old API."""
    # stacklevel=2 attributes the warning to the caller rather than this shim.
    warnings.warn("Please use matProps.loadAll, not matProps.load_all.", DeprecationWarning, stacklevel=2)
    loadAll(rootDir)
def load_safe(rootDir: str = None) -> None:
    """Pass-through to temporarily support an old API."""
    # stacklevel=2 attributes the warning to the caller rather than this shim.
    warnings.warn("Please use matProps.loadSafe, not matProps.load_safe.", DeprecationWarning, stacklevel=2)
    loadSafe(rootDir)
def get_material(name: str) -> Material:
    """Pass-through to temporarily support an old API."""
    # stacklevel=2 attributes the warning to the caller rather than this shim.
    warnings.warn("Please use matProps.getMaterial, not matProps.get_material.", DeprecationWarning, stacklevel=2)
    return getMaterial(name)
def load_material(yamlPath: str, saveMaterial: bool = False) -> Material:
    """Pass-through to temporarily support an old API."""
    # stacklevel=2 attributes the warning to the caller rather than this shim.
    warnings.warn("Please use matProps.loadMaterial, not matProps.load_material.", DeprecationWarning, stacklevel=2)
    return loadMaterial(yamlPath, saveMaterial)
def loaded_materials() -> list:
    """Pass-through to temporarily support an old API."""
    # stacklevel=2 attributes the warning to the caller rather than this shim.
    warnings.warn("Please use matProps.loadedMaterials, not matProps.loaded_materials.", DeprecationWarning, stacklevel=2)
    return loadedMaterials()
def get_loaded_root_dirs() -> list:
    """Pass-through to temporarily support an old API."""
    # stacklevel=2 attributes the warning to the caller rather than this shim.
    warnings.warn(
        "Please use matProps.getLoadedRootDirs, not matProps.get_loaded_root_dirs.", DeprecationWarning, stacklevel=2
    )
    return getLoadedRootDirs()
================================================
FILE: armi/matProps/constituent.py
================================================
# Copyright 2026 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Basic material composition."""
class Constituent:
    """Makeup of the Material.composition."""

    def __init__(self, name: str, minValue: float, maxValue: float, isBalance: bool):
        """
        Constructor for Constituent object.

        Parameters
        ----------
        name: str
            Name of constituent element
        minValue: float
            Minimum value of constituent
        maxValue: float
            Maximum value of constituent
        isBalance: bool
            Boolean used to denote if constituent is balance element (True) or not (False).

        Raises
        ------
        ValueError
            If the minimum is negative, the maximum is below the minimum, or the maximum exceeds 100.
        """
        # Name of the constituent
        self.name = name
        # Min value of the constituent
        self.minValue = minValue
        # Max value of the constituent
        self.maxValue = maxValue
        # Flag indicating the constituent is intended to be the balance of the composition
        self.isBalance = isBalance

        if self.minValue < 0.0:
            raise ValueError(f"Constituent {self.name} has a negative minimum composition value.")
        elif self.maxValue < self.minValue:
            raise ValueError(f"Constituent {self.name} has an invalid maximum composition value. (max < min)")
        elif self.maxValue > 100.0:
            raise ValueError(f"Constituent {self.name} has an invalid maximum composition value. (max > 100.0)")

    def __repr__(self):
        """Provides string representation of Constituent object."""
        # Previously this returned an empty string; include the useful state.
        return f"<Constituent {self.name}: min={self.minValue}, max={self.maxValue}, isBalance={self.isBalance}>"

    @staticmethod
    def parseComposition(node):
        """
        Parse the "composition" node from a YAML file and return a list of Constituent objects.

        Each element is constructed from a map element in the "composition" node. Exactly one entry
        must be marked ``balance``; its min/max bounds are derived from the other entries so the
        composition can sum to 100.

        Parameters
        ----------
        node: dict
            YAML object representing composition node.

        Returns
        -------
        list : Constituent
            List of Constituent objects representing elements of Material.

        Raises
        ------
        TypeError
            If a composition value is neither a two-element min/max pair nor "balance".
        ValueError
            If there is not exactly one balance element, or the minimums sum to more than 100.
        """
        composition = []
        balanceName = ""
        balanceMin = 100.0
        balanceMax = 100.0
        sumMin = 0.0
        sumMax = 0.0
        numBalance = 0
        for element, nodeContent in node.items():
            # A "references" entry is citation metadata, not a constituent.
            if element == "references":
                continue
            if nodeContent == "balance":
                balanceName = element
                numBalance += 1
            elif type(nodeContent) is str or len(nodeContent) != 2:
                msg = (
                    f"Composition values must be either a tuple of min/max values, or `balance`, but got: {nodeContent}"
                )
                raise TypeError(msg)
            else:
                constituentMin = nodeContent[0]
                constituentMax = nodeContent[1]
                sumMin += constituentMin
                sumMax += constituentMax
                composition.append(Constituent(element, constituentMin, constituentMax, False))

        if numBalance != 1:
            msg = (
                f"Composition node must have exactly one balance element. Composition node has {numBalance} balance "
                "elements instead."
            )
            raise ValueError(msg)

        if balanceName:
            if sumMin > 100.0:
                raise ValueError("Composition has a minimum composition summation greater than 100.0")
            # The balance spans whatever the explicit constituents leave over.
            if sumMax >= 100.0:
                balanceMin = 0.0
            else:
                balanceMin -= sumMax
            balanceMax -= sumMin
            composition.append(Constituent(balanceName, balanceMin, balanceMax, True))

        return composition
================================================
FILE: armi/matProps/function.py
================================================
# Copyright 2026 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic class for a function to be defined in a YAML."""
class Function:
"""
An base class for computing material Properties. The word "function" here is used in the mathematical sense, to
describe a generic mathematical curve. The various Function types are read in from YAML, and interpreted at run
time. The sub-classes of Function have specific requirements on the YAML format.
"""
# This is the list of all nodes that are associated with functions in the YAML input file. Any node named something
# not in this list is assumed to be an independent variable for the function. This list needs to remain updated if
# any child class adds a new YAML node.
FUNCTION_NODES = {
"type", # All equations have this to define the child class type
"tabulated data", # Optional for all equations, required for table functions
"equation", # Used by SymbolicFunction for the equation definition
"functions", # Used by PiecewiseFunction to define the child functions
"reference temperature", # Optional for all equations
}
def __init__(self, mat, prop):
    """
    Constructor for base Function class.

    Parameters
    ----------
    mat: Material
        Material object with which this Function is associated
    prop: Property
        Property that is represented by this Function
    """
    # Pointer back to the parent Material for this Function.
    self.material = mat
    # The Property this Function represents.
    self.property = prop
    # Keys are independent variable names; values are (min, max) bound tuples.
    self.independentVars: dict = {}
    # A TableFunction containing verification data for this specific function.
    # For actual TableFunction instances, tableData stays None.
    self.tableData = None
    # Reference temperature sentinel: initialized below absolute zero (Celsius)
    # so getReferenceTemperature() can detect "never set".
    self._referenceTemperature: float = -274.0
    # Reference/citation data.
    self._references = []
def clear(self):
    # Drop the verification table data (frees memory once it is no longer needed).
    self.tableData = None
@staticmethod
def isTable():
    """Return True if a subclass of TableFunction, otherwise False; the base implementation is always False."""
    return False
def getReferenceTemperature(self):
    """
    Return the reference temperature, in Celsius, if it is defined.

    Returns
    -------
    float
        Reference temperature, in Celsius

    Raises
    ------
    ValueError
        If the reference temperature was not provided in the material YAML file
        (the constructor sentinel is below absolute zero) or is non-physical.
    """
    # Anything at or above absolute zero (-273.15 C) counts as defined.
    if self._referenceTemperature >= -273.15:
        return self._referenceTemperature
    raise ValueError("Reference temperature is undefined or set to less than absolute zero.")
def getIndependentVariables(self):
    """
    Return the names of the independent variables required by this function.

    Returns
    -------
    list
        list of independent variable strings
    """
    return [*self.independentVars]
def getMinBound(self, var) -> float:
    """
    Return the minimum bound for the requested variable.

    Returns
    -------
    float
        Minimum valid value
    """
    lowerBound, _upperBound = self.independentVars[var]
    return lowerBound
def getMaxBound(self, var) -> float:
    """
    Return the maximum bound for the requested variable.

    Returns
    -------
    float
        Maximum valid value
    """
    _lowerBound, upperBound = self.independentVars[var]
    return upperBound
@property
def references(self) -> list:
    # Read-only accessor for the reference/citation data (returns the live list).
    return self._references
def calc(self, point: dict = None, **kwargs):
    """
    Calculate the quantity of a specific Property.

    The user must provide a "point" dictionary, or kwargs, but not both or neither.

    Parameters
    ----------
    point: dict
        dictionary of independent variable/value pairs
    kwargs:
        dictionary of independent variable/value pairs, same purpose but to allow a nicer API.

    Returns
    -------
    float
        property evaluation

    Raises
    ------
    ValueError
        If both or neither of ``point``/``kwargs`` are provided, or the point is out of range.
    KeyError
        If the point does not supply all required independent variables.
    """
    # This method should take in one dictionary or a set of kwargs, but not both
    if point is not None and kwargs:
        raise ValueError("Please provide either a single dictionary or a set of kwargs, but not both.")
    elif point is None and not kwargs:
        raise ValueError("Please provide at least one input to this method.")

    # select the inputs provided
    if point:
        data = point
    else:
        data = kwargs

    # input sanity checking
    if not self.independentVars.keys() <= data.keys():
        # Fixed message: it previously read "does contain", which inverted the meaning.
        raise KeyError(
            f"Specified point {data} does not contain the correct independent variables: {self.independentVars}"
        )
    elif not self.inRange(data):
        raise ValueError(f"Requested calculation point, {data} is not in the valid range of the function")

    return self._calcSpecific(data)
def inRange(self, point: dict) -> bool:
    """
    Determine whether a point lies within the valid range of the function.

    Parameters
    ----------
    point: dict
        dictionary of independent variable/value pairs

    Returns
    -------
    bool
        True if the point is in the valid range, False otherwise.
    """
    # Every independent variable must sit inside its inclusive (min, max) bounds.
    return all(lower <= point[name] <= upper for name, (lower, upper) in self.independentVars.items())
def __repr__(self):
    """Provides string representation of Function object, using the concrete subclass name."""
    return f"<{self.__class__.__name__}>"
@staticmethod
def _factory(mat, node, prop):
    """
    Parse a property node and use that information to construct a Function object.

    This method is responsible for selecting the appropriate Function subclass from the node's
    ``type`` entry, instantiating it, and parsing the node into the new instance.

    Parameters
    ----------
    mat: Material
        Material object which is associated with the returned Function object
    node: dict
        YAML object representing root level node of material yaml file being parsed
    prop: Property
        Property object that is being populated on the Material

    Returns
    -------
    Function
        Function pointer parsed from the specified property.

    Raises
    ------
    KeyError
        If the ``type`` entry is not one of the supported function types.
    """
    # Imported locally to avoid circular imports: every subclass imports Function.
    from armi.matProps.piecewiseFunction import PiecewiseFunction
    from armi.matProps.symbolicFunction import SymbolicFunction
    from armi.matProps.tableFunction1D import TableFunction1D
    from armi.matProps.tableFunction2D import TableFunction2D

    # Map of YAML `type` strings to their implementing Function subclass.
    funTypes = {
        "symbolic": SymbolicFunction,
        "table": TableFunction1D,
        "two dimensional table": TableFunction2D,
        "piecewise": PiecewiseFunction,
    }
    funcNode = node["function"]
    funcType = str(funcNode["type"])
    func = funTypes[funcType](mat, prop)
    func._parse(node)
    return func
def _setBounds(self, node: dict, var: str):
    """
    Validate and set the min and max bounds for a variable.

    Parameters
    ----------
    node: dict
        dictionary that contains min and max values.
    var: str
        name of the variable
    """
    # Both endpoints are required; anything else is a malformed variable node.
    if not ("min" in node and "max" in node):
        raise KeyError(
            f"The independent variable node, {var}, is not formatted correctly: {node}. If this node is not "
            "intended to be an independent variable, please ensure that the Function.FUNCTION_NODES set is updated "
            "properly."
        )
    lower, upper = float(node["min"]), float(node["max"])
    if upper < lower:
        raise ValueError(f"Maximum bound {upper} cannot be less than the minimum bound {lower}")
    self.independentVars[var] = (lower, upper)
def _parse(self, node):
    """
    Method used to parse property node and fill in appropriate Function data members.

    Parameters
    ----------
    node
        YAML containing object to be parsed
    """
    # Imported locally to avoid circular imports with the Function subclasses.
    from armi.matProps.reference import Reference
    from armi.matProps.tableFunction1D import TableFunction1D
    from armi.matProps.tableFunction2D import TableFunction2D

    funcNode = node["function"]
    # Optional reference temperature for this property.
    refTempNode = funcNode.get("reference temperature", None)
    if refTempNode is not None:
        self._referenceTemperature = float(refTempNode)
    funcType = str(funcNode["type"])
    # Collect any cited data sources for this property.
    references = node.get("references", [])
    for ref in references:
        self._references.append(Reference._factory(ref))
    # Attach tabulated data: either this function IS the table, or it carries a
    # companion table alongside its analytic form.
    tabulatedNode = node.get("tabulated data", None)
    if tabulatedNode:
        if funcType == "two dimensional table":
            self.tableData = TableFunction2D(self.material, self.property)
        else:
            self.tableData = TableFunction1D(self.material, self.property)
        if self.isTable():
            # This function is itself the table; parse the data into self directly.
            self._parseSpecific(node)
            self.tableData = self
        else:
            self.tableData._parseSpecific(node)
    elif self.isTable():
        # A table-type function without tabulated data is unusable.
        raise KeyError("Missing node `tabulated data`")
    # Every key under `function` that is not a reserved node name is treated as an
    # independent variable carrying min/max bounds.
    for var in funcNode:
        if var not in self.FUNCTION_NODES:
            self._setBounds(funcNode[var], var)
    # Non-table functions parse their specific form last, after bounds are set
    # (e.g. SymbolicFunction evaluates itself at the max bound during parsing).
    if not self.isTable():
        self._parseSpecific(node)
def _parseSpecific(self, node):
    """
    Abstract method that is used to parse information specific to Function child classes.

    Parameters
    ----------
    node
        YAML containing object information to parse and fill in Function

    Raises
    ------
    NotImplementedError
        Always; concrete subclasses must override this method.
    """
    raise NotImplementedError()
def _calcSpecific(self, point: dict) -> float:
    """
    Private method that contains the analytic expression used to return a property value.

    Parameters
    ----------
    point : dict
        dictionary of independent variable/value pairs

    Returns
    -------
    float
        property evaluation at specified independent variable point

    Raises
    ------
    NotImplementedError
        Always; concrete subclasses must override this method.
    """
    raise NotImplementedError()
================================================
FILE: armi/matProps/interpolationFunctions.py
================================================
# Copyright 2026 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Some basic interpolation routines."""
import math
def findIndex(val: float, x: list) -> int:
    """
    Find the location of the provided value in the provided collection.

    Parameters
    ----------
    val: float
        Value whose index is needed in x
    x: list
        List of numbers

    Returns
    -------
    int
        Index ``i`` such that ``x[i] <= val <= x[i+1]`` (smallest such ``i``).

    Raises
    ------
    ValueError
        If ``val`` falls outside the span of ``x``.
    """
    if val < x[0]:
        raise ValueError(f"Value {val} out of bounds: {x}")
    # Walk consecutive pairs until one brackets the value.
    for idx, (lo, hi) in enumerate(zip(x, x[1:])):
        if lo <= val <= hi:
            return idx
    raise ValueError(f"Value {val} out of bounds: {x}")
def linearLinear(Tc: float, x: list, y: list) -> float:
    """
    Find the approximate value on a XY table assuming a linear-linear curve.

    Parameters
    ----------
    Tc: float
        Independent variable at which an interpolation value is desired.
    x: list
        List of independent variable values
    y: list
        List of dependent variable values

    Returns
    -------
    float
        Final interpolation value based on a linear-linear interpolation.
    """
    # Locate the bracketing interval, then interpolate linearly inside it.
    idx = findIndex(Tc, x)
    x0, x1 = x[idx], x[idx + 1]
    y0, y1 = y[idx], y[idx + 1]
    return (Tc - x0) / (x1 - x0) * (y1 - y0) + y0
def logLinear(Tc: float, x: list, y: list) -> float:
    """
    Find the approximate value on a XY table assuming a log-linear curve.

    Parameters
    ----------
    Tc: float
        Independent variable at which an interpolation value is desired.
    x: list
        List of independent variable values
    y: list
        List of dependent variable values

    Returns
    -------
    float
        Final interpolation value based on a log-linear interpolation.
    """
    # Locate the bracketing interval, then interpolate linearly in log10(x) space.
    idx = findIndex(Tc, x)
    logX0 = math.log10(x[idx])
    logX1 = math.log10(x[idx + 1])
    return (math.log10(Tc) - logX0) / (logX1 - logX0) * (y[idx + 1] - y[idx]) + y[idx]
================================================
FILE: armi/matProps/material.py
================================================
# Copyright 2026 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""How matProps defines a material class."""
import hashlib
from pathlib import Path
from ruamel.yaml import YAML
from armi.matProps import prop
from armi.matProps.constituent import Constituent
from armi.matProps.function import Function
from armi.matProps.materialType import MaterialType
class Material:
    """
    The Material class is a generic container for all Material types, whether they contain ASME properties, fluid
    properties, or steel properties.

    It may be necessary to have multiple Material definitions for a single material containing different phases.
    """

    validFileFormatVersions = [3.0, "TESTS"]
    """File format versions accepted by dataCheckMaterialFile."""

    def __init__(self):
        """Constructor for Material class."""
        self._saved = False
        """Boolean denoting whether or not Material object is saved in materials dict."""
        self.materialType = None
        """Enum representing type for the Material object"""
        self.composition = []
        """List of Constituent objects representing composition of Material."""
        self.name = None
        """Name of Material object."""
        self._sha1 = None
        """SHA1 value of parsed material file."""

    def __repr__(self):
        """Provides string representation for Material class."""
        # BUG FIX: previously returned an empty string, which made debugging painful.
        return f"<Material: {self.name}>"

    def hash(self) -> str:
        """Returns the SHA1 hash value of a Material instance (None until a file is loaded)."""
        return self._sha1

    def saved(self) -> bool:
        """
        Returns a bool value indicating whether the Material has been stored internally in the matProps.materials map
        via matProps.addMaterial().
        """
        return self._saved

    def save(self):
        """Sets Material._saved flag to True."""
        self._saved = True

    @staticmethod
    def dataCheckMaterialFile(filePath, rootNode):
        """
        This is a partial data check of the material data file.

        Checks the first level of data keywords and also checks that the file format is a valid version.

        Parameters
        ----------
        filePath: str
            Path containing name of YAML file whose file format and property nodes are checked.
        rootNode: dict
            Root YAML node of file parsed from filePath.

        Raises
        ------
        ValueError
            If the file format version is not supported.
        KeyError
            If a top-level node is neither reserved metadata nor a known property.
        """
        file_format = Material.getNode(rootNode, "file format")
        if file_format not in Material.validFileFormatVersions:
            msg = f"Invalid file format version `{file_format}` used in: {filePath}"
            raise ValueError(msg)
        for propName in rootNode:
            # These top-level nodes are metadata, not material properties.
            if propName in {"composition", "material type", "file format"}:
                continue
            if not prop.contains(propName):
                msg = f"Invalid property node `{propName}` found in: {filePath}"
                raise KeyError(msg)

    @staticmethod
    def getValidFileFormatVersions():
        """Get a vector of strings with all of the valid file format versions."""
        return Material.validFileFormatVersions

    @staticmethod
    def getNode(node: dict, subnodeName: str):
        """
        Searches a node for a child element and returns it.

        Parameters
        ----------
        node: dict
            Parent level node from which a child element is searched.
        subnodeName: str
            Name of the child element that is queried from node.

        Raises
        ------
        KeyError
            If the child element is not present.
        """
        if subnodeName not in node:
            msg = f"Missing YAML node `{subnodeName}`"
            raise KeyError(msg)
        return node[subnodeName]

    def loadNode(self, node: dict):
        """
        Loads YAML and parses information to fill in Material data members including all relevant Function objects.

        Parameters
        ----------
        node: dict
            Material definition, like a dict that is loaded from a YAML file.
        """
        self.materialType = MaterialType.fromString(self.getNode(node, "material type"))
        self.composition = Constituent.parseComposition(self.getNode(node, "composition"))
        for p in prop.properties:
            if p.name and p.name in node:
                setattr(self, p.symbol, Function._factory(self, node[p.name], p))
            else:
                # Any property not in the input file will be set to None.
                setattr(self, p.symbol, None)

    def loadFile(self, filePath: str):
        """
        Loads yaml file and parses information to fill in Material data members including all relevant Function objects.

        Parameters
        ----------
        filePath: str
            Path containing name of YAML file to parse.
        """
        # load the file path
        y = YAML(pure=True)
        node = y.load(Path(filePath))

        # grab the material name from the file name (strip a .yaml/.yml extension)
        n = Path(filePath).name
        if n.lower().endswith(".yaml"):
            n = n[:-5]
        elif n.lower().endswith(".yml"):
            n = n[:-4]
        self.name = n

        # Generate SHA1 value of the raw file bytes and set data member
        sha1 = hashlib.sha1()
        with open(filePath, "rb") as materialFile:
            sha1.update(materialFile.read())
        self._sha1 = sha1.hexdigest()

        self.dataCheckMaterialFile(filePath, node)
        self.loadNode(node)
================================================
FILE: armi/matProps/materialType.py
================================================
# Copyright 2026 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Some definition of material types: fluid, fuel, metal, etc."""
class MaterialType:
    """
    A container for the methods used to differentiate between the types of materials.

    The MaterialType class is used to determine whether the material contain ASME, fluid, fuel, or metal properties. It
    may also be used for the phase of the material.
    """

    types = {
        "Fuel": 1,
        "Metal": 2,
        "Fluid": 4,
        "Ceramic": 8,
        "ASME2015": 16,
        "ASME2017": 32,
        "ASME2019": 64,
    }
    """Dictionary mapping material type strings to enum values."""

    def __init__(self, value: int = 0):
        """
        Constructor for MaterialType class.

        Parameters
        ----------
        value: int
            Integer enum value denoting material type.
        """
        self._value: int = value
        """Enum value representing type of material."""

    @staticmethod
    def fromString(name: str) -> "MaterialType":
        """
        Provides MaterialType object from a user provided string.

        Parameters
        ----------
        name: str
            String from which a MaterialType object will be derived.

        Returns
        -------
        MaterialType

        Raises
        ------
        KeyError
            If ``name`` is not a recognized material type.
        """
        value: int = MaterialType.types.get(name, 0)
        if value == 0:
            msg = f"Invalid material type `{name}`, valid names are: {list(MaterialType.types.keys())}"
            raise KeyError(msg)
        return MaterialType(value)

    def __repr__(self):
        """Provides string representation of MaterialType instance."""
        name = "None"
        for typ, val in self.types.items():
            if val == self._value:
                name = typ
                break
        # BUG FIX: previously returned an empty string, discarding the resolved name.
        return f"<MaterialType: {name}>"

    def __eq__(self, other) -> bool:
        """
        Support for "==" comparison operator.

        Parameters
        ----------
        other: MaterialType or int
            RHS object that is compared to MaterialType instance.

        Returns
        -------
        bool
            True if objects ._value data members are equivalent, False otherwise.

        Raises
        ------
        TypeError
            If ``other`` is neither an int nor a MaterialType.
        """
        if type(other) is int:
            return self._value == other
        elif type(other) is MaterialType:
            return self._value == other._value
        else:
            raise TypeError(f"Cannot compare MaterialType to type {type(other)}")

    def __hash__(self):
        """Hash on the underlying enum value, consistent with ``__eq__`` (defining only
        ``__eq__`` would otherwise make instances unhashable)."""
        return hash(self._value)
================================================
FILE: armi/matProps/piecewiseFunction.py
================================================
# Copyright 2026 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A piecewise-defined function for used in material YAML files.
Each piece can be of any other type that matProps supports.
"""
import math
from armi.matProps.function import Function
class PiecewiseFunction(Function):
    """
    A piecewise function is composed of many other subfunctions, any of which can be any subclass of the Function type,
    including ``PiecewiseFunction``.

    The PiecewiseFunction uses the ``Function.inRange`` method to determine which sub-function should be used for
    computing the quantity. An example with the YAML format is::

        function:
            <variable>:
                min: <lower bound>
                max: <upper bound>
            type: piecewise
            functions:
                - function:
                      <variable>:
                          min: <lower bound>
                          max: <upper bound>
                      type: ...
                  tabulated data: *alias  # it is suggested that the same table is used for the entire range
                - function:
                      <variable>:
                          min: <lower bound>
                          max: <upper bound>
                      type: ...
                  tabulated data: *alias  # it is suggested that the same table is used for the entire range
    """

    def __init__(self, mat, prop):
        """
        Constructor for PiecewiseFunction object.

        Parameters
        ----------
        mat: Material
            Material object with which this PiecewiseFunction is associated
        prop: Property
            Property that is represented by this PiecewiseFunction
        """
        super().__init__(mat, prop)
        self.functions = []
        """List of Function objects used to compose PiecewiseFunction object."""

    def __repr__(self):
        """Provides string representation of PiecewiseFunction object."""
        # BUG FIX: previously returned an empty string.
        return f"<PiecewiseFunction with {len(self.functions)} sub-functions>"

    def clear(self) -> None:
        """Remove all sub-functions from this piecewise definition."""
        # The old ``del fun`` loop only unbound the loop variable (a no-op);
        # clearing the list is all that is needed.
        self.functions.clear()

    def _parseSpecific(self, node):
        """
        Parses nodes that are specific to PiecewiseFunction objects.

        Parameters
        ----------
        node : dict
            Dictionary containing the node whose values will be parsed to fill object.

        Raises
        ------
        KeyError
            If a child function does not declare the same independent variables as the parent.
        ValueError
            If the valid ranges of two child functions overlap.
        """

        def checkOverlap(func1, func2):
            """Checks if the valid range for two functions overlaps on all dimensions."""
            for var in self.independentVars:
                min1, max1 = func1.independentVars[var]
                min2, max2 = func2.independentVars[var]
                if math.isclose(max1, min2) or math.isclose(min1, max2):
                    # This handles floating point comparison. Adjoining regions is allowed.
                    return False
                if max1 < min2 or min1 > max2:
                    # no overlap on this dimension, so no overlap overall
                    return False
            # Overlap on all dimensions
            return True

        for subFunctionDef in node["function"]["functions"]:
            func = self._factory(self.material, subFunctionDef, self.property)
            self.functions.append(func)

        # Ensure bounds have same variables in parent and child functions.
        for subFunc in self.functions:
            for var in self.independentVars:
                if var not in subFunc.independentVars:
                    raise KeyError(
                        "Piecewise child function must have same variables for valid range as main function."
                    )

        # Check for overlapping regions between every pair of child functions.
        for i, func1 in enumerate(self.functions):
            for func2 in self.functions[i + 1 :]:
                if checkOverlap(func1, func2):
                    raise ValueError(f"Piecewise child functions overlap: {func1}, {func2}")

    def _calcSpecific(self, point: dict) -> float:
        """
        Private method that contains the analytic expression used to return a property value.

        Delegates to the first sub-function whose valid range contains the point.

        Parameters
        ----------
        point: dict
            dictionary of independent variable/value pairs

        Returns
        -------
        float
            property evaluation at specified independent variable point

        Raises
        ------
        ValueError
            If no sub-function's valid range contains the point.
        """
        for subFunc in self.functions:
            if subFunc.inRange(point):
                return subFunc.calc(point)
        raise ValueError("PiecewiseFunction error, could not evaluate")
================================================
FILE: armi/matProps/point.py
================================================
# Copyright 2026 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A single data point in a YAML file."""
class Point:
    """A single data point in a YAML file."""

    def __init__(self, var1, var2, val):
        """
        Constructor for Point class.

        Parameters
        ----------
        var1: float
            Independent variable 1
        var2: float
            If provided, independent variable 2
        val: float
            Dependent variable value for property
        """
        self.variable1 = var1
        """Value of first independent variable."""
        self.variable2 = var2
        """Value of second independent variable."""
        self.value = val
        """Value of Property dependent value"""

    def __repr__(self):
        """Provides string representation of Point object."""
        # BUG FIX: the old repr was truncated (`f" {self.value}>"`); show both
        # independent variables alongside the dependent value.
        return f"<Point ({self.variable1}, {self.variable2}): {self.value}>"
================================================
FILE: armi/matProps/prop.py
================================================
# Copyright 2026 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
In the parlance of matProps, a material 'Property' is a physical characteristic of the material that can be described
mathematically. For instance, density, specific heat, specific gravity, coefficient of linear expansion, etc.
"""
# Global registry of Property objects; populated by initialize() at import time.
properties = set()

# Default property definitions keyed by symbol. Each value is a tuple of
# (name, units[, TeX symbol]); when the third entry is omitted, the symbol itself
# is used as the TeX representation (see Property.__init__ / initialize()).
# NOTE(review): some units strings (e.g. "U(...)U" wrappers) look like mangled
# markup from the data source — verify before using them for display.
PROPERTIES = {
    "alpha_d": ("thermal diffusivity", "m^2/s", r"(\alpha_d)"),
    "alpha_inst": ("instantaneous coefficient of thermal expansion", r"(1/^\circ{}C)", r"(\alpha_{inst})"),
    "alpha_mean": ("mean coefficient of thermal expansion", r"(1/^\circ{}C)", r"(\alpha_{mean})"),
    "c_p": ("specific heat capacity", r"U(J/(kg\dot{}^\circ{}C))U"),
    "dH_fus": ("enthalpy of fusion", "J/kg", r"(\Delta H_{f})"),
    "dH_vap": ("latent heat of vaporization", "J/kg", r"(\Delta H_{v})"),
    "dl_l": ("linear expansion", "unitless", r"\Delta l_{percent}"),
    "dV": ("volumetric expansion", r"m^3/(^\circ{}C)", r"\Delta V"),
    "E": ("Young's modulus", "Pa"),
    "Elong": ("elongation", "%", r"\epsilon"),
    "eps_iso": ("strain from isochronous stress-strain curve", "unitless"),
    "eps_t": ("design fatigue strain range", "unitless"),
    "f": ("factor f from ASME.III.5 Fig. HBB-T-1432-2", "unitless"),
    "G": ("electrical conductance", r"U(1/(\Omega\dot m))U"),
    "gamma": ("surface tension", r"(N\dot m)", r"(\gamma)"),
    "H": ("enthalpy", "J/kg"),
    "H_calc_T": ("temperature from enthalpy", r"(^\circ{}C)", r"(^\circ{}C)"),
    "HBW": ("Brinell Hardness", "BHN"),
    "k": ("thermal conductivity", r"U(W/(m\dot{}^\circ{}C))U"),
    "K_IC": ("fracture toughness", r"MPa\dot\sqrt(m)", r"K_{IC}"),
    "kappa": ("isothermal compressibility", r"(1/Pa)", r"(\kappa)"),
    "Kv_prime": ("factor Kv' from ASME.III.5 Fig. HBB-T-1432-3", "unitless", r"K_{v}^{'}"),
    "mu_d": ("dynamic viscosity", r"(Pa\dot{}s)", r"(\mu_d)"),
    "mu_k": ("kinematic viscosity", "m^2/s", r"(\mu_k)"),
    "nu": ("Poisson's ratio", "unitless", r"(\nu)"),
    "nu_g": ("vapor specific volume", "m^3/kg", r"\nu"),
    "P_sat": ("vapor pressure", r"(Pa)", "P_{sat}"),
    "rho": ("density", "kg/m^3", r"(\rho)"),
    "S": ("shear modulus", "Pa"),
    "Sa": ("allowable stress", "Pa"),
    "SaFat": ("design fatigue stress", "Pa"),
    "Sm": ("design stress", "Pa"),
    "Smt": ("service reference stress", "Pa"),
    "So": ("design reference stress", "Pa"),
    "Sr": ("stress to rupture", "Pa"),
    "St": ("time dependent design stress", "Pa"),
    "Su": ("tensile strength", "Pa"),
    "Sy": ("yield strength", "Pa"),
    "T_boil": ("boiling temperature", r"(^\circ{}C)", r"(T_{boil})"),
    "T_liq": ("liquidus temperature", r"(^\circ{}C)", r"(T_{liq})"),
    "T_melt": ("melting temperature", r"(^\circ{}C)", r"(T_{melt})"),
    "T_sol": ("solidus temperature", r"(^\circ{}C)", r"(T_{sol})"),
    "tMaxSr": ("allowable time to rupture", "s"),
    "tMaxSt": ("allowable time to allowable stress", "s"),
    "TSRF": ("tensile strength reduction factor", "unitless"),
    "v_sound": ("speed of sound", "m/s", r"(v_{sound})"),
    "WSRF": ("weld strength reduction factor", "unitless"),
    "YSRF": ("yield strength reduction factor", "unitless"),
}
class Property:
    """A Property of a material. Most properties are computed as temperature-dependent functions."""

    def __init__(self, name: str, symbol: str, units: str, tex: str = None):
        """
        Constructor for Property class.

        Parameters
        ----------
        name: str
            Name of the property.
        symbol: str
            Symbol of the property.
        units: str
            String representing the units of the property.
        tex: str (optional)
            TeX symbol used to represent the property. Defaults to symbol.
        """
        self.name: str = name
        """Name of the Property, used to retrieve the property from the data file"""
        self.symbol: str = symbol
        """Symbol of the property, same as the module-level attribute and Material attribute"""
        self.units: str = units
        """Units of the Property"""
        self.TeX: str = tex if tex is not None else symbol
        """math-style TeX symbol"""

    def __repr__(self):
        """Provides string representation of Property instance."""
        # BUG FIX: previously returned an empty string.
        return f"<Property {self.symbol}: {self.name} ({self.units})>"
def contains(name: str):
    """
    Checks to see if a string representing a desired property is in the global properties list.

    Parameters
    ----------
    name: str
        Name of the property whose value is searched for in global properties list.

    Returns
    -------
    bool
        True if name is in properties, False otherwise.
    """
    global properties
    for existing in properties:
        if existing.name == name:
            return True
    return False
def defProp(symbol: str, name: str, units: str, tex: str = None):
    """
    Construct a Property object and register it in the global properties set.

    Parameters
    ----------
    symbol: str
        Symbol of the property.
    name: str
        Name of the property.
    units: str
        String representing the units of the property.
    tex: str (optional)
        TeX symbol used to represent the property. Defaults to symbol.

    Raises
    ------
    KeyError
        If a property with this name has already been defined.
    """
    global properties
    if contains(name):
        raise KeyError(f"Property already defined: {name}")
    properties.add(Property(name, symbol, units, tex if tex is not None else symbol))
def initialize():
    """Construct the global list of default properties in matProps."""
    for symbol, definition in PROPERTIES.items():
        # Each definition is (name, units[, tex]); tex falls back to the symbol.
        name, units = definition[0], definition[1]
        tex = definition[2] if len(definition) > 2 else None
        defProp(symbol, name, units, tex)


# Populate the registry at import time.
initialize()
================================================
FILE: armi/matProps/reference.py
================================================
# Copyright 2026 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""All data in the material YAMLs need to have a reference for the information source."""
UNDEFINED_REF_DATA = "NONE"
class Reference:
    """
    A container for the source of the material's data. The Reference class is used to manage the material data's source
    information and have methods to extract the data for generating the reference section of documentation.
    """

    def __init__(self):
        """Start with empty reference text and document type."""
        self._ref = ""
        """Entire reference in a single string"""
        self._type = ""
        """Type of document (open literature|export controlled|test|your company name)"""

    def __repr__(self):
        """Return the reference text, with the document type appended when known."""
        if self._ref and self._type:
            return f"{self._ref} ({self._type})"
        if self._ref:
            return self._ref
        return UNDEFINED_REF_DATA

    @staticmethod
    def _factory(node):
        """
        Sets Reference data from a given reference node.

        Parameters
        ----------
        node: dict
            Dictionary representing a child element from the "references" node.

        Returns
        -------
        Reference
            Reference object with data parsed from node.
        """
        reference = Reference()
        # Copy each non-empty node entry onto the corresponding private attribute.
        for nodeKey, attrName in (("ref", "_ref"), ("type", "_type")):
            value = node[nodeKey]
            if value:
                setattr(reference, attrName, str(value))
        return reference

    def getRef(self):
        """Accessor which returns _ref value."""
        return self._ref

    def getType(self):
        """Accessor which returns _type value."""
        return self._type
================================================
FILE: armi/matProps/symbolicFunction.py
================================================
# Copyright 2026 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A generic symbolic function support for curves in a material YAML file."""
# Import math so that it is available for the eval statement
import math
from copy import copy
from sympy import symbols
from sympy.parsing import parse_expr
from sympy.utilities.lambdify import lambdastr
from armi.matProps.function import Function
class SymbolicFunction(Function):
    """
    A symbolic function. A functional form defined in the YAML file is parsed.

    An example with the YAML format is::

        function:
            <variable>:
                min: <lower bound>
                max: <upper bound>
            type: symbolic
            equation: <expression>
    """

    def __init__(self, mat, prop):
        """
        Constructor for SymbolicFunction object.

        Parameters
        ----------
        mat: Material
            Material object with which this SymbolicFunction is associated
        prop: Property
            Property that is represented by this SymbolicFunction
        """
        super().__init__(mat, prop)
        # Callable built by eval-ing the lambdified sympy expression (see _parseSpecific).
        self.eqn = None
        # Source string of the lambdified expression; persisted so the callable can be
        # rebuilt after unpickling (see __getstate__/__setstate__).
        self.sympyStr = None

    def _parseSpecific(self, node):
        """
        Parses nodes that are specific to Symbolic Function object.

        Parameters
        ----------
        node: dict
            Dictionary containing the node whose values will be parsed to fill object.

        Raises
        ------
        ValueError
            If the equation cannot be parsed or evaluated at the maximum bound.
        """
        eqn = str(node["function"]["equation"])
        try:
            symbolList = []
            for var in self.independentVars:
                symbolList.append(symbols(var))
            sympyEqn = parse_expr(eqn, evaluate=False)
            self.sympyStr = lambdastr(symbolList, sympyEqn)
            self.eqn = eval(self.sympyStr)
            # Try evaluating the function at the maximum bound. This should result in a number if the equation is
            # properly formatted. Bad equations will throw an error either in the `lambdastr` `eval` or this `float( )`
            # line. This is important to catch poor equations now before they cause problems intermittently later (only
            # when calc is called for that equation).
            point = []
            for var in self.independentVars:
                point.append(self.getMaxBound(var))
            float(self.eqn(*point))
        except Exception as e:
            raise ValueError(
                f"Equation provided could not be interpreted:"
                f" {eqn}, {getattr(self, 'sympyStr', 'Symbolic string not created yet.')}"
            ) from e

    def _calcSpecific(self, point: dict) -> float:
        """
        Returns an evaluation for a symbolic function.

        Parameters
        ----------
        point: dict
            dictionary of independent variable/value pairs

        Returns
        -------
        float
            property evaluation at specified independent variable point

        Raises
        ------
        ValueError
            If the expression evaluates to a complex number or NaN at the point.
        """
        result = self.eqn(*[point[var] for var in self.independentVars])
        if isinstance(result, complex):
            raise ValueError(f"Function is undefined at {point}. Evaluates to complex number: {result}")
        if math.isnan(result):
            raise ValueError(f"Function is undefined at {point}. Evaluates to not a number.")
        return float(result)

    def __repr__(self):
        """Provides string representation of SymbolicFunction object."""
        # BUG FIX: previously returned an empty string; show the lambdified expression.
        return f"<SymbolicFunction: {self.sympyStr}>"

    def __getstate__(self):
        """Drop the un-picklable lambda; it is rebuilt from sympyStr on unpickle."""
        d = copy(self.__dict__)
        d["eqn"] = None
        return d

    def __setstate__(self, s):
        """Restore state and rebuild the lambda callable from its source string."""
        self.__dict__ = s
        self.eqn = eval(self.sympyStr)
================================================
FILE: armi/matProps/tableFunction.py
================================================
# Copyright 2026 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple implementation for a simple table to replace analytic curves in the YAML data files."""
from armi.matProps.function import Function
class TableFunction(Function):
    """An abstract TableFunction; the base class for other table lookup methods."""

    @staticmethod
    def isTable():
        """Tables report True so ``Function._parse`` routes tabulated data to them."""
        return True

    def _setBounds(self, node: dict, var: str):
        """
        Validate and set the min and max bounds for a variable.

        Abstract: concrete table classes derive the bounds from their tabulated data.

        Parameters
        ----------
        node: dict
            dictionary that contains min and max values.
        var: str
            name of the variable

        Raises
        ------
        NotImplementedError
            Always; subclasses must override.
        """
        # BUG FIX: the override previously took only (self, node), breaking the
        # base-class contract — Function._parse invokes self._setBounds(node, var)
        # with two arguments, as the concrete subclasses expect.
        raise NotImplementedError()
================================================
FILE: armi/matProps/tableFunction1D.py
================================================
# Copyright 2026 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple implementation for a one dimensional table to replace analytic curves in the YAML data files."""
from armi.matProps.interpolationFunctions import linearLinear
from armi.matProps.tableFunction import TableFunction
class TableFunction1D(TableFunction):
    """
    A one dimensional table function, containing pairs of data.

    An example with the YAML format is::

        function:
            <independent variable name>: 0
            type: table
            tabulated data:
                - [0.0, 0.0]
                - [50, 1e99]
                - [100, 2e-99]
                - [150, 100]
    """

    def __init__(
        self,
        mat,
        prop,
    ):
        """
        Constructor for TableFunction1D object.

        Parameters
        ----------
        mat: Material
            Material object with which this TableFunction1D is associated
        prop: Property
            Property that is represented by this TableFunction1D
        """
        super().__init__(mat, prop)
        # Parallel lists: independent variable values (x) and property values (y).
        self._var1s = []
        self._values = []

    def __repr__(self):
        """Provides string representation of TableFunction1D object."""
        # NOTE(review): returns an empty string; this looks like a truncated
        # representation — confirm the intended repr before relying on it.
        return ""

    def _setBounds(self, node: dict, var: str):
        """
        Validate and set the min and max bounds for a variable.

        Parameters
        ----------
        node: dict
            dictionary that contains min and max values. Unused here: the bounds are
            derived directly from the tabulated independent-variable values.
        var: str
            name of the variable
        """
        self.independentVars[var] = (float(min(self._var1s)), float(max(self._var1s)))

    def _parseSpecific(self, prop):
        """
        Parses a temperature dependent table function.

        Parameters
        ----------
        prop: dict
            Node containing tabulated data that needs to be parsed.
        """
        for pair in prop["tabulated data"]:
            # Each entry is an [x, y] pair; coerce both to float so ints/strings in YAML are fine.
            self._var1s.append(float(pair[0]))
            self._values.append(float(pair[1]))

    def _calcSpecific(self, point: dict) -> float:
        """
        Performs a linear interpolation on tabular data.

        Parameters
        ----------
        point: dict
            dictionary of independent variable/value pairs

        Returns
        -------
        float
            Linearly interpolated property value at the requested point.

        Raises
        ------
        ValueError
            If ``point`` does not provide this function's independent variable.
        """
        # A 1D table has exactly one independent variable; grab its name.
        var = next(iter(self.independentVars))
        if var in point:
            return linearLinear(point[var], self._var1s, self._values)
        # Fixed grammar: the point does NOT contain the required variable.
        raise ValueError(f"Specified point does not contain the correct independent variables: {self.independentVars}")
================================================
FILE: armi/matProps/tableFunction2D.py
================================================
# Copyright 2026 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple implementation for a 2D table to replace analytic curves in the YAML data files."""
import copy
from armi.matProps.interpolationFunctions import findIndex, logLinear
from armi.matProps.tableFunction import TableFunction
class TableFunction2D(TableFunction):
    """
    A 2 dimensional table function. The input format, below, is permitted to have null values in it, which if used
    during the calculation/interpolation will throw a ValueError.

    The YAML format demonstrating the two dimensional tabulated data is::

        function:
            <column variable name>: 0
            <row variable name>: 1
            type: two dimensional table
            tabulated data:
                - [null, [ 375., 400., 425., 450., 475., 500., 525., 550., 575., 600., 625., 650.]]
                - [1., [ 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]]
                - [10., [ 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]]
                - [300., [ 1., 1., 1., 1., 1., 1., 1., 1., .97, .91, .87, .84]]
                - [30000., [ 1., 1., 1., 1., .93, .88, .83, .80, .75, null, null, null]]
                - [300000.,[ 1., 1., 1., .89, .83, .79, .74, .70, .66, null, null, null]]
    """

    def __init__(self, mat, prop):
        """
        Constructor for TableFunction2D object.

        Parameters
        ----------
        mat: Material
            Material object with which this TableFunction2D is associated
        prop: Property
            Property that is represented by this TableFunction2D
        """
        super().__init__(mat, prop)
        # Row-axis values (e.g. time or cycle), one per data row.
        self._rowValues = []
        # Column-axis values (e.g. temperature), from the table's header row.
        self._columnValues = []
        # Property values stored column-major: self._data[cIndex] is the list of values
        # down column cIndex, parallel to self._rowValues. Entries may be None ("null").
        self._data = []

    def __repr__(self):
        """Provides string representation of TableFunction2D object."""
        # NOTE(review): returns an empty string; looks like a truncated representation — confirm intent.
        return ""

    def _setBounds(self, node: int, var: str):
        """
        Validate and set the min and max bounds for a variable.

        Parameters
        ----------
        node: int
            This number is zero for columns, and one for rows.
        var: str
            name of the variable

        Notes
        -----
        The method declaration here does not match the one in the super class Function. The type of the "node"
        argument should be dict, but it is int. This is a surprising and awkward asymmetry.
        """
        if node == 0:
            # The column variable must be the first key in independentVars. If a row
            # variable was registered first, stash it and re-insert it afterwards.
            cache = None
            if self.independentVars:
                cache = copy.deepcopy(self.independentVars)
                self.independentVars = {}
            self.independentVars[var] = (
                float(min(self._columnValues)),
                float(max(self._columnValues)),
            )
            if cache:
                self.independentVars[list(cache.keys())[0]] = list(cache.values())[0]
        elif node == 1:
            self.independentVars[var] = (float(min(self._rowValues)), float(max(self._rowValues)))
        else:
            raise ValueError(f"The node value must be 0 or 1, but was: {node}")

    def _parseSpecific(self, prop):
        """
        Parses a 2D table function.

        Parameters
        ----------
        prop: dict
            Node containing tabulated data that needs to be parsed.
        """
        tabulatedData = prop["tabulated data"]
        if not tabulatedData:
            # Nothing to parse; leave the table empty (matches previous behavior on empty input).
            return
        # The first row is the header: its leading entry is a placeholder (null) and its
        # list holds the column-axis values. One column-major value list per column.
        for cValNode in tabulatedData[0][1]:
            self._columnValues.append(float(cValNode))
            self._data.append([])
        for rowNode in tabulatedData[1:]:
            self._rowValues.append(float(rowNode[0]))
            var1DependentData = rowNode[1]
            for cIndex in range(len(self._columnValues)):
                value = var1DependentData[cIndex]
                # Preserve "null"/None markers as None; interpolating into them fails later.
                self._data[cIndex].append(None if value in ("null", None) else float(value))

    def _calcSpecific(self, point: dict) -> float:
        """
        Performs 2D interpolation on tabular data.

        Interpolation is log-linear along the row axis and linear along the column axis.

        Parameters
        ----------
        point: dict
            dictionary of independent variable/value pairs

        Returns
        -------
        float
            The interpolated property value.

        Raises
        ------
        ValueError
            If ``point`` is missing either independent variable.
        """
        varNames = list(self.independentVars)
        columnVar = varNames[0]
        rowVar = varNames[1]
        if columnVar in point and rowVar in point:
            columnVal = point[columnVar]
            rowVal = point[rowVar]
        else:
            # Fixed grammar: the point does NOT contain the required variables.
            raise ValueError(f"Specified point does not contain the correct independent variables: {self.independentVars}")
        # Interpolate down the two columns bracketing columnVal, then linearly blend
        # the two column results by columnVal's position between them.
        cIndex = findIndex(columnVal, self._columnValues)
        rVal0 = logLinear(rowVal, self._rowValues, self._data[cIndex])
        rVal1 = logLinear(rowVal, self._rowValues, self._data[cIndex + 1])
        cVal0 = self._columnValues[cIndex]
        cVal1 = self._columnValues[cIndex + 1]
        return (columnVal - cVal0) / (cVal1 - cVal0) * (rVal1 - rVal0) + rVal0
================================================
FILE: armi/matProps/tests/__init__.py
================================================
# Copyright 2026 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic testing tools for the matProps package."""
import math
import unittest
from armi.matProps.material import Material
class MatPropsFunTestBase(unittest.TestCase):
    """Base class that provides some common functionality for testing matProps Functions."""

    def setUp(self):
        # Keep a short label for the running test: the unittest id's last component
        # with any leading "test_" prefix removed.
        name = self.id().split(".")[-1]
        prefix = "test_"
        self.testName = name[len(prefix) :] if name.startswith(prefix) else name

    @staticmethod
    def polynomialEvaluation(powerMap, value):
        """
        Perform a polynomial evaluation at a specified value.

        Parameters
        ----------
        powerMap : dict
            Dictionary mapping power to its corresponding coefficient.
        value: float
            Independent variable to evaluate the polynomial at.

        Returns
        -------
        float
            The polynomial evaluation
        """
        total = 0
        for power, coefficient in powerMap.items():
            total += coefficient * value**power
        return total

    @staticmethod
    def powerLawEvaluation(coefficients, value):
        """Perform a power law evaluation at a specified value (intercept defaults to 0.0, outer multiplier to 1.0)."""
        shifted = value + coefficients["inner adder"]
        powerTerm = coefficients.get("outer multiplier", 1.0) * shifted ** coefficients["exponent"]
        return coefficients.get("intercept", 0.0) + powerTerm

    @staticmethod
    def hyperbolicEvaluation(coefficients, value):
        """Perform a hyperbolic function evaluation at a specified value."""
        argument = (value + coefficients["inner adder"]) / coefficients["inner denominator"]
        return coefficients["intercept"] + coefficients["outer multiplier"] * math.tanh(argument)

    @staticmethod
    def createEqnPoly(coefficients):
        """Create a symbolic polynomial equation string from a dictionary mapping powers to coefficients."""
        # join() avoids a leading " + " without special-casing the first term
        return " + ".join(f"{value}*T**{power}" for power, value in coefficients.items())

    @staticmethod
    def createEqnPower(coefficients):
        """Create a symbolic power law equation string from a dictionary of constants."""
        pieces = [f"{coefficients.get('intercept', '')}"]
        if "outer multiplier" in coefficients:
            pieces.append(f" + {coefficients['outer multiplier']}*")
        else:
            pieces.append(" +")
        pieces.append(f"(T + {coefficients['inner adder']})**{coefficients['exponent']}")
        return "".join(pieces)

    @staticmethod
    def createEqnHyper(coefficients):
        """Create a symbolic hyperbolic equation string from a dictionary of constants."""
        inner = f"(T+{coefficients['inner adder']})/{coefficients['inner denominator']}"
        return (
            f"{coefficients['intercept']} + "
            f"{coefficients['outer multiplier']}*"
            f"{coefficients['hyperbolic function']}({inner})"
        )

    def _createFunctionWithoutTable(self, data=None):
        """
        Build a basic viable material definition without tabulated data in the function.

        Parameters
        ----------
        data : dict
            A dictionary containing user specified function child nodes.
        """
        function = {"T": {"min": -100.0, "max": 500.0}, **(data or {})}
        mat = Material()
        mat.loadNode(
            {
                "file format": "TESTS",
                "composition": {"Fe": "balance"},
                "material type": "Metal",
                "density": {"function": function},
            }
        )
        return mat

    def _createFunction(self, data=None, tableData=None, minT=-100.0, maxT=500.0):
        """
        Build a basic viable material definition, optionally with tabulated data.

        Parameters
        ----------
        data : dict
            A dictionary containing user specified function child nodes.
        tableData : dict
            Table data to include in the function definition
        minT : float
            Minimum T variable value for the function.
        maxT : float
            Maximum T variable value for the function.
        """
        function = {"T": {"min": minT, "max": maxT}, **(data or {})}
        mat = Material()
        mat.loadNode(
            {
                "file format": "TESTS",
                "composition": {"Fe": "balance"},
                "material type": "Metal",
                "density": {"function": function, "tabulated data": tableData or {}},
            }
        )
        return mat

    def belowMinimumCheck(self, yamlData, tableData=None):
        """Check that evaluating just below the minimum of the T variable raises a ValueError."""
        func = self._createFunction(yamlData, tableData).rho
        with self.assertRaises(ValueError):
            func.calc({"T": func.getMinBound("T") - 0.01})

    def aboveMaximumCheck(self, yamlData, tableData=None):
        """Check that evaluating just above the maximum of the T variable raises a ValueError."""
        func = self._createFunction(yamlData, tableData).rho
        with self.assertRaises(ValueError):
            func.calc({"T": func.getMaxBound("T") + 0.01})
================================================
FILE: armi/matProps/tests/invalidTestFiles/badFileFormat.YAML
================================================
file format: INVALID
material type: Fluid
composition:
a: balance
density:
function:
T:
min: 100.0
max: 200.0
type: symbolic
equation: 1.0
================================================
FILE: armi/matProps/tests/invalidTestFiles/badProperty.yaml
================================================
file format: TESTS
material type: Metal
composition:
Na: 1.0
bad_prop: whatever
================================================
FILE: armi/matProps/tests/invalidTestFiles/duplicateComposition.yaml
================================================
file format: TESTS
material type: Fluid
composition:
a: [15, 30]
b: [10, 15]
b: [11, 16]
c: balance
density:
function:
T:
min: 100.0
max: 200.0
type: symbolic
equation: 1.0
================================================
FILE: armi/matProps/tests/testDir1/a.yaml
================================================
file format: TESTS
material type: Fluid
composition:
a: balance
references:
- ref: ACME II.2017, Table 3 pg 182
refType: open literature
density:
function:
T:
min: 100.0
max: 200.0
type: symbolic
equation: 1.0
================================================
FILE: armi/matProps/tests/testDir1/b.yaml
================================================
file format: TESTS
material type: Fluid
composition:
b: balance
density:
function:
T:
min: 100.0
max: 200.0
type: symbolic
equation: 2.0
================================================
FILE: armi/matProps/tests/testDir2/c.yml
================================================
file format: TESTS
material type: Fluid
composition:
c: balance
density:
function:
T:
min: 100.0
max: 200.0
type: symbolic
equation: 3.0
================================================
FILE: armi/matProps/tests/testDir2/d.yaml
================================================
file format: TESTS
material type: Fluid
composition:
d: balance
density:
function:
T:
min: 100.0
max: 200.0
type: symbolic
equation: 4.0
================================================
FILE: armi/matProps/tests/testDir3/a.yaml
================================================
file format: TESTS
material type: Fluid
composition:
a: balance
density:
function:
T:
min: 100.0
max: 200.0
type: symbolic
equation: 6.0
================================================
FILE: armi/matProps/tests/testDir3/e.yaml
================================================
file format: TESTS
material type: Fluid
composition:
e: balance
density:
function:
T:
min: 100.0
max: 200.0
type: symbolic
equation: 5.0
================================================
FILE: armi/matProps/tests/testDir4/sampleProperty.yaml
================================================
file format: TESTS
material type: Fluid
composition:
a: balance
density:
function:
T:
min: 101.0
max: 501.0
type: symbolic
equation: 1.0
specific heat capacity:
function:
T:
min: 102.0
max: 502.0
type: symbolic
equation: 2.0
thermal conductivity:
function:
T:
min: 103.0
max: 503.0
type: symbolic
equation: 3.0
thermal diffusivity:
function:
T:
min: 104.0
max: 504.0
type: symbolic
equation: 4.0
dynamic viscosity:
function:
T:
min: 105.0
max: 505.0
type: symbolic
equation: 5.0
kinematic viscosity:
function:
T:
min: 106.0
max: 506.0
type: symbolic
equation: 6.0
melting temperature:
function:
T:
min: 107.0
max: 507.0
type: symbolic
equation: 7.0
boiling temperature:
function:
T:
min: 108.0
max: 508.0
type: symbolic
equation: 8.0
latent heat of vaporization:
function:
T:
min: 109.0
max: 509.0
type: symbolic
equation: 9.0
enthalpy of fusion:
function:
T:
min: 110.0
max: 510.0
type: symbolic
equation: 10.0
surface tension:
function:
T:
min: 111.0
max: 511.0
type: symbolic
equation: 11.0
vapor pressure:
function:
T:
min: 112.0
max: 512.0
type: symbolic
equation: 12.0
isothermal compressibility:
function:
T:
min: 113.0
max: 513.0
type: symbolic
equation: 13.0
mean coefficient of thermal expansion:
function:
T:
min: 114.0
max: 514.0
type: symbolic
equation: 14.0
instantaneous coefficient of thermal expansion:
function:
T:
min: 115.0
max: 515.0
type: symbolic
equation: 15.0
Young's modulus:
function:
T:
min: 116.0
max: 516.0
type: symbolic
equation: 16.0
Poisson's ratio:
function:
T:
min: 117.0
max: 517.0
type: symbolic
equation: 17.0
yield strength:
function:
T:
min: 118.0
max: 518.0
type: symbolic
equation: 18.0
tensile strength:
function:
T:
min: 119.0
max: 519.0
type: symbolic
equation: 19.0
design stress:
function:
T:
min: 120.0
max: 520.0
type: symbolic
equation: 20.0
design reference stress:
function:
T:
min: 121.0
max: 521.0
type: symbolic
equation: 21.0
allowable stress:
function:
T:
min: 122.0
max: 522.0
type: symbolic
equation: 22.0
time dependent design stress:
function:
T:
min: 123.0
max: 523.0
type: symbolic
equation: 23.0
service reference stress:
function:
T:
min: 124.0
max: 524.0
type: symbolic
equation: 24.0
stress to rupture:
function:
T:
min: 125.0
max: 525.0
type: symbolic
equation: 25.0
tensile strength reduction factor:
function:
T:
min: 126.0
max: 526.0
type: symbolic
equation: 26.0
yield strength reduction factor:
function:
T:
min: 127.0
max: 527.0
type: symbolic
equation: 27.0
weld strength reduction factor:
function:
T:
min: 127.0
max: 527.0
type: symbolic
equation: 28.0
allowable time to rupture:
function:
T:
min: 128.0
max: 528.0
type: symbolic
equation: 29.0
allowable time to allowable stress:
function:
T:
min: 129.0
max: 529.0
type: symbolic
equation: 30.0
design fatigue strain range:
function:
T:
min: 130.0
max: 530.0
type: symbolic
equation: 31.0
strain from isochronous stress-strain curve:
function:
T:
min: 130.0
max: 530.0
type: symbolic
equation: 32.0
design fatigue stress:
function:
T:
min: 131.0
max: 531.0
type: symbolic
equation: 33.0
linear expansion:
function:
T:
min: 132.0
max: 532.0
type: symbolic
equation: 34.0
vapor specific volume:
function:
T:
min: 133.0
max: 533.0
type: symbolic
equation: 35.0
speed of sound:
function:
T:
min: 134.0
max: 534.0
type: symbolic
equation: 36.0
solidus temperature:
function:
T:
min: 135.0
max: 535.0
type: symbolic
equation: 37.0
liquidus temperature:
function:
T:
min: 136.0
max: 536.0
type: symbolic
equation: 38.0
volumetric expansion:
function:
T:
min: 137.0
max: 537.0
type: symbolic
equation: 39.0
enthalpy:
function:
T:
min: 138.0
max: 538.0
type: symbolic
equation: 40.0
temperature from enthalpy:
function:
T:
min: 139.0
max: 539.0
type: symbolic
equation: 41.0
fracture toughness:
function:
T:
min: 140.0
max: 540.0
type: symbolic
equation: 42.0
Brinell Hardness:
function:
T:
min: 141.0
max: 541.0
type: symbolic
equation: 43.0
factor f from ASME.III.5 Fig. HBB-T-1432-2:
function:
T:
min: 141.0
max: 541.0
type: symbolic
equation: 44.0
factor Kv' from ASME.III.5 Fig. HBB-T-1432-3:
function:
T:
min: 141.0
max: 541.0
type: symbolic
equation: 45.0
shear modulus:
function:
T:
min: 141.0
max: 541.0
type: symbolic
equation: 46.0
elongation:
function:
T:
min: 141.0
max: 541.0
type: symbolic
equation: 47.0
================================================
FILE: armi/matProps/tests/testMaterialsData/materialA.yaml
================================================
file format: TESTS
material type: Fluid
composition:
a: balance
density:
function:
T:
min: 201.0
max: 601.0
type: symbolic
equation: 101.0*T + 500
================================================
FILE: armi/matProps/tests/testMaterialsData/materialB.yaml
================================================
file format: TESTS
material type: Fluid
composition:
b: balance
specific heat capacity:
function:
T:
min: 202.0
max: 602.0
type: symbolic
equation: 102.0
================================================
FILE: armi/matProps/tests/testMaterialsData/materialsSubDir/materialC.yaml
================================================
file format: TESTS
material type: Fluid
composition:
c: balance
thermal conductivity:
function:
T:
min: 103.0
max: 503.0
type: symbolic
equation: 3.0
================================================
FILE: armi/matProps/tests/testMaterialsData/materialsSubDir/materialD.yaml
================================================
file format: TESTS
material type: Fluid
composition:
d: balance
thermal diffusivity:
function:
T:
min: 204.0
max: 604.0
type: symbolic
equation: 104.0
================================================
FILE: armi/matProps/tests/test_1DSymbolicFunction.py
================================================
# Copyright 2026 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple examples to verify constant, polynomial, hyperbolic, and power law functional forms."""
import numpy as np
from armi.matProps.tests import MatPropsFunTestBase
class Test1DSymbolicFunction(MatPropsFunTestBase):
"""Test 1D symbolic functions."""
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.basePolynomialMap = {0: 5, 1: 2, 2: -3, 3: 4, 4: -5, 5: 6, 6: -7, 7: 8}
cls.basePolynomialData = {
"type": "symbolic",
"equation": cls.createEqnPoly(cls.basePolynomialMap),
}
cls.basePowerLawTerms = {
"exponent": 2.0,
"inner adder": 125.0,
"outer multiplier": 3.4,
"intercept": -2.5,
}
cls.basePowerLawData = {
"type": "symbolic",
"equation": cls.createEqnPower(cls.basePowerLawTerms),
}
cls.baseHyperbolicTerms = {
"hyperbolic function": "tanh",
"intercept": 5,
"outer multiplier": 2,
"inner denominator": 4,
"inner adder": 1,
}
cls.baseHyperbolicData = {
"type": "symbolic",
"equation": cls.createEqnHyper(cls.baseHyperbolicTerms),
}
cls.baseConstantData = {"type": "symbolic", "equation": "9123.5"}
def test_polynomialEqnIntInt(self):
"""
Evaluates a PolynomialFunction that has 8 power values that are all integers.
Ensure that the override methods PolynomialFunction._parseSpecific() and PolynomialFunction._calcSpecific() are
functioning appropriately. A minimal input with a defined polynomial function is provided. The polynomial is
comprised of all integer coefficients and powers to ensure that matProps can properly handle integer inputs. The
function is evaluated at several values in the valid range and compared to a lambda expression inside the test
method to make sure their results are equivalent.
"""
# these polynomials have up to 8 powers/terms (including 0)
mat = self._createFunction(self.basePolynomialData)
mat.name = self.testName
self.assertEqual(str(mat), f">")
# test using input dict for calc
self.assertAlmostEqual(mat.rho.calc({"T": 0}), self.polynomialEvaluation(self.basePolynomialMap, 0))
self.assertAlmostEqual(mat.rho.calc({"T": 50}), self.polynomialEvaluation(self.basePolynomialMap, 50))
self.assertAlmostEqual(mat.rho.calc({"T": 100}), self.polynomialEvaluation(self.basePolynomialMap, 100))
# test using kwargs for calc
self.assertAlmostEqual(mat.rho.calc(T=0), self.polynomialEvaluation(self.basePolynomialMap, 0))
self.assertAlmostEqual(mat.rho.calc(T=50), self.polynomialEvaluation(self.basePolynomialMap, 50))
self.assertAlmostEqual(mat.rho.calc(T=100), self.polynomialEvaluation(self.basePolynomialMap, 100))
def test_polynomialEqnFloatInt(self):
"""Evaluates a PolynomialFunction with floating point coefficients and integer point power terms."""
coefficientsMap = {0: -2.523536, 1: 5.374489, 2: 4.897134}
data = {"type": "symbolic", "equation": self.createEqnPoly(coefficientsMap)}
mat = self._createFunction(data)
func = mat.rho
# test using input dict for calc
self.assertAlmostEqual(func.calc({"T": -100.0}), self.polynomialEvaluation(coefficientsMap, -100.0))
self.assertAlmostEqual(func.calc({"T": 0.0}), self.polynomialEvaluation(coefficientsMap, 0.0))
self.assertAlmostEqual(func.calc({"T": 100.0}), self.polynomialEvaluation(coefficientsMap, 100.0))
self.assertAlmostEqual(func.calc({"T": 500.0}), self.polynomialEvaluation(coefficientsMap, 500.0))
# test using kwargs for calc
self.assertAlmostEqual(func.calc(T=-100.0), self.polynomialEvaluation(coefficientsMap, -100.0))
self.assertAlmostEqual(func.calc(T=0.0), self.polynomialEvaluation(coefficientsMap, 0.0))
self.assertAlmostEqual(func.calc(T=100.0), self.polynomialEvaluation(coefficientsMap, 100.0))
self.assertAlmostEqual(func.calc(T=500.0), self.polynomialEvaluation(coefficientsMap, 500.0))
def test_polynomialEqnFloatFloat(self):
"""Evaluates a PolynomialFunction with floating point coefficients and floating point power terms."""
coefficientsMap = {0.5: -2.5, 2.5: 5.389, 1.5: 4.375}
data = {"type": "symbolic", "equation": self.createEqnPoly(coefficientsMap)}
mat = self._createFunction(data, minT=0.0)
mat.name = self.testName
self.assertEqual(str(mat), f">")
func = mat.rho
self.assertAlmostEqual(func.calc({"T": 0.0}), self.polynomialEvaluation(coefficientsMap, 0.0))
self.assertAlmostEqual(func.calc({"T": 100.0}), self.polynomialEvaluation(coefficientsMap, 100.0))
self.assertAlmostEqual(func.calc({"T": 500.0}), self.polynomialEvaluation(coefficientsMap, 500.0))
def test_polynomialDiffFloatTypes(self):
"""Evaluates a PolynomialFunction with floating point coefficients power terms, checking exact values."""
coefficientsMap = {0.5: -2.5, 2.5: 5.389, 1.5: 4.375}
data = {"type": "symbolic", "equation": self.createEqnPoly(coefficientsMap)}
mat = self._createFunction(data, minT=0.0)
self.assertAlmostEqual(mat.rho.calc({"T": np.float64(0.0)}), 0.0)
self.assertAlmostEqual(mat.rho.calc({"T": np.float64(100.0)}), 543250.0)
self.assertAlmostEqual(mat.rho.calc({"T": np.float64(500.0)}), 30174283.91217429)
def test_symbolicEqnError(self):
"""Ensure symbolic equations fail correctly when given empty or nonsense inputs."""
# Leave out equation node
dataNoCoeff = {"type": "symbolic"}
with self.assertRaises(KeyError):
self._createFunction(dataNoCoeff)
# Provide invalid equation node.
dataBadCoeff = {"type": "symbolic", "equation": "NOT AN EQUATION"}
with self.assertRaises(ValueError):
self._createFunction(dataBadCoeff)
def test_powerEqn(self):
"""Evaluates a PowerLaw with floating point coefficients and exponents."""
mat = self._createFunction(self.basePowerLawData)
func = mat.rho
self.assertAlmostEqual(func.calc({"T": 0}), self.powerLawEvaluation(self.basePowerLawTerms, 0))
self.assertAlmostEqual(func.calc({"T": 12.5}), self.powerLawEvaluation(self.basePowerLawTerms, 12.5))
self.assertAlmostEqual(func.calc({"T": 25}), self.powerLawEvaluation(self.basePowerLawTerms, 25))
self.assertAlmostEqual(func.calc({"T": 50}), self.powerLawEvaluation(self.basePowerLawTerms, 50))
self.assertAlmostEqual(func.calc({"T": 75}), self.powerLawEvaluation(self.basePowerLawTerms, 75))
self.assertAlmostEqual(func.calc({"T": 100}), self.powerLawEvaluation(self.basePowerLawTerms, 100))
def test_powerEqnAllInt(self):
"""Evaluates a PowerLaw with integer coefficients and exponents."""
coefficients = {
"exponent": 2,
"inner adder": 125,
"outer multiplier": 3,
"intercept": -2,
}
powerLawDataInt = {
"type": "symbolic",
"equation": self.createEqnPower(coefficients),
}
mat = self._createFunction(powerLawDataInt)
func = mat.rho
self.assertAlmostEqual(func.calc({"T": 0}), self.powerLawEvaluation(coefficients, 0))
self.assertAlmostEqual(func.calc({"T": 25}), self.powerLawEvaluation(coefficients, 25))
self.assertAlmostEqual(func.calc({"T": 50}), self.powerLawEvaluation(coefficients, 50))
self.assertAlmostEqual(func.calc({"T": 75}), self.powerLawEvaluation(coefficients, 75))
self.assertAlmostEqual(func.calc({"T": 100}), self.powerLawEvaluation(coefficients, 100))
def test_powerEqnFloatInt(self):
"""Evaluates a PowerLaw with a mixture of integer and floating point coefficients and exponents."""
coefficients = {
"exponent": 2.5,
"inner adder": 125,
"outer multiplier": 3.14159,
"intercept": -2,
}
powerLawDataInt = {
"type": "symbolic",
"equation": self.createEqnPower(coefficients),
}
mat = self._createFunction(powerLawDataInt)
func = mat.rho
self.assertAlmostEqual(func.calc({"T": 0}), self.powerLawEvaluation(coefficients, 0))
self.assertAlmostEqual(func.calc({"T": 25}), self.powerLawEvaluation(coefficients, 25))
self.assertAlmostEqual(func.calc({"T": 50}), self.powerLawEvaluation(coefficients, 50))
self.assertAlmostEqual(func.calc({"T": 75}), self.powerLawEvaluation(coefficients, 75))
self.assertAlmostEqual(func.calc({"T": 100}), self.powerLawEvaluation(coefficients, 100))
def test_powerEqnNoInter(self):
"""Evaluates a PowerLaw with no intercept term."""
coefficients = {"exponent": 2.0, "inner adder": 125.0, "outer multiplier": 3.4}
data = {"type": "symbolic", "equation": self.createEqnPower(coefficients)}
mat = self._createFunction(data)
# Intercept in self.powerLawEvaluation is 0.0 to reflect default value in matProps
self.assertAlmostEqual(mat.rho.calc({"T": 0}), self.powerLawEvaluation(coefficients, 0))
self.assertAlmostEqual(mat.rho.calc({"T": 25}), self.powerLawEvaluation(coefficients, 25))
self.assertAlmostEqual(mat.rho.calc({"T": 50}), self.powerLawEvaluation(coefficients, 50))
self.assertAlmostEqual(mat.rho.calc({"T": 75}), self.powerLawEvaluation(coefficients, 75))
self.assertAlmostEqual(mat.rho.calc({"T": 100}), self.powerLawEvaluation(coefficients, 100))
def test_powerEqnNoOuter(self):
"""Evaluates a PowerLaw with no outer multiplier term."""
coefficients = {"exponent": 2.0, "inner adder": 125.0, "intercept": -2.5}
data = {"type": "symbolic", "equation": self.createEqnPower(coefficients)}
mat = self._createFunction(data)
func = mat.rho
# Outer multiplier in self.powerLawEvaluation is 1.0 to reflect default value in matProps
self.assertAlmostEqual(func.calc({"T": 0}), self.powerLawEvaluation(coefficients, 0))
self.assertAlmostEqual(func.calc({"T": 25}), self.powerLawEvaluation(coefficients, 25))
self.assertAlmostEqual(func.calc({"T": 50}), self.powerLawEvaluation(coefficients, 50))
self.assertAlmostEqual(func.calc({"T": 75}), self.powerLawEvaluation(coefficients, 75))
self.assertAlmostEqual(func.calc({"T": 100}), self.powerLawEvaluation(coefficients, 100))
def test_powerEqnNoOuterInter(self):
"""Evaluates a PowerLaw with no outer multiplier or intercept term."""
coefficients = {"exponent": 2.0, "inner adder": 125.0}
data = {"type": "symbolic", "equation": self.createEqnPower(coefficients)}
mat = self._createFunction(data)
func = mat.rho
self.assertAlmostEqual(func.calc({"T": 0}), self.powerLawEvaluation(coefficients, 0))
self.assertAlmostEqual(func.calc({"T": 25}), self.powerLawEvaluation(coefficients, 25))
self.assertAlmostEqual(func.calc({"T": 50}), self.powerLawEvaluation(coefficients, 50))
self.assertAlmostEqual(func.calc({"T": 75}), self.powerLawEvaluation(coefficients, 75))
self.assertAlmostEqual(func.calc({"T": 100}), self.powerLawEvaluation(coefficients, 100))
def test_constantsEval(self):
"""Evaluates a PowerLaw for integer and floating point values."""
mat = self._createFunction(self.baseConstantData)
func = mat.rho
self.assertAlmostEqual(func.calc({"T": 0}), 9123.5)
self.assertAlmostEqual(func.calc({"T": 12.5}), 9123.5)
self.assertAlmostEqual(func.calc({"T": 50}), 9123.5)
self.assertAlmostEqual(func.calc({"T": 100}), 9123.5)
def test_hyperbolicEqnEval(self):
"""Evaluates a HyperbolicFunction for integer and floating point values."""
mat = self._createFunction(self.baseHyperbolicData)
# test using input dict for calc
self.assertAlmostEqual(mat.rho.calc({"T": 0}), self.hyperbolicEvaluation(self.baseHyperbolicTerms, 0))
self.assertAlmostEqual(mat.rho.calc({"T": 12.5}), self.hyperbolicEvaluation(self.baseHyperbolicTerms, 12.5))
self.assertAlmostEqual(mat.rho.calc({"T": 50}), self.hyperbolicEvaluation(self.baseHyperbolicTerms, 50))
self.assertAlmostEqual(mat.rho.calc({"T": 100}), self.hyperbolicEvaluation(self.baseHyperbolicTerms, 100))
# test using kwargs for calc
self.assertAlmostEqual(mat.rho.calc(T=0), self.hyperbolicEvaluation(self.baseHyperbolicTerms, 0))
self.assertAlmostEqual(mat.rho.calc(T=12.5), self.hyperbolicEvaluation(self.baseHyperbolicTerms, 12.5))
self.assertAlmostEqual(mat.rho.calc(T=50), self.hyperbolicEvaluation(self.baseHyperbolicTerms, 50))
self.assertAlmostEqual(mat.rho.calc(T=100), self.hyperbolicEvaluation(self.baseHyperbolicTerms, 100))
def test_hyperbolicEqnEval2(self):
    """Evaluates a HyperbolicFunction for a different set of floating point values."""
    terms = {
        "hyperbolic function": "tanh",
        "intercept": 3.829e8,
        "outer multiplier": -4.672e8,
        "inner denominator": 216.66,
        "inner adder": -613.52,
    }
    equationData = {"type": "symbolic", "equation": self.createEqnHyper(terms)}
    material = self._createFunction(equationData)
    expected = self.hyperbolicEvaluation(terms, 500)
    # Compare with a small relative tolerance since both sides are float evaluations.
    self.assertAlmostEqual(material.rho.calc({"T": 500}), expected, delta=expected * 1e-5)
================================================
FILE: armi/matProps/tests/test_composition.py
================================================
# Copyright 2026 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Basic tests of the Composition class."""
import os
import unittest
from ruamel.yaml.constructor import DuplicateKeyError
import armi.matProps
from armi.matProps.material import Material
class TestComposition(unittest.TestCase):
    """Tests parsing and validation of the `composition` node of a material definition."""

    def setUp(self):
        # Derive a short label for the running test (the unittest id minus the
        # class path and the "test_" prefix); used below to name materials.
        self.testName = self.id().split(".")[-1]
        searchStr = "test_"
        if self.testName.startswith(searchStr):
            self.testName = self.testName[len(searchStr) :]

    def _createFunction(self, compMap=None):
        """Build a minimal Material whose `composition` node is ``compMap``.

        A trivial constant-density function is included only so the material
        definition parses; composition handling is what is under test.
        """
        compValue = {}
        if compMap is not None:
            compValue = compMap
        materialMap = {
            "file format": "TESTS",
            "composition": compValue,
            "material type": "Metal",
            "density": {
                "function": {
                    "T": {"min": 100.0, "max": 200.0},
                    "type": "symbolic",
                    "equation": 1.0,
                }
            },
        }
        mat = Material()
        mat.loadNode(materialMap)
        return mat

    def test_compositionMissing(self):
        """A material node without a `composition` key raises a KeyError."""
        materialMap = {
            "file format": "TESTS",
            "material type": "Metal",
            "density": "whatever",
        }
        mat = Material()
        with self.assertRaisesRegex(KeyError, "Missing YAML node `composition`"):
            mat.loadNode(materialMap)

    def test_compositionInvTuple(self):
        """A composition value that is not a two-element min/max pair raises a TypeError."""
        # Invalid doesn't have two elements
        badCompMap = {"Fe": [1.0]}
        with self.assertRaisesRegex(
            TypeError,
            "Composition values must be either a tuple of min/max values, or `balance`",
        ):
            self._createFunction(badCompMap)

    def test_compositionInvStr(self):
        """Only the literal string `balance` is accepted as a non-tuple composition value."""
        badCompMap = {"a": [0.5, 0.5], "b": "remainder"}
        with self.assertRaisesRegex(
            TypeError,
            "Composition values must be either a tuple of min/max values, or `balance`",
        ):
            self._createFunction(badCompMap)

    def test_compositionMissBalance(self):
        """A composition with no balance element raises a ValueError."""
        compMap = {"a": [0.25, 0.26], "b": [0.3, 0.31], "c": [0.45, 0.46]}
        with self.assertRaisesRegex(ValueError, "exactly one balance element"):
            self._createFunction(compMap)

    def test_compositionBalanceNum(self):
        """A composition with more than one balance element raises a ValueError."""
        compMap = {"a": [15.0, 15.1], "b": "balance", "c": "balance"}
        with self.assertRaisesRegex(ValueError, "exactly one balance element"):
            self._createFunction(compMap)

    def test_compositionBalance(self):
        """The balance element's min/max complement the other elements' max/min sums."""
        compMap = {"a": [15.0, 20.0], "b": [30.0, 35.0], "c": "balance"}
        mat = self._createFunction(compMap)
        mat.name = self.testName
        # NOTE(review): the expected repr literal looks truncated (f">") —
        # confirm against Material.__str__.
        self.assertEqual(str(mat), f">")
        c_minValue, c_maxValue = None, None
        sumMin, sumMax = 0.0, 0.0
        for compElement in mat.composition:
            if compElement.name != "c":
                self.assertFalse(compElement.isBalance)
                compValue = compMap.get(compElement.name)
                self.assertIsNotNone(compValue)
                self.assertAlmostEqual(compElement.minValue, compValue[0])
                self.assertAlmostEqual(compElement.maxValue, compValue[1])
                sumMin += compElement.minValue
                sumMax += compElement.maxValue
            else:
                self.assertTrue(compElement.isBalance)
                c_minValue = compElement.minValue
                c_maxValue = compElement.maxValue
        # balance min is what remains when the others sit at their maxima, and vice versa
        self.assertAlmostEqual(c_minValue, 100.0 - sumMax)
        self.assertAlmostEqual(c_maxValue, 100.0 - sumMin)

    def test_compositionBalance2(self):
        """The balance minimum is 0.0 when the other maxima already sum past 100%."""
        compMap = {
            "a": [10.0, 15.0],
            "b": [20.1, 35.1],
            "c": [30.2, 50.2],
            "d": "balance",
        }
        mat = self._createFunction(compMap)
        mat.name = self.testName
        # NOTE(review): expected repr literal looks truncated (f">") — confirm.
        self.assertEqual(str(mat), f">")
        sumMin = 0.0
        d_minValue, d_maxValue = None, None
        for compElement in mat.composition:
            if compElement.name != "d":
                self.assertFalse(compElement.isBalance)
                compValue = compMap.get(compElement.name)
                self.assertIsNotNone(compValue)
                self.assertAlmostEqual(compElement.minValue, compValue[0])
                self.assertAlmostEqual(compElement.maxValue, compValue[1])
                sumMin += compElement.minValue
            else:
                self.assertTrue(compElement.isBalance)
                d_minValue = compElement.minValue
                d_maxValue = compElement.maxValue
        self.assertAlmostEqual(d_minValue, 0.0)
        self.assertAlmostEqual(d_maxValue, 100.0 - sumMin)

    def test_compositionMinValue(self):
        """A negative minimum composition value raises a ValueError."""
        compMap = {"a": [-1.0, 20.0], "b": "balance"}
        with self.assertRaisesRegex(ValueError, "negative minimum"):
            self._createFunction(compMap)

    def test_compositionMaxValue(self):
        """A maximum below the minimum raises a ValueError."""
        compMap = {"a": [15.0, 14.9], "b": "balance"}
        with self.assertRaisesRegex(ValueError, "max < min"):
            self._createFunction(compMap)

    def test_compositionMaxValue2(self):
        """A maximum above 100% raises a ValueError."""
        compMap = {"a": [15.0, 100.1], "b": "balance"}
        with self.assertRaisesRegex(ValueError, "max > 100.0"):
            self._createFunction(compMap)

    def test_compositionMinSum(self):
        """Minimum values summing above 100% raise a ValueError."""
        compMap = {
            "a": [30.0, 30.1],
            "b": [40.1, 40.2],
            "c": [50.2, 50.3],
            "d": "balance",
        }
        with self.assertRaisesRegex(ValueError, "minimum composition summation greater than 100.0"):
            self._createFunction(compMap)

    def test_compositionDuplicate(self):
        """Duplicate element keys in a YAML composition raise ruamel's DuplicateKeyError."""
        duplicateTestFile = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            "invalidTestFiles",
            "duplicateComposition.yaml",
        )
        with self.assertRaises(DuplicateKeyError):
            armi.matProps.loadMaterial(duplicateTestFile)
================================================
FILE: armi/matProps/tests/test_constituent.py
================================================
# Copyright 2026 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Basic tests of the Constituent class."""
import unittest
from armi.matProps.constituent import Constituent
class TestConstituent(unittest.TestCase):
    """Exercises construction and composition parsing of the Constituent class."""

    def test_errorHandling(self):
        """Valid constituents stringify; invalid min/max bounds raise ValueError."""
        valid = Constituent("Fe", 10.0, 25.0, False)
        self.assertEqual(str(valid), "")
        balance = Constituent("Fe", 0.0, 99.0, True)
        self.assertEqual(str(balance), "")
        # negative minimum, maximum above 100, and max < min are all rejected
        for minVal, maxVal in ((-10.0, 25.0), (50.0, 101.0), (50.0, 1.0)):
            with self.assertRaises(ValueError):
                Constituent("Fe", minVal, maxVal, False)

    def test_parseComposition(self):
        """parseComposition rejects invalid nodes and parses valid ones in order."""
        # test we fail correctly when providing invalid inputs
        with self.assertRaises(ValueError):
            Constituent.parseComposition({})
        with self.assertRaises(ValueError):
            Constituent.parseComposition({"Fe": (0.1, 0.25)})
        # a simple Iron-only material
        ironOnly = Constituent.parseComposition({"Fe": "balance"})
        self.assertEqual(len(ironOnly), 1)
        self.assertEqual(ironOnly[0].maxValue, 100.0)
        self.assertTrue(ironOnly[0].isBalance)
        # a hypothetical steel-like material
        steel = Constituent.parseComposition({"C": (0.0, 10.0), "Cr": (0.0, 1.0), "Fe": "balance"})
        self.assertEqual(len(steel), 3)
        self.assertEqual(steel[0].maxValue, 10.0)
        self.assertFalse(steel[0].isBalance)
        self.assertEqual(steel[2].maxValue, 100.0)
        self.assertTrue(steel[2].isBalance)
================================================
FILE: armi/matProps/tests/test_functions.py
================================================
# Copyright 2026 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the Function class."""
from armi.matProps.material import Material
from armi.matProps.tests import MatPropsFunTestBase
class TestFunctions(MatPropsFunTestBase):
    """Class which encapsulates the unit tests data and methods to test the matProps Function class."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # Constant-valued symbolic function shared by most tests below.
        cls.baseConstantData = {"type": "symbolic", "equation": "9123.5"}

    def test_getReferences(self):
        """The `references` property reflects the underlying `_references` list."""
        mat = self._createFunction(self.baseConstantData)
        mat.rho._references = ["1", "2"]
        self.assertEqual(mat.rho.references[0], "1")
        self.assertEqual(mat.rho.references[1], "2")

    def test_datafilesVarVals(self):
        """
        Test to make sure that parsing variable values returns the expected values when parsing "max" and "min" nodes
        for the T variable.
        """
        mat = self._createFunction(self.baseConstantData)
        mat.name = self.testName
        # NOTE(review): the expected repr literal looks truncated (f">") —
        # confirm against Material.__str__.
        self.assertEqual(str(mat), f">")
        density = mat.rho
        # Bounds presumably come from the base-class test data — TODO confirm.
        self.assertEqual(density.getMinBound("T"), -100.0)
        self.assertEqual(density.getMaxBound("T"), 500.0)

    def test_datafilesMaxVar(self):
        """Test that makes sure a ValueError is thrown if the max of a variable is less than the min."""
        with self.assertRaises(ValueError):
            self._createFunction(self.baseConstantData, maxT=-101.0)

    def test_datafilesInvType(self):
        """Test that makes sure a KeyError is thrown if an unsupported function type is provided."""
        data = {"type": "fake function"}
        with self.assertRaisesRegex(KeyError, "fake function"):
            self._createFunction(data)

    def test_refTempEval(self):
        """Test that a function with a reference temperature correctly parses and returns the expected value."""
        testData = self.baseConstantData.copy()
        testData.update({"reference temperature": 200.0})
        mat = self._createFunction(testData)
        func = mat.rho
        self.assertAlmostEqual(func.getReferenceTemperature(), 200.0)

    def test_refTempMissing(self):
        """Test that a ValueError is thrown when accessing a reference temperature value that is not provided."""
        mat = self._createFunction(self.baseConstantData)
        func = mat.rho
        with self.assertRaisesRegex(ValueError, "Reference temperature is undefined"):
            func.getReferenceTemperature()

    def test_refTempInvalid(self):
        """Test to make sure that a ValueError is thrown if the provided reference temperature value is invalid."""
        testData = self.baseConstantData.copy()
        # -273.25 is below absolute zero, presumably why it is treated as undefined.
        testData.update({"reference temperature": -273.25})
        mat = self._createFunction(testData)
        func = mat.rho
        with self.assertRaisesRegex(ValueError, "Reference temperature is undefined"):
            func.getReferenceTemperature()

    def test_independentVars(self):
        """The function exposes its single independent variable T and its bounds."""
        mat = self._createFunction(self.baseConstantData)
        fun = mat.rho
        self.assertEqual(len(fun.independentVars), 1)
        self.assertEqual(fun.getIndependentVariables(), ["T"])
        self.assertEqual(fun.getMinBound("T"), -100)
        self.assertEqual(fun.getMaxBound("T"), 500)
        # Unknown variable names raise KeyError.
        with self.assertRaises(KeyError):
            fun.getMinBound("X")
        with self.assertRaises(KeyError):
            fun.getMaxBound("Y")

    def test_calcEdgeCases(self):
        """calc() rejects conflicting, missing, and unknown variable inputs."""
        mat = self._createFunction(self.baseConstantData)
        fun = mat.rho
        # Supplying T both in the input dict and as a kwarg raises ValueError.
        with self.assertRaises(ValueError):
            fun.calc({"T": 200}, T=300)
        with self.assertRaises(ValueError):
            fun.calc()
        with self.assertRaises(KeyError):
            fun.calc({"Z": 200})
        # whoops, I forgot to declare a "max" value
        materialData = {
            "file format": "TESTS",
            "composition": {"Fe": "balance"},
            "material type": "Metal",
            "density": {"function": {"T": {"min": 1.0}, "type": "symbolic", "equation": 1.0}},
        }
        mat = Material()
        with self.assertRaises(KeyError):
            mat.loadNode(materialData)

    def test_references(self):
        """A `references` node on a property is parsed into reference objects."""
        materialData = {
            "file format": "TESTS",
            "composition": {"Fe": "balance"},
            "material type": "Metal",
            "density": {
                "function": {
                    "T": {"min": 1.0, "max": 10.0},
                    "type": "symbolic",
                    "equation": 1.0,
                },
                "references": [{"ref": "things", "type": "open literature"}],
            },
        }
        mat = Material()
        mat.loadNode(materialData)
        self.assertEqual(len(mat.rho.references), 1)
        self.assertEqual(mat.rho.references[0].getRef(), "things")

    def test_tabulatedData(self):
        """A `tabulated data` node is parsed into the property's tableData values."""
        tableData = [
            [300, 25],
            [400, 26.28],
            [500, 26.26],
            [600, 25.89],
            [700, 25.19],
            [800, 25.10],
            [900, 26.32],
        ]
        materialData = {
            "file format": "TESTS",
            "composition": {"Fe": "balance"},
            "material type": "Metal",
            "density": {
                "function": {
                    "T": {"min": 1.0, "max": 10.0},
                    "type": "symbolic",
                    "equation": 1.0,
                },
                "tabulated data": tableData,
            },
        }
        mat = Material()
        mat.loadNode(materialData)
        self.assertEqual(len(mat.rho.references), 0)
        self.assertEqual(len(mat.rho.tableData._values), 7)
================================================
FILE: armi/matProps/tests/test_hashing.py
================================================
# Copyright 2026 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Program that runs tests for the TestHashValues class."""
import os
import unittest
import armi.matProps
class TestHashValues(unittest.TestCase):
    """Testing the material hashing logic."""

    @classmethod
    def setUpClass(cls):
        cls.testDir = os.path.dirname(__file__)

    def test_hash(self):
        """Two different materials produce distinct 40-character digests."""
        pathA = os.path.join(self.testDir, "testDir1", "a.yaml")
        pathB = os.path.join(self.testDir, "testMaterialsData", "materialB.yaml")
        digestA = armi.matProps.loadMaterial(pathA, False).hash()
        digestB = armi.matProps.loadMaterial(pathB, False).hash()
        # NOTE: We cannot check exact hashes, because of OS differences
        for digest in (digestA, digestB):
            self.assertEqual(len(digest), 40)
        self.assertNotEqual(digestA, digestB)
================================================
FILE: armi/matProps/tests/test_interpolationFunctions.py
================================================
# Copyright 2026 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Program that runs all of the tests contained in the TestInterpolationFunctions class."""
import unittest
import numpy as np
from scipy import interpolate
from armi.matProps.interpolationFunctions import findIndex, linearLinear, logLinear
class TestInterpolationFunctions(unittest.TestCase):
    """Class which creates tests for the matProps InterpolationFunctions files."""

    def test_findIndex(self):
        """findIndex returns the index of the interval containing the value.

        A value equal to an interior grid point belongs to the interval on its
        left, and out-of-range values raise ValueError.
        """
        x = [2, 4, 6, 8]
        self.assertEqual(findIndex(2, x), 0)
        self.assertEqual(findIndex(3, x), 0)
        self.assertEqual(findIndex(3.14, x), 0)
        self.assertEqual(findIndex(4, x), 0)  # NOTE: This is 0, not 1.
        self.assertEqual(findIndex(4.001, x), 1)
        self.assertEqual(findIndex(6, x), 1)  # NOTE: This is 1, not 2.
        self.assertEqual(findIndex(6.2, x), 2)
        with self.assertRaises(ValueError):
            findIndex(-9, x)
        with self.assertRaises(ValueError):
            findIndex(9, x)

    def test_linearLinear(self):
        """
        Test which validates the values returned from the linear-linear interpolation method.

        Uses numpy linspace function to generate values at which interpolation will be performed.
        """
        x = np.arange(10)
        y = [1.0 + xx + xx**2 for xx in range(10)]
        # np.interp is NumPy's 1-D linear interpolator; the previously used
        # scipy.interpolate.interp1d is marked "legacy" by SciPy and is not
        # recommended for new code. All sample points lie within the grid, so
        # the two give identical results here.
        for nn in np.linspace(0, 9, 20):
            self.assertTrue(np.allclose(np.interp(nn, x, y), linearLinear(nn, x.tolist(), y)))

    def test_linearLinearInterpolation(self):
        """
        Duplicate test validating that the correct values are returned from a linear-linear interpolation.

        Differs from test_linearLinear by constructing interpolation points using standard lists instead of numpy
        linspace.
        """
        x = [0.0, 1.0]
        y = [1.0, 2.0]
        for xx, yy in [(0.0, 1.0), (0.5, 1.5), (1.0, 2.0)]:
            self.assertAlmostEqual(yy, linearLinear(xx, x, y))

    def test_linearLinearExtrapolation(self):
        """Check to make sure a ValueError is thrown if attempting an interpolation outside the function domain."""
        x = [0.0, 1.0]
        y = [1.0, 2.0]
        with self.assertRaisesRegex(ValueError, "out of bounds"):
            linearLinear(-2.0, x, y)

    def test_logLinear(self):
        """Test that validates the values returned from the log-linear interpolation function."""
        x = np.arange(1.0, 11.0)
        y = -42.0 + x + x**-2
        # Reference: linear interpolation in log10(x) space via np.interp.
        n_vals = np.interp(np.log10(np.linspace(1, 10, 20)), np.log10(x), y)
        m_vals = [logLinear(nn, x, y) for nn in np.linspace(1, 10, 20)]
        self.assertTrue(
            np.allclose(n_vals, m_vals),
            f"np: {n_vals}\nmatProps:{np.array(m_vals)}",
        )

    def test_logLinearExtrapolation(self):
        """A ValueError should be thrown if performing a log-linear interpolation outside the function domain."""
        x = np.arange(1.0, 11.0)
        y = -42.0 + x + x**-2
        with self.assertRaisesRegex(ValueError, "out of bounds"):
            logLinear(0.5, x, y)
================================================
FILE: armi/matProps/tests/test_material.py
================================================
# Copyright 2026 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Program that runs all of the tests in the TestMapPropsMaterial class."""
import os
import unittest
import armi.matProps
from armi.matProps.material import Material
from armi.matProps.materialType import MaterialType
# Directory containing this test module; used to locate the test data files.
THIS_DIR = os.path.dirname(__file__)
class TestMapPropsMaterial(unittest.TestCase):
    """Class which tests the functionality of the matProps Material class."""

    @staticmethod
    def _createFunction(materialType):
        """
        Helper function used to construct a minimum viable material definition for tests.

        Parameters
        ----------
        materialType
            String containing the "material type" node value

        Returns
        -------
        Material
            The material parsed from the minimal definition node.
        """
        testNode = {
            "file format": "TESTS",
            "composition": {"Fe": "balance"},
            "material type": materialType,
            "density": {
                "function": {
                    "T": {
                        "min": 100.0,
                        "max": 200.0,
                    },
                    "type": "symbolic",
                    "equation": 1.0,
                }
            },
        }
        mat = Material()
        mat.loadNode(testNode)
        return mat

    def test_getValidFileFormatVersions(self):
        """Every valid file format version is a float, except the special "TESTS" marker."""
        versions = armi.matProps.Material.getValidFileFormatVersions()
        self.assertGreater(len(versions), 1)
        for version in versions:
            # isinstance is the idiomatic type check (was: type(version) is not float)
            if not isinstance(version, float):
                self.assertEqual(version, "TESTS")

    def test_loadFile(self):
        """Loading a file via Material.loadFile does not register it in the global library."""
        mat = armi.matProps.Material()
        # NOTE(review): expected repr literal looks truncated ("") — confirm
        # against Material.__str__.
        self.assertEqual(str(mat), "")
        fPath = os.path.join(THIS_DIR, "testMaterialsData", "materialA.yaml")
        self.assertEqual(len(sorted(armi.matProps.materials.keys())), 0)
        mat.loadFile(fPath)
        self.assertEqual(len(sorted(armi.matProps.materials.keys())), 0)

    def test_datafilesType(self):
        """Each supported "material type" string parses to the matching MaterialType."""
        materialTypeNames = [
            "Fuel",
            "Metal",
            "Fluid",
            "Ceramic",
            "ASME2015",
            "ASME2017",
            "ASME2019",
        ]
        for matTypeName in materialTypeNames:
            parseType = self._createFunction(matTypeName).materialType
            typeIdx = MaterialType.types[matTypeName]
            expectedType = MaterialType(typeIdx)
            self.assertEqual(parseType, expectedType)

    def test_invalidFileFormat(self):
        """An unsupported file format version raises a ValueError."""
        fPath = os.path.join(THIS_DIR, "invalidTestFiles", "badFileFormat.YAML")
        mat = armi.matProps.Material()
        with self.assertRaises(ValueError):
            mat.loadFile(fPath)

    def test_datafilesInvType(self):
        """An unknown "material type" string raises a KeyError."""
        with self.assertRaisesRegex(KeyError, "Invalid material type"):
            self._createFunction("Solid")

    def test_saveLogic(self):
        """save() flips the saved() flag from False to True."""
        mat = self._createFunction("Metal")
        self.assertFalse(mat.saved())
        mat.save()
        self.assertTrue(mat.saved())
================================================
FILE: armi/matProps/tests/test_materialType.py
================================================
# Copyright 2026 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the MaterialType class."""
import unittest
from armi.matProps.materialType import MaterialType
class TestMaterialType(unittest.TestCase):
    """Tests MaterialType construction from strings, its repr, and equality semantics."""

    def test_fromString(self):
        """Known type names map to their expected internal integer values."""
        for typeName, expectedValue in (("Fuel", 1), ("Metal", 2), ("Fluid", 4)):
            self.assertEqual(MaterialType.fromString(typeName)._value, expectedValue)

    def test_repr(self):
        """All named types stringify to the same expected literal."""
        for typeName in ("Fuel", "Metal", "Fluid"):
            # NOTE(review): the expected repr literal looks truncated ("") —
            # confirm against MaterialType.__str__.
            self.assertEqual(str(MaterialType.fromString(typeName)), "")

    def test_equality(self):
        """MaterialType compares equal to same-valued instances and raw ints, not strings."""
        mtFuel = MaterialType(1)
        mtFuelTwin = MaterialType(1)
        mtFluid = MaterialType(4)
        # comparisons against other MaterialType instances
        self.assertTrue(mtFuel == mtFuel)
        self.assertTrue(mtFuel == mtFuelTwin)
        self.assertFalse(mtFuel == mtFluid)
        self.assertFalse(mtFuelTwin == mtFluid)
        # comparisons against raw integers
        self.assertTrue(mtFuel == 1)
        self.assertTrue(mtFuelTwin == 1)
        self.assertFalse(mtFuel == 4)
        self.assertFalse(mtFuelTwin == 4)
        # comparing against a string raises TypeError
        with self.assertRaises(TypeError):
            self.assertTrue(mtFuel == "1")
================================================
FILE: armi/matProps/tests/test_parsing.py
================================================
# Copyright 2026 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test YAML parsers for all files in the matProps data directory to ensure that there are no parsing errors."""
import os
import tempfile
import unittest
from os import path
import armi.matProps
class TestParsing(unittest.TestCase):
    """Class which tests the parsing and material library loading functions of matProps."""

    @property
    def dirname(self):
        """Provide the directory where this file is located."""
        return path.dirname(path.realpath(__file__))

    @classmethod
    def setUpClass(cls):
        # Index every YAML material file found under the shared test-data directory.
        cls.dummyDataPath = path.join(path.dirname(path.realpath(__file__)), "testMaterialsData")
        cls.dummyMatFiles = {}
        for root, _, files in os.walk(cls.dummyDataPath):
            for fileName in files:
                if fileName.lower().endswith((".yaml", ".yml")):
                    cls.dummyMatFiles[fileName] = os.path.join(root, fileName)
        armi.matProps.clear()

    def tearDown(self):
        # Keep the global material registry clean between tests.
        armi.matProps.clear()

    def test_datafilesMatOwner(self):
        """Materials loaded without registration do not appear in the global registry."""
        for matFile, matPath in self.dummyMatFiles.items():
            matNam = path.splitext(matFile)[0]
            # the default behavior is loadMaterial(matPath, false)
            m = armi.matProps.loadMaterial(matPath)
            self.assertIsNotNone(m)
            with self.assertRaisesRegex(KeyError, f"No material named `{matNam}` was loaded within loaded data."):
                armi.matProps.getMaterial(matNam)
            # consistency: reuse the already-bound matPath rather than re-indexing the dict
            m = armi.matProps.loadMaterial(matPath, False)
            self.assertIsNotNone(m)
            with self.assertRaisesRegex(KeyError, f"No material named `{matNam}` was loaded within loaded data."):
                armi.matProps.getMaterial(matNam)
            # test the pass-through function load_material, instead of the preferred loadMaterial
            m = armi.matProps.load_material(matPath, True)
            self.assertIsNotNone(m)
            m = armi.matProps.getMaterial(matNam)
            self.assertIsNotNone(m)

    def test_multiDataLoadingLoadingAll(self):
        """loadAll registers every file in the directory; clear empties the registry."""
        armi.matProps.loadAll(self.dummyDataPath)
        self.assertEqual(len(self.dummyMatFiles), len(armi.matProps.loadedMaterials()))
        armi.matProps.clear()
        self.assertEqual(0, len(armi.matProps.loadedMaterials()))

    def test_loadSafe(self):
        """loadSafe is idempotent and fails clearly on a missing directory."""
        armi.matProps.clear()
        self.assertEqual(0, len(armi.matProps.loadedMaterials()))
        # verify that it is safe to call loadSafe() multiple times in a row
        for _ in range(3):
            armi.matProps.loadSafe(self.dummyDataPath)
            self.assertEqual(len(self.dummyMatFiles), len(armi.matProps.loadedMaterials()))
        # verify the correct behavior if a bad directory is provided
        badDir = "does_not_exist_2924"
        with self.assertRaisesRegex(FileNotFoundError, f"Directory {badDir} not found"):
            # test with the pass through "load_safe", instead of the preferred loadSafe
            armi.matProps.load_safe(badDir)

    def test_dataLoadingPrioSameDir(self):
        """Loading the same directory twice raises a KeyError; hashes remain queryable."""
        armi.matProps.loadAll(self.dummyDataPath)
        with self.assertRaises(KeyError):
            armi.matProps.loadAll(self.dummyDataPath)
        # bonus test of getHashes
        hashes = armi.matProps.getHashes()
        self.assertGreater(len(hashes), 3)
        for h in hashes:
            self.assertGreater(len(h), 8)
            self.assertIsInstance(h, str)

    def test_datafilesBadPath(self):
        """loadAll rejects missing directories and non-directory paths; empty dirs are fine."""
        badDir = "nopity-nopers-missing"
        with self.assertRaisesRegex(FileNotFoundError, f"Directory {badDir} not found"):
            armi.matProps.loadAll(badDir)
        with self.assertRaisesRegex(NotADirectoryError, "Input path"):
            armi.matProps.loadAll(path.abspath(__file__))
        with tempfile.TemporaryDirectory() as tmpDirName:
            armi.matProps.loadAll(tmpDirName)

    def test_multiDataLoadingMultidir(self):
        """Tests loading multiple data directories.

        Load all files present in the following subdirectories of the matProps repository: tests/testDir1 and
        tests/testDir2.
        """
        dir1 = path.join(self.dirname, "testDir1")
        dir2 = path.join(self.dirname, "testDir2")
        # Load the two directories
        armi.matProps.loadAll(dir1)
        armi.matProps.loadAll(dir2)
        # Check that the two directories are in loaded materials
        loadList = armi.matProps.get_loaded_root_dirs()
        self.assertTrue(dir1 in loadList)
        self.assertTrue(dir2 in loadList)
        self.assertTrue(len(loadList) == 2)
        # Create list of file names in two directories. They are unique
        fileSet = set()
        for fileName in os.listdir(dir1):
            fileSet.add(path.splitext(fileName)[0])
        for fileName in os.listdir(dir2):
            fileSet.add(path.splitext(fileName)[0])
        materialSet = set()
        for material in armi.matProps.loadedMaterials():
            materialSet.add(material.name)
        self.assertTrue(fileSet == materialSet)

    def test_dataLoadingPrioDiffDir(self):
        """
        Tests that an error is raised for loading a material twice from different directories.

        Attempts to load all files present in the following subdirectories of the matProps repository: tests/testDir1
        and tests/testDir3. Though that includes some duplicates that should raise an error.
        """
        dir1 = path.join(self.dirname, "testDir1")
        dir3 = path.join(self.dirname, "testDir3")
        armi.matProps.loadAll(dir1)
        with self.assertRaisesRegex(KeyError, "already exists"):
            armi.matProps.loadAll(dir3)
        matA = armi.matProps.getMaterial("a")
        density = matA.rho
        # Will evaluate to 1.0 if we have the data loaded from testDir1/a.yaml.
        # If we load from testDir3/a.yaml it will have a different value
        self.assertAlmostEqual(density.calc({"T": 150.0}), 1.0)
        self.assertAlmostEqual(density.calc(T=150.0), 1.0)

    def test_datafilesGetMat(self):
        """
        Test a material retrieved by getMaterial(name) is the same as another material with the same name.

        Also tests trying to access an unknown material.
        """
        # test the deprecated "load_all", that is just a pass-through for "loadAll"
        armi.matProps.load_all(self.dummyDataPath)
        # test with the pass-through loaded_materials instead of the preferred loadedMaterials
        for mat in armi.matProps.loaded_materials():
            self.assertEqual(mat, armi.matProps.getMaterial(mat.name))
        with self.assertRaisesRegex(KeyError, "No material named `Fahrvergnugen` was loaded"):
            # test with the pass-through get_material instead of the preferred getMaterial
            armi.matProps.get_material("Fahrvergnugen")
================================================
FILE: armi/matProps/tests/test_performance.py
================================================
# Copyright 2026 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test rough matProps performance timing."""
import copy
import os
import pickle
import timeit
import unittest
import armi.matProps
# NOTE: This is a sketchy magic number; these timing tests are heavily machine dependent.
# Upper bound (seconds) applied to every timed operation below.
_LIMIT_SECONDS = 15
class TestPerformance(unittest.TestCase):
    """
    The tests in this class are an early warning of matProps performance changes. It tests common operations that are
    done with matProps to ensure their execution time remains in the correct ballpark.
    """

    def test_load(self):
        """Tests the speed of loading a set of material files."""
        armi.matProps.clear()
        dataDir = os.path.join(os.path.dirname(__file__), "testMaterialsData")

        def loadThenClear():
            armi.matProps.loadAll(dataDir)
            armi.matProps.clear()

        elapsed = timeit.timeit(loadThenClear, number=10)
        self.assertLess(elapsed, _LIMIT_SECONDS, msg="matProps material loading takes too long to execute.")

    def test_pickle(self):
        """Tests the speed of pickling a set of material files. Pickling is important for multiprocessing."""
        armi.matProps.clear()
        # This directory's material has many properties so it is more representative for pickle size.
        armi.matProps.loadAll(os.path.join(os.path.dirname(__file__), "testDir4"))
        sample = armi.matProps.getMaterial("sampleProperty")
        elapsed = timeit.timeit(lambda: pickle.loads(pickle.dumps(sample)), number=100)
        self.assertLess(elapsed, _LIMIT_SECONDS, msg="matProps material pickling takes too long to execute.")

    def test_calc(self):
        """Tests the speed of calculating a property value."""
        armi.matProps.clear()
        armi.matProps.loadAll(os.path.join(os.path.dirname(__file__), "testMaterialsData"))
        # This material's density is a linear function.
        density = armi.matProps.getMaterial("materialA").rho
        elapsed = timeit.timeit(lambda: density.calc({"T": 300}), number=10000)
        self.assertLess(elapsed, _LIMIT_SECONDS, msg="matProps material calculation takes too long to execute.")

    def test_deepcopy(self):
        """
        Tests the speed of deepcopying a material. Copying is important for copying other objects that may be
        referencing a matProps material.
        """
        armi.matProps.clear()
        # This directory's material has many properties so it is more representative for copy size.
        armi.matProps.loadAll(os.path.join(os.path.dirname(__file__), "testDir4"))
        sample = armi.matProps.getMaterial("sampleProperty")
        elapsed = timeit.timeit(lambda: copy.deepcopy(sample), number=100)
        self.assertLess(elapsed, _LIMIT_SECONDS, msg="matProps material copying takes too long to execute.")
================================================
FILE: armi/matProps/tests/test_piecewiseFunction.py
================================================
# Copyright 2026 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests related to piecewise functions."""
from armi.matProps.material import Material
from armi.matProps.tests import MatPropsFunTestBase
class TestPiecewiseFunction(MatPropsFunTestBase):
    """Tests related to piecewise functions."""

    @staticmethod
    def _symbolicPiece(equation, **bounds):
        """
        Build one entry of a piecewise "functions" list from a symbolic equation.

        Each keyword argument maps a variable name (e.g. ``T``, ``t``) to its
        ``(min, max)`` validity range. The variable bounds are inserted first so
        the resulting node has the same key layout as a hand-written one.
        """
        function = {var: {"min": lo, "max": hi} for var, (lo, hi) in bounds.items()}
        function["type"] = "symbolic"
        function["equation"] = equation
        return {"function": function, "tabulated data": None}

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # A three-segment piecewise function over T in [0, 100].
        cls.basePiecewiseData = {
            "type": "piecewise",
            "T": {"min": 0, "max": 100},
            "functions": [
                cls._symbolicPiece("10", T=(0, 25.4)),
                cls._symbolicPiece("99", T=(25.4, 50)),
                cls._symbolicPiece("-99", T=(50, 100)),
            ],
        }

    def test_piecewiseEqnEval(self):
        """Tests the parsing of a PiecewiseFunction and make sure it evaluates at the appropriate sub function."""
        func = self._createFunction(self.basePiecewiseData).rho
        self.assertIn("PiecewiseFunction", str(func))
        # Points just inside and on the boundaries of each segment.
        for temperature, expected in [(0, 10), (25.4, 10), (25.41, 99), (50, 99), (50.1, -99), (100, -99)]:
            self.assertAlmostEqual(func.calc({"T": temperature}), expected)
        func.clear()
        with self.assertRaises(ValueError):
            func.calc({"T": 0})

    def test_piecewiseEqnGap(self):
        """Test that PiecewiseFunction evaluates correctly with gaps."""
        data = {
            "type": "piecewise",
            "functions": [
                self._symbolicPiece("10", T=(0, 20)),
                self._symbolicPiece("99", T=(30, 50)),
                self._symbolicPiece("-99", T=(50, 100)),
            ],
        }
        func = self._createFunction(data).rho
        # Inputs below the first segment, inside the (20, 30) gap, and above
        # the last segment must all fail to evaluate.
        for badTemp in (-1.0, 25.0, 101.0):
            with self.assertRaisesRegex(ValueError, "PiecewiseFunction error, could not evaluate"):
                func.calc({"T": badTemp})
        for temperature, expected in [(0, 10), (10, 10), (20, 10), (30, 99), (40, 99), (50, 99), (75, -99), (100, -99)]:
            self.assertAlmostEqual(func.calc(T=temperature), expected)

    def test_piecewiseEqnPoly(self):
        """Test that makes a PiecewiseFunction composed of multiple PolynomialFunctions."""
        coefficientMaps = [
            {0: -2.5, 1: 5, 2: 4},
            {0: 3.5, 1: 3, 2: -2, 3: 1},
            {0: 4.5, 1: -2, 2: 3, 3: -2, 4: 1},
        ]
        data = {
            "type": "piecewise",
            "functions": [
                self._symbolicPiece(self.createEqnPoly(coefficientMaps[0]), T=(-100, 100)),
                self._symbolicPiece(self.createEqnPoly(coefficientMaps[1]), T=(100, 300)),
                self._symbolicPiece(self.createEqnPoly(coefficientMaps[2]), T=(300, 500)),
            ],
        }
        func = self._createFunction(data).rho
        # At a shared boundary (100, 300), the earlier child function wins.
        checks = [
            (-100.0, 0),
            (0.0, 0),
            (100.0, 0),
            (200.0, 1),
            (300.0, 1),
            (400.0, 2),
            (500.0, 2),
        ]
        for temperature, mapIndex in checks:
            self.assertAlmostEqual(
                func.calc({"T": temperature}),
                self.polynomialEvaluation(coefficientMaps[mapIndex], temperature),
            )

    def test_piecewiseEqnPolyTable(self):
        """Test that makes a PiecewiseFunction composed of a mixture of polynomial and table functions."""
        poly1CoMap = {0: 3.5, 1: 3, 2: -2, 3: 1}
        poly2CoMap = {0: 4.5, 1: -2, 2: 3, 3: -2, 4: 1}
        tablePiece = {
            "function": {
                "T": 0,
                "type": "table",
            },
            "tabulated data": [[-100.0, -50.0], [0.0, 0.0], [100.0, 50.0]],
        }
        data = {
            "type": "piecewise",
            "functions": [
                tablePiece,
                self._symbolicPiece(self.createEqnPoly(poly1CoMap), T=(100, 300)),
                self._symbolicPiece(self.createEqnPoly(poly2CoMap), T=(300, 500)),
            ],
        }
        func = self._createFunction(data).rho
        # Table region: values between tabulated points are interpolated.
        for temperature, expected in [(-100.0, -50.0), (-50.0, -25.0), (0.0, 0.0), (50.0, 25.0), (100.0, 50.0)]:
            self.assertAlmostEqual(func.calc({"T": temperature}), expected)
        # Polynomial regions.
        for temperature, coMap in [(200.0, poly1CoMap), (300.0, poly1CoMap), (400.0, poly2CoMap), (500.0, poly2CoMap)]:
            self.assertAlmostEqual(func.calc({"T": temperature}), self.polynomialEvaluation(coMap, temperature))

    def test_inputCheckPiecewiseMinTemp(self):
        """Test to make sure an error is thrown when attempting to evaluate below the minimum valid range."""
        self.belowMinimumCheck(self.basePiecewiseData)

    def test_inputCheckPiecewiseMaxTemp(self):
        """Test to make sure an error is thrown when attempting to evaluate above the maximum valid range."""
        self.aboveMaximumCheck(self.basePiecewiseData)

    def _createFunction2D(self, data=None):
        """
        Helper function designed to create a basic viable yaml file for a two dimensional function.

        Parameters
        ----------
        data : dict
            A dictionary containing user specified function child nodes, merged
            over default T/t bounds of [-100, 100].
        """
        funcBody = {"T": {"min": -100, "max": 100}, "t": {"min": -100, "max": 100}}
        if data:
            funcBody.update(data)
        materialData = {
            "file format": "TESTS",
            "composition": {"Fe": "balance"},
            "material type": "Metal",
            "density": {"function": funcBody, "tabulated data": {}},
        }
        mat = Material()
        mat.loadNode(materialData)
        return mat

    def test_piecewiseEqn2d(self):
        """Test that PiecewiseFunction evaluates correctly with multiple dimensions."""
        data = {
            "type": "piecewise",
            "functions": [
                self._symbolicPiece("10", T=(0, 20), t=(0, 20)),
                self._symbolicPiece("99", T=(30, 40), t=(0, 20)),
                self._symbolicPiece("20", T=(0, 20), t=(30, 40)),
                self._symbolicPiece("199", T=(30, 40), t=(30, 40)),
            ],
        }
        func = self._createFunction2D(data).rho
        # Points below, inside the gap of, and above each variable's coverage.
        badPoints = [
            {"T": -1.0, "t": 10},  # below var 1
            {"T": 25.0, "t": 10},  # middle gap var 1
            {"T": 45.0, "t": 10},  # above var 1
            {"T": 10, "t": -1},  # below var 2
            {"T": 10, "t": 25},  # middle gap var 2
            {"T": 10, "t": 45},  # above var 2
        ]
        for point in badPoints:
            with self.assertRaisesRegex(ValueError, "PiecewiseFunction error, could not evaluate"):
                func.calc(point)
        self.assertAlmostEqual(func.calc(T=10, t=10), 10)
        self.assertAlmostEqual(func.calc(T=10, t=35), 20)
        self.assertAlmostEqual(func.calc(T=35, t=10), 99)
        self.assertAlmostEqual(func.calc(T=35, t=35), 199)

    def test_piecewiseEqnOverlap(self):
        """Test that PiecewiseFunction fails to load with overlapping regions."""
        data = {
            "type": "piecewise",
            "functions": [
                self._symbolicPiece("10", T=(0, 20), t=(0, 20)),
                # T ranges (0, 20) and (10, 40) overlap on the same t range.
                self._symbolicPiece("99", T=(10, 40), t=(0, 20)),
                self._symbolicPiece("20", T=(0, 20), t=(30, 40)),
                self._symbolicPiece("199", T=(30, 40), t=(30, 40)),
            ],
        }
        with self.assertRaisesRegex(ValueError, "Piecewise child functions overlap"):
            self._createFunction2D(data)

    def test_piecewiseEqnDiffVars(self):
        """Test that PiecewiseFunction fails to load when child functions use different variables."""
        data = {
            "type": "piecewise",
            "functions": [
                self._symbolicPiece("10", T=(0, 20), t=(0, 20)),
                self._symbolicPiece("99", T=(30, 40), t=(0, 20)),
                # Uses R instead of T, unlike its siblings.
                self._symbolicPiece("20", R=(0, 20), t=(30, 40)),
                self._symbolicPiece("199", T=(30, 40), t=(30, 40)),
            ],
        }
        with self.assertRaisesRegex(KeyError, "Piecewise child function must have same variables"):
            self._createFunction2D(data)
================================================
FILE: armi/matProps/tests/test_point.py
================================================
# Copyright 2026 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Program that runs all of the tests for the Point class.
Notes
-----
This file is used to verify the matProps stand-alone wheel installation. As such, it needs to remain small. Do not add
any tests to this file with explicit file IO: no temporary directories, and no test YAML files.
"""
import unittest
from armi.matProps.point import Point
class TestPoint(unittest.TestCase):
    """Unit tests for the matProps Point class."""

    def test_string(self):
        """Test string representation of Point."""
        p = Point(1, 2, 3)
        # NOTE(review): the expected value " 3>" looks truncated — likely the
        # leading part of the repr (something like "<... 1, 2, 3>") was lost in
        # an extraction/merge step. Confirm against Point.__str__ before
        # trusting this assertion.
        self.assertEqual(str(p), " 3>")
================================================
FILE: armi/matProps/tests/test_property.py
================================================
# Copyright 2026 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Program that runs all of the tests contained in PropertyTests class."""
import os
import unittest
from os import path
from armi.matProps import loadMaterial
from armi.matProps.prop import defProp, properties
class PropertyTests(unittest.TestCase):
    """Class which contains tests for the matProps Property class."""

    # Expected (material attribute, value) pairs for the sampleProperty test
    # material in testDir4. Each property there is a constant function that
    # returns its listed value, which lets one table drive both spot-check
    # tests (dict-style and kwargs-style calc calls).
    _SAMPLE_PROPERTY_VALUES = [
        ("rho", 1.0),
        ("c_p", 2.0),
        ("k", 3.0),
        ("alpha_d", 4.0),
        ("mu_d", 5.0),
        ("mu_k", 6.0),
        ("T_melt", 7.0),
        ("T_boil", 8.0),
        ("dH_vap", 9.0),
        ("dH_fus", 10.0),
        ("gamma", 11.0),
        ("P_sat", 12.0),
        ("kappa", 13.0),
        ("alpha_mean", 14.0),
        ("alpha_inst", 15.0),
        ("E", 16.0),
        ("nu", 17.0),
        ("Sy", 18.0),
        ("Su", 19.0),
        ("Sm", 20.0),
        ("So", 21.0),
        ("Sa", 22.0),
        ("St", 23.0),
        ("Smt", 24.0),
        ("Sr", 25.0),
        ("TSRF", 26.0),
        ("YSRF", 27.0),
        ("WSRF", 28.0),
        ("tMaxSr", 29.0),
        ("tMaxSt", 30.0),
        ("eps_t", 31.0),
        ("eps_iso", 32.0),
        ("SaFat", 33.0),
        ("dl_l", 34.0),
        ("nu_g", 35.0),
        ("v_sound", 36.0),
        ("T_sol", 37.0),
        ("T_liq", 38.0),
        ("dV", 39.0),
        ("H", 40.0),
        ("H_calc_T", 41.0),
        ("K_IC", 42.0),
        ("HBW", 43.0),
        ("f", 44.0),
        ("Kv_prime", 45.0),
        ("S", 46.0),
        ("Elong", 47.0),
    ]

    @classmethod
    def setUpClass(cls):
        # Properties allowed for based on SDID.
        cls.allowedPropertiesList = [
            "density",
            "specific heat capacity",
            "thermal conductivity",
            "thermal diffusivity",
            "dynamic viscosity",
            "kinematic viscosity",
            "melting temperature",
            "boiling temperature",
            "surface tension",
            "vapor pressure",
            "electrical conductance",
            "isothermal compressibility",
            "mean coefficient of thermal expansion",
            "instantaneous coefficient of thermal expansion",
            "Young's modulus",
            "shear modulus",
            "elongation",
            "Poisson's ratio",
            "yield strength",
            "tensile strength",
            "design stress",
            "design reference stress",
            "allowable stress",
            "time dependent design stress",
            "service reference stress",
            "stress to rupture",
            "tensile strength reduction factor",
            "yield strength reduction factor",
            "weld strength reduction factor",
            "allowable time to rupture",
            "allowable time to allowable stress",
            "design fatigue strain range",
            "strain from isochronous stress-strain curve",
            "design fatigue stress",
            "linear expansion",
            "vapor specific volume",
            "speed of sound",
            "solidus temperature",
            "liquidus temperature",
            "volumetric expansion",
            "enthalpy",
            "temperature from enthalpy",
            "enthalpy of fusion",
            "latent heat of vaporization",
            "fracture toughness",
            "Brinell Hardness",
            "factor f from ASME.III.5 Fig. HBB-T-1432-2",
            "factor Kv' from ASME.III.5 Fig. HBB-T-1432-3",
        ]

    @staticmethod
    def _loadSampleMaterial():
        """Load the many-property sample material used by the spot-check tests."""
        pathToTestYaml = path.join(path.dirname(path.realpath(__file__)), "testDir4")
        return loadMaterial(path.join(pathToTestYaml, "sampleProperty.yaml"))

    def test_propertiesUnique(self):
        """Ensure the Property.name and Property.symbol are all unique inside the matProps.properties container."""
        num = len(properties)
        self.assertEqual(num, len({p.name for p in properties}))
        self.assertEqual(num, len({p.symbol for p in properties}))

    def test_propertiesNames(self):
        """Ensure that we have the correct set of Properties in matProps."""
        propertySet = {p.name for p in properties}
        allowedPropertiesSet = set(self.allowedPropertiesList)
        self.assertEqual(propertySet, allowedPropertiesSet)

    def test_propertiesInvName(self):
        """Ensure material loading fails correctly when provided an unknown property."""
        tempFileName = os.path.join(os.path.dirname(__file__), "invalidTestFiles", "badProperty.yaml")
        with self.assertRaisesRegex(KeyError, "Invalid property node"):
            loadMaterial(tempFileName)

    def test_propertiesDefinitions(self):
        """
        Check a logic branch in the Function.factory method which initializes armi.matProps.Function objects to be
        null. armi.matProps.Function objects only get set to a non-null object if the appropriate property node is
        provided in the YAML file. A test YAML file with only the density property provided. It checks to make sure
        that the Material.rho object corresponding with density is not a null object and performs an evaluation. A
        check is then performed on the Material.k object. This object, which corresponds to the thermal conductivity
        property, should be null as it is not defined in the test YAML file.
        """
        # Only the density property exists for the material below. It is a constant function
        yamlFilePath = path.join(path.dirname(path.realpath(__file__)), "testDir1", "a.yaml")
        mat = loadMaterial(yamlFilePath)
        # Name of density function is rho for materials
        self.assertIsNotNone(mat.rho)
        self.assertAlmostEqual(mat.rho.calc({"T": 150.0}), 1.0)
        # k corresponds to thermal conductivity which is not provided in test file.
        self.assertIsNone(mat.k)

    def test_spotCheckAllPropsDict(self):
        """Spot check every property at least once, using a dictionary of input values."""
        testMat = self._loadSampleMaterial()
        for attrName, expected in self._SAMPLE_PROPERTY_VALUES:
            self.assertAlmostEqual(getattr(testMat, attrName).calc({"T": 300.0}), expected)

    def test_spotCheckAllPropsKwargs(self):
        """Spot check every property at least once, using kwargs."""
        testMat = self._loadSampleMaterial()
        for attrName, expected in self._SAMPLE_PROPERTY_VALUES:
            self.assertAlmostEqual(getattr(testMat, attrName).calc(T=300.0), expected)

    def test_defPropDup(self):
        """Defining a property whose symbol already exists raises KeyError."""
        with self.assertRaises(KeyError):
            defProp("rho", "density", "kg/m^3", "rho")
================================================
FILE: armi/matProps/tests/test_references.py
================================================
# Copyright 2026 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the Reference."""
import unittest
from armi.matProps.reference import Reference
class TestReference(unittest.TestCase):
    """Unit tests for Reference."""

    @staticmethod
    def _makeReference(ref=None, refType=None):
        """Build a Reference, setting only the private fields a test needs."""
        reference = Reference()
        if ref is not None:
            reference._ref = ref
        if refType is not None:
            reference._type = refType
        return reference

    def test_str(self):
        """The string form combines the reference and its type."""
        reference = self._makeReference("REF123", "TYPE321")
        self.assertEqual(str(reference), "REF123 (TYPE321)")

    def test_getRef(self):
        """getRef returns the stored reference string."""
        self.assertEqual(self._makeReference(ref="REF234").getRef(), "REF234")

    def test_getType(self):
        """getType returns the stored type string."""
        self.assertEqual(self._makeReference(refType="TYPE789").getType(), "TYPE789")

    def test_factory(self):
        """_factory builds a Reference from a ref/type node."""
        ref = Reference._factory({"ref": "REF234", "type": "TYPE789"})
        self.assertEqual(str(ref), "REF234 (TYPE789)")
        self.assertEqual(ref.getRef(), "REF234")
        self.assertEqual(ref.getType(), "TYPE789")
================================================
FILE: armi/matProps/tests/test_symbolicFunction.py
================================================
# Copyright 2026 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the symbolic function class."""
import copy
import math
import pickle
import unittest
import numpy as np
from armi.matProps.material import Material
class TestSymbolicFunction(unittest.TestCase):
"""Unit tests for the symbolic function class."""
def setUp(self):
self.yaml = {
"file format": "TESTS",
"material type": "Metal",
"composition": {"a": "balance"},
"density": {
"function": {
"type": "symbolic",
"X": {"min": -10, "max": 500.0},
"Y": {"min": 1.0, "max": 20.0},
"Z": {"min": -30.0, "max": -10.0},
"equation": 1.0,
}
},
}
def loadMaterial(self, num=1):
"""Loads the material file based on `self.yaml` and returns the material object."""
mat = Material()
mat.loadNode(self.yaml)
return mat
def functionTest(self, func, num=1):
"""
Takes a function as input to compare against matProps material output.
It is assumed that `self.yaml` has been updated to match the provided evaluation function.
"""
mat = self.loadMaterial(num=num)
prop = mat.rho
for x in np.linspace(prop.getMinBound("X"), prop.getMaxBound("X"), 20):
for y in np.linspace(prop.getMinBound("Y"), prop.getMaxBound("Y"), 20):
for z in np.linspace(prop.getMinBound("Z"), prop.getMaxBound("Z"), 20):
received = prop.calc({"X": x, "Y": y, "Z": z})
expected = func(x, y, z)
self.assertAlmostEqual(
received,
expected,
msg=(
f"Material property evaluation does not match for: {prop.sympyStr} at ({x}, {y}, {z}).\n"
f" Received: {received}, Expected: {expected}"
),
delta=abs(
expected / 1e8
), # very large numbers can have floating point differences at low decimal count
)
def setEqnField(self, eqn):
self.yaml["density"]["function"]["equation"] = eqn
def test_symbolicMult(self):
"""
Test multiplication operator for symbolic equations.
Four combinations of spacing and the operator are tested for multiplying a variable and a constant as well as
multiplying two variables. For each input, the property is evaluated at 20 evenly spaced points per independent
variable within the valid range.
"""
func = lambda x, y, z: x * 20
self.setEqnField("X * 20")
self.functionTest(func, 1)
self.setEqnField("X*20")
self.functionTest(func, 2)
self.setEqnField("X* 20")
self.functionTest(func, 3)
self.setEqnField("X *20")
self.functionTest(func, 4)
func = lambda x, y, z: x * y
self.setEqnField("X * Y")
self.functionTest(func, 5)
self.setEqnField("X*Y")
self.functionTest(func, 6)
self.setEqnField("X*Y")
self.functionTest(func, 7)
self.setEqnField("X *Y")
self.functionTest(func, 8)
def test_symbolicExponent(self):
"""
Test exponent operator for symbolic equations.
Four combinations of spacing and the operator are tested for raising a variable by a constant as well as raising
a constant by a constant. For each input, the property is evaluated at 20 evenly spaced points per independent
variable within the valid range.
"""
func = lambda x, y, z: x**3
self.setEqnField("X ** 3")
self.functionTest(func, 1)
self.setEqnField("X**3")
self.functionTest(func, 2)
self.setEqnField("X** 3")
self.functionTest(func, 3)
self.setEqnField("X **3")
self.functionTest(func, 4)
func = lambda x, y, z: 1.1**y
self.setEqnField("1.1 ** Y")
self.functionTest(func, 5)
self.setEqnField("1.1**Y")
self.functionTest(func, 6)
self.setEqnField("1.1** Y")
self.functionTest(func, 7)
self.setEqnField("1.1 **Y")
self.functionTest(func, 8)
def test_symbolicDiv(self):
"""
Test division operator for symbolic equations.
The four combinations of spacing and the operator are tested for dividing a variable and a constant as well as
dividing two variables. For each input, the property is evaluated at 20 evenly spaced points per independent
variable within the valid range.
"""
func = lambda x, y, z: x / 3
self.setEqnField("X / 3")
self.functionTest(func, 1)
self.setEqnField("X/3")
self.functionTest(func, 2)
self.setEqnField("X/ 3")
self.functionTest(func, 3)
self.setEqnField("X /3")
self.functionTest(func, 4)
func = lambda x, y, z: x / y
self.setEqnField("X / Y")
self.functionTest(func, 5)
self.setEqnField("X/Y")
self.functionTest(func, 6)
self.setEqnField("X/ Y")
self.functionTest(func, 7)
self.setEqnField("X /Y")
self.functionTest(func, 8)
def test_symbolicAdd(self):
"""
Test addition operator for symbolic equations.
Four combinations of spacing and the operator are tested for adding a variable and a constant as well as adding
two variables. For each input, the property is evaluated at 20 evenly spaced points per independent variable
within the valid range.
"""
func = lambda x, y, z: x + 3
self.setEqnField("X + 3")
self.functionTest(func, 1)
self.setEqnField("X+3")
self.functionTest(func, 2)
self.setEqnField("X+ 3")
self.functionTest(func, 3)
self.setEqnField("X +3")
self.functionTest(func, 4)
func = lambda x, y, z: x + y
self.setEqnField("X + Y")
self.functionTest(func, 5)
self.setEqnField("X+Y")
self.functionTest(func, 6)
self.setEqnField("X+ Y")
self.functionTest(func, 7)
self.setEqnField("X +Y")
self.functionTest(func, 8)
def test_symbolicSub(self):
"""
Test subtraction operator for symbolic equations.
Four combinations of spacing and the operator are tested for subtracting a variable and a constant as well
as subtracting two variables. For each input, the property is evaluated at 20 evenly spaced points per
independent variable within the valid range.
"""
func = lambda x, y, z: x - 3
self.setEqnField("X - 3")
self.functionTest(func, 1)
self.setEqnField("X-3")
self.functionTest(func, 2)
self.setEqnField("X- 3")
self.functionTest(func, 3)
self.setEqnField("X -3")
self.functionTest(func, 4)
func = lambda x, y, z: x - z
self.setEqnField("X - Z")
self.functionTest(func, 5)
self.setEqnField("X-Z")
self.functionTest(func, 6)
self.setEqnField("X- Z")
self.functionTest(func, 7)
self.setEqnField("X -Z")
self.functionTest(func, 8)
def test_symbolicParens(self):
"""
Test the grouping operator for symbolic equations.
Various combinations of grouping is tested with spacing on a simple addition operation. For each input, the
property is evaluated at 20 evenly spaced points per independent variable within the valid range.
"""
func = lambda x, y, z: x + 3
self.setEqnField("(X + 3)")
self.functionTest(func, 1)
self.setEqnField("(X) + 3")
self.functionTest(func, 2)
self.setEqnField("X + (3)")
self.functionTest(func, 3)
self.setEqnField("(X) + (3)")
self.functionTest(func, 4)
self.setEqnField("(X ) + 3")
self.functionTest(func, 5)
self.setEqnField("( X) + 3")
self.functionTest(func, 6)
self.setEqnField("( X ) + 3")
self.functionTest(func, 7)
self.setEqnField("( X + 3)")
self.functionTest(func, 8)
self.setEqnField("(X + 3 )")
self.functionTest(func, 9)
def test_symbolicSine(self):
"""
Test sine operator for symbolic equations.
Four combinations of spacing and the operator are tested. For each input, the property is evaluated at 20
evenly spaced points per independent variable within the valid range.
"""
func = lambda x, y, z: math.sin(x)
self.setEqnField("sin(X)")
self.functionTest(func, 1)
self.setEqnField("sin (X)")
self.functionTest(func, 2)
self.setEqnField("sin( X)")
self.functionTest(func, 3)
self.setEqnField("sin(X )")
self.functionTest(func, 4)
def test_symbolicCosine(self):
"""
Test cosine operator for symbolic equations.
Four combinations of spacing and the operator are tested. For each input, the property is evaluated at 20 evenly
spaced points per independent variable within the valid range.
"""
func = lambda x, y, z: math.cos(x)
self.setEqnField("cos(X)")
self.functionTest(func, 1)
self.setEqnField("cos (X)")
self.functionTest(func, 2)
self.setEqnField("cos( X)")
self.functionTest(func, 3)
self.setEqnField("cos(X )")
self.functionTest(func, 4)
def test_symbolicTan(self):
"""
Test tangent operator for symbolic equations.
Four combinations of spacing and the operator are tested. For each input, the property is evaluated at 20 evenly
spaced points per independent variable within the valid range.
"""
func = lambda x, y, z: math.tan(x)
self.setEqnField("tan(X)")
self.functionTest(func, 1)
self.setEqnField("tan (X)")
self.functionTest(func, 2)
self.setEqnField("tan( X)")
self.functionTest(func, 3)
self.setEqnField("tan(X )")
self.functionTest(func, 4)
def test_symbolicSinh(self):
"""
Test hyperbolic sine operator for symbolic equations.
Four combinations of spacing and the operator are tested. For each input, the property is evaluated at 20 evenly
spaced points per independent variable within the valid range.
"""
func = lambda x, y, z: math.sinh(x)
self.setEqnField("sinh(X)")
self.functionTest(func, 1)
self.setEqnField("sinh (X)")
self.functionTest(func, 2)
self.setEqnField("sinh( X)")
self.functionTest(func, 3)
self.setEqnField("sinh(X )")
self.functionTest(func, 4)
def test_symbolicCosh(self):
"""
Test hyperbolic cosine operator for symbolic equations.
Four combinations of spacing and the operator are tested. For each input, the property is evaluated at 20 evenly
spaced points per independent variable within the valid range.
"""
func = lambda x, y, z: math.cosh(x)
self.setEqnField("cosh(X)")
self.functionTest(func, 1)
self.setEqnField("cosh (X)")
self.functionTest(func, 2)
self.setEqnField("cosh( X)")
self.functionTest(func, 3)
self.setEqnField("cosh(X )")
self.functionTest(func, 4)
def test_symbolicTanh(self):
"""
Test hyperbolic tangent operator for symbolic equations.
Four combinations of spacing and the operator are tested. For each input, the property is evaluated at 20 evenly
spaced points per independent variable within the valid range.
"""
func = lambda x, y, z: math.tanh(x)
self.setEqnField("tanh(X)")
self.functionTest(func, 1)
self.setEqnField("tanh (X)")
self.functionTest(func, 2)
self.setEqnField("tanh( X)")
self.functionTest(func, 3)
self.setEqnField("tanh(X )")
self.functionTest(func, 4)
def test_symbolicNatLog(self):
"""
Test natural logarithm operator for symbolic equations.
Both log and ln variations of the function name are tested. Four combinations of spacing and the operator are
tested for each function name. For each input, the property is evaluated at 20 evenly spaced points per
independent variable within the valid range.
"""
func = lambda x, y, z: math.log(y)
self.setEqnField("ln(Y)")
self.functionTest(func, 1)
self.setEqnField("ln (Y)")
self.functionTest(func, 2)
self.setEqnField("ln( Y)")
self.functionTest(func, 3)
self.setEqnField("ln(Y )")
self.functionTest(func, 4)
self.setEqnField("log(Y)")
self.functionTest(func, 5)
self.setEqnField("log (Y)")
self.functionTest(func, 6)
self.setEqnField("log( Y)")
self.functionTest(func, 7)
self.setEqnField("log(Y )")
self.functionTest(func, 8)
def test_symbolicLog10(self):
"""
Test base ten logarithm operator for symbolic equations.
Four combinations of spacing and the operator are tested. For each input, the property is evaluated at 20 evenly
spaced points per independent variable within the valid range.
"""
func = lambda x, y, z: math.log10(y)
self.setEqnField("log10(Y)")
self.functionTest(func, 1)
self.setEqnField("log10 (Y)")
self.functionTest(func, 2)
self.setEqnField("log10( Y)")
self.functionTest(func, 3)
self.setEqnField("log10(Y )")
self.functionTest(func, 4)
def test_symbolicExp(self):
"""
Test exponential operator for symbolic equations.
Four combinations of spacing and the operator are tested. For each input, the property is evaluated at 20 evenly
spaced points per independent variable within the valid range.
"""
func = lambda x, y, z: math.exp(y)
self.setEqnField("exp(Y)")
self.functionTest(func, 1)
self.setEqnField("exp (Y)")
self.functionTest(func, 2)
self.setEqnField("exp( Y)")
self.functionTest(func, 3)
self.setEqnField("exp(Y )")
self.functionTest(func, 4)
def test_symbolicComposition(self):
"""
Test composition of functions for symbolic equations.
Four different functions are tested that are composites of other functions. For each input, the property is
evaluated at 20 evenly spaced points per independent variable within the valid range.
"""
# Multiple functions on one side of multiplication/divide
func = lambda x, y, z: x / (math.exp(y) + z)
self.setEqnField("X / (exp(Y) + Z)")
self.functionTest(func, 1)
# Multiple functions inside trig function
func = lambda x, y, z: x * math.sin(z**y)
self.setEqnField("X * sin(Z**Y)")
self.functionTest(func, 2)
# Multiple functions inside hyperbolic function
func = lambda x, y, z: math.tanh((x + 30) ** math.cos(y) + z * 0.2)
self.setEqnField("tanh((X+30) ** cos(Y) + Z*0.2)")
self.functionTest(func, 3)
# Many sets of nested parentheses
func = lambda x, y, z: ((x / (y * z + 1.0)) + 2.5) * 10.2
self.setEqnField("((X / (Y*Z + 1.0)) + 2.5)*10.2")
self.functionTest(func, 4)
def test_symbolicOrdop(self):
"""
Test order of operations for symbolic equations.
Five different equations are evaluated that test different components of order precedence. For each input, the
property is evaluated at 20 evenly spaced points per independent variable within the valid range.
"""
# multiplication and division before addition and subtraction
func = lambda x, y, z: (x * y) + z
self.setEqnField("X * Y + Z")
self.functionTest(func, 1)
func = lambda x, y, z: x + (y * z)
self.setEqnField("X + Y * Z")
self.functionTest(func, 2)
# Left to right for same precedence operators
func = lambda x, y, z: (x * y) / z
self.setEqnField("X * Y / Z")
self.functionTest(func, 3)
# Exponents before multiplication/division
func = lambda x, y, z: ((x + 30) ** 1.1) * (y**2)
self.setEqnField("(X+30) ** 1.1 * Y ** 2")
self.functionTest(func, 4)
# Parentheses before exponents
func = lambda x, y, z: (x + 30) ** (y / 2) - z
self.setEqnField("(X+30) ** (Y/2) - Z")
self.functionTest(func, 5)
def test_symbolicWhitespace(self):
"""
Test excess whitespace is ignored for symbolic equations.
Two different equations are evaluated with varying amounts of whitespace introduced to ensure they produce the
same results. For each input, the property is evaluated at 20 evenly spaced points per independent variable
within the valid range of the property.
"""
func = lambda x, y, z: x + y + z
self.setEqnField(" X + Y + Z")
self.functionTest(func, 1)
self.setEqnField(" X + Y + Z")
self.functionTest(func, 2)
self.setEqnField("X + Y + Z")
self.functionTest(func, 3)
self.setEqnField("X + Y + Z")
self.functionTest(func, 4)
self.setEqnField(" X + Y + Z")
self.functionTest(func, 5)
func = lambda x, y, z: math.sin(x) * y + z
self.setEqnField("sin (X) * Y + Z")
self.functionTest(func, 6)
self.setEqnField(" sin( X ) * Y + Z")
self.functionTest(func, 7)
self.setEqnField("sin(X ) * Y + Z")
self.functionTest(func, 8)
def test_symbolicIntFloat(self):
"""
Test handling of integers and floats for symbolic equations.
Multiple equations are tested that verify that when integers are used in equations they do not result in integer
multiplication and division in Python and are instead treated as floating point numbers. For each input, the
property is evaluated at 20 evenly spaced points per independent variable within the valid range.
"""
func = lambda x, y, z: x / 2.0 + 3.0
self.setEqnField("X / 2 + 3")
self.functionTest(func, 1)
self.setEqnField("X / 2.0 + 3.0")
self.functionTest(func, 2)
func = lambda x, y, z: (x + 30) ** (4.0 / 3.0)
self.setEqnField("(X + 30) ** (4/3)")
self.functionTest(func, 3)
self.setEqnField("(X + 30) ** (4.0/3.0)")
self.functionTest(func, 4)
def test_symbolicBadParens(self):
"""
Test unbalanced parentheses results in errors for symbolic equations.
Multiple equations are tested that verify that various combinations of unbalanced parentheses are detected and
result in an error when parsing the input. Additionally, an expression with extraneous but balanced parentheses
is tested for correctness. For that input, the property is evaluated at 20 evenly spaced points per independent
variable within the valid range.
"""
with self.assertRaises(ValueError):
self.setEqnField("(X + Y")
self.loadMaterial(num=1)
with self.assertRaises(ValueError):
self.setEqnField("((X) + Y")
self.loadMaterial(num=2)
with self.assertRaises(ValueError):
self.setEqnField("(X) + Y)")
self.loadMaterial(num=3)
with self.assertRaises(ValueError):
self.setEqnField("exp(X")
self.loadMaterial(num=4)
with self.assertRaises(ValueError):
self.setEqnField("exp X")
self.loadMaterial(num=5)
with self.assertRaises(ValueError):
self.setEqnField("(((((X + Y)))) + (Z)))")
self.loadMaterial(num=6)
# Test extraneous parentheses as well
func = lambda x, y, z: x + y + z
self.setEqnField("(((((X + Y)))) + (Z))")
self.functionTest(func, 7)
def test_symbolicUndefined(self):
"""
Test that undefined functions results in errors for symbolic equations.
A logarithmic function is evaluated at two points in the valid range to show that the material input is parsed
correctly. The function is then evaluated at a value that results in a negative expression inside the logarithm
which is undefined.
"""
self.setEqnField("ln(X)")
mat = self.loadMaterial(num=1)
prop = mat.rho
self.assertAlmostEqual(prop.calc({"X": 3, "Y": 3, "Z": -20}), math.log(3))
self.assertAlmostEqual(prop.calc({"X": 100, "Y": 3, "Z": -20}), math.log(100))
with self.assertRaises(ValueError):
prop.calc({"X": -5, "Y": 3, "Z": -20})
def test_symbolicCaps(self):
"""
Test bad capitalization results in errors for symbolic equations.
Multiple equations are tested that verify that various combinations of capitalization are detected and result in
an error when parsing the inputs.
"""
with self.assertRaises(ValueError):
self.setEqnField("x + Y")
self.loadMaterial(num=1)
with self.assertRaises(ValueError):
self.setEqnField("TAN(X) + Y")
self.loadMaterial(num=2)
with self.assertRaises(ValueError):
self.setEqnField("Tan(X) + Y")
self.loadMaterial(num=3)
with self.assertRaises(ValueError):
self.setEqnField("eXP(X) + Y")
self.loadMaterial(num=4)
def test_symbolicImpmult(self):
"""
Test implicit multiplication results in errors for symbolic equations.
Multiple equations are tested that verify that various combinations of implicit multiplication are detected and
result in an error when parsing the inputs.
"""
with self.assertRaises(ValueError):
self.setEqnField("2 X")
self.loadMaterial(num=1)
with self.assertRaises(ValueError):
self.setEqnField("X 2")
self.loadMaterial(num=2)
with self.assertRaises(ValueError):
self.setEqnField("2X")
self.loadMaterial(num=3)
with self.assertRaises(ValueError):
self.setEqnField("2(X)")
self.loadMaterial(num=4)
with self.assertRaises(ValueError):
self.setEqnField("X(2)")
self.loadMaterial(num=5)
with self.assertRaises(ValueError):
self.setEqnField("X (2)")
self.loadMaterial(num=6)
with self.assertRaises(ValueError):
self.setEqnField("2 sin(X)")
self.loadMaterial(num=7)
def test_symbolicVarVar(self):
"""
Test repeat variables for symbolic equations.
Multiple equations are tested that verify that various combinations of repeat variable usage evaluate correctly.
For each input, the property is evaluated at 20 evenly spaced points per independent variable within the valid
range.
"""
func = lambda x, y, z: x * x + y / x + z * x
self.setEqnField("X * X + Y / X + Z * X")
self.functionTest(func, 1)
func = lambda x, y, z: math.tan(x * y) + math.cos(x * y) + math.exp(z * y)
self.setEqnField("tan(X * Y) + cos(X * Y) + exp(Z * Y)")
self.functionTest(func, 2)
def test_symbolicScientific(self):
"""
Test scientific notation for symbolic equations.
Multiple equations are tested that verify that various combinations of both upper and lower case scientific
notation evaluate correctly. For each input, the property is evaluated at 20 evenly spaced points per
independent variable within the valid range.
"""
# Test upper case E
func = lambda x, y, z: 3e5 / x
self.setEqnField("3E5 / X")
self.functionTest(func, 1)
func = lambda x, y, z: 1.23e-3 * x
self.setEqnField("1.23E-3 * X")
self.functionTest(func, 2)
# Test lower case e
func = lambda x, y, z: 3e5 / x
self.setEqnField("3e5 / X")
self.functionTest(func, 3)
func = lambda x, y, z: 1.23e-3 * x
self.setEqnField("1.23e-3 * X")
self.functionTest(func, 4)
def test_symbolicExamples(self):
"""Test a handful of complicated symbolic equations."""
# example 1
func = lambda x, y, z: 10**5 * (
(-540 / (1 + math.exp(-0.02 * (x - 220))) + 520) + (-120 / (1 + math.exp(-0.02 * (x - 122))) + 92)
)
self.setEqnField("10**5*((-540/(1+exp(-0.02*(X-220)))+ 520)+ (-120/(1+exp(-0.02*(X-122)))+ 92))")
self.functionTest(func, 1)
# example 2
func = lambda x, y, z: 222.0 + 225.2 * (1 - (x + 273.15) / 2500) + 512.2 * (1 - (x + 273.15) / 2502) ** 0.5
self.setEqnField("222.0 + 225.2 * (1 - (X + 273.15) / 2500) + 512.2 * (1 - (X + 273.15) / 2502) ** 0.5")
self.functionTest(func, 2)
# example 3
func = lambda x, y, z: (2.2e11 - 7.2e6 * x - 4.2e2 * x**2) * (y / (4.2 - 2.2 * y))
self.setEqnField("(2.2E11 - 7.2E6 * X - 4.2E2 * X**2) * (Y / (4.2 - 2.2 * Y))")
self.functionTest(func, 3)
def test_symbolicBadparse(self):
"""Test incorrect expressions results in errors for symbolic equations."""
# Not a math equation
self.setEqnField("Not an equation")
with self.assertRaises(ValueError):
self.loadMaterial(num=1)
# Unknown variable
self.setEqnField("X + Y + W")
with self.assertRaises(ValueError):
self.loadMaterial(num=2)
# Missing an operator
self.setEqnField("X Y")
with self.assertRaises(ValueError):
self.loadMaterial(num=3)
# Missing equation field
del self.yaml["density"]["function"]["equation"]
with self.assertRaises(KeyError):
self.loadMaterial(num=4)
def test_pickleSymbolicFunction(self):
"""Downstream usages might need to pickle a material. Ensure symbolic expression can be pickled."""
self.setEqnField("X + Y")
mat = self.loadMaterial()
stream = pickle.dumps(mat)
mat2 = pickle.loads(stream)
self.assertEqual(mat.rho.getMinBound("X"), mat2.rho.getMinBound("X"))
self.assertEqual(
mat.rho.calc({"X": 0.0, "Y": 10, "Z": -10}),
mat2.rho.calc({"X": 0.0, "Y": 10, "Z": -10}),
)
self.assertEqual(
mat.rho.calc({"X": 300.0, "Y": 15, "Z": -10}),
mat2.rho.calc({"X": 300.0, "Y": 15, "Z": -10}),
)
def test_numpyEvals(self):
"""Test that numpy floats and integers work in evaluations same as integers and floats."""
self.setEqnField("X * 2.0")
mat = self.loadMaterial()
func = lambda x: x * 2
self.assertAlmostEqual(mat.rho.calc(X=np.float64(10), Y=5.0, Z=-10.0), func(10))
self.assertAlmostEqual(mat.rho.calc(X=np.int64(10), Y=5.0, Z=-10.0), func(10))
def test_largeExponentials(self):
"""Test that exponentials don't overflow."""
# If sympy is allowed to simplify this expression it will try to evaluate e^-1400 which will overflow. The
# remainder of the values are chosen just to get a reasonable magnitude expression based on the min/max bounds
# for X/Y.
self.setEqnField("exp(-1400.0 + 2.6*(X*0.1+30*Y))")
mat = self.loadMaterial()
func = lambda x, y: math.exp(-1400 + 2.6 * (x * 0.1 + 30 * y))
self.assertAlmostEqual(mat.rho.calc(X=300, Y=5.0, Z=-10.0), func(300, 5))
def test_symbolicOutofbounds(self):
"""Test evaluation outside of bounds results in ValueError for symbolic equations."""
mat = self.loadMaterial()
prop = mat.rho
mins = [prop.getMinBound(var) for var in ["X", "Y", "Z"]]
maxs = [prop.getMaxBound(var) for var in ["X", "Y", "Z"]]
for i in range(3):
minsEdited = copy.copy(mins)
maxsEdited = copy.copy(maxs)
minsEdited[i] -= 0.1
maxsEdited[i] += 0.1
with self.assertRaises(ValueError):
prop.calc({"X": minsEdited[0], "Y": minsEdited[1], "Z": minsEdited[2]})
with self.assertRaises(ValueError):
prop.calc({"X": maxsEdited[0], "Y": maxsEdited[1], "Z": maxsEdited[2]})
class TestBrokenSymbolicFunctions(unittest.TestCase):
def test_complexNumbers(self):
yaml = {
"file format": "TESTS",
"material type": "Metal",
"composition": {"a": "balance"},
"density": {
"function": {
"type": "symbolic",
"X": {"min": -10, "max": 500.0},
"Y": {"min": 1.0, "max": 20.0},
"Z": {"min": -30.0, "max": -10.0},
"equation": 1.0,
}
},
}
mat = Material()
mat.loadNode(yaml)
# stomp all over the equation, to force it to return a complex number
mat.rho.eqn = eval("lambda x, y, z: 1.0 + 2.0j")
with self.assertRaises(ValueError):
mat.rho._calcSpecific({"X": 1, "Y": 2, "Z": -20})
def test_isNan(self):
yaml = {
"file format": "TESTS",
"material type": "Metal",
"composition": {"a": "balance"},
"density": {
"function": {
"type": "symbolic",
"X": {"min": -10, "max": 500.0},
"Y": {"min": 1.0, "max": 20.0},
"Z": {"min": -30.0, "max": -10.0},
"equation": 1.0,
}
},
}
mat = Material()
mat.loadNode(yaml)
# stomp all over the equation, to force it to return a complex number
mat.rho.eqn = eval("lambda x, y, z: math.nan")
with self.assertRaises(ValueError):
mat.rho._calcSpecific({"X": 1, "Y": 2, "Z": -20})
================================================
FILE: armi/matProps/tests/test_tableFunctions.py
================================================
# Copyright 2026 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests 1D and 2D table Functions."""
import numpy as np
from armi.matProps.tableFunction2D import TableFunction2D
from armi.matProps.tests import MatPropsFunTestBase
class TestTableFunctions(MatPropsFunTestBase):
"""Tests 1D and 2D table Functions."""
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.baseOneDimTableData = {"type": "table", "T": 0}
cls.baseOneDimTable = [[0.0, 5.0], [100.0, 105.0]]
cls.baseTwoDimTableData = {
"type": "two dimensional table",
"T": 0,
"t": 1,
}
cls.baseTwoDimTable = [
[None, [2.0, 200.0, 632.4555]],
[1.0, [10.0, 208.0, 640.4555]],
[100.0, [110.0, 308.0, 740.4555]],
[316.2278, [135, 333, 765.4555]],
]
def test_interpolation1Dtable(self):
"""Test interpolation for a two-point one-dimensional table."""
mat = self._createFunction(self.baseOneDimTableData, self.baseOneDimTable)
mat.name = self.testName
self.assertEqual(str(mat), f">")
func = mat.rho
self.assertIn("TableFunction1D", str(func))
for index in range(9):
val = float(index) * 12.5
self.assertAlmostEqual(func.calc({"T": np.float64(val)}), 5.0 + val)
self.assertAlmostEqual(func.calc({"T": val}), 5.0 + val)
# directly check error is correctly raised if the variable is unknown
with self.assertRaises(ValueError):
func._calcSpecific({"X": 1})
def test_interpolation1DtableMissnode(self):
"""Test to make sure a KeyError is thrown if 'tabulated data' node is absent."""
with self.assertRaisesRegex(KeyError, "tabulated data"):
self._createFunctionWithoutTable(self.baseOneDimTableData)
def test_interpolation1Dtable2(self):
"""Test interpolation for a many-point one-dimensional table."""
data = {"type": "table", "T": {"min": 900, "max": 250}}
tableData = [
[250, 25.68],
[300, 25.97],
[400, 26.28],
[500, 26.26],
[600, 25.89],
[700, 25.19],
[759.7, 24.61],
[800, 25.10],
[900, 26.32],
]
mat = self._createFunction(data, tableData)
mat.name = self.testName
self.assertEqual(str(mat), f">")
self.assertAlmostEqual(mat.rho.calc(T=250), 25.68)
self.assertAlmostEqual(mat.rho.calc(T=275), 25.825)
self.assertAlmostEqual(mat.rho.calc(T=500), 26.26)
self.assertAlmostEqual(mat.rho.calc(T=512.5), 26.21375)
self.assertAlmostEqual(mat.rho.calc(T=729.7), 24.9014572864322)
self.assertAlmostEqual(mat.rho.calc(T=759.7), 24.61)
with self.assertRaises(ValueError):
mat.rho.calc(T=999)
# bonus test of method to clear table data
self.assertIsNotNone(mat.rho.tableData)
mat.rho.clear()
self.assertIsNone(mat.rho.tableData)
def test_interpolation1DtableInt(self):
"""Test interpolation for one-dimensional tables with all integer values."""
tableData = [
[250, 5],
[300, 6],
[400, 7],
[500, 8],
[600, 9],
[700, 10],
[800, 11],
[900, 12],
]
mat = self._createFunction(self.baseOneDimTableData, tableData, minT=250, maxT=900)
mat.name = self.testName
self.assertEqual(str(mat), f">")
self.assertAlmostEqual(mat.rho.calc(T=275), 5.5)
self.assertAlmostEqual(mat.rho.calc(T=312.5), 6.125)
def test_interpolationTable2D(self):
"""Test that evaluates TableFunction2D for different combinations of integer and floating values."""
mat = self._createFunction(self.baseTwoDimTableData, self.baseTwoDimTable)
mat.name = self.testName
self.assertEqual(str(mat), f">")
func = mat.rho
self.assertIn("TableFunction2D", str(func))
self.assertAlmostEqual(func.calc({"T": 2, "t": 1}), 10)
self.assertAlmostEqual(func.calc({"T": 2, "t": 100.0}), 110)
self.assertAlmostEqual(func.calc({"T": 200, "t": 1}), 208)
self.assertAlmostEqual(func.calc({"T": 200, "t": 100}), 308)
self.assertAlmostEqual(func.calc({"T": 100, "t": 1}), 108)
self.assertAlmostEqual(func.calc({"T": 100, "t": 100}), 208)
self.assertAlmostEqual(func.calc({"T": 2, "t": 10}), 60)
self.assertAlmostEqual(func.calc({"T": 100, "t": 10}), 158)
self.assertAlmostEqual(func.calc({"T": 2, "t": 316.2278}), 135)
self.assertAlmostEqual(func.calc({"T": 632.4555, "t": 1}), 640.4555)
self.assertAlmostEqual(func.calc({"T": 200, "t": 316.2278}), 333)
self.assertAlmostEqual(func.calc({"T": 632.4555, "t": 100}), 740.4555)
self.assertAlmostEqual(func.calc({"T": 632.4555, "t": 316.2278}), 765.4555)
self.assertAlmostEqual(func.calc({"T": 200, "t": 177.828}), 320.500006)
self.assertAlmostEqual(func.calc({"T": 355.6559, "t": 100}), 463.6559)
self.assertAlmostEqual(func.calc({"T": 355.6559, "t": 177.828}), 476.155906)
def test_interpolationTable2DMissNode(self):
"""Test to make sure TableFunction2D throws a KeyError if 'tabulated data' node is absent."""
with self.assertRaisesRegex(KeyError, "tabulated data"):
self._createFunctionWithoutTable(self.baseTwoDimTableData)
def test_inputCheckTable2Doutbounds(self):
"""Ensure a ValueError is thrown when evaluating out of the valid bounds."""
mat = self._createFunction(self.baseTwoDimTableData, self.baseTwoDimTable)
func = mat.rho
with self.assertRaises(ValueError):
func.calc({"T": 1.99, "t": 1.0})
with self.assertRaises(ValueError):
func.calc({"T": 632.4655, "t": 1.0})
with self.assertRaises(ValueError):
func.calc({"T": 2.0, "t": 0.99})
with self.assertRaises(ValueError):
func.calc({"T": 2.0, "t": 316.2378})
def test_inputCheckTableMinVar(self):
"""Test to make sure an error is raised when attempting to evaluate below the valid range."""
self.belowMinimumCheck(self.baseOneDimTableData, self.baseOneDimTable)
def test_inputCheckTableMaxVar(self):
"""Test to make sure an error is raised when attempting to evaluate above the valid range."""
self.aboveMaximumCheck(self.baseOneDimTableData, self.baseOneDimTable)
def test_inputCheckTable2DMinVar1(self):
"""Test to make sure an error is raised when attempting to evaluate below the valid range."""
mat = self._createFunction(self.baseTwoDimTableData, self.baseTwoDimTable)
func = mat.rho
with self.assertRaises(ValueError):
func.calc({"T": 1, "t": 50})
def test_inputCheckTable2DMaxVar1(self):
"""Test to make sure an error is raised when attempting to evaluate above the valid range."""
mat = self._createFunction(self.baseTwoDimTableData, self.baseTwoDimTable)
func = mat.rho
with self.assertRaises(ValueError):
func.calc({"T": 650, "t": 50})
def test_inputCheckTable2DMinVar2(self):
"""Ensure an ValueError is raised when evaluating below the valid range."""
mat = self._createFunction(self.baseTwoDimTableData, self.baseTwoDimTable)
func = mat.rho
with self.assertRaises(ValueError):
func.calc({"T": 1, "t": 0})
def test_table2DsetBounds(self):
mat = self._createFunction(self.baseTwoDimTableData, self.baseTwoDimTable)
fun = mat.rho
# staring values
self.assertEqual(fun.independentVars["T"], (2.0, 632.4555))
self.assertEqual(fun.independentVars["t"], (1.0, 316.2278))
# calling _setBounds will wipe out the "t" variable, but not update "T"
fun._columnValues = [123, 987]
fun._setBounds(0, "T")
self.assertEqual(fun.independentVars["T"], (2.0, 632.4555))
with self.assertRaises(KeyError):
fun.independentVars["t"]
# Here we update "T" with new column values
fun._columnValues = [123, 987]
fun._setBounds(0, "X")
self.assertEqual(fun.independentVars["X"], (123.0, 987.0))
# Here we update the new variable "X" with new row values
fun._rowValues = [11, 99]
fun._setBounds(1, "X")
self.assertEqual(fun.independentVars["T"], (2.0, 632.4555))
self.assertEqual(fun.independentVars["X"], (11.0, 99.0))
with self.assertRaises(KeyError):
fun.independentVars["t"]
# Bad inputs
with self.assertRaises(ValueError):
fun._setBounds(2, "X")
def test_inputCheckTable2DMaxVar2(self):
"""Ensure an ValueError is raised when evaluating above the valid range."""
mat = self._createFunction(self.baseTwoDimTableData, self.baseTwoDimTable)
func = mat.rho
with self.assertRaises(ValueError):
func.calc({"T": 1, "t": 1000})
def test_calcSpec2dEdgeCase(self):
f = TableFunction2D("mat", "prop")
f.independentVars = {"T": (250.0, 800.0), "t": (1, 3)}
# This should fail correctly when given a bad input param
with self.assertRaises(ValueError):
f._calcSpecific({"Pa": 1.0})
================================================
FILE: armi/materials/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The material package defines compositions and material-specific properties.
Properties in scope include temperature dependent thermo/mechanical properties
(like heat capacity, linear expansion coefficients, viscosity, density),
and material-specific nuclear properties that can't exist at the nuclide level
alone (like :py:mod:`thermal scattering laws `).
As the fundamental macroscopic building blocks of any physical object,
these are highly important to reactor analysis.
This module handles the dynamic importing of all the materials defined here at the
framework level as well as in all the attached plugins. It is expected that most teams
will have special material definitions that they will want to define.
It may also make sense in the future to support user-input materials that are not
hard-coded into the app.
The base class for all materials is in :py:mod:`armi.materials.material`.
"""
import importlib
import inspect
import pkgutil
from typing import List
from armi.materials.material import Material
# Ordered list of importable package names that are searched when resolving a material by name;
# earlier entries win. This will frequently be updated by the CONF_MATERIAL_NAMESPACE_ORDER setting
# during reactor construction (see armi.reactor.reactors.factory).
_MATERIAL_NAMESPACE_ORDER = ["armi.materials"]
def setMaterialNamespaceOrder(order: List[str]) -> None:
    """
    Set the material namespace order at the Python interpreter, global level.

    .. impl:: Material collections are defined with an order of precedence in the case
        of duplicates.
        :id: I_ARMI_MAT_ORDER
        :implements: R_ARMI_MAT_ORDER

        An ARMI application will need materials. Materials can be imported from
        any code the application has access to, like plugin packages. This leads to
        the situation where one ARMI application will want to import multiple
        collections of materials. To handle this, ARMI keeps a list of material
        namespaces. This is an ordered list of importable packages that ARMI
        can search for a particular material by name.

        This automatic exploration of an importable package saves the user the
        tedium of having to import or include hundreds of materials manually somehow.
        But it comes with a caveat; the list is ordered. If two different namespaces in
        the list include a material with the same name, the first one found in the list
        is chosen, i.e. earlier namespaces in the list have precedence.
    """
    global _MATERIAL_NAMESPACE_ORDER
    _MATERIAL_NAMESPACE_ORDER = order
def importMaterialsIntoModuleNamespace(path, name, namespace, updateSource=None):
    """
    Import all Material subclasses into the top subpackage.

    This allows devs to use ``from armi.materials import HT9``
    This can be used in plugins for similar purposes.

    .. warning::
        Do not directly import materials from this namespace in code. Use the full module
        import instead. This is just for material resolution. This will be replaced with a more
        formal material registry in the future.

    Parameters
    ----------
    path : str
        Path to package/module being imported
    name : str
        module name
    namespace : dict
        The namespace
    updateSource : str, optional
        Change DATA_SOURCE on import to a different string.
        Useful for saying where plugin materials are coming from.
    """
    for _importer, moduleName, _isPackage in pkgutil.walk_packages(path=path, prefix=name + "."):
        # skip test modules; they are not material definitions
        if "test" in moduleName:
            continue
        module = importlib.import_module(moduleName)
        for attrName, attrValue in module.__dict__.items():
            try:
                isMaterial = issubclass(attrValue, Material)
            except TypeError:
                # some non-class local
                continue
            if isMaterial:
                namespace[attrName] = attrValue
                if updateSource:
                    attrValue.DATA_SOURCE = updateSource
# Populate this package's namespace with all framework Material subclasses at import time.
importMaterialsIntoModuleNamespace(__path__, __name__, globals())
def iterAllMaterialClassesInNamespace(namespace):
    """
    Iterate over all Material subclasses found in a namespace.

    Notes
    -----
    Useful for testing.
    """
    for candidate in namespace.__dict__.values():
        if inspect.isclass(candidate) and issubclass(candidate, Material):
            yield candidate
def resolveMaterialClassByName(name: str, namespaceOrder: List[str] = None):
    """
    Find the first material class matching ``name`` in an ordered list of namespaces.

    ``name`` may be a fully-specified class path (e.g. ``armi.materials.uZr:UZr``)
    or a bare class name (e.g. ``UZr``). For bare names, the
    ``CONF_MATERIAL_NAMESPACE_ORDER`` setting allows users to choose which
    particular material of a common name (like UO2 or HT9) gets used.

    Input files usually specify a material like UO2. Which particular implementation
    gets used (Framework's UO2 vs. a user plugin's UO2 vs. the Kentucky Transportation
    Cabinet's UO2) is up to the user at runtime.

    .. impl:: Materials can be searched across packages in a defined namespace.
        :id: I_ARMI_MAT_NAMESPACE
        :implements: R_ARMI_MAT_NAMESPACE

        During the runtime of an ARMI application, but particularly during the
        construction of the reactor in memory, materials will be requested by name. At
        that point, this code is called to search for that material name. The search
        goes through the ordered list of Python namespaces provided. The first time an
        instance of that material is found, it is returned. In this way, the first
        items in the material namespace list take precedence.

    Parameters
    ----------
    name : str
        The material class name to find, e.g. ``"UO2"``. Optionally, a module path
        and class name can be provided with a colon separator as ``module:className``,
        e.g. ``armi.materials.uraniumOxide:UO2`` for direct specification.
    namespaceOrder : list of str, optional
        Namespaces to search, in order of preference. Defaults to the global
        ``MATERIAL_NAMESPACE_ORDER``, which is often set by the
        ``CONF_MATERIAL_NAMESPACE_ORDER`` setting (e.g. during reactor
        construction). Ignored when ``name`` contains an explicit module path.

    Returns
    -------
    matCls : armi.materials.material.Material
        The material class

    Raises
    ------
    KeyError
        When no namespace provides a material of the given name.

    Examples
    --------
    >>> resolveMaterialClassByName("UO2", ["something.else.materials", "armi.materials"])

    See Also
    --------
    armi.reactor.reactors.factory
        Applies user settings to default namespace order.
    """
    if ":" in name:
        # direct specification, e.g. `armi.materials.uZr:UZr`
        modulePath, className = name.split(":")
        module = importlib.import_module(modulePath)
        return getattr(module, className)
    if not namespaceOrder:
        namespaceOrder = _MATERIAL_NAMESPACE_ORDER
    for candidateNamespace in namespaceOrder:
        candidateModule = importlib.import_module(candidateNamespace)
        try:
            # EAFP: the first namespace providing the attribute wins
            return getattr(candidateModule, name)
        except AttributeError:
            continue
    raise KeyError(
        f"Cannot find material named `{name}` in any of: {str(namespaceOrder)}. "
        "Please update inputs or plugins. See CONF_MATERIAL_NAMESPACE_ORDER setting."
    )
================================================
FILE: armi/materials/air.py
================================================
# Copyright 2022 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple air material.
The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to
this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data
contained in this file should not be used in production simulations.
"""
from armi.materials import material
from armi.utils.units import G_PER_CM3_TO_KG_PER_M3, getTk
class Air(material.Fluid):
    """
    Dry air, near sea level.

    Correlations based off of values in Incropera, Frank P., et al.
    Fundamentals of heat and mass transfer. Vol. 5. New York: Wiley, 2002.

    Elemental composition from PNNL-15870 Rev. 1
    https://www.pnnl.gov/main/publications/external/technical_reports/PNNL-15870Rev1.pdf
    """

    # Valid temperature ranges are based on where the fitted correlations are
    # more than 1% off of the reference values. (This note was previously a
    # stray class-body string literal; a comment is the correct construct.)
    propertyValidTemperature = {
        "pseudoDensity": ((100, 2400), "K"),
        "heat capacity": ((100, 1300), "K"),
        "thermal conductivity": ((200, 850), "K"),
    }

    def setDefaultMassFracs(self):
        """
        Set mass fractions.

        Notes
        -----
        Mass fraction reference McConn, Ronald J., et al. Compendium of
        material composition data for radiation transport modeling. No.
        PNNL-15870 Rev. 1. Pacific Northwest National Lab.(PNNL), Richland,
        WA (United States), 2011.
        https://www.pnnl.gov/main/publications/external/technical_reports/PNNL-15870Rev1.pdf
        """
        self.setMassFrac("C", 0.000124)
        self.setMassFrac("N", 0.755268)
        self.setMassFrac("O", 0.231781)
        self.setMassFrac("AR", 0.012827)

    def pseudoDensity(
        self,
        Tk=None,
        Tc=None,
    ):
        """
        Returns density of Air in g/cc.

        This is from Table A.4 in
        Fundamentals of Heat and Mass Transfer Incropera, DeWitt

        Parameters
        ----------
        Tk : float, optional
            temperature in degrees Kelvin
        Tc : float, optional
            temperature in degrees Celsius

        Notes
        -----
        In ARMI, we define pseudoDensity() and density() as the same for Fluids.

        Returns
        -------
        density : float
            mass density in g/cc
        """
        Tk = getTk(Tc, Tk)
        self.checkPropertyTempRange("pseudoDensity", Tk)
        inv_Tk = 1.0 / Tk
        # quadratic fit in 1/T, evaluated in kg/m^3 then converted to g/cc
        rho_kgPerM3 = 1.15675e03 * inv_Tk**2 + 3.43413e02 * inv_Tk + 2.99731e-03
        return rho_kgPerM3 / G_PER_CM3_TO_KG_PER_M3

    def specificVolumeLiquid(self, Tk=None, Tc=None):
        """Returns the liquid specific volume in m^3/kg of this material given Tk in K or Tc in C."""
        return 1 / (1000.0 * self.pseudoDensity(Tk, Tc))

    def thermalConductivity(self, Tk=None, Tc=None):
        """
        Returns thermal conductivity of Air in W/m-K.

        This is from Table A.4 in Fundamentals of Heat and Mass Transfer
        Incropera, DeWitt

        Parameters
        ----------
        Tk : float, optional
            temperature in degrees Kelvin
        Tc : float, optional
            temperature in degrees Celsius

        Returns
        -------
        thermalConductivity : float
            thermal conductivity in W/m*K
        """
        Tk = getTk(Tc, Tk)
        self.checkPropertyTempRange("thermal conductivity", Tk)
        # cubic fit evaluates in mW/m-K; the 1e-3 factor converts to W/m-K
        thermalConductivity = 2.13014e-08 * Tk**3 - 6.31916e-05 * Tk**2 + 1.11629e-01 * Tk - 2.00043e00
        return thermalConductivity * 1e-3

    def heatCapacity(self, Tk=None, Tc=None):
        """
        Returns heat capacity of Air in J/kg-K.

        This is from Table A.4 in Fundamentals of Heat and Mass Transfer
        Incropera, DeWitt

        Parameters
        ----------
        Tk : float, optional
            temperature in degrees Kelvin
        Tc : float, optional
            temperature in degrees Celsius

        Returns
        -------
        heatCapacity : float
            heat capacity in J/kg*K
        """
        Tk = getTk(Tc, Tk)
        self.checkPropertyTempRange("heat capacity", Tk)
        return (
            sum(
                [
                    +1.38642e-13 * Tk**4,
                    -6.47481e-10 * Tk**3,
                    +1.02345e-06 * Tk**2,
                    -4.32829e-04 * Tk,
                    +1.06133e00,
                ]
            )
            * 1000.0
        )  # kJ / kg K to J / kg K
================================================
FILE: armi/materials/alloy200.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Alloy-200 is wrought, commercially pure nickel.
The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to
this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data
contained in this file should not be used in production simulations.
"""
from numpy import interp
from armi.materials.material import Material
from armi.utils.units import getTk
class Alloy200(Material):
    """Wrought commercially pure nickel (Alloy 200)."""

    references = {
        "linearExpansion": [
            "Alloy 200/201 Data Sheet http://www.jacquet.biz/JACQUET/USA/files/JCQusa-alloy-200-201.pdf"
        ],
        "refDens": ["Alloy 200/201 Data Sheet http://www.jacquet.biz/JACQUET/USA/files/JCQusa-alloy-200-201.pdf"],
        "referenceMaxPercentImpurites": [
            "Alloy 200/201 Data Sheet http://www.jacquet.biz/JACQUET/USA/files/JCQusa-alloy-200-201.pdf"
        ],
    }

    # Constants for thermal expansion
    modelConst = {
        "a0": 1.21620e-5,
        "a1": 8.30010e-9,
        "a2": -3.94985e-12,
        "TRefa": 20,
    }

    propertyValidTemperature = {"linear expansion": ((73.15, 1273.15), "K")}

    # maximum allowed impurity content as (element symbol, weight percent)
    referenceMaxPercentImpurites = [
        ("C", 0.15),
        ("MN", 0.35),
        ("S", 0.01),
        ("SI", 0.35),
        ("CU", 0.25),
        ("FE", 0.40),
    ]

    # tabulated expansion coefficients vs. temperature (K)
    linearExpansionTableK = [
        73.15, 173.15, 373.15, 473.15, 573.15, 673.15,
        773.15, 873.15, 973.15, 1073.15, 1173.15, 1273.15,
    ]
    linearExpansionTable = [
        10.1e-6, 11.3e-6, 13.3e-6, 13.9e-6, 14.3e-6, 14.8e-6,
        15.2e-6, 15.6e-6, 15.8e-6, 16.2e-6, 16.5e-6, 16.7e-6,
    ]

    def linearExpansion(self, Tk=None, Tc=None):
        """
        Instantaneous coefficient of thermal expansion of Alloy 200.

        Parameters
        ----------
        Tk : float, optional
            temperature in degrees Kelvin
        Tc : float, optional
            temperature in degrees Celsius

        Returns
        -------
        linearExpansion : float
            instantaneous coefficient of thermal expansion of Alloy 200 (1/C)
        """
        Tk = getTk(Tc, Tk)
        self.checkPropertyTempRange("linear expansion", Tk)
        # linear interpolation on the tabulated data
        return interp(Tk, self.linearExpansionTableK, self.linearExpansionTable)

    def setDefaultMassFracs(self):
        """
        Set default composition.

        Notes
        -----
        It is assumed half the max composition for the impurities and the rest is Ni.
        """
        remainingNickelFrac = 1.0
        for elementSymbol, maxPercent in self.referenceMaxPercentImpurites:
            impurityMassFrac = maxPercent * 0.01 / 2.0
            self.setMassFrac(elementSymbol, impurityMassFrac)
            remainingNickelFrac -= impurityMassFrac
        self.setMassFrac("NI", remainingNickelFrac)
        self.refDens = 8.9
================================================
FILE: armi/materials/b4c.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Boron carbide; a very typical reactor control material.
Note that this material defaults to a theoretical density fraction of 0.9, reflecting the difficulty of producing B4C at
100% theoretical density in real life. To get different fraction, use the `TD_frac` material modification in your
assembly definition.
The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to
this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data
contained in this file should not be used in production simulations.
"""
from armi import runLog
from armi.materials import material
from armi.utils.units import getTc
class B4C(material.Material):
    """
    Boron carbide ceramic, a typical reactor control (absorber) material.

    Mass fractions are derived from the B-10 number fraction (natural boron by
    default); density is scaled by a theoretical-density fraction that can be
    set via the ``TD_frac`` material modification.
    """

    # reference mass density in g/cc at 100% theoretical density
    DEFAULT_MASS_DENSITY = 2.52
    # default fraction of theoretical density achieved by fabrication
    DEFAULT_THEORETICAL_DENSITY_FRAC = 0.90
    # nuclide that enrichment adjustments act on
    enrichedNuclide = "B10"
    # natural B-10 number (atom) fraction in boron
    NATURAL_B10_NUM_FRAC = 0.199
    propertyValidTemperature = {"linear expansion percent": ((25, 600), "C")}

    def __init__(self):
        # start from natural boron; applyInputParams may adjust enrichment later
        self.b10NumFrac = self.NATURAL_B10_NUM_FRAC
        super().__init__()

    def applyInputParams(self, B10_wt_frac=None, theoretical_density=None, TD_frac=None, *args, **kwargs):
        """
        Apply user-input material modifications.

        Parameters
        ----------
        B10_wt_frac : float, optional
            B-10 mass (weight) enrichment to apply.
        theoretical_density : float, optional
            Deprecated alias for ``TD_frac``; ignored when both are given.
        TD_frac : float, optional
            Fraction of theoretical density to use.
        """
        if B10_wt_frac is not None:
            # we can't just use the generic enrichment adjustment here because the
            # carbon has to change with enrich.
            self.adjustMassEnrichment(B10_wt_frac)
        if theoretical_density is not None:
            runLog.warning(
                "The 'theoretical_density' material modification for B4C will be "
                "deprecated. Update your inputs to use 'TD_frac' instead.",
                single=True,
            )
            if TD_frac is not None:
                runLog.warning(
                    f"Both 'theoretical_density' and 'TD_frac' are specified for {self}. 'TD_frac' will be used."
                )
            else:
                self.updateTD(theoretical_density)
        if TD_frac is not None:
            self.updateTD(TD_frac)

    def updateTD(self, td: float) -> None:
        """Set the theoretical density fraction and invalidate cached values."""
        self.theoreticalDensityFrac = td
        self.clearCache()

    def setNewMassFracsFromMassEnrich(self, massEnrichment):
        r"""
        Calculate the mass fractions for a given mass enrichment and set it on any parent.

        Parameters
        ----------
        massEnrichment : float
            The mass enrichment as a fraction.

        Returns
        -------
        boron10MassGrams, boron11MassGrams, carbonMassGrams : float
            The resulting mass of each nuclide/element

        Notes
        -----
        B-10: 10.012 g/mol
        B-11: 11.009 g/mol
        Carbon: 12.0107 g/mol

        4 moles of boron/1 mole of carbon

        grams of boron-10 = 10.012 g/mol* 4 mol * 0.199 = 7.969552 g
        grams of boron-11 = 11.009 g/mol* 4 mol * 0.801 = 35.272836 g
        grams of carbon= 12.0107 g/mol * 1 mol = 12.0107 g

        from number enrichment mi:
        mB10 = nB10*AB10 /(nB10*AB10 + nB11*AB11)
        """
        if massEnrichment < 0 or massEnrichment > 1:
            raise ValueError(f"massEnrichment {massEnrichment} is unphysical for B4C")
        # prefer the parent's nuclide base data when attached to a component
        nb = self.parent.nuclideBases if self.parent else None
        if nb is None:
            # fall back to hard-coded atomic masses (g/mol)
            b10AtomicMass = 10.01293728
            b11AtomicMass = 11.0093054803
            cAtomicMass = 12.011137118560828
        else:
            b10AtomicMass = nb.byName["B10"].weight
            b11AtomicMass = nb.byName["B11"].weight
            cAtomicMass = nb.byName["C"].weight
        # convert mass enrichment to number (atom) enrichment
        b10NumEnrich = (massEnrichment / b10AtomicMass) / (
            massEnrichment / b10AtomicMass + (1 - massEnrichment) / b11AtomicMass
        )
        b11NumEnrich = 1.0 - b10NumEnrich
        # per-formula-unit masses: 4 boron atoms and 1 carbon atom (B4C)
        boron10MassGrams = b10AtomicMass * b10NumEnrich * 4.0
        boron11MassGrams = b11AtomicMass * b11NumEnrich * 4.0
        carbonMassGrams = cAtomicMass
        # normalize to mass fractions
        gTotal = boron10MassGrams + boron11MassGrams + carbonMassGrams
        boron10MassGrams /= gTotal
        boron11MassGrams /= gTotal
        carbonMassGrams /= gTotal
        if self.parent:
            self.parent.setMassFracs({"B10": boron10MassGrams, "B11": boron11MassGrams, "C": carbonMassGrams})
        return boron10MassGrams, boron11MassGrams, carbonMassGrams

    def setDefaultMassFracs(self) -> None:
        r"""B4C mass fractions. Using Natural B4C. 19.9% B-10/ 80.1% B-11

        Boron: 10.811 g/mol
        Carbon: 12.0107 g/mol.

        4 moles of boron/1 mole of carbon

        grams of boron-10 = 10.01 g/mol* 4 mol * 0.199 = 7.96796 g
        grams of boron-11 = 11.01 g/mol* 4 mol * 0.801 = 35.27604 g
        grams of carbon= 12.0107 g/mol * 1 mol = 12.0107 g
        total=55.2547 g.

        Mass fractions are computed from this.
        """
        # use the parent's nuclide base data when available, else fall back
        nb = self.parent.nuclideBases if self.parent else None
        if nb is None:
            b10AtomicMass = 10.01293728
            b11AtomicMass = 11.0093054803
        else:
            b10AtomicMass = nb.byName["B10"].weight
            b11AtomicMass = nb.byName["B11"].weight
        massEnrich = self.getMassEnrichmentFromNumEnrich(self.b10NumFrac, b10AtomicMass, b11AtomicMass)
        gBoron10, gBoron11, gCarbon = self.setNewMassFracsFromMassEnrich(massEnrichment=massEnrich)
        self.setMassFrac("B10", gBoron10)
        self.setMassFrac("B11", gBoron11)
        self.setMassFrac("C", gCarbon)
        self.refDens = self.DEFAULT_MASS_DENSITY
        # TD reference : Dunner, Heuvel, "Absorber Materials for control rod systems of fast breeder reactors"
        # Journal of nuclear materials, 124, 185-194, (1984)."
        self.theoreticalDensityFrac = self.DEFAULT_THEORETICAL_DENSITY_FRAC  # normally is around 0.88-93.

    @staticmethod
    def getMassEnrichmentFromNumEnrich(
        b10NumFrac: float, b10AtomicMass: float = None, b11AtomicMass: float = None
    ) -> float:
        """Given a B10 number fraction, give the B10 weight fraction."""
        # default atomic masses (g/mol) when no nuclide base data is supplied
        if b10AtomicMass is None:
            b10AtomicMass = 10.01293728
        if b11AtomicMass is None:
            b11AtomicMass = 11.0093054803
        return b10NumFrac * b10AtomicMass / (b10NumFrac * b10AtomicMass + (1.0 - b10NumFrac) * b11AtomicMass)

    def pseudoDensity(self, Tk: float = None, Tc: float = None) -> float:
        """
        Return density that preserves mass when thermally expanded in 2D.

        Notes
        -----
        Applies theoretical density of B4C to parent method
        """
        return material.Material.pseudoDensity(self, Tk, Tc) * self.theoreticalDensityFrac

    def density(self, Tk: float = None, Tc: float = None) -> float:
        """
        Return density that preserves mass when thermally expanded in 3D.

        Notes
        -----
        Applies theoretical density of B4C to parent method
        """
        return material.Material.density(self, Tk, Tc) * self.theoreticalDensityFrac

    def linearExpansionPercent(self, Tk: float = None, Tc: float = None) -> float:
        """Boron carbide expansion. Very preliminary."""
        Tc = getTc(Tc, Tk)
        self.checkPropertyTempRange("linear expansion percent", Tc)
        # constant expansion coefficient of 4.5e-6 1/C relative to 25 C
        deltaT = Tc - 25
        dLL = deltaT * 4.5e-6
        return dLL * 100
================================================
FILE: armi/materials/be9.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Beryllium is a lightweight metal with lots of interesting nuclear use-cases.
It has a nice (n,2n) reaction and is an inhalation hazard.
The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to
this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data
contained in this file should not be used in production simulations.
"""
from armi.materials.material import Material
from armi.nucDirectory import thermalScattering as tsl
from armi.utils.units import getTk
class Be9(Material):
    """Beryllium metal (Be-9)."""

    thermalScatteringLaws = (tsl.fromNameAndCompound("BE", tsl.BE_METAL),)
    propertyValidTemperature = {"linear expansion percent": ((50, 1560.0), "K")}

    def setDefaultMassFracs(self):
        """Pure Be-9 with a reference density of 1.85 g/cc."""
        self.setMassFrac("BE9", 1.0)
        self.refDens = 1.85

    def linearExpansionPercent(self, Tk=None, Tc=None):
        r"""
        Linear expansion coefficient of Be9 (m/m-K) given Tk in K or Tc in C.

        Based on http://www-ferp.ucsd.edu/LIB/PROPS/PANOS/be.html
        which is in turn based on Fusion Engineering and Design . FEDEEE 5(2), 141-234 (1987).
        """
        Tk = getTk(Tc, Tk)
        self.checkPropertyTempRange("linear expansion percent", Tk)
        polynomialFit = 8.4305 + 1.1464e-2 * Tk - 2.9752e-6 * Tk**2
        return 1e-4 * polynomialFit
================================================
FILE: armi/materials/caH2.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Calcium Hydride.
The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to
this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data
contained in this file should not be used in production simulations.
"""
from armi.materials.material import SimpleSolid
class CaH2(SimpleSolid):
    """Calcium hydride (CaH2)."""

    def setDefaultMassFracs(self):
        """Set default elemental mass fractions.

        Derived from natural calcium isotopics, http://atom.kaeri.re.kr/ton/

        iso atomic percent abundance and atomic mass of 20-calcium

        | 20-Ca-40 96.941% 39.9625912
        | 20-Ca-42 0.647% 41.9586183
        | 20-Ca-43 0.135% 42.9587668
        | 20-Ca-44 2.086% 43.9554811
        | 20-Ca-46 0.004% 45.9536928
        | 20-Ca-48 0.187% 47.9525335

        atomic weight of H2 2.01565
        weight of CaH2 42.09367285

        | weight% of Ca-40 in CaH2 0.920331558
        | weight% of Ca-42 in CaH2 0.006449241
        | weight% of Ca-43 in CaH2 0.001377745
        | weight% of Ca-44 in CaH2 0.02178264
        | weight% of Ca-46 in CaH2 4.3668E-05
        | weight% of Ca-48 in CaH2 0.002130278
        | weight% of H2 in CaH2 0.047884869
        """
        # elemental fractions: calcium carries the balance of the hydrogen mass
        self.setMassFrac("CA", 0.952115131)
        self.setMassFrac("H", 0.047884869)

    def density(self, Tk=None, Tc=None):
        """Constant mass density in g/cc; temperature arguments are ignored.

        http://en.wikipedia.org/wiki/Calcium_hydride

        Returns
        -------
        density : float
            grams / cc
        """
        return 1.70
================================================
FILE: armi/materials/californium.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Californium is a synthetic element made in nuclear reactors.
It is interesting in that it has a large spontaneous fission decay mode that produces lots of neutrons. It's often used
as a neutron source.
The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to
this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data
contained in this file should not be used in production simulations.
"""
from armi.materials.material import SimpleSolid
class Californium(SimpleSolid):
    """Californium metal, modeled as pure Cf-252."""

    def setDefaultMassFracs(self):
        """Pure Cf-252."""
        self.setMassFrac("CF252", 1.0)

    def density(self, Tk=None, Tc=None):
        """Constant density in g/cm3; https://en.wikipedia.org/wiki/Californium."""
        return 15.1
================================================
FILE: armi/materials/concrete.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Concrete.
Concrete is often used to provide structural support of nuclear equipment. It can also provide radiation shielding.
The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to
this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data
contained in this file should not be used in production simulations.
"""
from armi.materials.material import Material
class Concrete(Material):
    """Simple concrete material.

    https://web.archive.org/web/20221103120449/https://physics.nist.gov/cgi-bin/Star/compos.pl?matno=144
    """

    def setDefaultMassFracs(self):
        """Set the NIST ordinary-concrete composition (fractions sum to 1)."""
        nominalMassFracs = {
            "H": 0.010000,
            "C": 0.001000,
            "O16": 0.529107,
            "NA23": 0.016000,
            "MG": 0.002000,
            "AL": 0.033872,
            "SI": 0.337021,
            "K": 0.013000,
            "CA": 0.044000,
            "FE": 0.014000,
        }
        for nuclideName, massFrac in nominalMassFracs.items():
            self.setMassFrac(nuclideName, massFrac)

    def density(self, Tk=None, Tc=None):
        """Constant density in g/cm3; temperature arguments are ignored."""
        return 2.3000
================================================
FILE: armi/materials/copper.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Copper metal.
The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to
this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data
contained in this file should not be used in production simulations.
"""
from armi.materials.material import Material
from armi.utils.units import getTk
class Cu(Material):
    """Natural copper metal."""

    propertyValidTemperature = {"linear expansion percent": ((40.43, 788.83), "K")}

    def setDefaultMassFracs(self):
        """Natural isotopic copper composition."""
        self.setMassFrac("CU63", 0.6915)
        self.setMassFrac("CU65", 0.3085)

    def density(self, Tk=None, Tc=None):
        """Constant density in g/cm3; temperature arguments are ignored."""
        return 8.913

    def linearExpansionPercent(self, Tk=None, Tc=None):
        """
        Return the linear expansion percent for Copper.

        Notes
        -----
        Digitized using Engauge Digitizer from Figure 21 of
        Thrust Chamber Life Prediction - Volume I - Mechanical and Physical
        Properties of High Performance Rocket Nozzle Materials (NASA CR - 134806)
        """
        Tk = getTk(Tc, Tk)
        self.checkPropertyTempRange("linear expansion percent", Tk)
        quadraticTerm = 5.0298e-07 * Tk**2
        linearTerm = 1.3042e-03 * Tk
        return quadraticTerm + linearTerm - 4.3097e-01
================================================
FILE: armi/materials/cs.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cesium.
The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to
this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data
contained in this file should not be used in production simulations.
"""
from armi.materials.material import Fluid
from armi.utils.units import getTk
class Cs(Fluid):
    """Cesium."""

    def setDefaultMassFracs(self):
        """Pure Cs-133."""
        self.setMassFrac("CS133", 1.0)

    def pseudoDensity(self, Tk=None, Tc=None):
        """The 2D/3D density of Cesium in g/cm3.

        https://en.wikipedia.org/wiki/Caesium

        Notes
        -----
        In ARMI, we define pseudoDensity() and density() as the same for Fluids.
        """
        Tk = getTk(Tc, Tk)
        # solid-phase density below the melting point, liquid-phase above
        return 1.93 if Tk < self.meltingPoint() else 1.843

    def meltingPoint(self):
        """Melting point in K."""
        return 301.7
================================================
FILE: armi/materials/custom.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Custom materials are ones that you can specify all the number densities yourself.
Useful for benchmarking when you have a particular specified material density. Use the isotopic input described in
:ref:`bp-input-file`.
The density function gets applied from custom isotopics by
:py:meth:`armi.reactor.blueprints.isotopicOptions.CustomIsotopic.apply`.
"""
from armi.materials.material import Material
class Custom(Material):
    """Custom Materials have user input properties."""

    enrichedNuclide = "U235"

    def __init__(self):
        """
        Construct with a default density of 1.0 so that number densities can be set
        without first defining a density; in practice a constant user-input density
        generally overwrites this.
        """
        Material.__init__(self)
        self.customDensity = 1.0

    def pseudoDensity(self, Tk=None, Tc=None):
        """
        Return the density value set in the loading input.

        In some cases it needs to be set after full core assemblies are populated
        (e.g. for CustomLocation materials), so the missing density warning will
        appear no matter what.
        """
        return self.customDensity

    def setMassFrac(self, *args, **kwargs):
        # A still-default density (1.0, as set in __init__) indicates no density
        # was defined in input, so mass fractions cannot meaningfully be set.
        if self.customDensity == 1.0:
            raise ValueError("Cannot set mass fractions on Custom materials unless a density is defined.")
        Material.setMassFrac(self, *args, **kwargs)
================================================
FILE: armi/materials/graphite.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Graphite is often used as a moderator in gas-cooled nuclear reactors.
The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to
this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data
contained in this file should not be used in production simulations.
"""
from armi.materials.material import Material
from armi.nucDirectory import thermalScattering as tsl
from armi.utils import units
class Graphite(Material):
    """
    Graphite.

    .. [INL-EXT-16-38241] McEligot, Donald, Swank, W. David, Cottle, David L., and Valentin,
        Francisco I. Thermal Properties of G-348 Graphite. United States: N. p., 2016. Web. doi:10.2172/1330693.
        https://www.osti.gov/biblio/1330693
    """

    thermalScatteringLaws = (tsl.fromNameAndCompound("C", tsl.GRAPHITE_10P),)

    def setDefaultMassFracs(self):
        """
        Pure carbon with the room temperature density from [INL-EXT-16-38241]_, table 2.
        """
        self.setMassFrac("C", 1.0)
        self.refDens = 1.8888

    def linearExpansionPercent(self, Tk=None, Tc=None):
        """
        Thermal expansion dL/L0 for graphite, in percent.

        From [INL-EXT-16-38241]_, page 4.
        """
        Tc = units.getTc(Tc, Tk)
        fractionalExpansion = -1.454e-4 + 4.812e-6 * Tc + 1.145e-9 * Tc**2
        return 100 * fractionalExpansion
================================================
FILE: armi/materials/hafnium.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hafnium is an element that has high capture cross section across multiple isotopes.
The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to
this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data
contained in this file should not be used in production simulations.
"""
from armi.materials.material import SimpleSolid
from armi.nucDirectory import nucDir
class Hafnium(SimpleSolid):
    """Elemental hafnium at natural isotopic abundances."""

    def setDefaultMassFracs(self):
        """Assign mass fractions from the natural isotopic distribution of hafnium."""
        for massNumber, fraction in nucDir.getNaturalMassIsotopics("HF"):
            self.setMassFrac(f"HF{massNumber}", fraction)

    def density(self, Tk=None, Tc=None):
        """Room-temperature density in g/cc; http://www.lenntech.com/periodic/elements/hf.htm."""
        return 13.07
================================================
FILE: armi/materials/hastelloyN.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hastelloy-N is a high-nickel structural material invented by ORNL for handling molten fluoride salts.
The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to
this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data
contained in this file should not be used in production simulations.
"""
from armi.materials.material import Material
from armi.utils.units import getTc, getTk
class HastelloyN(Material):
    """
    Hastelloy N alloy (UNS N10003), a nickel-base molten-fluoride-salt container material.

    .. [Haynes] Haynes International, H-2052D 2020
        (http://haynesintl.com/docs/default-source/pdfs/new-alloy-brochures/corrosion-resistant-alloys/brochures/n-brochure.pdf)

    .. [SAB] Sabharwall, et. al.
        Feasibility Study of Secondary Heat Exchanger Concepts for the Advanced High Temperature Reactor
        INL/EXT-11-23076, 2011
    """

    materialIntro = (
        "Hastelloy N alloy is a nickel-base alloy that was invented at Oak RIdge National Laboratories "
        "as a container material for molten fluoride salts. It has good oxidation resistance to hot fluoride "
        "salts in the temperature range of 704 to 871C (1300 to 1600F)"
    )

    # Temperature ranges (in K) over which each property correlation is valid.
    propertyValidTemperature = {
        "thermal conductivity": ((473.15, 973.15), "K"),
        "heat capacity": ((373.15, 973.15), "K"),
        "thermal expansion": ((293.15, 1173.15), "K"),
    }

    refTempK = 293.15

    def setDefaultMassFracs(self):
        """
        Set nominal Hastelloy N mass fractions from [Haynes]_.

        Nickel is assigned last as the balance of the composition.
        """
        for nuc, frac in [
            ("CR", 0.07),
            ("MO", 0.16),
            ("FE", 0.04),  # max.
            ("SI", 0.01),  # max.
            ("MN", 0.0080),  # max.
            ("V", 0.0005),  # max.
            ("C", 0.0006),
            ("CO", 0.0020),  # max.
            ("CU", 0.0035),  # max.
            ("W", 0.005),  # max.
            ("AL", 0.0025),  # max.
            ("TI", 0.0025),  # max.
        ]:
            self.setMassFrac(nuc, frac)
        self.setMassFrac("NI", 1.0 - sum(self.massFrac.values()))  # balance
        self.refDens = 8.86

    def thermalConductivity(self, Tk=None, Tc=None):
        """
        Thermal conductivity of Hastelloy N.

        Second order polynomial fit to data from [Haynes]_.

        Parameters
        ----------
        Tk : float
            Temperature in (K)
        Tc : float
            Temperature in (C)

        Returns
        -------
        float
            Hastelloy N thermal conductivity (W/m-K)
        """
        Tc = getTc(Tc, Tk)
        self.checkPropertyTempRange("thermal conductivity", getTk(Tc=Tc))
        return 1.92857e-05 * Tc**2 + 3.12857e-03 * Tc + 1.17743e01  # W/m-K

    def heatCapacity(self, Tk=None, Tc=None):
        """
        Specific heat capacity of Hastelloy N.

        Sixth order polynomial fit to data from Table 2-20 [SAB]_ (R^2=0.97).

        Parameters
        ----------
        Tk : float
            Temperature in (K)
        Tc : float
            Temperature in (C)

        Returns
        -------
        float
            Hastelloy N specific heat capacity (J/kg-C)
        """
        Tc = getTc(Tc, Tk)
        self.checkPropertyTempRange("heat capacity", getTk(Tc=Tc))
        return (
            +3.19981e02
            + 2.47421e00 * Tc
            - 2.49306e-02 * Tc**2
            + 1.32517e-04 * Tc**3
            - 3.58872e-07 * Tc**4
            + 4.69003e-10 * Tc**5
            - 2.32692e-13 * Tc**6
        )

    def linearExpansionPercent(self, Tk=None, Tc=None):
        """
        Average thermal expansion dL/L in percent; used for computing hot dimensions.

        Parameters
        ----------
        Tk : float
            temperature in (K)
        Tc : float
            Temperature in (C)

        Returns
        -------
        float
            %dLL(T) in m/m/K
        """
        Tc = getTc(Tc, Tk)
        refTempC = getTc(Tk=self.refTempK)
        return 100.0 * self.meanCoefficientThermalExpansion(Tc=Tc) * (Tc - refTempC)

    def meanCoefficientThermalExpansion(self, Tk=None, Tc=None):
        """
        Mean coefficient of thermal expansion for Hastelloy N.

        Second order polynomial fit of data from [Haynes]_.

        Parameters
        ----------
        Tk : float
            temperature in (K)
        Tc : float
            Temperature in (C)

        Returns
        -------
        float
            mean coefficient of thermal expansion in m/m/C
        """
        Tc = getTc(Tc, Tk)
        self.checkPropertyTempRange("thermal expansion", getTk(Tc=Tc))
        return 2.60282e-12 * Tc**2 + 7.69859e-10 * Tc + 1.21036e-05
================================================
FILE: armi/materials/ht9.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Simple/academic/incomplete HT9 ferritic-martensitic stainless steel material.
This is a famous SFR cladding/duct material because it doesn't void swell that much.
The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to
this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data
contained in this file should not be used in production simulations.
"""
from armi import materials
from armi.utils import units
class HT9(materials.Material):
    """
    Simplified HT9 ferritic-martensitic stainless steel.

    .. warning:: This is an academic-quality material. When more detail is desired,
        a custom material should be implemented via a user-provided plugin.

    .. [MFH] Metallic Fuels Handbook
        Hofman, G. L., Billone, M. C., Koenig, J. F., Kramer, J. M., Lambert, J. D. B., Leibowitz, L.,
        Orechwa, Y., Pedersen, D. R., Porter, D. L., Tsai, H., and Wright, A. E. Metallic Fuels Handbook.
        United States: N. p., 2019. Web. doi:10.2172/1506477.
        https://www.osti.gov/biblio/1506477-metallic-fuels-handbook
    """

    # The linear expansion correlation is only valid over this Kelvin range.
    propertyValidTemperature = {"linear expansion": ((293, 1050), "K")}

    def setDefaultMassFracs(self):
        """
        Set HT9 mass fractions from E.2-1 of [MFH]_.

        Iron is assigned last as the balance of the composition.
        https://www.osti.gov/biblio/1506477-metallic-fuels-handbook
        """
        for nuc, frac in [
            ("C", 0.002),
            ("MN", 0.005),
            ("SI", 0.0025),
            ("NI", 0.0055),
            ("CR", 0.1175),
            ("MO", 0.01),
            ("W", 0.0055),
            ("V", 0.0030),
        ]:
            self.setMassFrac(nuc, frac)
        self.setMassFrac("FE", 1.0 - sum(self.massFrac.values()))  # balance
        self.refDens = 7.778

    def linearExpansionPercent(self, Tk=None, Tc=None):
        """
        Linear expansion dL/L0 in percent, from E.2.2.2 in [MFH]_ for HT9.

        The reference correlation is valid from 293 - 1050 K.
        """
        kelvin = units.getTk(Tc, Tk)
        self.checkPropertyTempRange("linear expansion", kelvin)
        return -0.16256 + 1.62307e-4 * kelvin + 1.42357e-6 * kelvin**2 - 5.50344e-10 * kelvin**3

    def thermalConductivity(self, Tk=None, Tc=None):
        """
        Thermal conductivity in W/m-K).

        From [MFH]_, E.2.2.3, eq 5.
        """
        kelvin = units.getTk(Tc, Tk)
        return 29.65 - 6.668e-2 * kelvin + 2.184e-4 * kelvin**2 - 2.527e-7 * kelvin**3 + 9.621e-11 * kelvin**4
================================================
FILE: armi/materials/inconel.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inconel is an austenitic nickel-chromium superalloy.
The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to
this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data
contained in this file should not be used in production simulations.
"""
from armi.materials.material import SimpleSolid
class Inconel(SimpleSolid):
    """High-nickel superalloy; composition and density follow the Inconel 617 datasheet."""

    references = {
        "mass fractions": "https://www.specialmetals.com/documents/technical-bulletins/inconel/inconel-alloy-617.pdf",
        "density": "https://www.specialmetals.com/documents/technical-bulletins/inconel/inconel-alloy-617.pdf",
    }

    def setDefaultMassFracs(self):
        """Set nominal composition; elemental boron is split by natural isotopic abundance."""
        massFracs = {
            "NI": 0.52197,
            "CR": 0.22,
            "CO59": 0.125,
            "MO": 0.09,
            "AL27": 0.0115,
            "C": 0.001,
            "FE": 0.015,
            "MN55": 0.005,
            "SI": 0.005,
            "TI": 0.003,
            "CU": 0.0025,
            "B10": 0.00003 * 0.1997,
            "B11": 0.00003 * (1.0 - 0.1997),
        }
        for nuc, frac in massFracs.items():
            self.setMassFrac(nuc, frac)

    def density(self, Tk=None, Tc=None):
        """Room-temperature density in g/cc (vendor datasheet value)."""
        return 8.3600
class Inconel617(Inconel):
    """
    Explicitly-named Inconel 617.

    Historically the 'Inconel' material represented the high-nickel alloy
    Inconel 617. This subclass lets users state with certainty that Inconel 617
    is intended, without breaking any older models that used 'Inconel'.
    """
================================================
FILE: armi/materials/inconel600.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inconel600.
The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to
this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data
contained in this file should not be used in production simulations.
"""
from armi.materials.material import Material
from armi.utils.units import getTc
class Inconel600(Material):
    """Inconel 600 nickel-chromium alloy, from the Special Metals datasheet."""

    # Celsius ranges over which each correlation below is valid.
    propertyValidTemperature = {
        "heat capacity": ((20, 900), "C"),
        "linear expansion": ((21.0, 900.0), "C"),
        "linear expansion percent": ((21.0, 900.0), "C"),
        "thermal conductivity": ((20.0, 800.0), "C"),
    }

    references = {
        "mass fractions": "http://www.specialmetals.com/documents/Inconel%20alloy%20600.pdf",
        "density": "http://www.specialmetals.com/documents/Inconel%20alloy%20600.pdf",
        "thermalConductivity": "http://www.specialmetals.com/documents/Inconel%20alloy%20600.pdf",
        "specific heat": "http://www.specialmetals.com/documents/Inconel%20alloy%20600.pdf",
        "linear expansion percent": "http://www.specialmetals.com/documents/Inconel%20alloy%20600.pdf",
        "linear expansion": "http://www.specialmetals.com/documents/Inconel%20alloy%20600.pdf",
    }

    refTempK = 294.15

    def __init__(self):
        Material.__init__(self)
        # The reference gives only one density measurement; presumed performed at 21C,
        # since that was the reference temperature for the linear expansion measurements.
        self.refDens = 8.47  # g/cc

    def setDefaultMassFracs(self):
        """Set nominal Inconel 600 mass fractions from the vendor datasheet."""
        self.setMassFrac("NI", 0.7541)
        self.setMassFrac("CR", 0.1550)
        self.setMassFrac("FE", 0.0800)
        self.setMassFrac("C", 0.0008)
        self.setMassFrac("MN55", 0.0050)
        self.setMassFrac("S", 0.0001)
        self.setMassFrac("SI", 0.0025)
        self.setMassFrac("CU", 0.0025)

    def thermalConductivity(self, Tk=None, Tc=None):
        """
        Thermal conductivity of Inconel600.

        Parameters
        ----------
        Tk : float, optional
            temperature in (K)
        Tc : float, optional
            Temperature in (C)

        Returns
        -------
        float
            thermal conductivity in W/m/C
        """
        celsius = getTc(Tc, Tk)
        self.checkPropertyTempRange("thermal conductivity", celsius)
        return 3.4938e-6 * celsius**2 + 1.3403e-2 * celsius + 14.572  # W/m-C

    def heatCapacity(self, Tk=None, Tc=None):
        """
        Specific heat capacity of Inconel600.

        Parameters
        ----------
        Tk : float, optional
            Temperature in Kelvin.
        Tc : float, optional
            Temperature in degrees Celsius.

        Returns
        -------
        float
            heat capacity in J/kg/C
        """
        celsius = getTc(Tc, Tk)
        self.checkPropertyTempRange("heat capacity", celsius)
        return 7.4021e-6 * celsius**2 + 0.20573 * celsius + 441.3  # J/kg-C

    def linearExpansionPercent(self, Tk=None, Tc=None):
        """
        Percent linear expansion of Inconel600.

        Parameters
        ----------
        Tk : float
            temperature in (K)
        Tc : float
            Temperature in (C)

        Returns
        -------
        float
            linear expansion in %-m/m/C
        """
        celsius = getTc(Tc, Tk)
        self.checkPropertyTempRange("linear expansion percent", celsius)
        return 3.722e-7 * celsius**2 + 1.303e-3 * celsius - 2.863e-2

    def linearExpansion(self, Tk=None, Tc=None):
        """
        Instantaneous linear expansion coefficient of Inconel600.

        From http://www.specialmetals.com/documents/Inconel%20alloy%20600.pdf.

        The linearExpansionPercent correlation (a*Tc**2 + b*Tc + c) is divided by 100
        to convert percent strain to strain, then differentiated with respect to
        temperature, giving 2*a/100*Tc + b/100, i.e.
        2*(3.722e-7/100.0)*Tc + 1.303e-3/100.0.

        Parameters
        ----------
        Tk : float
            temperature in (K)
        Tc : float
            Temperature in (C)

        Returns
        -------
        float
            linear expansion in m/m/C
        """
        celsius = getTc(Tc, Tk)
        self.checkPropertyTempRange("linear expansion", celsius)
        return 7.444e-9 * celsius + 1.303e-5
================================================
FILE: armi/materials/inconel625.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inconel625.
The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to
this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data
contained in this file should not be used in production simulations.
"""
from armi.materials.material import Material
from armi.utils.units import getTc
class Inconel625(Material):
    """Inconel 625 nickel-chromium alloy, from the Special Metals datasheet."""

    # Celsius ranges over which each correlation below is valid.
    propertyValidTemperature = {
        "heat capacity": ((221.0, 1093.0), "C"),
        "linear expansion": ((21.0, 927.0), "C"),
        "linear expansion percent": ((21.0, 927.0), "C"),
        "thermal conductivity": ((21.0, 982.0), "C"),
    }

    references = {
        "mass fractions": "http://www.specialmetals.com/assets/documents/alloys/inconel/inconel-alloy-625.pdf",
        "density": "http://www.specialmetals.com/assets/documents/alloys/inconel/inconel-alloy-625.pdf",
        "linearExpansionPercent": "http://www.specialmetals.com/assets/documents/alloys/inconel/inconel-alloy-625.pdf",
        "linearExpansion": "http://www.specialmetals.com/assets/documents/alloys/inconel/inconel-alloy-625.pdf",
        "thermalConductivity": "http://www.specialmetals.com/assets/documents/alloys/inconel/inconel-alloy-625.pdf",
        "specific heat": "http://www.specialmetals.com/assets/documents/alloys/inconel/inconel-alloy-625.pdf",
    }

    refTempK = 294.15

    def __init__(self):
        Material.__init__(self)
        # The reference gives only one density measurement; presumed performed at 21C,
        # since that was the reference temperature for the linear expansion measurements.
        self.refDens = 8.44  # g/cc

    def setDefaultMassFracs(self):
        """Set nominal Inconel 625 mass fractions from the vendor datasheet."""
        self.setMassFrac("NI", 0.6188)
        self.setMassFrac("CR", 0.2150)
        self.setMassFrac("FE", 0.0250)
        self.setMassFrac("MO", 0.0900)
        self.setMassFrac("TA181", 0.0365)
        self.setMassFrac("C", 0.0005)
        self.setMassFrac("MN55", 0.0025)
        self.setMassFrac("SI", 0.0025)
        self.setMassFrac("P31", 0.0001)
        self.setMassFrac("S", 0.0001)
        self.setMassFrac("AL27", 0.0020)
        self.setMassFrac("TI", 0.0020)
        self.setMassFrac("CO59", 0.0050)

    def thermalConductivity(self, Tk=None, Tc=None):
        """
        Thermal conductivity of Inconel625.

        Parameters
        ----------
        Tk : float, optional
            Temperature in Kelvin.
        Tc : float, optional
            Temperature in degrees Celsius.

        Returns
        -------
        float
            thermal conductivity in W/m/C
        """
        celsius = getTc(Tc, Tk)
        self.checkPropertyTempRange("thermal conductivity", celsius)
        return 2.7474e-6 * celsius**2 + 0.012907 * celsius + 9.62532  # W/m-C

    def heatCapacity(self, Tk=None, Tc=None):
        """
        Specific heat capacity of Inconel625.

        Parameters
        ----------
        Tk : float, optional
            Temperature in Kelvin.
        Tc : float, optional
            Temperature in degrees Celsius.

        Returns
        -------
        float
            heat capacity in J/kg/C
        """
        celsius = getTc(Tc, Tk)
        self.checkPropertyTempRange("heat capacity", celsius)
        return -5.3777e-6 * celsius**2 + 0.25 * celsius + 404.26  # J/kg-C

    def linearExpansionPercent(self, Tk=None, Tc=None):
        """
        Percent linear expansion of Inconel625.

        Parameters
        ----------
        Tk : float
            temperature in (K)
        Tc : float
            Temperature in (C)

        Returns
        -------
        float
            linear expansion in %-m/m/C
        """
        celsius = getTc(Tc, Tk)
        self.checkPropertyTempRange("linear expansion percent", celsius)
        return 5.083e-7 * celsius**2 + 1.125e-3 * celsius - 1.804e-2

    def linearExpansion(self, Tk=None, Tc=None):
        """
        Instantaneous linear expansion coefficient of Inconel625.

        From http://www.specialmetals.com/assets/documents/alloys/inconel/inconel-alloy-625.pdf.

        The linearExpansionPercent correlation (a*Tc**2 + b*Tc + c) is divided by 100
        to convert percent strain to strain, then differentiated with respect to
        temperature, giving 2*a/100*Tc + b/100, i.e.
        2*(5.083e-7/100.0)*Tc + 1.125e-3/100.0.

        Parameters
        ----------
        Tk : float
            temperature in (K)
        Tc : float
            Temperature in (C)

        Returns
        -------
        float
            linear expansion in m/m/C
        """
        celsius = getTc(Tc, Tk)
        self.checkPropertyTempRange("linear expansion", celsius)
        return 1.0166e-8 * celsius + 1.125e-5
================================================
FILE: armi/materials/inconel800.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Incoloy 800.
The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to
this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data
contained in this file should not be used in production simulations.
"""
from armi.materials.material import Material
from armi.utils.units import getTc
class Inconel800(Material):
    """
    Incoloy 800/800H (UNS N08800/N08810).

    .. [SM] Special Metals - Incoloy alloy 800
        (https://www.specialmetals.com/assets/smc/documents/alloys/incoloy/incoloy-alloy-800.pdf)
    """

    # The thermal expansion correlation is only valid over this Celsius range.
    propertyValidTemperature = {"thermal expansion": ((20.0, 800.0), "C")}

    refTempK = 294.15

    def setDefaultMassFracs(self):
        """
        Set Incoloy 800H mass fractions from [SM]_.

        Iron is assigned last as the balance of the composition (0.395 min.).
        """
        for nuc, frac in [
            ("NI", 0.325),  # ave.
            ("CR", 0.21),  # ave.
            ("C", 0.00075),  # ave. 800H
            ("MN", 0.015),  # max.
            ("S", 0.00015),  # max.
            ("SI", 0.01),  # max.
            ("CU", 0.0075),  # max.
            ("AL", 0.00375),  # ave.
            ("TI", 0.00375),  # ave.
        ]:
            self.setMassFrac(nuc, frac)
        self.setMassFrac("FE", 1.0 - sum(self.massFrac.values()))  # balance
        self.refDens = 7.94

    def linearExpansionPercent(self, Tk=None, Tc=None):
        """
        Average thermal expansion dL/L in percent; used for computing hot dimensions.

        Parameters
        ----------
        Tk : float
            temperature in (K)
        Tc : float
            Temperature in (C)

        Returns
        -------
        float
            %dLL(T) in m/m/K
        """
        celsius = getTc(Tc, Tk)
        refTempC = getTc(Tk=self.refTempK)
        return 100.0 * self.meanCoefficientThermalExpansion(Tc=celsius) * (celsius - refTempC)

    def meanCoefficientThermalExpansion(self, Tk=None, Tc=None):
        """
        Mean coefficient of thermal expansion for Incoloy 800.

        Third order polynomial fit of table 5 from [SM]_.

        Parameters
        ----------
        Tk : float
            temperature in (K)
        Tc : float
            Temperature in (C)

        Returns
        -------
        float
            mean coefficient of thermal expansion in m/m/C
        """
        Tc = getTc(Tc, Tk)
        self.checkPropertyTempRange("thermal expansion", Tc)
        return 2.52525e-14 * Tc**3 - 3.77814e-11 * Tc**2 + 2.06360e-08 * Tc + 1.28071e-05
================================================
FILE: armi/materials/inconelPE16.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inconel PE16.
The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to
this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data
contained in this file should not be used in production simulations.
"""
from armi import runLog
from armi.materials.material import SimpleSolid
class InconelPE16(SimpleSolid):
    """Nimonic PE16 nickel-iron-chromium alloy, from the Special Metals datasheet."""

    references = {
        "mass fractions": r"http://www.specialmetals.com/assets/documents/alloys/nimonic/nimonic-alloy-pe16.pdf",
        "density": r"http://www.specialmetals.com/assets/documents/alloys/nimonic/nimonic-alloy-pe16.pdf",
    }

    def setDefaultMassFracs(self):
        """
        Set nominal PE16 mass fractions; silver and boron are split by isotopic abundance.

        Abundances come from the parent composition's nuclide bases when available,
        otherwise hard-coded natural abundances are used. Iron is the balance.
        """
        nb = self.parent.nuclideBases if self.parent else None
        if nb is not None:
            ag107abundance = nb.byName["AG107"].abundance
            ag109abundance = nb.byName["AG109"].abundance
            b10abundance = nb.byName["B10"].abundance
            b11abundance = nb.byName["B11"].abundance
        else:
            # Fall back on natural abundances when no nuclide directory is attached.
            ag107abundance = 0.51839001
            ag109abundance = 0.48160999
            b10abundance = 0.19799999
            b11abundance = 0.80199997
        massFracs = {
            "C": 0.0006,
            "SI": 0.0025,
            "MN55": 0.001,
            "S": 0.000075,
            "AG107": 0.0000025 * ag107abundance,
            "AG109": 0.0000025 * ag109abundance,
            "AL27": 0.012,
            "B10": 0.000025 * b10abundance,
            "B11": 0.000025 * b11abundance,
            "BI209": 0.0000005,
            "CO59": 0.01,
            "CR": 0.165,
            "CU": 0.0025,
            "MO": 0.033,
            "NI": 0.425,
            "PB": 0.0000075,
            "TI": 0.012,
            "ZR": 0.0003,
        }
        # 'Balance' does not guarantee the remainder is exclusively iron, only that
        # iron predominates and other elements are present in minimal quantities.
        massFracs["FE"] = 1 - sum(massFracs.values())
        for nuc, frac in massFracs.items():
            self.setMassFrac(nuc, frac)

    def density(self, Tk=None, Tc=None):
        """Room-temperature density in g/cc; no temperature dependence is modeled."""
        runLog.warning(
            "PE16 mass density is not temperature dependent, using room temperature value",
            single=True,
            label="InconelPE16 density",
        )
        return 8.00
================================================
FILE: armi/materials/inconelX750.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inconel X750.
The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to
this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data
contained in this file should not be used in production simulations.
"""
from armi.materials.material import Material
from armi.utils.units import getTc
class InconelX750(Material):
    """Inconel X-750 nickel-chromium alloy, from the Special Metals datasheet."""

    # Celsius ranges over which each correlation below is valid.
    propertyValidTemperature = {
        "heat capacity": ((-18.0, 1093.0), "C"),
        "linear expansion": ((21.1, 982.2), "C"),
        "linear expansion percent": ((21.1, 982.2), "C"),
        "thermal conductivity": ((-156.7, 871.1), "C"),
    }

    references = {
        "mass fractions": "http://www.specialmetals.com/documents/Inconel%20alloy%20X-750.pdf",
        "density": "http://www.specialmetals.com/documents/Inconel%20alloy%20X-750.pdf",
        "thermalConductivity": "http://www.specialmetals.com/documents/Inconel%20alloy%20X-750.pdf",
        "specific heat": "http://www.specialmetals.com/documents/Inconel%20alloy%20X-750.pdf",
        "linearExpansionPercent": "http://www.specialmetals.com/documents/Inconel%20alloy%20X-750.pdf",
        "linearExpansion": "http://www.specialmetals.com/documents/Inconel%20alloy%20X-750.pdf",
    }

    refTempK = 294.15

    def __init__(self):
        Material.__init__(self)
        # The reference gives only one density measurement; presumed performed at 21C,
        # since that was the reference temperature for the linear expansion measurements.
        self.refDens = 8.28  # g/cc

    def setDefaultMassFracs(self):
        """Set nominal Inconel X-750 mass fractions from the vendor datasheet."""
        self.setMassFrac("NI", 0.7180)
        self.setMassFrac("CR", 0.1550)
        self.setMassFrac("FE", 0.0700)
        self.setMassFrac("TI", 0.0250)
        self.setMassFrac("AL27", 0.0070)
        self.setMassFrac("NB93", 0.0095)
        self.setMassFrac("MN55", 0.0050)
        self.setMassFrac("SI", 0.0025)
        self.setMassFrac("S", 0.0001)
        self.setMassFrac("CU", 0.0025)
        self.setMassFrac("C", 0.0004)
        self.setMassFrac("CO59", 0.0050)

    def thermalConductivity(self, Tk=None, Tc=None):
        """
        Thermal conductivity of InconelX750.

        Parameters
        ----------
        Tk : float, optional
            Temperature in Kelvin.
        Tc : float, optional
            Temperature in degrees Celsius.

        Returns
        -------
        float
            thermal conductivity in W/m/C
        """
        celsius = getTc(Tc, Tk)
        self.checkPropertyTempRange("thermal conductivity", celsius)
        return 1.4835e-6 * celsius**2 + 1.2668e-2 * celsius + 11.632  # W/m-C

    def heatCapacity(self, Tk=None, Tc=None):
        """
        Specific heat capacity of InconelX750.

        Parameters
        ----------
        Tk : float, optional
            Temperature in Kelvin.
        Tc : float, optional
            Temperature in degrees Celsius.

        Returns
        -------
        float
            heat capacity in J/kg/C
        """
        celsius = getTc(Tc, Tk)
        self.checkPropertyTempRange("heat capacity", celsius)
        return 9.2261e-7 * celsius**3 - 9.6368e-4 * celsius**2 + 4.7778e-1 * celsius + 420.55  # J/kg-C

    def linearExpansionPercent(self, Tk=None, Tc=None):
        """
        Percent linear expansion of InconelX750.

        Parameters
        ----------
        Tk : float
            temperature in (K)
        Tc : float
            Temperature in (C)

        Returns
        -------
        float
            linear expansion in %-m/m/C
        """
        celsius = getTc(Tc, Tk)
        self.checkPropertyTempRange("linear expansion percent", celsius)
        return 6.8378e-7 * celsius**2 + 1.056e-3 * celsius - 1.3161e-2

    def linearExpansion(self, Tk=None, Tc=None):
        """
        Instantaneous linear expansion coefficient of InconelX750.

        From http://www.specialmetals.com/documents/Inconel%20alloy%20X-750.pdf.

        The linearExpansionPercent correlation (a*Tc**2 + b*Tc + c) is divided by 100
        to convert percent strain to strain, then differentiated with respect to
        temperature, giving 2*a/100*Tc + b/100, i.e.
        2*(6.8378e-7/100.0)*Tc + 1.056e-3/100.0.

        Parameters
        ----------
        Tk : float
            temperature in (K)
        Tc : float
            Temperature in (C)

        Returns
        -------
        float
            linear expansion in m/m/C
        """
        celsius = getTc(Tc, Tk)
        self.checkPropertyTempRange("linear expansion", celsius)
        return 1.36756e-8 * celsius + 1.056e-5
================================================
FILE: armi/materials/lead.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lead.
The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to
this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data
contained in this file should not be used in production simulations.
"""
from armi.materials import material
from armi.utils.units import getTk
class Lead(material.Fluid):
    """Natural lead coolant."""

    # Kelvin ranges over which each correlation below is valid.
    propertyValidTemperature = {
        "density": ((600, 1700), "K"),
        "heat capacity": ((600, 1500), "K"),
        "volumetric expansion": ((600, 1700), "K"),
    }

    def setDefaultMassFracs(self):
        """Set the composition to natural lead."""
        self.setMassFrac("PB", 1)

    def pseudoDensity(self, Tk=None, Tc=None):
        """Density in g/cc from V. Sobolev / J Nucl Mat 362 (2007) 235-247."""
        kelvin = getTk(Tc, Tk)
        self.checkPropertyTempRange("density", kelvin)
        return 11.367 - 0.0011944 * kelvin  # pre-converted from kg/m^3 to g/cc

    def heatCapacity(self, Tk=None, Tc=None):
        """Heat capacity in J/kg/K from Sobolev."""
        kelvin = getTk(Tc, Tk)
        self.checkPropertyTempRange("heat capacity", kelvin)
        return 162.9 - 3.022e-2 * kelvin + 8.341e-6 * kelvin**2

    def volumetricExpansion(self, Tk=None, Tc=None):
        """
        Volumetric expansion inferred from the density correlation of
        V. Sobolev / J Nucl Mat 362 (2007) 235-247.

        NOT BASED ON MEASUREMENT.
        """
        kelvin = getTk(Tc, Tk)
        self.checkPropertyTempRange("volumetric expansion", kelvin)
        return 1.0 / (9516.9 - kelvin)
================================================
FILE: armi/materials/leadBismuth.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Lead-Bismuth eutectic.
This is a great coolant for superfast neutron reactors. It's heavy though.
The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to
this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data
contained in this file should not be used in production simulations.
"""
import math
from armi.materials import material
from armi.utils.units import getTk
class LeadBismuth(material.Fluid):
    """Lead-bismuth eutectic (44.5 wt% Pb / 55.5 wt% Bi).

    Property correlations are from V. Sobolev, J Nucl Mat 362 (2007) 235-247 and the
    OECD/NEA handbook on lead-bismuth eutectic.
    """

    propertyValidTemperature = {
        "density": ((400, 1300), "K"),
        "dynamic visc": ((400, 1100), "K"),
        "heat capacity": ((400, 1100), "K"),
        "thermal conductivity": ((400, 1100), "K"),
        "volumetric expansion": ((400, 1300), "K"),
    }

    def setDefaultMassFracs(self):
        r"""Set the eutectic composition mass fractions."""
        self.setMassFrac("PB", 0.445)
        self.setMassFrac("BI209", 0.555)

    def pseudoDensity(self, Tk=None, Tc=None):
        r"""Density in g/cc from V. Sobolev / J Nucl Mat 362 (2007) 235-247."""
        Tk = getTk(Tc, Tk)
        self.checkPropertyTempRange("density", Tk)
        return 11.096 - 0.0013236 * Tk  # pre-converted from kg/m^3 to g/cc

    def dynamicVisc(self, Tk=None, Tc=None):
        r"""Dynamic viscosity in Pa-s from Sobolev.

        Accessed online at:
        http://www.oecd-nea.org/science/reports/2007/nea6195-handbook.html on 11/9/12
        """
        Tk = getTk(Tc, Tk)
        self.checkPropertyTempRange("dynamic visc", Tk)
        return 4.94e-4 * math.exp(754.1 / Tk)

    def heatCapacity(self, Tk=None, Tc=None):
        r"""Heat capacity in J/kg/K from Sobolev. Expected accuracy 5%.

        Note: the original docstring misspelled "capacity" as "ccapacity".
        """
        Tk = getTk(Tc, Tk)
        self.checkPropertyTempRange("heat capacity", Tk)
        return 159 - 2.72e-2 * Tk + 7.12e-6 * Tk**2

    def thermalConductivity(self, Tk=None, Tc=None):
        r"""Thermal conductivity in W/m/K from Sobolev.

        Accessed online at:
        http://www.oecd-nea.org/science/reports/2007/nea6195-handbook.html on 11/9/12
        """
        Tk = getTk(Tc, Tk)
        self.checkPropertyTempRange("thermal conductivity", Tk)
        return 2.45 * Tk / (86.334 + 0.0511 * Tk)

    def volumetricExpansion(self, Tk=None, Tc=None):
        r"""Volumetric expansion coefficient (1/K) inferred from density.

        NOT BASED ON MEASUREMENT.
        Done by V. Sobolev / J Nucl Mat 362 (2007) 235-247.
        """
        Tk = getTk(Tc, Tk)
        self.checkPropertyTempRange("volumetric expansion", Tk)
        return 1.0 / (8383.2 - Tk)
================================================
FILE: armi/materials/lithium.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Lithium.
The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to
this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data
contained in this file should not be used in production simulations.
Warning
-------
Whenever you irradiate lithium you will get tritium.
"""
from armi import runLog
from armi.materials import material
from armi.utils.mathematics import getFloat
class Lithium(material.Fluid):
    """Liquid lithium; LI6 is the enrichable nuclide."""

    references = {"density": "Wikipedia"}
    enrichedNuclide = "LI6"

    def applyInputParams(self, LI_wt_frac=None, LI6_wt_frac=None, *args, **kwargs):
        """Apply LI6 enrichment input, supporting the deprecated ``LI_wt_frac`` spelling.

        ``LI6_wt_frac`` takes precedence when both are given.
        """
        if LI_wt_frac is not None:
            runLog.warning(
                "The 'LI_wt_frac' material modification for Lithium will be deprecated"
                " Update your inputs to use 'LI6_wt_frac' instead.",
                single=True,
                label="Lithium applyInputParams 1",
            )
            if LI6_wt_frac is not None:
                runLog.warning(
                    f"Both 'LI_wt_frac' and 'LI6_wt_frac' are specified for {self}. 'LI6_wt_frac' will be used.",
                    single=True,
                    label="Lithium applyInputParams 2",
                )
        LI6_wt_frac = LI6_wt_frac or LI_wt_frac
        enrich = getFloat(LI6_wt_frac)
        # allow 0.0 to pass in!
        if enrich is not None:
            self.adjustMassEnrichment(LI6_wt_frac)

    def pseudoDensity(self, Tk=None, Tc=None):
        r"""Density (g/cc) from Wikipedia.

        Will be liquid above 180C.

        Notes
        -----
        In ARMI, we define pseudoDensity() and density() as the same for Fluids.
        """
        return 0.512

    def setDefaultMassFracs(self):
        """Set natural-abundance LI6/LI7 mass fractions.

        Falls back to hard-coded natural abundances when no parent is attached.
        """
        # NOTE(review): assumes the parent (when present) exposes a ``nuclideBases``
        # attribute with per-nuclide abundances — confirm against the Component API.
        nb = self.parent.nuclideBases if self.parent else None
        if nb is None:
            li6abundance = 0.0759
            li7abundance = 0.92410004
        else:
            li6abundance = nb.byName["LI6"].abundance
            li7abundance = nb.byName["LI7"].abundance
        self.setMassFrac("LI6", li6abundance)
        self.setMassFrac("LI7", li7abundance)

    def meltingPoint(self):
        """Melting point in K."""
        return 453.69  # K

    def boilingPoint(self):
        """Boiling point in K."""
        return 1615.0  # K

    def thermalConductivity(self, Tk=None, Tc=None):
        """Thermal conductivity in W/m-K. Source: Wikipedia."""
        return 84.8  # W/m-K

    def heatCapacity(self, Tk=None, Tc=None):
        """Heat capacity in J/kg/K (constant)."""
        return 3570.0
================================================
FILE: armi/materials/magnesium.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Magnesium.
The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to
this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data
contained in this file should not be used in production simulations.
"""
from armi.materials import material
from armi.utils.units import getTk
class Magnesium(material.Fluid):
    """Molten magnesium."""

    propertyValidTemperature = {"density": ((923, 1390), "K")}

    def setDefaultMassFracs(self):
        """Set the composition: pure natural magnesium."""
        self.setMassFrac("MG", 1.0)

    def pseudoDensity(self, Tk=None, Tc=None):
        """Return the mass density of liquid magnesium in g/cm3.

        Source: The Liquid Temperature Range, Density and Constants of Magnesium.
        P.J. McGonigal. Temple University 1961.

        Notes
        -----
        For Fluids, ARMI defines this 2D pseudodensity to be the same as the usual 3D
        physical density.
        """
        temperatureK = getTk(Tc, Tk)
        self.checkPropertyTempRange("density", temperatureK)
        # linear fit over the valid liquid temperature range
        return 1.834 - 2.647e-4 * temperatureK
================================================
FILE: armi/materials/material.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Base Material classes.
Most temperatures may be specified in either K or C and the functions will convert for you.
"""
import functools
import traceback
import warnings
import numpy as np
from scipy.optimize import fsolve
from armi import runLog
from armi.nucDirectory import nuclideBases
from armi.reactor.flags import TypeSpec
from armi.utils import densityTools
from armi.utils.units import getTc, getTk
# globals
# When True, Material.checkTempRange raises ValueError for out-of-range property
# temperatures; when False it only emits a (single) warning.
FAIL_ON_RANGE = True
def parentAwareDensityRedirect(f):
    """Wrap ``Material.density`` to warn when called on a parented material.

    If a Material is linked to a Component, ``Material.density`` may produce results
    that differ from ``Component.density``. The component's density is the source of
    truth because it accounts for volume, composition, and temperature changes in
    concert with the reactor state.
    """

    @functools.wraps(f)
    def wrapper(self: "Material", *args, **kwargs) -> float:
        if self.parent is not None:
            frames = traceback.extract_stack()
            # frames[-1] is this wrapper; frames[-2] is the caller we want to name
            callSite = frames[-2]
            label = f"Found call to Material.density in {callSite.filename} at line {callSite.lineno}"
            runLog.warning(
                f"{label}. Calls to Material.density when attached to a component have the potential to induce "
                "subtle differences as Component.density and Material.density can diverge.",
                single=True,
                label=label,
            )
        return f(self, *args, **kwargs)

    return wrapper
class Material:
r"""
A material is made up of elements or isotopes. It has bulk properties like density.
.. impl:: The abstract material class.
:id: I_ARMI_MAT_PROPERTIES
:implements: R_ARMI_MAT_PROPERTIES
The ARMI Materials library is based on the Object-Oriented Programming design approach, and
uses this generic ``Material`` base class. In this class we define a large number of
material properties like density, heat capacity, or linear expansion coefficient. Specific
materials then subclass this base class to assign particular values to those properties.
.. impl:: Materials generate nuclide mass fractions at instantiation.
:id: I_ARMI_MAT_FRACS
:implements: R_ARMI_MAT_FRACS
An ARMI material is meant to be able to represent real world materials that might be used in
the construction of a nuclear reactor. As such, they are not just individual nuclides, but
practical materials like a particular concrete, steel, or water. One of the main things that
will be needed to describe such a material is the exact nuclide fractions. As such, the
constructor of every Material subclass attempts to set these mass fractions.
Attributes
----------
parent : Component
The component to which this material belongs
massFrac : dict
Mass fractions for all nuclides in the material keyed on the nuclide symbols
refDens : float
A reference density used by some materials, for instance `SimpleSolid`\ s, during thermal
expansion
theoreticalDensityFrac : float
Fraction of the material's density in reality, which is commonly different from 1.0 in solid
materials due to the manufacturing process. Can often be set from the blueprints input via
the TD_frac material modification. For programmatic setting, use `adjustTD()`.
Notes
-----
Specific material classes may have many more attributes specific to the implementation
for that material.
"""
def __init_subclass__(cls) -> None:
    # Apply the density decorator to every subclass.
    # functools.wraps sets ``__wrapped__`` on the wrapper, so the hasattr check
    # prevents double-wrapping a density inherited from an already-processed parent.
    if not hasattr(cls.density, "__wrapped__"):
        cls.density = parentAwareDensityRedirect(cls.density)
# Class-level defaults shared by all materials; subclasses override as needed.
DATA_SOURCE = "ARMI"
"""Indication of where the material is loaded from (may be plugin name)"""
references = {}
"""The literature references {property : citation}"""
enrichedNuclide = None
"""Name of enriched nuclide to be interpreted by enrichment modification methods"""
modelConst = {}
"""Constants that may be used in interpolation functions for property lookups"""
propertyValidTemperature = {}
"""Dictionary of valid temperatures over which the property models are valid in the format
'Property Name': ((Temperature_Lower_Limit, Temperature_Upper_Limit), Temperature_Units)"""
thermalScatteringLaws = ()
"""A tuple of :py:class:`~armi.nucDirectory.thermalScattering.ThermalScatteringLabels` instances with information
about thermal scattering."""
def __init__(self):
    """Initialize empty state, then delegate composition setup to the subclass."""
    self.parent = None  # Component this material is attached to, if any
    self.massFrac = {}  # nuclide symbol -> mass fraction
    self.refDens = 0.0  # reference density (g/cc) used by e.g. SimpleSolid
    self.theoreticalDensityFrac = 1.0  # fraction of theoretical density
    self.cached = {}  # single-slot-per-property value cache (see getProperty)
    self._backupCache = None  # stack of cache backups (see backUp/restoreBackup)
    self._name = self.__class__.__name__
    # call subclass implementations — must run after the state above is initialized
    self.setDefaultMassFracs()
def __repr__(self):
    """Return a short human-readable identifier like ``<Material: UZr>``.

    The previous implementation returned an empty f-string, which made every log
    and error message that interpolates the material (e.g. in ``setMassFrac`` and
    ``adjustMassFrac``) unreadable.
    """
    return f"<Material: {self._name}>"
@property
def name(self):
    """The material's name; defaults to the class name."""
    return self._name
@name.setter
def name(self, nomen):
    """Assign a new name to this Material.

    Warning
    -------
    Some ARMI code expects a material's name to match its class name, so
    override it at your own risk.

    See Also
    --------
    armi.materials.resolveMaterialClassByName
    """
    self._name = nomen
def getName(self):
    """Return this material's name (kept for backwards compatibility with the ``name`` property)."""
    return self._name
def getChildren(self, deep=False, generationNum=1, includeMaterials=False, predicate=None):
    """Materials are leaves of the composite tree, so always return an empty list."""
    return []
def getChildrenWithFlags(self, typeSpec: TypeSpec, exactMatch=True):
    """Materials have no children, so always return an empty list regardless of flags."""
    return []
def backUp(self):
    """Push the current property cache onto the backup stack and start a fresh one."""
    # the backup keeps a reference to the old dict, so do NOT .clear() it
    self._backupCache = (self.cached, self._backupCache)
    self.cached = {}
def restoreBackup(self, paramsToApply):
    """Pop the most recent cache backup; ``paramsToApply`` is unused for materials."""
    (self.cached, self._backupCache) = self._backupCache
def clearCache(self):
    """Drop all cached property values so they are recomputed on next access."""
    self.cached = dict()
def _getCached(self, name):
    """Return the cached value stored under ``name``, or None if absent."""
    return self.cached.get(name)
def _setCache(self, name, val):
    """Store ``val`` in the cache under ``name``.

    See Also
    --------
    _getCached : returns a previously-cached value
    """
    self.cached[name] = val
def duplicate(self):
    """Return a fresh instance carrying this material's state (cheaper than a deepcopy)."""
    clone = self.__class__()
    # shallow-copy the composition so the clone's dict is independent
    clone.massFrac = dict(self.massFrac)
    clone.parent = self.parent
    clone.refDens = self.refDens
    clone.theoreticalDensityFrac = self.theoreticalDensityFrac
    return clone
def linearExpansion(self, Tk: float = None, Tc: float = None) -> float:
    """Instantaneous linear expansion coefficient (dL/L)/dT.

    Used for reactivity coefficients, etc., but does not affect density or dimensions.

    Raises
    ------
    NotImplementedError
        Subclasses must override to provide this property.

    See Also
    --------
    linearExpansionPercent : average linear thermal expansion to affect dimensions and density
    """
    raise NotImplementedError(f"{self} does not have a linear expansion property defined")
def linearExpansionPercent(self, Tk: float = None, Tc: float = None) -> float:
    """Average thermal expansion dL/L used for computing hot dimensions and density.

    Parameters
    ----------
    Tk : float
        Temperature in (K).
    Tc : float
        Temperature in (C).

    Returns
    -------
    float
        dLL(T) in % m/m/K; 0.0 by default for materials that don't expand.

    See Also
    --------
    linearExpansion : handle instantaneous thermal expansion coefficients
    """
    return 0.0
def linearExpansionFactor(self, Tc: float, T0: float) -> float:
    """Return a dL/L factor between two temperatures rather than from the reference temperature.

    Parameters
    ----------
    Tc : float
        Current (hot) temperature in C.
    T0 : float
        Cold temperature in C.

    Returns
    -------
    float
        Average thermal expansion between Tc and T0; 0.0 when the material has no dLL.

    See Also
    --------
    linearExpansionPercent
    """
    hotPct = self.linearExpansionPercent(Tc=Tc)
    coldPct = self.linearExpansionPercent(Tc=T0)
    # re-base the percent expansion so T0 (not the material reference temp) is the anchor
    return (hotPct - coldPct) / (100.0 + coldPct)
def getThermalExpansionDensityReduction(self, prevTempInC: float, newTempInC: float) -> float:
    """Return the density scaling factor for moving from ``prevTempInC`` to ``newTempInC``."""
    dLL = self.linearExpansionFactor(Tc=newTempInC, T0=prevTempInC)
    # 2D (area) expansion: density scales with the inverse square of the length change
    return 1.0 / (1 + dLL) ** 2
def setDefaultMassFracs(self):
    """Set this material's default composition; no-op on the base class.

    Subclasses override this to populate ``self.massFrac`` at construction.
    """
    pass
def setMassFrac(self, nucName: str, massFrac: float) -> None:
    """Assign the mass fraction of a nuclide within the material.

    Notes
    -----
    The provided ``massFrac`` is coerced to ``float``; a failed conversion raises
    ``TypeError`` and an out-of-range value raises ``ValueError``.
    """
    try:
        massFrac = float(massFrac)
    except Exception as ee:
        raise TypeError(
            f"Error in converting the mass fraction of {massFrac} "
            f"for nuclide {nucName} in {self} to a float. "
            f"Exception: {ee}"
        )
    if massFrac < 0.0 or massFrac > 1.0:
        raise ValueError(f"Mass fraction of {massFrac} for {nucName} is not between 0 and 1.")
    self.massFrac[nucName] = massFrac
def applyInputParams(self):
    """Apply material-specific input parameters; no-op on the base class."""
    pass
def adjustMassEnrichment(self, massEnrichment: float) -> None:
    """Set the enrichment of this material's ``enrichedNuclide``.

    See Also
    --------
    adjustMassFrac
    """
    self.adjustMassFrac(self.enrichedNuclide, massEnrichment)
def adjustMassFrac(self, nuclideName: str, massFraction: float) -> None:
    """
    Change the mass fraction of the specified nuclide.

    This adjusts the mass fraction of a specified nuclide relative to other nuclides of the same
    element. If there are no other nuclides within the element, then it is enriched relative to
    the entire material. For example, enriching U235 in UZr would enrich U235 relative to U238
    and other naturally occurring uranium isotopes. Likewise, enriching ZR in UZr would enrich
    ZR relative to uranium.

    The method maintains a constant number of atoms, and adjusts ``refDens`` accordingly.

    Parameters
    ----------
    nuclideName : str
        Name of nuclide to enrich.
    massFraction : float
        New mass fraction to achieve.

    Raises
    ------
    ValueError
        If ``massFraction`` is outside [0, 1], if the element has no other isotopes in this
        material to rebalance against, or if too many mass fractions are zero to rebalance.
    RuntimeError
        If the adjusted mass fractions do not sum to 1.0.
    """
    if massFraction > 1.0 or massFraction < 0.0:
        raise ValueError(f"Cannot enrich to massFraction of {massFraction}, must be between 0 and 1")
    nucsNames = list(self.massFrac)
    # refDens could be zero, but cannot normalize to zero.
    density = self.refDens or 1.0
    # convert fractional composition to per-nuclide mass densities and molar densities
    massDensities = np.array([self.massFrac[nuc] for nuc in nucsNames]) * density
    atomicMasses = np.array([nuclideBases.byName[nuc].weight for nuc in nucsNames])  # in AMU
    molesPerCC = massDensities / atomicMasses  # item-wise division
    enrichedIndex = nucsNames.index(nuclideName)
    isoAndEles = nuclideBases.byName[nuclideName].element.nuclides
    allIndicesUpdated = [nucsNames.index(nuc.name) for nuc in isoAndEles if nuc.name in self.massFrac]
    if len(allIndicesUpdated) == 1:
        if isinstance(
            nuclideBases.byName[nuclideName], nuclideBases.NaturalNuclideBase
        ) or nuclideBases.isMonoIsotopicElement(nuclideName):
            # If there are not any other nuclides, assume we are enriching an entire element.
            # Consequently, allIndicesUpdated is no longer the element's indices, but the materials indices
            allIndicesUpdated = range(len(nucsNames))
        else:
            raise ValueError(  # could be warning if problematic
                f"Nuclide {nuclideName} was to be enriched in material {self}, but there were no other isotopes of "
                "that element. Could not assume the enrichment of the entire element as there were other possible "
                "isotopes that did not exist in this material."
            )
    if massFraction == 1.0:
        massDensities[allIndicesUpdated] = 0.0
        massDensities[enrichedIndex] = 1.0
    else:
        balanceWeight = massDensities[allIndicesUpdated].sum() - massDensities[enrichedIndex]
        if balanceWeight == 0.0:
            onlyOneOtherFracToDetermine = len(allIndicesUpdated) == 2
            if not onlyOneOtherFracToDetermine:
                raise ValueError(
                    f"Material {self} has too many masses set to zero. cannot enrich {nuclideName} to "
                    f"{massFraction}. Current mass fractions: {self.massFrac}"
                )
            # massDensities get normalized later when conserving atoms; these are just ratios
            massDensities[allIndicesUpdated] = 1 - massFraction  # there is only one other.
            massDensities[enrichedIndex] = massFraction
        else:
            # derived from solving the following equation for enrchedWeight:
            # massFraction = enrichedWeight / (enrichedWeight + balanceWeight)
            massDensities[enrichedIndex] = massFraction * balanceWeight / (1 - massFraction)
    # ratio is set by here but atoms not conserved yet
    updatedNucsMolesPerCC = massDensities[allIndicesUpdated] / atomicMasses[allIndicesUpdated]
    updatedNucsMolesPerCC *= molesPerCC[allIndicesUpdated].sum() / updatedNucsMolesPerCC.sum()  # conserve atoms
    molesPerCC[allIndicesUpdated] = updatedNucsMolesPerCC
    updatedMassDensities = molesPerCC * atomicMasses
    updatedDensity = updatedMassDensities.sum()
    massFracs = updatedMassDensities / updatedDensity
    if not np.isclose(sum(massFracs), 1.0, atol=1e-10):
        raise RuntimeError(f"The mass fractions {massFracs} in {self} do not sum to 1.0.")
    self.massFrac = {nuc: weight for nuc, weight in zip(nucsNames, massFracs)}
    if self.refDens != 0.0:  # don't update density if not assigned
        self.refDens = updatedDensity
def volumetricExpansion(self, Tk=None, Tc=None):
    """Volumetric expansion coefficient (1/K); base-class stub returning None."""
    pass
def getTemperatureAtDensity(self, targetDensity: float, tempGuessInC: float) -> float:
    """Solve for the temperature (in Celsius) at which this material has ``targetDensity``."""

    def densityOffset(temp):
        # zero exactly at the temperature that yields targetDensity
        return self.density(Tc=temp) - targetDensity

    # fsolve returns an ndarray; extract the scalar root
    return float(fsolve(densityOffset, tempGuessInC)[0])
@property
def liquidPorosity(self) -> float:
    """Fraction of the material that is liquid void (unitless); delegated to the parent."""
    if self.parent is None:
        return 0.0
    return self.parent.liquidPorosity
@property
def gasPorosity(self) -> float:
    """Fraction of the material that is gas void (unitless); delegated to the parent."""
    if self.parent is None:
        return 0.0
    return self.parent.gasPorosity
def pseudoDensity(self, Tk: float = None, Tc: float = None) -> float:
    """Return the density that preserves mass under 2D thermal expansion (g/cm^3).

    Warning
    -------
    This will not typically agree with ``Material.density()`` or ``Component.density()``
    because it expands in only two dimensions; depending on ``inputHeightsConsideredHot``
    and ``Component.temperatureInC`` it may differ by a factor of (1+dLL).

    For fluids, density and pseudoDensity are the same since fluid density comes from an
    explicit temperature-dependent function rather than linear expansion
    (``Material.linearExpansionPercent()`` is zero for a fluid).

    See Also
    --------
    density
    armi.reactor.components.component.Component.density
    """
    Tk = getTk(Tc, Tk)
    dLL = self.linearExpansionPercent(Tk=Tk)
    if self.refDens is None:
        runLog.warning(
            f"{self} has no reference density",
            single=True,
            label="No refD " + self.getName(),
        )
        self.refDens = 0.0
    # area (2D) expansion factor
    areaFactor = (1.0 + dLL / 100.0) ** 2
    return self.refDens / areaFactor
def pseudoDensityKgM3(self, Tk: float = None, Tc: float = None) -> float:
    """2D-expansion-preserving density in kg/m^3.

    See Also
    --------
    pseudoDensity : the g/cc version this forwards to
    """
    return 1000.0 * self.pseudoDensity(Tk, Tc)
def density(self, Tk: float = None, Tc: float = None) -> float:
    """Return the density that preserves mass under 3D thermal expansion (g/cm^3).

    Notes
    -----
    ``refDens`` is specified at the material-dependent reference case, so no separate
    reference temperature is needed; it is already consistent with
    ``linearExpansionPercent``:

    - p*(dp/p(T) + 1) = p*(p + dp(T))/p = p + dp(T) = p(T)
    - dp/p = (1-(1 + dL/L)**3)/(1 + dL/L)**3
    """
    Tk = getTk(Tc, Tk)
    dLL = self.linearExpansionPercent(Tk=Tk)
    referenceDensity = self.refDens
    if referenceDensity is None:
        runLog.warning(
            "{0} has no reference density".format(self),
            single=True,
            label="No refD " + self.getName(),
        )
        return None
    # volumetric (3D) expansion factor
    return referenceDensity / (1.0 + dLL / 100.0) ** 3
def densityKgM3(self, Tk: float = None, Tc: float = None) -> float:
    """3D-expansion-preserving density in kg/m^3.

    See Also
    --------
    density : the g/cc version this forwards to
    """
    return 1000.0 * self.density(Tk, Tc)
def getCorrosionRate(self, Tk: float = None, Tc: float = None) -> float:
    """Corrosion rate at the given temperature (microns/year); zero by default."""
    return 0.0
def yieldStrength(self, Tk: float = None, Tc: float = None) -> float:
    """Yield strength at given T in MPa; base-class stub returning None."""
    pass
def thermalConductivity(self, Tk: float = None, Tc: float = None) -> float:
    """Thermal conductivity for given T (W/m/K); base-class stub returning None."""
    pass
def getProperty(self, propName: str, Tk: float = None, Tc: float = None, **kwargs) -> float:
    """Look up a property value, caching one (temperature, value) pair per property."""
    Tk = getTk(Tc, Tk)
    entry = self._getCached(propName)
    if entry and entry[0] == Tk:
        # reuse only when cached at exactly this temperature
        return entry[1]
    # recompute from the named property method and refresh the single-slot cache
    # (one value per property prevents unbounded cache growth)
    value = getattr(self, propName)(Tk=Tk, **kwargs)
    self._setCache(propName, (Tk, value))
    return value
def getMassFrac(
    self,
    nucName=None,
    normalized=True,
    expandFissionProducts=False,
):
    """Return the mass fraction of ``nucName`` (0.0 when absent).

    Parameters
    ----------
    nucName : str, optional
        Nuclide name to return ('ZR', 'PU239', etc.)
    normalized : bool, optional
        Return the mass fraction such that the sum of all nuclides is 1.0. Default True.

    Notes
    -----
    ``self.massFrac`` holds modified mass fractions that may not sum to 1.0 (e.g. after
    axial expansion); the values stay normalized as long as they are only touched via
    ``setMassFrac``/``getMassFrac``. This is performance-critical — it is called millions
    of times in a typical ARMI run.

    See Also
    --------
    setMassFrac
    """
    return self.massFrac.get(nucName, 0.0)
def clearMassFrac(self) -> None:
    """Remove every nuclide mass fraction from the composition."""
    self.massFrac.clear()
def removeNucMassFrac(self, nuc: str) -> None:
    """Zero out and delete ``nuc`` from the mass-fraction map (silently skips absent nuclides)."""
    self.setMassFrac(nuc, 0)
    try:
        del self.massFrac[nuc]
    except KeyError:
        # nuc was not present in the mass fraction vector
        pass
def checkPropertyTempRange(self, label, val):
    """Validate ``val`` against the temperature limits registered for property ``label``.

    Parameters
    ----------
    label : str
        The name of the function or property that is being checked.
    val : float
        The temperature value to validate.

    Notes
    -----
    Convenience wrapper around ``checkTempRange`` using ``propertyValidTemperature``.
    """
    limits = self.propertyValidTemperature[label][0]
    self.checkTempRange(limits[0], limits[1], val, label)
def checkTempRange(self, minT, maxT, val, label=""):
    """Ensure ``minT <= val <= maxT``; raise or warn depending on ``FAIL_ON_RANGE``.

    Parameters
    ----------
    minT, maxT : float
        The minimum and maximum values that val is allowed to have.
    val : float
        The value to check whether it is between minT and maxT.
    label : str
        The name of the function or property that is being checked.
    """
    if minT <= val <= maxT:
        # in range (note: a NaN val fails this comparison and falls through)
        return
    msg = "Temperature {0} out of range ({1} to {2}) for {3} {4}".format(val, minT, maxT, self.name, label)
    if FAIL_ON_RANGE or np.isnan(val):
        runLog.error(msg)
        raise ValueError(msg)
    runLog.warning(
        msg,
        single=True,
        label=f"T out of bounds for {self.name} {label}",
    )
def densityTimesHeatCapacity(self, Tk: float = None, Tc: float = None) -> float:
    """Return heat capacity * density at a temperature.

    Parameters
    ----------
    Tk : float, optional
        Temperature in Kelvin.
    Tc : float, optional
        Temperature in degrees Celsius.

    Returns
    -------
    float
        density * heat capacity in J/m^3-K.
    """
    Tc = getTc(Tc, Tk)
    # density() is g/cc; *1000 converts to kg/m^3 before multiplying by J/kg-K
    return self.density(Tc=Tc) * 1000.0 * self.heatCapacity(Tc=Tc)
def getNuclides(self):
    """Return nuclides of the Component that contains this Material (deprecated).

    Notes
    -----
    This method is the only reason Materials still carry ``self.parent``. The Reactor's
    nuclide-finding logic is recursive and consults Materials first, and downstream
    repos call this directly in many places, so removal is gradual. Please do not use
    this method.
    """
    warnings.warn("Material.getNuclides is being deprecated.", DeprecationWarning)
    return self.parent.getNuclides()
def getTempChangeForDensityChange(self, Tc: float, densityFrac: float, quiet: bool = True) -> float:
    """Return the temperature difference needed to produce a given density perturbation.

    Parameters
    ----------
    Tc : float
        Initial temperature in C.
    densityFrac : float
        Desired ratio of new density to current density.
    quiet : bool
        When False, log an informational message.

    Returns
    -------
    float
        Temperature change in C.
    """
    linearExpansion = self.linearExpansion(Tc=Tc)
    # a 3D density ratio maps to a 1D length change via the cube root
    linearChange = densityFrac ** (-1.0 / 3.0) - 1.0
    deltaT = linearChange / linearExpansion
    if not quiet:
        # bug fix: the final segment was missing its f-prefix, so "{deltaT}" was
        # printed literally; the stray "at" made the sentence ungrammatical.
        runLog.info(
            f"The linear expansion for {self.getName()} at initial temperature of {Tc} C is "
            f"{linearExpansion}.\nA change in density of {(densityFrac - 1.0) * 100.0} percent "
            f"would require a change in temperature of {deltaT} C.",
            single=True,
        )
    return deltaT
def heatCapacity(self, Tk=None, Tc=None):
    """Heat capacity in J/kg/C; subclasses must implement.

    Raises
    ------
    NotImplementedError
        Always, on the base class.
    """
    raise NotImplementedError(f"Material {type(self).__name__} does not implement heatCapacity")
def getTD(self):
    """Return the fraction of theoretical density for this material."""
    return self.theoreticalDensityFrac
def adjustTD(self, val):
    """Update the theoretical density fraction and invalidate cached properties."""
    self.theoreticalDensityFrac = val
    self.clearCache()
class Fluid(Material):
    """A material that fills its container. Could also be a gas."""

    def __init_subclass__(cls):
        # Undo the parent-aware density wrapping applied by Material.__init_subclass__.
        # Fluids do not expand in the same way solids do, so Fluid.density(T) is correct
        # on its own; the warning redirect only makes sense for thermally-expanding solids.
        if hasattr(cls.density, "__wrapped__"):
            cls.density = cls.density.__wrapped__

    def getThermalExpansionDensityReduction(self, prevTempInC, newTempInC):
        """Return the factor required to update thermal expansion going from one temperature (in
        Celsius) to a new temperature.
        """
        rho0 = self.pseudoDensity(Tc=prevTempInC)
        if not rho0:
            # zero/undefined reference density: no meaningful scaling is possible
            return 1.0
        rho1 = self.pseudoDensity(Tc=newTempInC)
        return rho1 / rho0

    def linearExpansion(self, Tk=None, Tc=None):
        """Fluids fill their container, so temperature changes do not change dimensions.

        .. impl:: Fluid materials are not thermally expandable.
            :id: I_ARMI_MAT_FLUID
            :implements: R_ARMI_MAT_FLUID

            ARMI does not model thermal expansion of fluids. The ``Fluid`` superclass therefore sets
            the thermal expansion coefficient to zero. All fluids subclassing the ``Fluid``
            material will inherit this method which sets the linear expansion coefficient to zero at
            all temperatures.
        """
        return 0.0

    def getTempChangeForDensityChange(self, Tc: float, densityFrac: float, quiet: bool = True) -> float:
        """Return a temperature difference for a given density perturbation."""
        currentDensity = self.pseudoDensity(Tc=Tc)
        perturbedDensity = currentDensity * densityFrac
        # invert the density function numerically rather than via linear expansion
        tAtPerturbedDensity = self.getTemperatureAtDensity(perturbedDensity, Tc)
        deltaT = tAtPerturbedDensity - Tc
        if not quiet:
            runLog.info(
                "A change in density of {} percent in {} at an initial temperature of {} C would "
                "require a change in temperature of {} C.".format(
                    (densityFrac - 1.0) * 100.0, self.getName(), Tc, deltaT
                ),
                single=True,
            )
        return deltaT

    def density(self, Tk=None, Tc=None):
        """
        Return the density at the specified temperature for 3D expansion (in g/cm^3).

        Notes
        -----
        For fluids, there is no such thing as 2D expansion so pseudoDensity() is already 3D.
        """
        return self.pseudoDensity(Tk=Tk, Tc=Tc)
class SimpleSolid(Material):
    """
    Base material for a simple material that primarily defines density.

    See Also
    --------
    armi.materials.pseudoDensity:
    armi.materials.density:
    """

    # reference temperature (K) at which refDens is captured in __init__
    refTempK = 300

    def __init__(self):
        Material.__init__(self)
        # capture the subclass-provided free-expansion density at the reference temperature
        self.refDens = self.density(Tk=self.refTempK)

    def linearExpansionPercent(self, Tk: float = None, Tc: float = None) -> float:
        """
        Average thermal expansion dL/L. Used for computing hot dimensions and density.

        Defaults to 0.0 for materials that don't expand.

        Parameters
        ----------
        Tk : float
            temperature in (K)
        Tc : float
            Temperature in (C)

        Returns
        -------
        dLL(T) in % m/m/K

        Notes
        -----
        This method only works for SimpleSolid materials, which assume the density
        function returns the 'free expansion' density as a function of temperature.
        """
        density1 = self.density(Tk=self.refTempK)
        density2 = self.density(Tk=Tk, Tc=Tc)
        if density1 == density2:
            return 0
        else:
            # invert the 3D density ratio to a 1D length change, expressed in percent
            return 100 * ((density1 / density2) ** (1.0 / 3.0) - 1)

    def density(self, Tk: float = None, Tc: float = None) -> float:
        """Material density (in g/cm^3); subclasses override this stub."""
        return 0.0

    def pseudoDensity(self, Tk: float = None, Tc: float = None) -> float:
        """
        The same method as the parent class, but with the ability to apply a
        non-unity theoretical density (in g/cm^3).
        """
        return Material.pseudoDensity(self, Tk=Tk, Tc=Tc) * self.getTD()
class FuelMaterial(Material):
    """
    Material that is considered a nuclear fuel.

    All this really does is enable the special class 1/class 2 isotopics input option.
    """

    class1_wt_frac = None
    class1_custom_isotopics = None
    class2_custom_isotopics = None

    def applyInputParams(
        self,
        class1_custom_isotopics=None,
        class2_custom_isotopics=None,
        class1_wt_frac=None,
        customIsotopics=None,
    ):
        """Apply optional class 1/class 2 custom enrichment input.

        Notes
        -----
        This is often overridden to insert customized material modification parameters but then this
        parent should always be called at the end in case users want to use this style of custom
        input.

        This is only applied to materials considered fuel so we don't apply these kinds of
        parameters to coolants and structural material, which are often not parameterized with any
        kind of enrichment.
        """
        # A falsy/absent class1_wt_frac means no class 1/2 blending was requested.
        if class1_wt_frac:
            if not 0 <= class1_wt_frac <= 1:
                raise ValueError(
                    f"class1_wt_frac must be between 0 and 1 (inclusive). Right now it is {class1_wt_frac}."
                )

            definedIsotopics = customIsotopics.keys()
            missingTemplate = "{} '{}' not found in the defined custom isotopics."
            # Validate class 1 first, then class 2, matching the input order.
            for label, isotopicsName in (
                ("class1_custom_isotopics", class1_custom_isotopics),
                ("class2_custom_isotopics", class2_custom_isotopics),
            ):
                if isotopicsName not in definedIsotopics:
                    raise KeyError(missingTemplate.format(label, isotopicsName))

            if class1_custom_isotopics == class2_custom_isotopics:
                runLog.warning(
                    "The custom isotopics specified for the class1/class2 materials are both "
                    f"'{class1_custom_isotopics}'. You are not actually blending anything!"
                )

            self.class1_wt_frac = class1_wt_frac
            self.class1_custom_isotopics = class1_custom_isotopics
            self.class2_custom_isotopics = class2_custom_isotopics
            self._applyIsotopicsMixFromCustomIsotopicsInput(customIsotopics)

    def _applyIsotopicsMixFromCustomIsotopicsInput(self, customIsotopics):
        """
        Apply a Class 1/Class 2 mixture of custom isotopics at input.

        Only adjust heavy metal.

        This may also be needed for building charge assemblies during reprocessing, but will take
        input from the SFP rather than from the input external feeds.
        """
        mixA = customIsotopics[self.class1_custom_isotopics]
        mixB = customIsotopics[self.class2_custom_isotopics]
        densityTools.applyIsotopicsMix(self, mixA, mixB)

    def duplicate(self):
        """Copy without needing a deepcopy."""
        clone = self.__class__()
        # Shallow-copy the mass fraction mapping so the clone owns its own dict.
        clone.massFrac = dict(self.massFrac)
        clone.parent = self.parent
        clone.refDens = self.refDens
        clone.theoreticalDensityFrac = self.theoreticalDensityFrac
        clone.class1_wt_frac = self.class1_wt_frac
        clone.class1_custom_isotopics = self.class1_custom_isotopics
        clone.class2_custom_isotopics = self.class2_custom_isotopics
        return clone
================================================
FILE: armi/materials/mgO.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Magnesium Oxide.
The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to
this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data
contained in this file should not be used in production simulations.
"""
from armi.materials.material import Material
from armi.utils.units import getTc, getTk
class MgO(Material):
    """MagnesiumOxide."""

    propertyValidTemperature = {
        "density": ((273, 1273), "K"),
        "linear expansion percent": ((273.15, 1273.15), "K"),
    }

    def __init__(self):
        """Set the reference density for MgO.

        Reference density is from Wolfram Alpha at STP (273 K); same reference
        as linear expansion, Table II.
        """
        # Bug fix: this note used to be a bare string literal placed *after* the
        # call to Material.__init__, making it a dead no-op statement rather
        # than a docstring. It is now the method docstring.
        Material.__init__(self)
        self.refDens = 3.58

    def setDefaultMassFracs(self):
        """Set natural Mg and O-16 mass fractions for stoichiometric MgO."""
        self.setMassFrac("MG", 0.603035897)
        self.setMassFrac("O16", 0.396964103)

    def linearExpansionPercent(self, Tk=None, Tc=None):
        """The coefficient of expansion of magnesium oxide.

        Milo A. Durand
        Journal of Applied Physics 7, 297 (1936); doi: 10.1063/1.174539

        This is based on a 3rd order polynomial fit of the data in Table I.

        Parameters
        ----------
        Tk : float, optional
            temperature in Kelvin
        Tc : float, optional
            temperature in Celsius
        """
        Tc = getTc(Tc, Tk)
        Tk = getTk(Tc=Tc)
        self.checkPropertyTempRange("linear expansion percent", Tk)
        # Cubic fit in Celsius.
        return 1.0489e-5 * Tc + 6.0458e-9 * Tc**2 - 2.6875e-12 * Tc**3
================================================
FILE: armi/materials/mixture.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Homogenized mixture material."""
from armi import materials
class _Mixture(materials.Material):
    """
    Homogenized mixture of materials.

    :meta public:

    .. warning:: This class is meant to be used for homogenized block models for neutronics and other
        physics solvers.

    Notes
    -----
    This material class can be used to represent a homogenized mixture of materials within a block.
    This would be done for performance reasons. It allows ARMI to avoid copying and carrying around
    the detailed, explicit representation of components within a block to be used in a physics solver
    when that solver only needs to know the homogenized number density within a block.

    See Also
    --------
    armi.reactor.blocks.HexBlock.createHomogenizedCopy
    """
================================================
FILE: armi/materials/molybdenum.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Molybdenum.
The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to
this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data
contained in this file should not be used in production simulations.
"""
from armi.materials.material import SimpleSolid
class Molybdenum(SimpleSolid):
    """Elemental molybdenum."""

    def setDefaultMassFracs(self):
        """Moly mass fractions: pure natural molybdenum."""
        self.setMassFrac("MO", 1.0)

    def density(self, Tk=None, Tc=None):
        """Return density in g/cc; the temperature arguments are ignored."""
        return 10.28  # g/cc
================================================
FILE: armi/materials/mox.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Mixed-oxide (MOX) ceramic fuel.
A definitive source for these properties is [#ornltm20002]_.
.. [#ornltm20002] Thermophysical Properties of MOX and UO2 Fuels Including the Effects of
Irradiation. S.G. Popov, et al. Oak Ridge National Laboratory.
ORNL/TM-2000/351 https://rsicc.ornl.gov/fmdp/tm2000-351.pdf
The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to
this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data
contained in this file should not be used in production simulations.
"""
from armi import runLog
from armi.materials import material
from armi.materials.uraniumOxide import UraniumOxide
from armi.nucDirectory import nucDir
class MOX(UraniumOxide):
    """
    MOX fuel.

    Some parameters (density, thermal conductivity, etc) are inherited from UraniumOxide. These
    parameters are sufficiently equivalent to pure UO2 in the literature to leave them unchanged.

    Specific MOX mixtures may be defined in blueprints under custom isotopics.
    """

    enrichedNuclide = "U235"

    def __init__(self):
        UraniumOxide.__init__(self)

    def applyInputParams(self, U235_wt_frac=None, TD_frac=None, mass_frac_PU02=None, *args, **kwargs):
        """Apply optional inputs: U-235 enrichment, theoretical density fraction, and PuO2 mass fraction.

        Parameters
        ----------
        U235_wt_frac : float, optional
            target U-235 weight fraction for enrichment adjustment
        TD_frac : float, optional
            theoretical density fraction; warns if > 1 or == 0
        mass_frac_PU02 : float, optional
            target PuO2 mass fraction of the heavy-metal oxide
        """
        if U235_wt_frac is not None:
            self.adjustMassEnrichment(U235_wt_frac)

        td = TD_frac
        if td is not None:
            if td > 1.0:
                runLog.warning(
                    "Theoretical density frac for {0} is {1}, which is >1".format(self, td),
                    single=True,
                    label="Large theoretical density",
                )
            elif td == 0:
                # Bug fix: this warning was a plain string literal, so "{self}"
                # was printed verbatim instead of the material name.
                runLog.warning(
                    f"Theoretical density frac for {self} is zero!",
                    single=True,
                    label="Zero theoretical density",
                )
            self.adjustTD(td)

        if mass_frac_PU02 is not None:
            self.setMassFracPuO2(mass_frac_PU02)

        material.FuelMaterial.applyInputParams(self, *args, **kwargs)

    def getMassFracPuO2(self):
        """Return the plutonium mass divided by the total U+Pu mass."""
        massFracPu = sum([self.getMassFrac(n) for n in nucDir.getNuclideNames(elementSymbol="PU")])
        massFracU = sum([self.getMassFrac(n) for n in nucDir.getNuclideNames(elementSymbol="U")])
        return massFracPu / (massFracPu + massFracU)

    def setMassFracPuO2(self, massFracPuO2):
        """Rescale Pu and U nuclide mass fractions so Pu makes up ``massFracPuO2`` of the U+Pu mass."""
        massFracPu = sum([self.getMassFrac(n) for n in nucDir.getNuclideNames(elementSymbol="PU")])
        massFracU = sum([self.getMassFrac(n) for n in nucDir.getNuclideNames(elementSymbol="U")])
        total = massFracU + massFracPu
        for Pu in nucDir.getNuclideNames("PU"):
            self.setMassFrac(Pu, self.getMassFrac(Pu) / massFracPu * massFracPuO2 * total)
        # Bug fix: this loop previously iterated the plutonium nuclides again
        # ("PU"), so the uranium fractions were never rescaled and the freshly
        # set plutonium fractions were overwritten with the uranium formula.
        for U in nucDir.getNuclideNames("U"):
            self.setMassFrac(U, self.getMassFrac(U) / massFracU * (1 - massFracPuO2) * total)

    def getMolFracPuO2(self):
        """Return the PuO2/UO2 mole ratio.

        NOTE(review): this computes massFracPuO2 * MW_UO2 / (massFracUO2 * MW_PuO2),
        which is the PuO2/UO2 *mole ratio* n_PuO2/n_UO2, not the mole fraction
        n_PuO2/(n_PuO2 + n_UO2). meltingPoint() uses it where the correlation is
        written in terms of a mole fraction — confirm which is intended.
        """
        molweightUO2 = 270.02771  # Approximation, does not include variance due to isotopes
        molweightPuO2 = 275.9988  # Approximation, does not include variance due to isotopes
        massFracPuO2 = self.getMassFracPuO2()
        massFracUO2 = 1 - massFracPuO2
        return massFracPuO2 * molweightUO2 / massFracUO2 / molweightPuO2

    def setDefaultMassFracs(self):
        r"""UO2 + PuO2 mixture mass fractions.

        Pu238: 238.0495599 g/mol
        Pu239: 239.0521634 g/mol
        Pu240: 240.0538135 g/mol
        Pu241: 241.0568515 g/mol
        Pu242: 242.0587426 g/mol
        Am241: 241.0568291 g/mol
        U-235: 235.0439299 g/mol
        U-238: 238.0507882 g/mol
        Oxygen: 15.9994 g/mol

        JOYO MOX mass fraction calculation:
        Pu mixture: 0.1% Pu238 + 76.82% Pu239 + 19.23% Pu240 + 2.66% Pu241 + 0.55% Pu242 + 0.64% Am241
        Pu atomic mass: 239.326469 g/mol
        U mixture: 22.99% U-235 + 77.01% U-238
        U atomic mass: 237.359511 g/mol
        UPu mixture: 17.7% Pu mixture + 82.3% U mixture
        UPu atomic mass: 237.70766 g/mol
        2 moles of oxygen/1 mole of UPu

        grams of UPu = 237.70766 g/mol* 1 mol = 237.70766 g
        grams of oxygen= 15.9994 g/mol * 2 mol = 31.9988 g
        total= 269.70646 g.

        Mass fraction UPu : 237.70766/269.70646 = 0.881357
        Mass fraction Pu mixture: 0.177*237.70766/269.70646 = 0.156000
        Mass fraction U mixture: 0.823*237.70766/269.70646 = 0.725356
        Mass fraction Pu238: 0.001*42.074256/269.70646 = 0.000156
        Mass fraction Pu239: 0.7682*42.074256/269.70646 = 0.119839
        Mass fraction Pu240: 0.1923*42.074256/269.70646 = 0.029999
        Mass fraction Pu241: 0.0266*42.074256/269.70646 = 0.004150
        Mass fraction Pu242: 0.0055*42.074256/269.70646 = 0.000858
        Mass fraction Am241: 0.0064*42.074256/269.70646 = 0.000998
        Mass fraction U-235: 0.2299*195.633404/269.70646 = 0.166759
        Mass fraction U-238: 0.7701*195.633404/269.70646 = 0.558597
        Mass fraction O: 31.9988/269.70646 = 0.118643
        """
        self.setMassFrac("PU238", 0.000156)
        self.setMassFrac("PU239", 0.119839)
        self.setMassFrac("PU240", 0.029999)
        self.setMassFrac("PU241", 0.004150)
        self.setMassFrac("PU242", 0.000858)
        self.setMassFrac("AM241", 0.000998)
        self.setMassFrac("U235", 0.166759)
        self.setMassFrac("U238", 0.558597)
        self.setMassFrac("O16", 0.118643)

    def meltingPoint(self):
        """
        Melting point in K - ORNL/TM-2000/351.

        Melting point is a function of PuO2 mol fraction.
        The liquidus Tl and solidus Ts temperatures in K are given by:
        Tl(y) = 3120.0 - 388.1*y - 30.4*y^2
        Ts(y) = 3120.0 - 655.3*y + 336.4*y^2 - 99.9*y^3
        where y is the mole fraction of PuO2

        This function returns the solidus temperature.

        Does not take into account changes in the melting temp due to burnup.
        """
        molFracPuO2 = self.getMolFracPuO2()
        return 3120.0 - 655.3 * molFracPuO2 + 336.4 * molFracPuO2**2 - 99.9 * molFracPuO2**3
================================================
FILE: armi/materials/nZ.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Niobium Zirconium Alloy.
The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to
this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data
contained in this file should not be used in production simulations.
"""
from armi.materials.material import SimpleSolid
class NZ(SimpleSolid):
    """Niobium-zirconium alloy (99 wt% Nb, 1 wt% Zr)."""

    def setDefaultMassFracs(self):
        """Set the Nb-93 and natural Zr mass fractions."""
        self.setMassFrac("NB93", 0.99)
        self.setMassFrac("ZR", 0.01)

    def density(self, Tk=None, Tc=None):
        """Return density in g/cc; the temperature arguments are ignored."""
        return 8.66  # g/cc
================================================
FILE: armi/materials/potassium.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Potassium.
The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to
this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data
contained in this file should not be used in production simulations.
"""
from armi.materials import material
from armi.utils.units import getTc, getTk
class Potassium(material.Fluid):
    """
    Molten pure Potassium.

    From Foust, O.J. Sodium-NaK Engineering Handbook Vol. 1. New York: Gordon and Breach, 1972.
    """

    propertyValidTemperature = {"density": ((63.2, 1250), "C")}

    def pseudoDensity(self, Tk=None, Tc=None):
        r"""
        Density of molten potassium in g/cc.

        From Foust, O.J. Sodium-NaK Engineering Handbook Vol. 1. New York: Gordon and Breach, 1972.
        Page 18.

        Notes
        -----
        In ARMI, we define pseudoDensity() and density() as the same for Fluids.
        """
        Tc = getTc(Tc, Tk)
        Tk = getTk(Tc=Tc)
        # Valid range for this correlation is declared in Celsius.
        self.checkPropertyTempRange("density", Tc)
        # Cubic correlation in Celsius from the handbook.
        return 0.8415 - 2.172e-4 * Tc - 2.70e-8 * Tc**2 + 4.77e-12 * Tc**3
================================================
FILE: armi/materials/scandiumOxide.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Scandium Oxide.
The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to
this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data
contained in this file should not be used in production simulations.
"""
from armi.materials.material import Material
from armi.utils.units import getTk
class Sc2O3(Material):
    """Scandium oxide (scandia)."""

    propertyValidTemperature = {"linear expansion percent": ((273.15, 1573.15), "K")}

    def __init__(self):
        """Set the reference density.

        Reference density from https://en.wikipedia.org/wiki/Scandium_oxide
        """
        # Bug fix: the reference note used to be a bare string literal placed
        # *after* the call to Material.__init__, making it a dead no-op
        # statement rather than a docstring. It is now the method docstring.
        Material.__init__(self)
        self.refDens = 3.86

    def setDefaultMassFracs(self):
        """Set Sc-45 and O-16 mass fractions for stoichiometric Sc2O3."""
        self.setMassFrac("SC45", 0.6520)
        self.setMassFrac("O16", 0.3480)

    def linearExpansionPercent(self, Tk=None, Tc=None):
        """
        Return the linear expansion percent for Scandium Oxide (Scandia).

        Parameters
        ----------
        Tk : float, optional
            temperature in Kelvin
        Tc : float, optional
            temperature in Celsius

        Notes
        -----
        From Table 4 of "Thermal Expansion and Phase Inversion of Rare-Earth Oxides.
        """
        Tk = getTk(Tc, Tk)
        self.checkPropertyTempRange("linear expansion percent", Tk)
        # Quadratic fit in Kelvin.
        return 2.6045e-07 * Tk**2 + 4.6374e-04 * Tk - 1.4696e-01
================================================
FILE: armi/materials/siC.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Silicon Carbide.
The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to
this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data
contained in this file should not be used in production simulations.
"""
import math
from armi.materials.material import Material
from armi.nucDirectory import thermalScattering as tsl
from armi.utils.units import getTc
class SiC(Material):
    """Silicon Carbide."""

    thermalScatteringLaws = (tsl.fromNameAndCompound("C", tsl.SIC), tsl.fromNameAndCompound("SI", tsl.SIC))
    references = {
        "heat capacity": ["Munro, Material Properties of a-SiC, J. Phys. Chem. Ref. Data, Vol. 26, No. 5, 1997"],
        "cumulative linear expansion": [
            "Munro, Material Properties of a-SiC, J. Phys. Chem. Ref. Data, Vol. 26, No. 5, 1997"
        ],
        "density": ["Munro, Material Properties of a-SiC, J. Phys. Chem. Ref. Data, Vol. 26, No. 5, 1997"],
        "thermal conductivity": ["Munro, Material Properties of a-SiC, J. Phys. Chem. Ref. Data, Vol. 26, No. 5, 1997"],
    }
    propertyEquation = {
        "heat capacity": "1110 + 0.15*Tc - 425*math.exp(-0.003*Tc)",
        "cumulative linear expansion": "(4.22 + 8.33E-4*Tc-3.51*math.exp(-0.00527*Tc))*1.0E-6",
        "density": "(rho0*(1 + cA*(Tc - Tc0))**(-3))*1.0E3",
        "thermal conductivity": "(52000*math.exp(-1.24E-5*Tc))/(Tc+437)",
    }
    propertyUnits = {
        "melting point": "K",
        "heat capacity": "J kg^-1 K^-1",
        "cumulative linear expansion": "K^-1",
        "density": "kg m^-3",
        "thermal conductivity": "W m^-1 K^-1",
    }
    propertyNotes = {}
    propertyValidTemperature = {
        "cumulative linear expansion": ((0, 1500), "C"),
        "density": ((0, 1500), "C"),
        "heat capacity": ((0, 2000), "C"),
        "thermal conductivity": ((0, 2000), "C"),
    }
    refTempK = 298.15

    def setDefaultMassFracs(self):
        """Set C and Si mass fractions for stoichiometric SiC and the reference density."""
        self.setMassFrac("C", 0.299547726)
        self.setMassFrac("SI", 0.700452274)
        self.refDens = 3.21

    def meltingPoint(self):
        """Melting point in K."""
        return 3003.0

    def heatCapacity(self, Tc=None, Tk=None):
        """Specific heat capacity in J/(kg K) (Munro 1997)."""
        Tc = getTc(Tc, Tk)
        self.checkPropertyTempRange("heat capacity", Tc)
        expTerm = 425 * math.exp(-0.003 * Tc)
        return 1110 + 0.15 * Tc - expTerm

    def cumulativeLinearExpansion(self, Tk=None, Tc=None):
        """Cumulative linear expansion coefficient in 1/K (Munro 1997)."""
        Tc = getTc(Tc, Tk)
        self.checkPropertyTempRange("cumulative linear expansion", Tc)
        return (4.22 + 8.33e-4 * Tc - 3.51 * math.exp(-0.00527 * Tc)) * 1.0e-6

    def pseudoDensity(self, Tc=None, Tk=None):
        """Density in g/cc from the Munro 1997 expansion-based correlation."""
        Tc = getTc(Tc, Tk)
        self.checkPropertyTempRange("density", Tc)
        roomTempDens = 3.16
        refTempC = 0.0
        alpha = self.cumulativeLinearExpansion(Tc=Tc)
        # Volume grows with the cube of linear expansion, so density shrinks by its inverse cube.
        return roomTempDens * (1 + alpha * (Tc - refTempC)) ** (-3)

    def thermalConductivity(self, Tc=None, Tk=None):
        """Thermal conductivity in W/(m K) (Munro 1997)."""
        Tc = getTc(Tc, Tk)
        self.checkPropertyTempRange("thermal conductivity", Tc)
        return (52000 * math.exp(-1.24e-5 * Tc)) / (Tc + 437)
================================================
FILE: armi/materials/sodium.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple sodium material.
The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to
this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data
contained in this file should not be used in production simulations.
"""
from armi import runLog
from armi.materials import material
from armi.utils.units import getTc, getTk
class Sodium(material.Fluid):
    """
    Simplified sodium material.

    .. warning:: This is an academic-quality material. Bring in user-provided material
        properties through plugins as necessary.

    Most info from [ANL-RE-95-2]_

    .. [ANL-RE-95-2] Fink, J.K., and Leibowitz, L. Thermodynamic and transport properties of sodium
        liquid and vapor. United States: N. p., 1995. Web. doi:10.2172/94649.
        https://www.osti.gov/biblio/94649-gXNdLI/webviewable/
    """

    propertyValidTemperature = {
        "density": ((97.85, 2230.55), "C"),
        "enthalpy": ((371.0, 2000.0), "K"),
        "thermal conductivity": ((371.5, 1500), "K"),
    }

    def setDefaultMassFracs(self):
        """It's just sodium."""
        self.setMassFrac("NA", 1.0)
        self.refDens = 0.968

    def pseudoDensity(self, Tk=None, Tc=None):
        """
        Returns density of Sodium in g/cc.

        This is from 1.3.1 in [ANL-RE-95-2]_.

        Parameters
        ----------
        Tk : float, optional
            temperature in degrees Kelvin
        Tc : float, optional
            temperature in degrees Celsius

        Returns
        -------
        density : float
            mass density in g/cc
        """
        Tc = getTc(Tc, Tk)
        self.checkPropertyTempRange("density", Tc)
        if (Tc is not None) and (Tc <= 97.72):
            # Below the melting point the liquid correlation is not meaningful.
            runLog.warning(
                "Sodium frozen at Tc: {0}".format(Tc),
                label="Sodium frozen at Tc={0}".format(Tc),
                single=True,
            )

        rhoCrit = 219  # critical density, kg/m^3
        coeffF = 275.32
        coeffG = 511.58
        exponent = 0.5
        Tcrit = 2503.7  # critical temperature, K
        reducedT = 1 - (Tc + 273.15) / Tcrit
        # Correlation yields kg/m^3; divide by 1000 for g/cc.
        return (rhoCrit + coeffF * reducedT + coeffG * reducedT**exponent) / 1000.0

    def specificVolumeLiquid(self, Tk=None, Tc=None):
        """Returns the liquid specific volume in m^3/kg of this material given Tk in K or Tc in C."""
        densityKgPerM3 = 1000.0 * self.pseudoDensity(Tk, Tc)
        return 1 / densityKgPerM3

    def enthalpy(self, Tk=None, Tc=None):
        """
        Return enthalpy in J/kg.

        From [ANL-RE-95-2]_, Table 1.1-2.
        """
        Tk = getTk(Tc, Tk)
        self.checkPropertyTempRange("enthalpy", Tk)
        enthalpyKJperKg = -365.77 + 1.6582 * Tk - 4.2395e-4 * Tk**2 + 1.4847e-7 * Tk**3 + 2992.6 / Tk
        return enthalpyKJperKg * 1000  # convert from kJ/kg to J/kg

    def thermalConductivity(self, Tk=None, Tc=None):
        """
        Returns thermal conductivity of Sodium.

        From [ANL-RE-95-2]_, Table 2.1-2

        Parameters
        ----------
        Tk : float, optional
            temperature in degrees Kelvin
        Tc : float, optional
            temperature in degrees Celsius

        Returns
        -------
        thermalConductivity : float
            thermal conductivity of Sodium (W/m-K)
        """
        Tk = getTk(Tc, Tk)
        self.checkPropertyTempRange("thermal conductivity", Tk)
        return 124.67 - 0.11381 * Tk + 5.5226e-5 * Tk**2 - 1.1842e-8 * Tk**3
================================================
FILE: armi/materials/sodiumChloride.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Sodium Chloride salt.
The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to
this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data
contained in this file should not be used in production simulations.
Notes
-----
This is a very simple description of this material.
"""
from armi.materials.material import SimpleSolid
from armi.utils.units import getTk
class NaCl(SimpleSolid):
    """Simple sodium chloride salt."""

    def setDefaultMassFracs(self):
        """Set natural-isotopic NaCl mass fractions."""
        self.setMassFrac("NA23", 0.3934)
        self.setMassFrac("CL35", 0.4596)
        self.setMassFrac("CL37", 0.1470)

    def density(self, Tk=None, Tc=None):
        """
        Return the density of Sodium Chloride in g/cc.

        Notes
        -----
        Linear fit in temperature, from equation 10 of "Thermophysical
        Properties of NaCl, NaBr and NaF by gamma-ray attenuation technique".
        """
        Tk = getTk(Tc, Tk)
        return 2.23 - 3.130e-04 * Tk
================================================
FILE: armi/materials/sulfur.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sulfur.
The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to
this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data
contained in this file should not be used in production simulations.
"""
from armi import runLog
from armi.materials import material
from armi.utils.mathematics import linearInterpolation
from armi.utils.units import getTk
class Sulfur(material.Fluid):
    """Molten sulfur."""

    propertyValidTemperature = {
        "density": ((334, 430), "K"),
        "volumetric expansion": ((334, 430), "K"),
    }

    def applyInputParams(self, sulfur_density_frac=None, TD_frac=None):
        """Apply density-fraction material modifications.

        ``sulfur_density_frac`` is deprecated in favor of ``TD_frac``; when both
        are provided, ``TD_frac`` takes precedence.
        """
        if sulfur_density_frac is not None:
            runLog.warning(
                "The 'sulfur_density_frac' material modification for Sulfur "
                "will be deprecated. Update your inputs to use 'TD_frac' instead.",
                single=True,
            )
            if TD_frac is not None:
                runLog.warning(
                    f"Both 'sulfur_density_frac' and 'TD_frac' are specified for {self}. 'TD_frac' will be used."
                )
            else:
                self.updateTD(sulfur_density_frac)
        if TD_frac is not None:
            self.updateTD(TD_frac)

    def updateTD(self, TD):
        """Store the density fraction used to scale pseudoDensity()."""
        self.fullDensFrac = float(TD)

    def setDefaultMassFracs(self):
        """Set natural sulfur isotopic mass fractions and a unity density fraction."""
        self.fullDensFrac = 1.0
        self.setMassFrac("S32", 0.9493)
        self.setMassFrac("S33", 0.0076)
        self.setMassFrac("S34", 0.0429)
        self.setMassFrac("S36", 0.002)

    def pseudoDensity(self, Tk=None, Tc=None):
        """Density of Liquid Sulfur in g/cc, scaled by the density fraction.

        Ref: P. Espeau, R. Ceolin "density of molten sulfur in the 334-508K range"

        Notes
        -----
        In ARMI, we define pseudoDensity() and density() as the same for Fluids.
        """
        Tk = getTk(Tc, Tk)
        self.checkPropertyTempRange("density", Tk)
        return (2.18835 - 0.00098187 * Tk) * (self.fullDensFrac)

    def volumetricExpansion(self, Tk=None, Tc=None):
        """
        Volumetric expansion coefficient from a two-point interpolation.

        P. Espeau, R. Ceolin "density of molten sulfur in the 334-508K range"
        """
        Tk = getTk(Tc, Tk)
        self.checkPropertyTempRange("volumetric expansion", Tk)
        # Cleanup: removed an unused (Tmin, Tmax) unpacking of
        # propertyValidTemperature; checkPropertyTempRange already enforces
        # the valid range.
        return linearInterpolation(x0=334, y0=5.28e-4, x1=430, y1=5.56e-4, targetX=Tk)
================================================
FILE: armi/materials/tZM.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TZM.
The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to
this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data
contained in this file should not be used in production simulations.
"""
from numpy import interp
from armi.materials.material import Material
from armi.utils.units import getTc
class TZM(Material):
    """TZM molybdenum alloy."""

    propertyValidTemperature = {"linear expansion percent": ((21.11, 1382.22), "C")}
    references = {
        "linear expansion percent": "Report on the Mechanical and Thermal Properties of Tungsten \
and TZM Sheet Produced in the Refractory Metal Sheet Rolling Program, Part 1 to Bureau \
of Naval Weapons Contract No. N600(19)-59530, Southern Research Institute"
    }

    # Tabulated expansion data: temperatures (C) and corresponding %dL/L values.
    temperatureC = [
        21.11,
        456.11,
        574.44,
        702.22,
        840.56,
        846.11,
        948.89,
        1023.89,
        1146.11,
        1287.78,
        1382.22,
    ]
    percentThermalExpansion = [
        0,
        1.60e-01,
        2.03e-01,
        2.53e-01,
        3.03e-01,
        3.03e-01,
        3.42e-01,
        3.66e-01,
        4.21e-01,
        4.68e-01,
        5.04e-01,
    ]

    def __init__(self):
        Material.__init__(self)
        # Reference density in g/cc.
        self.refDens = 10.16

    def setDefaultMassFracs(self):
        """Set the TZM alloy mass fractions (Mo with minor Ti, Zr, and C additions)."""
        self.setMassFrac("C", 2.50749e-05)
        self.setMassFrac("TI", 0.002502504)
        self.setMassFrac("ZR", 0.000761199)
        self.setMassFrac("MO", 0.996711222)

    def linearExpansionPercent(self, Tk=None, Tc=None):
        """
        Return linear expansion in %dL/L from interpolation of tabular data.

        This function is used to expand a material from its reference temperature (21C)
        to a particular hot temperature.

        Parameters
        ----------
        Tk : float
            temperature in K
        Tc : float
            temperature in C

        Source: Report on the Mechanical and Thermal Properties of Tungsten and TZM Sheet Produced
        in the Refractory Metal Sheet Rolling Program, Part 1 to Bureau of Naval Weapons
        Contract No. N600(19)-59530, 1966 Southern Research Institute.
        See Table viii-b, Appendix B, page 181.
        """
        Tc = getTc(Tc, Tk)
        self.checkPropertyTempRange("linear expansion percent", Tc)
        return interp(Tc, self.temperatureC, self.percentThermalExpansion)
================================================
FILE: armi/materials/tantalum.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tantalum.
The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to
this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data
contained in this file should not be used in production simulations.
"""
from armi.materials.material import SimpleSolid
class Tantalum(SimpleSolid):
    """Pure tantalum metal with a fixed density (demonstration data)."""

    def setDefaultMassFracs(self):
        """Model the material as isotopically pure Ta-181."""
        self.setMassFrac("TA181", 1)

    def density(self, Tk=None, Tc=None):
        """Return density in g/cc; treated here as temperature-independent."""
        return 16.6  # g/cc
================================================
FILE: armi/materials/tests/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
================================================
FILE: armi/materials/tests/test__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module tests the __init__.py file since it has rather unique behavior."""
import unittest
from armi import materials
def betterSubClassCheck(item, superClass):
    """Return whether ``item`` is a class derived from ``superClass``.

    Unlike the raw ``issubclass`` builtin, a non-class ``item`` simply
    yields ``False`` instead of raising ``TypeError``.
    """
    try:
        result = issubclass(item, superClass)
    except TypeError:
        # item was not a class at all
        result = False
    return result
class Materials__init__Tests(unittest.TestCase):
    def test_canAccessClassesFromPackage(self):
        """The package namespace should expose plenty of Material subclasses."""
        found = [obj for obj in vars(materials).values() if betterSubClassCheck(obj, materials.material.Material)]
        self.assertGreater(len(found), 10)

    def test_packageClassesEqualModuleClasses(self):
        """The package-level class must be the same object as the module-level one."""
        self.assertEqual(materials.UraniumOxide, materials.uraniumOxide.UraniumOxide)
================================================
FILE: armi/materials/tests/test_air.py
================================================
# Copyright 2022 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for air materials."""
import math
import unittest
from armi.materials.air import Air
from armi.utils import densityTools
from armi.utils.units import getTc
"""
Reference thermal physical properties from Table A.4 in Incropera, Frank P., et al. Fundamentals of
heat and mass transfer. Vol. 5. New York: Wiley, 2002.
"""
REFERENCE_Tk = [
100,
150,
200,
250,
300,
350,
400,
450,
500,
550,
600,
650,
700,
750,
800,
850,
900,
950,
1000,
1100,
1200,
1300,
1400,
1500,
1600,
1700,
1800,
1900,
2000,
2100,
2200,
2300,
2400,
2500,
3000,
]
REFERENCE_DENSITY_KG_PER_M3 = [
3.5562,
2.3364,
1.7458,
1.3947,
1.1614,
0.995,
0.8711,
0.774,
0.6964,
0.6329,
0.5804,
0.5356,
0.4972,
0.4643,
0.4354,
0.4097,
0.3868,
0.3666,
0.3482,
0.3166,
0.2902,
0.2679,
0.2488,
0.2322,
0.2177,
0.2049,
0.1935,
0.1833,
0.1741,
0.1658,
0.1582,
0.1513,
0.1448,
0.1389,
0.1135,
]
REFERENCE_HEAT_CAPACITY_kJ_PER_KG_K = [
1.032,
1.012,
1.007,
1.006,
1.007,
1.009,
1.014,
1.021,
1.03,
1.04,
1.051,
1.063,
1.075,
1.087,
1.099,
1.11,
1.121,
1.131,
1.141,
1.159,
1.175,
1.189,
1.207,
1.23,
1.248,
1.267,
1.286,
1.307,
1.337,
1.372,
1.417,
1.478,
1.558,
1.665,
2.726,
]
REFERENCE_THERMAL_CONDUCTIVITY_mJ_PER_M_K = [
9.34,
13.8,
18.1,
22.3,
26.3,
30,
33.8,
37.3,
40.7,
43.9,
46.9,
49.7,
52.4,
54.9,
57.3,
59.6,
62,
64.3,
66.7,
71.5,
76.3,
82,
91,
100,
106,
113,
120,
128,
137,
147,
160,
175,
196,
222,
]
class TestAir(unittest.TestCase):
    """unit tests for air materials.

    .. test:: There is a base class for fluid materials.
        :id: T_ARMI_MAT_FLUID1
        :tests: R_ARMI_MAT_FLUID
    """

    def test_pseudoDensity(self):
        """Compare pseudo-density against the Incropera Table A.4 values below 2400 K.

        Incropera, Frank P., et al. Fundamentals of heat and mass transfer.
        Vol. 5. New York: Wiley, 2002.
        """
        air = Air()
        for Tk, refDensity in zip(REFERENCE_Tk, REFERENCE_DENSITY_KG_PER_M3):
            if Tk >= 2400:
                continue
            # evaluate through both the Kelvin and Celsius entry points
            relErr = math.fabs((air.pseudoDensityKgM3(Tk=Tk) - refDensity) / refDensity)
            self.assertLess(relErr, 1e-2)
            relErr = math.fabs((air.pseudoDensityKgM3(Tc=getTc(Tk=Tk)) - refDensity) / refDensity)
            self.assertLess(relErr, 1e-2)

    def test_heatCapacity(self):
        """Compare heat capacity against the Incropera Table A.4 values below 1300 K.

        Incropera, Frank P., et al. Fundamentals of heat and mass transfer.
        Vol. 5. New York: Wiley, 2002.
        """
        air = Air()
        for Tk, refCp in zip(REFERENCE_Tk, REFERENCE_HEAT_CAPACITY_kJ_PER_KG_K):
            if Tk >= 1300:
                continue
            refScaled = refCp * 1e3  # table values are kJ/kg-K
            relErr = math.fabs((air.heatCapacity(Tk=Tk) - refScaled) / refScaled)
            self.assertLess(relErr, 1e-2)

    def test_thermalConductivity(self):
        """Compare thermal conductivity against the Incropera Table A.4 values (200-850 K).

        Incropera, Frank P., et al. Fundamentals of heat and mass transfer.
        Vol. 5. New York: Wiley, 2002.
        """
        air = Air()
        for Tk, refCond in zip(REFERENCE_Tk, REFERENCE_THERMAL_CONDUCTIVITY_mJ_PER_M_K):
            if not 200 < Tk < 850:
                continue
            refScaled = refCond * 1e-3
            relErr = math.fabs((air.thermalConductivity(Tk=Tk) - refScaled) / refScaled)
            self.assertLess(relErr, 1e-2)

    def test_massFrac(self):
        """Reproduce the number ratios results to PNNL-15870 Rev 1."""
        air = Air()
        # expected number-density ratios, index-ordered to match getNDensFromMasses output
        expectedRatios = [("C", 0.000150), ("N", 0.784431), ("O", 0.210748), ("AR", 0.004671)]
        nuclides, nDens = densityTools.getNDensFromMasses(air.pseudoDensity(Tk=300), air.massFrac)
        total = sum(nDens)
        for i, (symbol, refRatio) in enumerate(expectedRatios):
            self.assertLess(abs(nDens[i] / total - refRatio), 1e-4)
            self.assertEqual(nuclides[i].decode(), symbol)

    def test_validRanges(self):
        """Sanity-check the property trends just inside each valid temperature range."""
        air = Air()
        # density falls with temperature
        self.assertLess(air.density(Tk=2399), air.density(Tk=101))
        # heat capacity rises with temperature
        self.assertGreater(air.heatCapacity(Tk=1299), air.heatCapacity(Tk=101))
        # thermal conductivity rises with temperature
        self.assertGreater(air.thermalConductivity(Tk=849), air.thermalConductivity(Tk=201))
================================================
FILE: armi/materials/tests/test_b4c.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for boron carbide."""
import unittest
from armi.materials.b4c import B4C
from armi.materials.tests.test_materials import AbstractMaterialTest
class B4C_TestCase(AbstractMaterialTest, unittest.TestCase):
    MAT_CLASS = B4C

    def setUp(self):
        """Create a nominal B4C plus variants with modified theoretical density."""
        AbstractMaterialTest.setUp(self)
        self.mat = B4C()

        self.B4C_theoretical_density = B4C()
        self.B4C_theoretical_density.applyInputParams(theoretical_density=0.5)

        self.B4C_TD_frac = B4C()
        self.B4C_TD_frac.applyInputParams(TD_frac=0.4)

        # when both parameters are supplied together
        self.B4C_both = B4C()
        self.B4C_both.applyInputParams(theoretical_density=0.5, TD_frac=0.4)

    def test_theoretical_pseudoDensity(self):
        """Pseudo-density should scale with the applied theoretical-density fraction."""
        nominal = self.mat.pseudoDensity(500)
        for variant, tdFrac in [
            (self.B4C_theoretical_density, 0.5),
            (self.B4C_TD_frac, 0.4),
            (self.B4C_both, 0.4),  # TD_frac takes precedence over theoretical_density
        ]:
            self.assertAlmostEqual(nominal * tdFrac / B4C.DEFAULT_THEORETICAL_DENSITY_FRAC, variant.pseudoDensity(500))

    def test_propertyValidTemperature(self):
        self.assertGreater(len(self.mat.propertyValidTemperature), 0)

    def test_variousEdgeCases(self):
        """Mass enrichments outside [0, 1] must be rejected."""
        for badEnrichment in (-0.001, 1.001):
            with self.assertRaises(ValueError):
                self.mat.setNewMassFracsFromMassEnrich(badEnrichment)
================================================
FILE: armi/materials/tests/test_be9.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit test for Beryllium."""
import unittest
from armi.materials.be9 import Be9
from armi.materials.tests import test_materials
class TestBe9(test_materials.AbstractMaterialTest, unittest.TestCase):
    """Beryllium tests."""

    MAT_CLASS = Be9

    def test_pseudoDensity(self):
        """Room-temperature pseudo-density should agree with 1.85 to 0.1%."""
        expected = 1.85
        measured = self.mat.pseudoDensity(Tc=25)
        self.assertAlmostEqual(measured, expected, delta=expected * 0.001)

    def test_propertyValidTemperature(self):
        """Be9 declares at least one property temperature range."""
        self.assertGreater(len(self.mat.propertyValidTemperature), 0)
================================================
FILE: armi/materials/tests/test_fluids.py
================================================
# Copyright 2025 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for fluid-specific behaviors.
The ARMI framework has a lot of thermal expansion machinery that applies to all components
but doesn't make sense for fluids. The tests here help show fluid materials still
play nice with the rest of the framework.
"""
from unittest import TestCase
from armi.materials.material import Fluid, Material
from armi.reactor.components import Circle
from armi.tests import mockRunLogs
class TestFluids(TestCase):
    class MyFluid(Fluid):
        """Stand-in fluid that doesn't provide lots of functionality."""

    class MySolid(Material):
        """Stand-in solid that doesn't provide lots of functionality."""

    def test_fluidDensityWrapperNoWarning(self):
        """Test that Component.material.density does not raise a warning for fluids.

        The framework warns when a component's *material* is asked for density,
        because the component itself is the source of truth for volume and
        composition (and may be thermally expanded), so ``comp.material.density``
        can diverge from ``component.density``. Fluids, however, are never
        thermally expanded by the framework, so for them the call is fine and
        no warning should be emitted.
        """
        cases = [
            (self.MySolid(), 1, "Solids should have the density warning logged."),
            (self.MyFluid(), 0, "Fluids should not have the density warning logged."),
        ]
        for material, expectedWarnings, message in cases:
            self._checkCompDensityLogs(mat=material, nExpectedWarnings=expectedWarnings, msg=message)

    def _checkCompDensityLogs(self, mat: Material, nExpectedWarnings: int, msg: str):
        """Count warnings logged while asking the component's material for density."""
        comp = Circle(name="test", material=mat, Tinput=20, Thot=20, id=0, od=1, mult=1)
        with mockRunLogs.LogCounter() as counter:
            comp.material.density(Tc=comp.temperatureInC)
        self.assertEqual(counter.messageCounts["warning"], nExpectedWarnings, msg=msg)
================================================
FILE: armi/materials/tests/test_graphite.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for graphite material."""
import math
import unittest
from armi.materials.graphite import Graphite
class Graphite_TestCase(unittest.TestCase):
    MAT_CLASS = Graphite

    def setUp(self):
        self.mat = self.MAT_CLASS()

    def test_linearExpansionPercent(self):
        """Spot-check %dL/L at a few temperatures (K) across the tabulated range."""
        places = 2
        for Tk, expected in [(330, 0.013186), (1500, 0.748161), (3000, 2.149009)]:
            self.assertAlmostEqual(self.mat.linearExpansionPercent(Tk), expected, places)

    def test_propertyValidTemperature(self):
        self.assertEqual(len(self.mat.propertyValidTemperature), 0)

    def test_density(self):
        """Test to reproduce density measurements results in table 2 from [INL-EXT-16-38241]."""
        tolerance = 0.01
        measurements = [
            # sample G-348-1
            (22.6, 1.8885),
            (401.6, 1.8772),
            (801.3, 1.8634),
            # sample G-348-2
            (23.5, 1.9001),
            (401.0, 1.8888),
            (800.9, 1.8748),
        ]
        for Tc, refRho in measurements:
            computedRho = self.mat.density(Tc=Tc)
            relErr = abs((refRho - computedRho) / refRho)
            self.assertLess(relErr, tolerance)
================================================
FILE: armi/materials/tests/test_lithium.py
================================================
# Copyright 2022 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for lithium."""
import unittest
from armi.materials.lithium import Lithium
from armi.materials.tests.test_materials import AbstractMaterialTest
class Lithium_TestCase(AbstractMaterialTest, unittest.TestCase):
    MAT_CLASS = Lithium

    def setUp(self):
        """Build a natural lithium plus several LI6-enriched variants."""
        AbstractMaterialTest.setUp(self)
        self.mat = Lithium()

        self.Lithium_LI_wt_frac = Lithium()
        self.Lithium_LI_wt_frac.applyInputParams(LI6_wt_frac=0.5)

        self.Lithium_LI6_wt_frac = Lithium()
        self.Lithium_LI6_wt_frac.applyInputParams(LI6_wt_frac=0.6)

        self.Lithium_both = Lithium()
        self.Lithium_both.applyInputParams(LI6_wt_frac=0.8)

    def test_Lithium_material_modifications(self):
        """The LI6 mass fraction should track the applied input parameter."""
        self.assertEqual(self.mat.getMassFrac("LI6"), 0.0759)
        for variant, expected in [
            (self.Lithium_LI_wt_frac, 0.5),
            (self.Lithium_LI6_wt_frac, 0.6),
            (self.Lithium_both, 0.8),
        ]:
            self.assertAlmostEqual(variant.getMassFrac("LI6"), expected, places=10)

    def test_pseudoDensity(self):
        """Pseudo-density evaluates to the same 0.512 value at both temperatures."""
        for Tc in (100, 200):
            measured = self.mat.pseudoDensity(Tc=Tc)
            self.assertAlmostEqual(measured, 0.512, delta=abs(measured * 0.001))

    def test_meltingPoint(self):
        measured = self.mat.meltingPoint()
        self.assertAlmostEqual(measured, 453.69, delta=abs(measured * 0.001))

    def test_boilingPoint(self):
        measured = self.mat.boilingPoint()
        self.assertAlmostEqual(measured, 1615.0, delta=abs(measured * 0.001))

    def test_heatCapacity(self):
        """Heat capacity evaluates to the same 3570.0 value at both temperatures."""
        for Tc in (100, 200):
            measured = self.mat.heatCapacity(Tc=Tc)
            self.assertAlmostEqual(measured, 3570.0, delta=abs(measured * 0.001))

    def test_propertyValidTemperature(self):
        self.assertEqual(len(self.mat.propertyValidTemperature), 0)
================================================
FILE: armi/materials/tests/test_materials.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests materials.py."""
import math
import pickle
import unittest
from copy import deepcopy
from numpy import testing
from armi import context, materials, settings
from armi.materials import _MATERIAL_NAMESPACE_ORDER, setMaterialNamespaceOrder
from armi.reactor import blueprints
from armi.utils import units
class AbstractMaterialTest:
    """Base for material tests."""

    MAT_CLASS = None
    VALID_TEMP_K = 500

    def setUp(self):
        self.mat = self.MAT_CLASS()

    def test_isPicklable(self):
        """Test that all materials are picklable so we can do MPI communication of state."""
        roundTripped = pickle.loads(pickle.dumps(self.mat))
        # check a property that is sometimes interpolated.
        self.assertEqual(
            self.mat.thermalConductivity(self.VALID_TEMP_K),
            roundTripped.thermalConductivity(self.VALID_TEMP_K),
        )

    def test_density(self):
        """Every material must report a non-zero density."""
        self.assertNotEqual(self.mat.density(self.VALID_TEMP_K), 0)

    def test_TD(self):
        """Exercise the theoretical-density accessors and cache invalidation."""
        self.assertEqual(self.mat.getTD(), self.mat.theoreticalDensityFrac)

        self.mat.clearCache()
        self.mat._setCache("dummy", 666)
        self.assertEqual(self.mat.cached, {"dummy": 666})

        # adjusting TD must also wipe any cached values
        self.mat.adjustTD(0.5)
        self.assertEqual(0.5, self.mat.theoreticalDensityFrac)
        self.assertEqual(self.mat.cached, {})

    def test_duplicate(self):
        """A duplicate shares composition, parentage, and density attributes."""
        clone = self.mat.duplicate()

        self.assertEqual(len(clone.massFrac), len(self.mat.massFrac))
        for nuc, frac in self.mat.massFrac.items():
            self.assertEqual(clone.massFrac[nuc], frac)

        self.assertEqual(clone.parent, self.mat.parent)
        self.assertEqual(clone.refDens, self.mat.refDens)
        self.assertEqual(clone.theoreticalDensityFrac, self.mat.theoreticalDensityFrac)

    def test_cache(self):
        """Values stored in the material cache must be retrievable."""
        self.mat.clearCache()
        self.assertEqual(len(self.mat.cached), 0)

        self.mat._setCache("Emmy", "Noether")
        self.assertEqual(len(self.mat.cached), 1)
        self.assertEqual(self.mat._getCached("Emmy"), "Noether")

    def test_densityKgM3(self):
        """densityKgM3 is exactly 1000x the base density."""
        self.assertEqual(
            self.mat.density(self.VALID_TEMP_K) * 1000.0,
            self.mat.densityKgM3(self.VALID_TEMP_K),
        )

    def test_pseudoDensityKgM3(self):
        """pseudoDensityKgM3 is exactly 1000x the base pseudo-density."""
        self.assertEqual(
            self.mat.pseudoDensity(self.VALID_TEMP_K) * 1000.0,
            self.mat.pseudoDensityKgM3(self.VALID_TEMP_K),
        )

    def test_wrappedDensity(self):
        """Test that the density decorator is applied to non-fluids."""
        self.assertEqual(
            hasattr(self.mat.density, "__wrapped__"),
            not isinstance(self.mat, materials.Fluid),
            msg=self.mat,
        )
class MaterialConstructionTests(unittest.TestCase):
    def test_material_initialization(self):
        """Every registered material class must instantiate without error."""
        for materialClass in materials.iterAllMaterialClassesInNamespace(materials):
            _ = materialClass()
class MaterialFindingTests(unittest.TestCase):
    """Make sure materials are discoverable as designed."""

    def test_findMaterial(self):
        """Test resolveMaterialClassByName() function.

        .. test:: Materials can be grabbed from a list of namespaces.
            :id: T_ARMI_MAT_NAMESPACE0
            :tests: R_ARMI_MAT_NAMESPACE
        """
        # Void resolves from the package, the module, and a multi-entry search order.
        for namespaceOrder in (
            ["armi.materials"],
            ["armi.materials.void"],
            ["armi.materials.mox", "armi.materials.void"],
        ):
            self.assertIs(
                materials.resolveMaterialClassByName("Void", namespaceOrder=namespaceOrder),
                materials.Void,
            )

        # a bogus namespace entry fails even when a valid one follows it
        with self.assertRaises(ModuleNotFoundError):
            materials.resolveMaterialClassByName("Void", namespaceOrder=["invalid.namespace", "armi.materials.void"])

        # an unknown material name fails outright
        with self.assertRaises(KeyError):
            materials.resolveMaterialClassByName("Unobtanium", namespaceOrder=["armi.materials"])

    def __validateMaterialNamespace(self):
        """Helper method to validate the material namespace a little."""
        self.assertIsInstance(_MATERIAL_NAMESPACE_ORDER, list)
        self.assertGreater(len(_MATERIAL_NAMESPACE_ORDER), 0)
        for nameSpace in _MATERIAL_NAMESPACE_ORDER:
            self.assertIsInstance(nameSpace, str)

    @unittest.skipUnless(context.MPI_RANK == 0, "test only on root node")
    def test_namespacing(self):
        """Test loading materials with different material namespaces, to cover how they work.

        .. test:: Material can be found in defined packages.
            :id: T_ARMI_MAT_NAMESPACE1
            :tests: R_ARMI_MAT_NAMESPACE

        .. test:: Material namespaces register materials with an order of priority.
            :id: T_ARMI_MAT_ORDER
            :tests: R_ARMI_MAT_ORDER
        """
        # quick check that a material resolves from the default namespace
        setMaterialNamespaceOrder(["armi.materials"])
        uraniumOxide = materials.resolveMaterialClassByName("UraniumOxide", namespaceOrder=["armi.materials"])
        self.assertGreater(uraniumOxide().density(500), 0)

        # validate the default namespace in ARMI
        self.__validateMaterialNamespace()

        # show that a material namespace can be appended
        newMats = "armi.utils.tests.test_densityTools"
        setMaterialNamespaceOrder(["armi.materials", newMats])
        self.__validateMaterialNamespace()

        # with duplicate materials, the earlier namespace entry wins
        uraniumOxideTest = materials.resolveMaterialClassByName(
            "UraniumOxide", namespaceOrder=[newMats, "armi.materials"]
        )
        for t in range(200, 600):
            self.assertEqual(uraniumOxideTest().density(t), 0)
            self.assertEqual(uraniumOxideTest().pseudoDensity(t), 0)

        # for safety, reset the material namespace list and order
        setMaterialNamespaceOrder(["armi.materials"])
class Californium_TestCase(AbstractMaterialTest, unittest.TestCase):
    MAT_CLASS = materials.Californium

    def test_pseudoDensity(self):
        """Pseudo-density evaluates to exactly 15.1 at both temperatures."""
        for Tk in (923, 1390):
            self.assertEqual(self.mat.pseudoDensity(Tk), 15.1)

    def test_propertyValidTemperature(self):
        self.assertEqual(len(self.mat.propertyValidTemperature), 0)

    def test_porosities(self):
        """Both porosity terms are zero when the material has no parent."""
        self.mat.parent = None
        self.assertEqual(self.mat.liquidPorosity, 0.0)
        self.assertEqual(self.mat.gasPorosity, 0.0)

    def test_getCorrosionRate(self):
        self.assertEqual(self.mat.getCorrosionRate(500), 0.0)
class Cesium_TestCase(AbstractMaterialTest, unittest.TestCase):
    MAT_CLASS = materials.Cs

    def test_pseudoDensity(self):
        """Spot-check pseudo-density at two temperatures, to within 5%."""
        for Tk, expected in [(250, 1.93), (450, 1.843)]:
            self.assertAlmostEqual(self.mat.pseudoDensity(Tk), expected, delta=expected * 0.05)

    def test_propertyValidTemperature(self):
        self.assertEqual(len(self.mat.propertyValidTemperature), 0)
class Magnesium_TestCase(AbstractMaterialTest, unittest.TestCase):
    MAT_CLASS = materials.Magnesium
    VALID_TEMP_K = 1000

    def test_pseudoDensity(self):
        """Spot-check pseudo-density at two temperatures, to within 0.01%."""
        for Tk, expected in [(923, 1.5897), (1390, 1.4661)]:
            self.assertAlmostEqual(self.mat.pseudoDensity(Tk), expected, delta=expected * 0.0001)

    def test_propertyValidTemperature(self):
        self.assertGreater(len(self.mat.propertyValidTemperature), 0)
class MagnesiumOxide_TestCase(AbstractMaterialTest, unittest.TestCase):
    MAT_CLASS = materials.MgO

    def test_pseudoDensity(self):
        """Spot-check pseudo-density at two temperatures, to within 5%."""
        for Tk, expected in [(923, 3.48887), (1250, 3.418434)]:
            self.assertAlmostEqual(self.mat.pseudoDensity(Tk), expected, delta=expected * 0.05)

    def test_linearExpansionPercent(self):
        """Spot-check linear expansion percent at two Celsius temperatures."""
        for Tc, expected in [(100, 0.00110667), (400, 0.0049909)]:
            self.assertAlmostEqual(self.mat.linearExpansionPercent(Tc=Tc), expected, delta=abs(expected * 0.001))

    def test_propertyValidTemperature(self):
        self.assertGreater(len(self.mat.propertyValidTemperature), 0)
class Molybdenum_TestCase(AbstractMaterialTest, unittest.TestCase):
    MAT_CLASS = materials.Molybdenum

    def test_pseudoDensity(self):
        """Pseudo-density evaluates to the same 10.28 value at both temperatures."""
        expected = 10.28
        for Tk in (333, 1390):
            self.assertAlmostEqual(self.mat.pseudoDensity(Tk), expected, delta=expected * 0.0001)

    def test_propertyValidTemperature(self):
        self.assertEqual(len(self.mat.propertyValidTemperature), 0)
class MOX_TestCase(AbstractMaterialTest, unittest.TestCase):
    MAT_CLASS = materials.MOX

    def test_density(self):
        expected = 10.926
        self.assertAlmostEqual(self.mat.density(333), expected, delta=expected * 0.0001)

    def test_getMassFracPuO2(self):
        expected = 0.176067
        self.assertAlmostEqual(self.mat.getMassFracPuO2(), expected, delta=expected * 0.001)

    def test_getMolFracPuO2(self):
        expected = 0.209
        self.assertAlmostEqual(self.mat.getMolFracPuO2(), expected, delta=expected * 0.001)

    def test_getMeltingPoint(self):
        expected = 2996.788765
        self.assertAlmostEqual(self.mat.meltingPoint(), expected, delta=expected * 0.001)

    def test_applyInputParams(self):
        """Default input params must produce the reference MOX composition."""
        expectedMassFracs = {
            "AM241": 0.000998,
            "O16": 0.118643,
            "PU238": 0.000156,
            "PU239": 0.119839,
            "PU240": 0.029999,
            "PU241": 0.00415,
            "PU242": 0.000858,
            "U235": 0.166759,
            "U238": 0.558597,
        }
        self.mat.applyInputParams()
        for name, frac in expectedMassFracs.items():
            self.assertEqual(self.mat.massFrac[name], frac)

        # bonus code coverage for clearMassFrac()
        self.mat.clearMassFrac()
        self.assertEqual(len(self.mat.massFrac), 0)

        # bonus coverage for removeNucMassFrac
        self.mat.removeNucMassFrac("PassWithoutWarning")
        self.assertEqual(len(self.mat.massFrac), 0)
class NaCl_TestCase(AbstractMaterialTest, unittest.TestCase):
    MAT_CLASS = materials.NaCl

    def test_density(self):
        """Spot-check density at two Celsius temperatures, to within 0.1%."""
        for Tc, expected in [(100, 2.113204), (300, 2.050604)]:
            self.assertAlmostEqual(self.mat.density(Tc=Tc), expected, delta=abs(expected * 0.001))

    def test_propertyValidTemperature(self):
        self.assertEqual(len(self.mat.propertyValidTemperature), 0)
class NiobiumZirconium_TestCase(AbstractMaterialTest, unittest.TestCase):
    MAT_CLASS = materials.NZ

    def test_pseudoDensity(self):
        """Pseudo-density evaluates to the same 8.66 value at both temperatures."""
        expected = 8.66
        for Tk in (100, 1390):
            self.assertAlmostEqual(self.mat.pseudoDensity(Tk=Tk), expected, delta=abs(expected * 0.001))

    def test_propertyValidTemperature(self):
        self.assertEqual(len(self.mat.propertyValidTemperature), 0)
class Potassium_TestCase(AbstractMaterialTest, unittest.TestCase):
    MAT_CLASS = materials.Potassium

    def test_pseudoDensity(self):
        """Spot-check pseudo-density across a wide Celsius temperature span."""
        for Tc, expected in [
            (100, 0.8195),
            (333, 0.7664),
            (500, 0.7267),
            (750, 0.6654),
            (1200, 0.5502),
        ]:
            self.assertAlmostEqual(self.mat.pseudoDensity(Tc=Tc), expected, delta=expected * 0.001)

    def test_propertyValidTemperature(self):
        self.assertGreater(len(self.mat.propertyValidTemperature), 0)
class ScandiumOxide_TestCase(AbstractMaterialTest, unittest.TestCase):
    MAT_CLASS = materials.Sc2O3

    def test_pseudoDensity(self):
        expected = 3.86
        self.assertAlmostEqual(self.mat.pseudoDensity(Tc=25), expected, delta=abs(expected * 0.001))

    def test_linearExpansionPercent(self):
        """Spot-check linear expansion percent at two Celsius temperatures."""
        for Tc, expected in [(100, 0.0623499), (400, 0.28322)]:
            self.assertAlmostEqual(self.mat.linearExpansionPercent(Tc=Tc), expected, delta=abs(expected * 0.001))

    def test_propertyValidTemperature(self):
        self.assertGreater(len(self.mat.propertyValidTemperature), 0)
class Sodium_TestCase(AbstractMaterialTest, unittest.TestCase):
    MAT_CLASS = materials.Sodium

    def test_pseudoDensity(self):
        """Spot-check pseudo-density near the ends of the liquid range."""
        for Tk, expected in [(372, 0.92546), (1700, 0.597)]:
            self.assertAlmostEqual(self.mat.pseudoDensity(Tk), expected, delta=expected * 0.001)

    def test_specificVolumeLiquid(self):
        """Spot-check liquid specific volume at the same two temperatures."""
        for Tk, expected in [(372, 0.0010805), (1700, 0.001674)]:
            self.assertAlmostEqual(self.mat.specificVolumeLiquid(Tk), expected, delta=expected * 0.001)

    def test_enthalpy(self):
        """Spot-check enthalpy at the same two temperatures."""
        for Tk, expected in [(372, 208100.1914), (1700, 1959147.963)]:
            self.assertAlmostEqual(self.mat.enthalpy(Tk), expected, delta=expected * 0.001)

    def test_thermalConductivity(self):
        """Spot-check thermal conductivity; it should fall with temperature."""
        for Tk, expected in [(372, 89.36546), (1500, 38.24675)]:
            self.assertAlmostEqual(self.mat.thermalConductivity(Tk), expected, delta=expected * 0.001)

    def test_propertyValidTemperature(self):
        self.assertGreater(len(self.mat.propertyValidTemperature), 0)
class Tantalum_TestCase(AbstractMaterialTest, unittest.TestCase):
    """Tests for the tantalum material."""

    MAT_CLASS = materials.Tantalum

    def test_pseudoDensity(self):
        """Pseudo-density matches 16.6 g/cc at both test temperatures."""
        expected = 16.6
        for tempC in (100, 300):
            actual = self.mat.pseudoDensity(Tc=tempC)
            self.assertAlmostEqual(actual, expected, delta=abs(expected * 0.001))

    def test_propertyValidTemperature(self):
        """Tantalum defines no property validity ranges."""
        self.assertEqual(len(self.mat.propertyValidTemperature), 0)
class ThoriumUraniumMetal_TestCase(AbstractMaterialTest, unittest.TestCase):
    """Tests for the thorium-uranium metal fuel material."""

    MAT_CLASS = materials.ThU

    def test_pseudoDensity(self):
        """Pseudo-density matches 11.68 g/cc at both test temperatures."""
        expected = 11.68
        for tempC in (100, 300):
            actual = self.mat.pseudoDensity(Tc=tempC)
            self.assertAlmostEqual(actual, expected, delta=abs(expected * 0.001))

    def test_meltingPoint(self):
        """Melting point matches the reference value of 2025 K."""
        expected = 2025.0
        self.assertAlmostEqual(self.mat.meltingPoint(), expected, delta=abs(expected * 0.001))

    def test_thermalConductivity(self):
        """Thermal conductivity matches 43.1 at both test temperatures."""
        expected = 43.1
        for tempC in (100, 300):
            actual = self.mat.thermalConductivity(Tc=tempC)
            self.assertAlmostEqual(actual, expected, delta=abs(expected * 0.001))

    def test_linearExpansion(self):
        """Linear expansion matches 11.9e-6 at both test temperatures."""
        expected = 11.9e-6
        for tempC in (100, 300):
            actual = self.mat.linearExpansion(Tc=tempC)
            self.assertAlmostEqual(actual, expected, delta=abs(expected * 0.001))

    def test_propertyValidTemperature(self):
        """Exactly one property validity range is defined."""
        self.assertEqual(len(self.mat.propertyValidTemperature), 1)
class Uranium_TestCase(AbstractMaterialTest, unittest.TestCase):
    """Tests for uranium metal, including enrichment/TD-fraction input params."""

    MAT_CLASS = materials.Uranium

    def test_applyInputParams(self):
        """Enrichment and theoretical-density inputs both take effect."""
        # check the defaults when applyInputParams is applied without arguments
        U235_wt_frac_default = 0.0071136523
        self.mat.applyInputParams()
        self.assertAlmostEqual(self.mat.massFrac["U235"], U235_wt_frac_default)
        # at the first tabulated temperature the density equals the first table
        # entry exactly (no interpolation involved)
        densityTemp = materials.Uranium._densityTableK[0]
        density0 = self.mat.density(Tk=materials.Uranium._densityTableK[0])
        expectedDensity = materials.Uranium._densityTable[0]
        self.assertEqual(density0, expectedDensity)
        # now apply non-default values; TD_frac must scale density linearly
        newWtFrac = 1.0
        newTDFrac = 0.5
        self.mat.applyInputParams(U235_wt_frac=newWtFrac, TD_frac=newTDFrac)
        self.assertEqual(self.mat.massFrac["U235"], newWtFrac)
        self.assertEqual(self.mat.density(Tk=densityTemp), expectedDensity * newTDFrac)
        self.assertAlmostEqual(self.mat.pseudoDensity(Tk=densityTemp), 9.415418593432646)

    def test_thermalConductivity(self):
        """Thermal conductivity at several temperatures, tight 1e-9 tolerance."""
        cur = self.mat.thermalConductivity(Tc=100)
        ref = 28.489312629207500293659904855
        self.assertAlmostEqual(cur, ref, delta=10e-10)
        cur = self.mat.thermalConductivity(Tc=300)
        ref = 32.789271449207497255429188954
        self.assertAlmostEqual(cur, ref, delta=10e-10)
        cur = self.mat.thermalConductivity(Tc=500)
        ref = 37.561790269207499193271360127
        self.assertAlmostEqual(cur, ref, delta=10e-10)
        cur = self.mat.thermalConductivity(Tc=700)
        ref = 42.806869089207502554472739575
        self.assertAlmostEqual(cur, ref, delta=10e-10)
        cur = self.mat.thermalConductivity(Tc=900)
        ref = 48.524507909207507339033327298
        self.assertAlmostEqual(cur, ref, delta=10e-10)

    def test_propertyValidTemperature(self):
        """Properties raise ValueError just outside their validity bounds."""
        self.assertGreater(len(self.mat.propertyValidTemperature), 0)
        # ensure that material properties check the bounds and that the bounds
        # align with what is expected
        for propName, methodName in zip(
            [
                "thermal conductivity",
                "heat capacity",
                "density",
                "linear expansion",
                "linear expansion percent",
            ],
            [
                "thermalConductivity",
                "heatCapacity",
                "density",
                "linearExpansion",
                "linearExpansionPercent",
            ],
        ):
            # each entry maps a display name to a (lower, upper) bound pair
            lowerBound = self.mat.propertyValidTemperature[propName][0][0]
            upperBound = self.mat.propertyValidTemperature[propName][0][1]
            with self.assertRaises(ValueError):
                getattr(self.mat, methodName)(lowerBound - 1)
            with self.assertRaises(ValueError):
                getattr(self.mat, methodName)(upperBound + 1)

    def test_pseudoDensity(self):
        """Pseudo-density at 500 C and 1000 C, 0.1% tolerance."""
        cur = self.mat.pseudoDensity(Tc=500)
        ref = 18.74504534852846
        self.assertAlmostEqual(cur, ref, delta=abs(ref * 0.001))
        cur = self.mat.pseudoDensity(Tc=1000)
        ref = 18.1280492780791
        self.assertAlmostEqual(cur, ref, delta=abs(ref * 0.001))
class UraniumOxide_TestCase(AbstractMaterialTest, unittest.TestCase):
    """Tests for uranium oxide (UO2) fuel."""

    MAT_CLASS = materials.UraniumOxide

    def test_adjustMassEnrichment(self):
        """Mass fractions track the requested U-235 enrichment."""
        o16 = 15.999304875697801
        u235 = 235.043929425
        u238 = 238.050788298
        self.mat.adjustMassEnrichment(0.02)
        gPerMol = 2 * o16 + 0.02 * u235 + 0.98 * u238
        massFracs = self.mat.massFrac
        testing.assert_allclose(massFracs["O"], 2 * o16 / gPerMol, rtol=5e-4)
        testing.assert_allclose(massFracs["U235"], 0.02 * (u235 * 0.02 + u238 * 0.98) / gPerMol, rtol=5e-4)
        testing.assert_allclose(massFracs["U238"], 0.98 * (u235 * 0.02 + u238 * 0.98) / gPerMol, rtol=5e-4)
        # re-adjusting to a different enrichment must fully replace the old split
        self.mat.adjustMassEnrichment(0.2)
        massFracs = self.mat.massFrac
        gPerMol = 2 * o16 + 0.8 * u238 + 0.2 * u235
        testing.assert_allclose(massFracs["O"], 2 * o16 / gPerMol, rtol=5e-4)
        testing.assert_allclose(massFracs["U235"], 0.2 * (u235 * 0.2 + u238 * 0.8) / gPerMol, rtol=5e-4)
        testing.assert_allclose(massFracs["U238"], 0.8 * (u235 * 0.2 + u238 * 0.8) / gPerMol, rtol=5e-4)

    def test_meltingPoint(self):
        """Melting point is exactly the reference constant 3123 K."""
        cur = self.mat.meltingPoint()
        self.assertEqual(cur, 3123.0)

    def test_density(self):
        """Density at two temperatures within 2% of published values."""
        # Reference data taken from ORNL/TM-2000/351. "Thermophysical Properties of MOX and UO2
        # Fuels Including the Effects of Irradiation.", Popov, et al. Table 3.2 "Parameters of
        # thermal expansion of stoichiometric MOX fuel and density of UO2 as a function of
        # temperature"
        cur = self.mat.density(Tk=700)
        ref = 1.0832e4 * 0.001  # Convert to grams/cc
        delta = ref * 0.02
        self.assertAlmostEqual(cur, ref, delta=delta)
        cur = self.mat.density(Tk=2600)
        ref = 9.9698e3 * 0.001  # Convert to grams/cc
        delta = ref * 0.02
        self.assertAlmostEqual(cur, ref, delta=delta)

    def test_thermalConductivity(self):
        """Thermal conductivity at three temperatures to 3 decimal places."""
        cur = self.mat.thermalConductivity(600)
        ref = 4.864
        accuracy = 3
        self.assertAlmostEqual(cur, ref, accuracy)
        cur = self.mat.thermalConductivity(1800)
        ref = 2.294
        accuracy = 3
        self.assertAlmostEqual(cur, ref, accuracy)
        cur = self.mat.thermalConductivity(2700)
        ref = 1.847
        accuracy = 3
        self.assertAlmostEqual(cur, ref, accuracy)

    def test_linearExpansion(self):
        """Linear expansion coefficient at three temperatures."""
        cur = self.mat.linearExpansion(300)
        ref = 9.93e-6
        accuracy = 2
        self.assertAlmostEqual(cur, ref, accuracy)
        cur = self.mat.linearExpansion(1500)
        ref = 1.0639e-5
        accuracy = 2
        self.assertAlmostEqual(cur, ref, accuracy)
        cur = self.mat.linearExpansion(3000)
        ref = 1.5821e-5
        accuracy = 2
        self.assertAlmostEqual(cur, ref, accuracy)

    def test_linearExpansionPercent(self):
        """Linear expansion percent at two temperatures, 0.1% tolerance."""
        cur = self.mat.linearExpansionPercent(Tk=500)
        ref = 0.222826
        self.assertAlmostEqual(cur, ref, delta=abs(ref * 0.001))
        cur = self.mat.linearExpansionPercent(Tk=950)
        ref = 0.677347
        self.assertAlmostEqual(cur, ref, delta=abs(ref * 0.001))

    def test_heatCapacity(self):
        """Check against Figure 4.2 from ORNL 2000-1723 EFG."""
        self.assertAlmostEqual(self.mat.heatCapacity(300), 230.0, delta=20)
        self.assertAlmostEqual(self.mat.heatCapacity(1000), 320.0, delta=20)
        self.assertAlmostEqual(self.mat.heatCapacity(2000), 380.0, delta=20)

    def test_getTemperatureAtDensity(self):
        """Inverting density() recovers the original temperature."""
        expectedTemperature = 100.0
        tAtTargetDensity = self.mat.getTemperatureAtDensity(self.mat.density(Tc=expectedTemperature), 30.0)
        self.assertAlmostEqual(expectedTemperature, tAtTargetDensity)

    def test_getDensityExpansion3D(self):
        """densityKgM3 agrees with the reference to within 0.5% relative error."""
        expectedTemperature = 100.0
        ref_density = 10.86792660463439e3
        test_density = self.mat.densityKgM3(Tc=expectedTemperature)
        error = math.fabs((ref_density - test_density) / ref_density)
        self.assertLess(error, 0.005)

    def test_removeNucMassFrac(self):
        """Removing oxygen leaves only the uranium isotopes."""
        self.mat.removeNucMassFrac("O")
        massFracs = [str(k) for k in self.mat.massFrac.keys()]
        self.assertListEqual(["U235", "U238"], massFracs)

    def test_densityTimesHeatCapactiy(self):
        """rho*Cp equals density (kg/m3 via *1000) times heat capacity."""
        Tc = 500.0
        expectedRhoCp = self.mat.density(Tc=Tc) * 1000.0 * self.mat.heatCapacity(Tc=Tc)
        self.assertAlmostEqual(expectedRhoCp, self.mat.densityTimesHeatCapacity(Tc=Tc))

    def test_getTempChangeForDensityChange(self):
        """Small density perturbation maps to dT via the linear expansion model."""
        Tc = 500.0
        linearExpansion = self.mat.linearExpansion(Tc=Tc)
        densityFrac = 1.001
        # density ~ 1/L^3, so a density ratio implies a cube-root length change
        linearChange = densityFrac ** (-1.0 / 3.0) - 1.0
        expectedDeltaT = linearChange / linearExpansion
        actualDeltaT = self.mat.getTempChangeForDensityChange(Tc, densityFrac, quiet=False)
        self.assertAlmostEqual(expectedDeltaT, actualDeltaT)

    def test_duplicate(self):
        """Test the material duplication.

        .. test:: Materials shall calc mass fracs at init.
            :id: T_ARMI_MAT_FRACS4
            :tests: R_ARMI_MAT_FRACS
        """
        duplicateU = self.mat.duplicate()
        for key in self.mat.massFrac:
            self.assertEqual(duplicateU.massFrac[key], self.mat.massFrac[key])
        duplicateMassFrac = deepcopy(self.mat.massFrac)
        for key in self.mat.massFrac.keys():
            self.assertEqual(duplicateMassFrac[key], self.mat.massFrac[key])

    def test_propertyValidTemperature(self):
        """At least one property validity range is defined."""
        self.assertGreater(len(self.mat.propertyValidTemperature), 0)

    def test_applyInputParams(self):
        """TD_frac scales both density and pseudoDensity proportionally."""
        UO2_TD = materials.UraniumOxide()
        original = UO2_TD.density(500)
        UO2_TD.applyInputParams(TD_frac=0.1)
        new = UO2_TD.density(500)
        ratio = new / original
        self.assertAlmostEqual(ratio, 0.1)
        UO2_TD = materials.UraniumOxide()
        original = UO2_TD.pseudoDensity(500)
        UO2_TD.applyInputParams(TD_frac=0.1)
        new = UO2_TD.pseudoDensity(500)
        ratio = new / original
        self.assertAlmostEqual(ratio, 0.1)
class Thorium_TestCase(AbstractMaterialTest, unittest.TestCase):
    """Tests for thorium metal."""

    MAT_CLASS = materials.Thorium

    def test_setDefaultMassFracs(self):
        """
        Test default mass fractions.

        .. test:: The materials generate nuclide mass fractions.
            :id: T_ARMI_MAT_FRACS0
            :tests: R_ARMI_MAT_FRACS
        """
        self.mat.setDefaultMassFracs()
        self.assertEqual(self.mat.massFrac, {"TH232": 1.0})

    def test_pseudoDensity(self):
        """Pseudo-density at 30 K-argument matches the reference constant."""
        self.assertAlmostEqual(self.mat.pseudoDensity(30), 11.68, 4)

    def test_linearExpansion(self):
        """Linear expansion coefficient at 400."""
        self.assertAlmostEqual(self.mat.linearExpansion(400), 11.9e-6, 4)

    def test_thermalConductivity(self):
        """Thermal conductivity at 400."""
        self.assertAlmostEqual(self.mat.thermalConductivity(400), 43.1, 4)

    def test_meltingPoint(self):
        """Melting point is 2025 K."""
        self.assertAlmostEqual(self.mat.meltingPoint(), 2025.0, 4)

    def test_propertyValidTemperature(self):
        """At least one property validity range is defined."""
        self.assertGreater(len(self.mat.propertyValidTemperature), 0)
class ThoriumOxide_TestCase(AbstractMaterialTest, unittest.TestCase):
    """Tests for thorium oxide (ThO2)."""

    MAT_CLASS = materials.ThoriumOxide

    def test_density(self):
        """Density at 25 C, before and after a TD_frac modification."""
        expected = 10.00
        self.assertAlmostEqual(self.mat.density(Tc=25), expected, 4)
        # make sure that material modifications are correctly applied
        self.mat.applyInputParams(TD_frac=0.1)
        self.assertAlmostEqual(self.mat.density(Tc=25), expected * 0.1, 4)

    def test_linearExpansion(self):
        """Linear expansion coefficient at 400."""
        self.assertAlmostEqual(self.mat.linearExpansion(400), 9.67e-6, 4)

    def test_thermalConductivity(self):
        """Thermal conductivity at 400."""
        self.assertAlmostEqual(self.mat.thermalConductivity(400), 6.20, 4)

    def test_meltingPoint(self):
        """Melting point is 3643 K."""
        self.assertAlmostEqual(self.mat.meltingPoint(), 3643.0, 4)

    def test_propertyValidTemperature(self):
        """At least one property validity range is defined."""
        self.assertGreater(len(self.mat.propertyValidTemperature), 0)
class Void_TestCase(AbstractMaterialTest, unittest.TestCase):
    """Tests for the Void placeholder material."""

    MAT_CLASS = materials.Void

    def test_pseudoDensity(self):
        """This material has no pseudo-density."""
        self.mat.setDefaultMassFracs()
        self.assertEqual(self.mat.pseudoDensity(), 0.0)

    def test_density(self):
        """This material has no density."""
        self.assertEqual(self.mat.density(500), 0)
        self.mat.setDefaultMassFracs()
        self.assertEqual(self.mat.density(), 0.0)

    def test_linearExpansion(self):
        """This material does not expand linearly."""
        self.assertEqual(self.mat.linearExpansion(400), 0.0)

    def test_propertyValidTemperature(self):
        """This material has no valid temperatures."""
        self.assertEqual(len(self.mat.propertyValidTemperature), 0)
class Mixture_TestCase(AbstractMaterialTest, unittest.TestCase):
    """Tests for the private _Mixture base material."""

    MAT_CLASS = materials._Mixture

    def test_density(self):
        """This material has no density function."""
        self.assertEqual(self.mat.density(500), 0)

    def test_setDefaultMassFracs(self):
        """
        Test default mass fractions.

        .. test:: The materials generate nuclide mass fractions.
            :id: T_ARMI_MAT_FRACS1
            :tests: R_ARMI_MAT_FRACS
        """
        self.mat.setDefaultMassFracs()
        self.assertEqual(self.mat.pseudoDensity(500), 0.0)

    def test_linearExpansion(self):
        """Linear expansion is not implemented for a bare mixture."""
        with self.assertRaises(NotImplementedError):
            _ = self.mat.linearExpansion(400)

    def test_propertyValidTemperature(self):
        """No property validity ranges are defined."""
        self.assertEqual(len(self.mat.propertyValidTemperature), 0)
class Lead_TestCase(AbstractMaterialTest, unittest.TestCase):
    """Tests for the liquid lead coolant material."""

    MAT_CLASS = materials.Lead
    VALID_TEMP_K = 600

    def test_volumetricExpansion(self):
        """Volumetric expansion coefficient at 800 K and 1200 K."""
        for tempK, expected in ((800, 1.1472e-4), (1200, 1.20237e-4)):
            actual = self.mat.volumetricExpansion(tempK)
            self.assertAlmostEqual(
                actual,
                expected,
                4,
                msg="\n\nIncorrect Lead volumetricExpansion(Tk=None,Tc=None)\nReceived:{}\nExpected:{}\n".format(
                    actual, expected
                ),
            )

    def test_linearExpansion(self):
        """Unit tests for lead materials linear expansion.

        .. test:: Fluid materials do not linearly expand, at any temperature.
            :id: T_ARMI_MAT_FLUID2
            :tests: R_ARMI_MAT_FLUID
        """
        for tempK in range(300, 901, 25):
            self.assertEqual(self.mat.linearExpansion(tempK), 0)

    def test_setDefaultMassFracs(self):
        """
        Test default mass fractions.

        .. test:: The materials generate nuclide mass fractions.
            :id: T_ARMI_MAT_FRACS2
            :tests: R_ARMI_MAT_FRACS
        """
        self.mat.setDefaultMassFracs()
        self.assertEqual(self.mat.massFrac, {"PB": 1})

    def test_pseudoDensity(self):
        """Pseudo-density near melting and near boiling, 5% tolerance."""
        for tempK, expected in ((634.39, 10.6120), (1673.25, 9.4231)):
            actual = self.mat.pseudoDensity(tempK)
            self.assertAlmostEqual(actual, expected, delta=expected * 0.05)

    def test_heatCapacity(self):
        """Heat capacity at 1200 K, 5% tolerance."""
        expected = 138.647
        actual = self.mat.heatCapacity(1200)
        self.assertAlmostEqual(actual, expected, delta=expected * 0.05)

    def test_propertyValidTemperature(self):
        """At least one property validity range is defined."""
        self.assertGreater(len(self.mat.propertyValidTemperature), 0)
class LeadBismuth_TestCase(AbstractMaterialTest, unittest.TestCase):
    """Tests for the lead-bismuth eutectic coolant material."""

    MAT_CLASS = materials.LeadBismuth

    def test_setDefaultMassFracs(self):
        """
        Test default mass fractions.

        .. test:: The materials generate nuclide mass fractions.
            :id: T_ARMI_MAT_FRACS3
            :tests: R_ARMI_MAT_FRACS
        """
        self.mat.setDefaultMassFracs()
        self.assertEqual(self.mat.massFrac, {"BI209": 0.555, "PB": 0.445})

    def test_pseudoDensity(self):
        """Pseudo-density at two temperatures, 5% tolerance."""
        for tempK, expected in ((404.77, 10.5617), (1274.20, 9.3627)):
            actual = self.mat.pseudoDensity(tempK)
            self.assertAlmostEqual(actual, expected, delta=expected * 0.05)

    def test_volumetricExpansion(self):
        """Volumetric expansion coefficient at 400 K and 800 K."""
        for tempK, expected in ((400, 1.2526e-4), (800, 1.3187e-4)):
            self.assertAlmostEqual(self.mat.volumetricExpansion(tempK), expected, 4)

    def test_heatCapacity(self):
        """Heat capacity at 400 K and 800 K, 5% tolerance."""
        for tempK, expected in ((400, 149.2592), (800, 141.7968)):
            actual = self.mat.heatCapacity(tempK)
            self.assertAlmostEqual(actual, expected, delta=expected * 0.05)

    def test_getTempChangeForDensityChange(self):
        """dT from a density perturbation agrees with inverting the density."""
        Tc = 800.0
        densityFrac = 1.001
        baseDensity = self.mat.pseudoDensity(Tc=Tc)
        perturbedDensity = baseDensity * densityFrac
        tAtPerturbedDensity = self.mat.getTemperatureAtDensity(perturbedDensity, Tc)
        expectedDeltaT = tAtPerturbedDensity - Tc
        actualDeltaT = self.mat.getTempChangeForDensityChange(Tc, densityFrac, quiet=False)
        self.assertAlmostEqual(expectedDeltaT, actualDeltaT)

    def test_dynamicVisc(self):
        """Dynamic viscosity at 150 C and 200 C, 0.1% of the computed value."""
        actual = self.mat.dynamicVisc(Tc=150)
        self.assertAlmostEqual(actual, 0.0029355, delta=actual * 0.001)
        actual = self.mat.dynamicVisc(Tc=200)
        self.assertAlmostEqual(actual, 0.0024316, delta=actual * 0.001)

    def test_propertyValidTemperature(self):
        """At least one property validity range is defined."""
        self.assertGreater(len(self.mat.propertyValidTemperature), 0)
class Copper_TestCase(AbstractMaterialTest, unittest.TestCase):
    """Tests for natural copper."""

    MAT_CLASS = materials.Cu

    def test_setDefaultMassFracs(self):
        """Default composition is the natural Cu-63/Cu-65 split."""
        self.assertEqual(self.mat.massFrac, {"CU63": 0.6915, "CU65": 0.3085})

    def test_densityNeverChanges(self):
        """Density evaluates to the same constant at all temperatures."""
        for tempK in (200.0, 400.0, 800.0, 1111.1):
            self.assertAlmostEqual(self.mat.density(tempK), 8.913, 4)

    def test_linearExpansionPercent(self):
        """Linear expansion percent at three temperatures."""
        for tempK, expected in zip([100.0, 200.0, 600.0], [-0.2955, -0.1500, 0.5326]):
            self.assertAlmostEqual(self.mat.linearExpansionPercent(Tk=tempK), expected, 4)

    def test_getChildren(self):
        """Materials are leaf nodes and have no children."""
        self.assertEqual(len(self.mat.getChildren()), 0)

    def test_getChildrenWithFlags(self):
        """Flag-filtered child lookup is also empty for a material."""
        self.assertEqual(len(self.mat.getChildrenWithFlags("anything")), 0)
class Sulfur_TestCase(AbstractMaterialTest, unittest.TestCase):
    """Tests for sulfur."""

    MAT_CLASS = materials.Sulfur
    VALID_TEMP_K = 400

    def test_setDefaultMassFracs(self):
        """Default composition is the natural sulfur isotopic split."""
        expected = {"S34": 0.0429, "S36": 0.002, "S33": 0.0076, "S32": 0.9493}
        self.assertEqual(self.mat.massFrac, expected)

    def test_pseudoDensity(self):
        """Pseudo-density at 400."""
        self.assertAlmostEqual(self.mat.pseudoDensity(400), 1.7956, 4)

    def test_volumetricExpansion(self):
        """Volumetric expansion coefficient at 334."""
        self.assertAlmostEqual(self.mat.volumetricExpansion(334), 5.28e-4, 4)

    def test_propertyValidTemperature(self):
        """At least one property validity range is defined."""
        self.assertGreater(len(self.mat.propertyValidTemperature), 0)
class Zr_TestCase(AbstractMaterialTest, unittest.TestCase):
    """Tests for zirconium metal."""

    MAT_CLASS = materials.Zr

    def test_thermalConductivity(self):
        """Thermal conductivity at two temperatures (default tolerance)."""
        cur = self.mat.thermalConductivity(372.7273)
        ref = 19.8718698709447
        self.assertAlmostEqual(cur, ref)
        cur = self.mat.thermalConductivity(1172.727)
        ref = 23.193177102455
        self.assertAlmostEqual(cur, ref)

    def test_linearExpansion(self):
        """Linear expansion coefficient at 400 and 800, 5% tolerance."""
        cur = self.mat.linearExpansion(400)
        ref = 5.9e-6
        delta = ref * 0.05
        self.assertAlmostEqual(cur, ref, delta=delta)
        cur = self.mat.linearExpansion(800)
        ref = 7.9e-6
        delta = ref * 0.05
        self.assertAlmostEqual(cur, ref, delta=delta)

    def test_linearExpansionPercent(self):
        """Expansion percent over a temperature sweep, via Tc and Tk paths.

        NOTE(review): the expected values dip between 1100 K and 1137 K --
        presumably the alpha/beta phase transition; confirm against the
        material model.
        """
        testTemperaturesInK = [
            293,
            400,
            500,
            600,
            700,
            800,
            900,
            1000,
            1100,
            1137,
            1200,
            1400,
            1600,
            1800,
        ]
        expectedLinearExpansionValues = [
            0.0007078312624,
            0.0602048,
            0.123025,
            0.1917312,
            0.2652626,
            0.3425584,
            0.4225578,
            0.5042,
            0.5864242,
            0.481608769233,
            0.5390352,
            0.7249496,
            0.9221264,
            1.1380488,
        ]
        for i, temp in enumerate(testTemperaturesInK):
            # both the Tc and Tk call paths must agree with the same reference
            Tk = temp
            Tc = temp - units.C_TO_K
            self.assertAlmostEqual(self.mat.linearExpansionPercent(Tc=Tc), expectedLinearExpansionValues[i])
            self.assertAlmostEqual(self.mat.linearExpansionPercent(Tk=Tk), expectedLinearExpansionValues[i])

    def test_pseudoDensity(self):
        """Pseudo-density over a temperature sweep, via Tc and Tk paths."""
        testTemperaturesInK = [
            293,
            298.15,
            400,
            500,
            600,
            700,
            800,
            900,
            1000,
            1100,
            1137,
            1200,
            1400,
            1600,
            1800,
        ]
        expectedDensityValues = [
            6.56990469455,
            6.56955491852,
            6.56209393299,
            6.55386200572,
            6.54487650252,
            6.53528040809,
            6.52521578203,
            6.51482358662,
            6.50424356114,
            6.49361414192,
            6.50716858169,
            6.49973710507,
            6.47576529821,
            6.45048593916,
            6.4229727005,
        ]
        for i, temp in enumerate(testTemperaturesInK):
            # both the Tc and Tk call paths must agree with the same reference
            Tk = temp
            Tc = temp - units.C_TO_K
            self.assertAlmostEqual(self.mat.pseudoDensity(Tc=Tc), expectedDensityValues[i])
            self.assertAlmostEqual(self.mat.pseudoDensity(Tk=Tk), expectedDensityValues[i])

    def test_propertyValidTemperature(self):
        """At least one property validity range is defined."""
        self.assertGreater(len(self.mat.propertyValidTemperature), 0)
class Inconel_TestCase(AbstractMaterialTest, unittest.TestCase):
    """Tests covering the Inconel, Inconel800, and InconelPE16 alloys together."""

    def setUp(self):
        # Build all three variants; self.mat points at the base alloy so the
        # generic AbstractMaterialTest checks run against it.
        self.Inconel = materials.Inconel()
        self.Inconel800 = materials.Inconel800()
        self.InconelPE16 = materials.InconelPE16()
        self.mat = self.Inconel

    def tearDown(self):
        self.Inconel = None
        self.Inconel800 = None
        self.InconelPE16 = None

    def test_setDefaultMassFracs(self):
        """Spot-check one default mass fraction from each variant."""
        self.Inconel.setDefaultMassFracs()
        self.Inconel800.setDefaultMassFracs()
        self.InconelPE16.setDefaultMassFracs()
        self.assertAlmostEqual(self.Inconel.getMassFrac("MO"), 0.09)
        self.assertAlmostEqual(self.Inconel800.getMassFrac("AL"), 0.00375)
        self.assertAlmostEqual(self.InconelPE16.getMassFrac("CR"), 0.165)

    def test_pseudoDensity(self):
        """Pseudo-densities near room temperature are exact constants."""
        self.assertEqual(self.Inconel.pseudoDensity(Tc=25), 8.3600)
        self.assertEqual(self.Inconel800.pseudoDensity(Tc=21.0), 7.94)
        self.assertEqual(self.InconelPE16.pseudoDensity(Tc=25), 8.00)

    def test_Inconel800_linearExpansion(self):
        """Check Inconel 800 linear expansion percent at 100-800 C.

        Fix: method was named ``test_Iconel800_linearExpansion`` (typo); still
        discovered by unittest via the ``test_`` prefix.
        """
        TcList = [100, 200, 300, 400, 500, 600, 700, 800]
        refList = [
            0.11469329415,
            0.27968864560,
            0.454195022850,
            0.63037690440,
            0.80645936875,
            0.98672809440,
            1.18152935985,
            1.4072700436,
        ]
        for Tc, val in zip(TcList, refList):
            cur = self.Inconel800.linearExpansionPercent(Tc=Tc)
            ref = val
            errorMsg = "\n\nIncorrect Inconel 800 linearExpansionPercent()\nReceived:{}\nExpected:{}\n".format(cur, ref)
            self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg)

    def test_propertyValidTemperature(self):
        """Only Inconel 800 defines property validity ranges."""
        self.assertEqual(len(self.Inconel.propertyValidTemperature), 0)
        self.assertGreater(len(self.Inconel800.propertyValidTemperature), 0)
        self.assertEqual(len(self.InconelPE16.propertyValidTemperature), 0)
        self.assertEqual(len(self.mat.propertyValidTemperature), 0)
class Inconel600_TestCase(AbstractMaterialTest, unittest.TestCase):
    """Tests for the Inconel 600 nickel alloy."""

    MAT_CLASS = materials.Inconel600

    def test_00_setDefaultMassFracs(self):
        """Default composition matches the reference mass fractions."""
        massFracNameList = ["NI", "CR", "FE", "C", "MN55", "S", "SI", "CU"]
        massFracRefValList = [
            0.7541,
            0.1550,
            0.0800,
            0.0008,
            0.0050,
            0.0001,
            0.0025,
            0.0025,
        ]
        for name, frac in zip(massFracNameList, massFracRefValList):
            cur = self.mat.getMassFrac(name)
            ref = frac
            self.assertAlmostEqual(cur, ref)

    def test_01_linearExpansionPercent(self):
        """Linear expansion percent at 100-800 C against reference values."""
        TcList = [100, 200, 300, 400, 500, 600, 700, 800]
        refList = [
            0.105392,
            0.24685800000000002,
            0.39576799999999995,
            0.552122,
            0.7159199999999999,
            0.8871619999999999,
            1.065848,
            1.251978,
        ]
        for Tc, val in zip(TcList, refList):
            cur = self.mat.linearExpansionPercent(Tc=Tc)
            ref = val
            errorMsg = (
                "\n\nIncorrect Inconel 600 linearExpansionPercent(Tk=None,Tc=None)\nReceived:{}\nExpected:{}\n".format(
                    cur, ref
                )
            )
            self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg)

    def test_02_linearExpansion(self):
        """Linear expansion coefficient at 100-800 C against reference values."""
        TcList = [100, 200, 300, 400, 500, 600, 700, 800]
        refList = [
            1.3774400000000001e-05,
            1.45188e-05,
            1.52632e-05,
            1.60076e-05,
            1.6752e-05,
            1.74964e-05,
            1.82408e-05,
            1.8985200000000002e-05,
        ]
        for Tc, val in zip(TcList, refList):
            cur = self.mat.linearExpansion(Tc=Tc)
            ref = val
            errorMsg = "\n\nIncorrect Inconel 600 linearExpansion(Tk=None,Tc=None)\nReceived:{}\nExpected:{}\n".format(
                cur, ref
            )
            self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg)

    def test_03_pseudoDensity(self):
        """Pseudo-density at 100-800 C against reference values."""
        TcList = [100, 200, 300, 400, 500, 600, 700, 800]
        refList = [
            8.452174779681522,
            8.428336592376965,
            8.40335281361706,
            8.377239465159116,
            8.35001319823814,
            8.321691270531865,
            8.292291522488402,
            8.261832353071625,
        ]
        for Tc, val in zip(TcList, refList):
            cur = self.mat.pseudoDensity(Tc=Tc)
            ref = val
            errorMsg = "\n\nIncorrect Inconel 600 pseudoDensity(Tk=None,Tc=None)\nReceived:{}\nExpected:{}\n".format(
                cur, ref
            )
            self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg)

    def test_heatCapacity(self):
        """Heat capacity at two temperatures, 0.1% relative tolerance."""
        ref = self.mat.heatCapacity(Tc=100)
        cur = 461.947021
        self.assertAlmostEqual(ref, cur, delta=cur * 0.001)
        ref = self.mat.heatCapacity(Tc=200)
        cur = 482.742084
        self.assertAlmostEqual(ref, cur, delta=cur * 0.001)

    def test_propertyValidTemperature(self):
        """At least one property validity range is defined."""
        self.assertGreater(len(self.mat.propertyValidTemperature), 0)
class Inconel625_TestCase(AbstractMaterialTest, unittest.TestCase):
    """Tests for the Inconel 625 nickel alloy."""

    MAT_CLASS = materials.Inconel625

    def test_00_setDefaultMassFracs(self):
        """Default composition matches the reference mass fractions."""
        massFracNameList = [
            "NI",
            "CR",
            "FE",
            "MO",
            "TA181",
            "C",
            "MN55",
            "SI",
            "P31",
            "S",
            "AL27",
            "TI",
            "CO59",
        ]
        massFracRefValList = [
            0.6188,
            0.2150,
            0.0250,
            0.0900,
            0.0365,
            0.0005,
            0.0025,
            0.0025,
            0.0001,
            0.0001,
            0.0020,
            0.0020,
            0.0050,
        ]
        for name, frac in zip(massFracNameList, massFracRefValList):
            cur = self.mat.getMassFrac(name)
            ref = frac
            self.assertAlmostEqual(cur, ref)

    def test_01_linearExpansionPercent(self):
        """Linear expansion percent at 100-800 C against reference values."""
        TcList = [100, 200, 300, 400, 500, 600, 700, 800]
        refList = [
            0.09954299999999999,
            0.22729199999999997,
            0.36520699999999995,
            0.513288,
            0.671535,
            0.8399479999999999,
            1.018527,
            1.207272,
        ]
        for Tc, val in zip(TcList, refList):
            cur = self.mat.linearExpansionPercent(Tc=Tc)
            ref = val
            errorMsg = (
                "\n\nIncorrect Inconel 625 linearExpansionPercent(Tk=None,Tc=None)\nReceived:{}\nExpected:{}\n".format(
                    cur, ref
                )
            )
            self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg)

    def test_02_linearExpansion(self):
        """Linear expansion coefficient at 100-800 C against reference values."""
        TcList = [100, 200, 300, 400, 500, 600, 700, 800]
        refList = [
            1.22666e-05,
            1.32832e-05,
            1.4299800000000002e-05,
            1.53164e-05,
            1.6333e-05,
            1.73496e-05,
            1.83662e-05,
            1.93828e-05,
        ]
        for Tc, val in zip(TcList, refList):
            cur = self.mat.linearExpansion(Tc=Tc)
            ref = val
            errorMsg = "\n\nIncorrect Inconel 625 linearExpansion(Tk=None,Tc=None)\nReceived:{}\nExpected:{}\n".format(
                cur, ref
            )
            self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg)

    def test_03_pseudoDensity(self):
        """Pseudo-density at 100-800 C against reference values."""
        TcList = [100, 200, 300, 400, 500, 600, 700, 800]
        refList = [
            8.423222197446128,
            8.401763522409897,
            8.378689129846913,
            8.354019541533887,
            8.327776582263244,
            8.299983337593213,
            8.270664109510587,
            8.239844370152333,
        ]
        for Tc, val in zip(TcList, refList):
            cur = self.mat.pseudoDensity(Tc=Tc)
            ref = val
            errorMsg = "\n\nIncorrect Inconel 625 pseudoDensity(Tk=None,Tc=None)\nReceived:{}\nExpected:{}\n".format(
                cur, ref
            )
            self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg)

    def test_heatCapacity(self):
        """Heat capacity at two temperatures, 0.1% relative tolerance."""
        ref = self.mat.heatCapacity(Tc=300)
        cur = 478.776007
        self.assertAlmostEqual(ref, cur, delta=cur * 0.001)
        ref = self.mat.heatCapacity(Tc=400)
        cur = 503.399568
        self.assertAlmostEqual(ref, cur, delta=cur * 0.001)

    def test_propertyValidTemperature(self):
        """At least one property validity range is defined."""
        self.assertGreater(len(self.mat.propertyValidTemperature), 0)
class InconelX750_TestCase(AbstractMaterialTest, unittest.TestCase):
    """Tests for the Inconel X-750 nickel alloy."""

    MAT_CLASS = materials.InconelX750

    def test_00_setDefaultMassFracs(self):
        """Default composition matches the reference mass fractions."""
        massFracNameList = [
            "NI",
            "CR",
            "FE",
            "TI",
            "AL27",
            "NB93",
            "MN55",
            "SI",
            "S",
            "CU",
            "C",
            "CO59",
        ]
        massFracRefValList = [
            0.7180,
            0.1550,
            0.0700,
            0.0250,
            0.0070,
            0.0095,
            0.0050,
            0.0025,
            0.0001,
            0.0025,
            0.0004,
            0.0050,
        ]
        for name, frac in zip(massFracNameList, massFracRefValList):
            cur = self.mat.getMassFrac(name)
            ref = frac
            self.assertAlmostEqual(cur, ref)

    def test_01_linearExpansionPercent(self):
        """Linear expansion percent at 100-800 C against reference values."""
        TcList = [100, 200, 300, 400, 500, 600, 700, 800]
        refList = [
            0.09927680000000001,
            0.2253902,
            0.36517920000000004,
            0.5186438000000001,
            0.6857840000000001,
            0.8665998000000001,
            1.0610912000000001,
            1.2692582000000001,
        ]
        for Tc, val in zip(TcList, refList):
            cur = self.mat.linearExpansionPercent(Tc=Tc)
            ref = val
            errorMsg = (
                "\n\nIncorrect Inconel X750 linearExpansionPercent(Tk=None,Tc=None)\nReceived:{}\nExpected:{}\n".format(
                    cur, ref
                )
            )
            self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg)

    def test_02_linearExpansion(self):
        """Linear expansion coefficient at 100-800 C against reference values."""
        TcList = [100, 200, 300, 400, 500, 600, 700, 800]
        refList = [
            1.1927560000000001e-05,
            1.329512e-05,
            1.466268e-05,
            1.603024e-05,
            1.73978e-05,
            1.876536e-05,
            2.013292e-05,
            2.150048e-05,
        ]
        for Tc, val in zip(TcList, refList):
            cur = self.mat.linearExpansion(Tc=Tc)
            ref = val
            errorMsg = "\n\nIncorrect Inconel X750 linearExpansion(Tk=None,Tc=None)\nReceived:{}\nExpected:{}\n".format(
                cur, ref
            )
            self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg)

    def test_03_pseudoDensity(self):
        """Pseudo-density at 100-800 C against reference values."""
        TcList = [100, 200, 300, 400, 500, 600, 700, 800]
        refList = [
            8.263584211566972,
            8.242801193765645,
            8.219855974833411,
            8.194776170511199,
            8.167591802868142,
            8.138335221416156,
            8.107041018806447,
            8.073745941486463,
        ]
        for Tc, val in zip(TcList, refList):
            cur = self.mat.pseudoDensity(Tc=Tc)
            ref = val
            errorMsg = "\n\nIncorrect Inconel X750 pseudoDensity(Tk=None,Tc=None)\nReceived:{}\nExpected:{}\n".format(
                cur, ref
            )
            self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg)

    def test_heatCapacity(self):
        """Heat capacity at two temperatures, 0.1% relative tolerance."""
        ref = self.mat.heatCapacity(Tc=100)
        cur = 459.61381
        self.assertAlmostEqual(ref, cur, delta=cur * 0.001)
        ref = self.mat.heatCapacity(Tc=200)
        cur = 484.93968
        self.assertAlmostEqual(ref, cur, delta=cur * 0.001)

    def test_propertyValidTemperature(self):
        """At least one property validity range is defined."""
        self.assertGreater(len(self.mat.propertyValidTemperature), 0)
class Alloy200_TestCase(AbstractMaterialTest, unittest.TestCase):
    """Tests for commercially pure nickel Alloy 200."""

    MAT_CLASS = materials.Alloy200

    def test_nickleContent(self):
        """Assert alloy 200 has more than 99% nickel per its spec."""
        self.assertGreater(self.mat.massFrac["NI"], 0.99)

    def test_linearExpansion(self):
        """Linear expansion coefficient at 100 C."""
        actual = self.mat.linearExpansion(Tc=100)
        self.assertAlmostEqual(actual, 13.3e-6, delta=abs(actual * 0.001))

    def test_linearExpansionHotter(self):
        """Linear expansion coefficient at 873.15 K."""
        actual = self.mat.linearExpansion(Tk=873.15)
        self.assertAlmostEqual(actual, 15.6e-6, delta=abs(actual * 0.001))

    def test_propertyValidTemperature(self):
        """At least one property validity range is defined."""
        self.assertGreater(len(self.mat.propertyValidTemperature), 0)
class CaH2_TestCase(AbstractMaterialTest, unittest.TestCase):
    """Tests for the calcium hydride material."""

    MAT_CLASS = materials.CaH2

    def test_pseudoDensity(self):
        """Pseudo-density is ~1.7 g/cc at both test temperatures.

        Fix: the tolerance was previously passed positionally, which
        ``assertAlmostEqual`` interprets as the integer ``places`` argument
        (a float there raises TypeError whenever the values differ); it is
        now passed explicitly as ``delta``.
        """
        expected = 1.7
        for tempC in (100, 300):
            actual = self.mat.pseudoDensity(Tc=tempC)
            self.assertAlmostEqual(actual, expected, delta=expected * 0.01)

    def test_propertyValidTemperature(self):
        """No property validity ranges are defined."""
        self.assertEqual(len(self.mat.propertyValidTemperature), 0)
class Hafnium_TestCase(AbstractMaterialTest, unittest.TestCase):
    """Tests for the hafnium material."""

    MAT_CLASS = materials.Hafnium

    def test_pseudoDensity(self):
        """Pseudo-density is ~13.07 g/cc at both test temperatures.

        Fix: the tolerance was previously passed positionally, which
        ``assertAlmostEqual`` interprets as the integer ``places`` argument
        (a float there raises TypeError whenever the values differ); it is
        now passed explicitly as ``delta``.
        """
        expected = 13.07
        for tempC in (100, 300):
            actual = self.mat.pseudoDensity(Tc=tempC)
            self.assertAlmostEqual(actual, expected, delta=expected * 0.01)

    def test_propertyValidTemperature(self):
        """No property validity ranges are defined."""
        self.assertEqual(len(self.mat.propertyValidTemperature), 0)
class HastelloyN_TestCase(AbstractMaterialTest, unittest.TestCase):
    """Tests for the Hastelloy N nickel alloy."""

    MAT_CLASS = materials.HastelloyN

    def test_thermalConductivity(self):
        """Thermal conductivity at 200-700 C against reference values."""
        TcList = [200, 300, 400, 500, 600, 700]
        refList = [
            13.171442,
            14.448584,
            16.11144,
            18.16001,
            20.594294,
            23.414292,
        ]
        for Tc, val in zip(TcList, refList):
            cur = self.mat.thermalConductivity(Tc=Tc)
            ref = val
            errorMsg = "\n\nIncorrect Hastelloy N thermalConductivity()\nReceived:{}\nExpected:{}\n".format(cur, ref)
            self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg)

    def test_heatCapacity(self):
        """Heat capacity at 100-700 C against reference values."""
        TcList = [100, 200, 300, 400, 500, 600, 700]
        refList = [
            419.183138,
            438.728472,
            459.630622,
            464.218088,
            480.092250,
            556.547128,
            573.450902,
        ]
        for Tc, val in zip(TcList, refList):
            cur = self.mat.heatCapacity(Tc=Tc)
            ref = val
            errorMsg = "\n\nIncorrect Hastelloy N heatCapacity()\nReceived:{}\nExpected:{}\n".format(cur, ref)
            self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg)

    def test_linearExpansionPercent(self):
        """Linear expansion percent at 100-800 C against reference values."""
        TcList = [100, 200, 300, 400, 500, 600, 700, 800]
        refList = [
            0.0976529128,
            0.2225103228,
            0.351926722,
            0.4874638024,
            0.630683256,
            0.7831467748,
            0.9464160508,
            1.122052776,
        ]
        for Tc, val in zip(TcList, refList):
            cur = self.mat.linearExpansionPercent(Tc=Tc)
            ref = val
            errorMsg = "\n\nIncorrect Hastelloy N linearExpansionPercent()\nReceived:{}\nExpected:{}\n".format(cur, ref)
            self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg)

    def test_meanCoefficientThermalExpansion(self):
        """Mean thermal expansion coefficient at 100-800 C."""
        TcList = [100, 200, 300, 400, 500, 600, 700, 800]
        refList = [
            1.22066141e-05,
            1.23616846e-05,
            1.25688115e-05,
            1.28279948e-05,
            1.31392345e-05,
            1.35025306e-05,
            1.39178831e-05,
            1.4385292e-05,
        ]
        for Tc, val in zip(TcList, refList):
            cur = self.mat.meanCoefficientThermalExpansion(Tc=Tc)
            ref = val
            errorMsg = "\n\nIncorrect Hastelloy N meanCoefficientThermalExpansion()\nReceived:{}\nExpected:{}\n".format(
                cur, ref
            )
            self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg)

    def test_propertyValidTemperature(self):
        """At least one property validity range is defined."""
        self.assertGreater(len(self.mat.propertyValidTemperature), 0)
class TZM_TestCase(AbstractMaterialTest, unittest.TestCase):
    """Tests for the TZM (titanium-zirconium-molybdenum) alloy."""

    MAT_CLASS = materials.TZM

    def test_00_applyInputParams(self):
        # expected nominal composition after applying default input params
        expectedMassFracs = {
            "C": 2.50749e-05,
            "TI": 0.002502504,
            "ZR": 0.000761199,
            "MO": 0.996711222,
        }
        self.mat.applyInputParams()
        for nucName, expected in expectedMassFracs.items():
            self.assertEqual(self.mat.massFrac[nucName], expected)

    def test_01_pseudoDensity(self):
        # room-temperature density in g/cc
        self.assertEqual(self.mat.pseudoDensity(Tc=21.11), 10.16)

    def test_02_linearExpansionPercent(self):
        # (temperature [C], expected linear expansion [%]) reference pairs
        referenceData = [
            (21.11, 0.0),
            (456.11, 1.60e-01),
            (574.44, 2.03e-01),
            (702.22, 2.53e-01),
            (840.56, 3.03e-01),
            (846.11, 3.03e-01),
            (948.89, 3.42e-01),
            (1023.89, 3.66e-01),
            (1146.11, 4.21e-01),
            (1287.78, 4.68e-01),
            (1382.22, 5.04e-01),
        ]
        for temperatureC, expected in referenceData:
            actual = self.mat.linearExpansionPercent(Tc=temperatureC)
            errorMsg = "\n\nIncorrect TZM linearExpansionPercent(Tk=None,Tc=None)\nReceived:{}\nExpected:{}\n".format(
                actual, expected
            )
            self.assertAlmostEqual(actual, expected, delta=10e-3, msg=errorMsg)

    def test_propertyValidTemperature(self):
        # the material must declare at least one valid temperature range
        self.assertGreater(len(self.mat.propertyValidTemperature), 0)
class YttriumOxide_TestCase(AbstractMaterialTest, unittest.TestCase):
    """Tests for the Y2O3 (yttrium oxide) material."""

    MAT_CLASS = materials.Y2O3

    def test_pseudoDensity(self):
        # Fixed: ``cur`` is the computed value and ``ref`` the expected constant,
        # matching the convention of the other test cases in this file (the
        # original had them swapped). Assertion values are unchanged.
        cur = self.mat.pseudoDensity(Tc=25)
        ref = 5.03  # g/cc at room temperature
        self.assertAlmostEqual(cur, ref, 2)

    def test_linearExpansionPercent(self):
        cur = self.mat.linearExpansionPercent(Tc=100)
        ref = 0.069662
        self.assertAlmostEqual(cur, ref, delta=abs(cur * 0.001))

        # NOTE(review): this repeats the Tc=100 check with an almost identical
        # reference value; a second, different temperature was probably intended
        # here — TODO confirm before changing the reference data.
        cur = self.mat.linearExpansionPercent(Tc=100)
        ref = 0.0696622
        self.assertAlmostEqual(cur, ref, delta=abs(cur * 0.001))

    def test_propertyValidTemperature(self):
        # the material must declare at least one valid temperature range
        self.assertGreater(len(self.mat.propertyValidTemperature), 0)
class ZincOxide_TestCase(AbstractMaterialTest, unittest.TestCase):
    """Tests for the ZnO (zinc oxide) material."""

    MAT_CLASS = materials.ZnO

    def test_density(self):
        # Fixed: ``cur`` is the computed value and ``ref`` the expected constant,
        # matching the convention of the other test cases in this file (the
        # original had them swapped). Assertion values are unchanged.
        # NOTE(review): Tk=10.12 is ~10 kelvin (near absolute zero); this looks
        # like it was meant to be Tc — TODO confirm against the ZnO correlation.
        cur = self.mat.density(Tk=10.12)
        ref = 5.61  # g/cc
        self.assertAlmostEqual(cur, ref, 2)

    def test_linearExpansionPercent(self):
        cur = self.mat.linearExpansionPercent(Tc=100)
        ref = 0.04899694350661124
        self.assertAlmostEqual(cur, ref, delta=abs(cur * 0.001))

        cur = self.mat.linearExpansionPercent(Tc=300)
        ref = 0.15825020246870625
        self.assertAlmostEqual(cur, ref, delta=abs(cur * 0.001))

    def test_propertyValidTemperature(self):
        # the material must declare at least one valid temperature range
        self.assertGreater(len(self.mat.propertyValidTemperature), 0)
class FuelMaterial_TestCase(unittest.TestCase):
    """Tests of custom-isotopics material modifications applied through blueprints."""

    # Minimal blueprints input: two custom isotopics (pure U and pure ZR) and one
    # two-component UZr fuel block, used by the single assembly "fuel a".
    baseInput = r"""
nuclide flags:
    U: {burn: false, xs: true}
    ZR: {burn: false, xs: true}
custom isotopics:
    customIsotopic1:
        input format: mass fractions
        density: 1
        U: 1
    customIsotopic2:
        input format: mass fractions
        density: 1
        ZR: 1
blocks:
    fuel: &block_fuel
        fuel1: &component_fuel_fuel1
            shape: Hexagon
            material: UZr
            Tinput: 600.0
            Thot: 600.0
            ip: 0.0
            mult: 1
            op: 10.0
        fuel2: &component_fuel_fuel2
            shape: Hexagon
            material: UZr
            Tinput: 600.0
            Thot: 600.0
            ip: 0.0
            mult: 1
            op: 10.0
assemblies:
    fuel a: &assembly_a
        specifier: IC
        blocks: [*block_fuel]
        height: [1.0]
        axial mesh points: [1]
        xs types: [A]
"""

    def loadAssembly(self, materialModifications):
        """Append a ``material modifications`` YAML snippet under the assembly and construct it.

        The snippet's indentation must nest it inside the ``fuel a`` assembly mapping.
        Returns the constructed assembly object.
        """
        yamlString = self.baseInput + "\n" + materialModifications
        design = blueprints.Blueprints.load(yamlString)
        design._prepConstruction(settings.Settings())
        return design.assemblies["fuel a"]

    def test_class1Class2_class1_wt_frac(self):
        # should error because class1_wt_frac not in (0,1)
        with self.assertRaises(ValueError):
            _a = self.loadAssembly(
                """
        material modifications:
            class1_wt_frac: [2.0]
            class1_custom_isotopics: [customIsotopic1]
            class2_custom_isotopics: [customIsotopic2]
        """
            )

    def test_class1Class2_classX_custom_isotopics(self):
        # should error because class1_custom_isotopics doesn't exist
        with self.assertRaises(KeyError):
            _a = self.loadAssembly(
                """
        material modifications:
            class1_wt_frac: [0.5]
            class1_custom_isotopics: [fakeIsotopic]
            class2_custom_isotopics: [customIsotopic2]
        """
            )

        # should error because class2_custom_isotopics doesn't exist
        with self.assertRaises(KeyError):
            _a = self.loadAssembly(
                """
        material modifications:
            class1_wt_frac: [0.5]
            class1_custom_isotopics: [customIsotopic1]
            class2_custom_isotopics: [fakeIsotopic]
        """
            )
================================================
FILE: armi/materials/tests/test_sic.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for SiC."""
import unittest
from armi.materials.siC import SiC
from armi.materials.tests import test_materials
class TestSiC(test_materials.AbstractMaterialTest, unittest.TestCase):
    """Silicon carbide material tests."""

    MAT_CLASS = SiC

    def test_pseudoDensity(self):
        expected = 3.159  # g/cc at room temperature
        actual = self.mat.pseudoDensity(Tc=25)
        self.assertAlmostEqual(actual, expected, delta=expected * 0.001)

    def test_meltingPoint(self):
        expected = 3003  # K
        actual = self.mat.meltingPoint()
        self.assertAlmostEqual(actual, expected, delta=expected * 0.0001)

    def test_heatCapacity(self):
        # (temperature, expected heat capacity) reference pairs
        for temperature, expected in [(300, 982.20789), (1500, 1330.27867)]:
            self.assertAlmostEqual(self.mat.heatCapacity(temperature), expected, delta=0.0001)

    def test_propertyValidTemperature(self):
        # the material must declare at least one valid temperature range
        self.assertGreater(len(self.mat.propertyValidTemperature), 0)
================================================
FILE: armi/materials/tests/test_sulfur.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for sulfur."""
import unittest
from armi.materials.sulfur import Sulfur
from armi.materials.tests.test_materials import AbstractMaterialTest
class Sulfur_TestCase(AbstractMaterialTest, unittest.TestCase):
    """Tests of density-scaling input parameters on the Sulfur material."""

    MAT_CLASS = Sulfur
    VALID_TEMP_K = 400

    def setUp(self):
        """Build one unmodified Sulfur plus variants with density-scaling input parameters."""
        AbstractMaterialTest.setUp(self)
        self.mat = Sulfur()

        self.Sulfur_sulfur_density_frac = Sulfur()
        self.Sulfur_sulfur_density_frac.applyInputParams(sulfur_density_frac=0.5)

        self.Sulfur_TD_frac = Sulfur()
        self.Sulfur_TD_frac.applyInputParams(TD_frac=0.4)

        self.Sulfur_both = Sulfur()
        self.Sulfur_both.applyInputParams(sulfur_density_frac=0.5, TD_frac=0.4)

    def test_sulfur_density_frac(self):
        temperatureK = 410
        baseline = self.mat.pseudoDensity(temperatureK)

        # sulfur_density_frac scales the density directly
        self.assertAlmostEqual(baseline * 0.5, self.Sulfur_sulfur_density_frac.pseudoDensity(temperatureK))

        # TD_frac also scales the density
        self.assertAlmostEqual(baseline * 0.4, self.Sulfur_TD_frac.pseudoDensity(temperatureK))

        # when both are supplied, the expected factor is 0.4 (not 0.2) —
        # presumably TD_frac takes precedence over sulfur_density_frac
        self.assertAlmostEqual(baseline * 0.4, self.Sulfur_both.pseudoDensity(temperatureK))

    def test_propertyValidTemperature(self):
        # the material must declare at least one valid temperature range
        self.assertGreater(len(self.mat.propertyValidTemperature), 0)
================================================
FILE: armi/materials/tests/test_thoriumOxide.py
================================================
# Copyright 2022 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ThO2."""
import unittest
from armi.materials.tests.test_materials import AbstractMaterialTest
from armi.materials.thoriumOxide import ThoriumOxide
class ThoriumOxide_TestCase(AbstractMaterialTest, unittest.TestCase):
    """Tests for the ThO2 material, including theoretical-density adjustment."""

    MAT_CLASS = ThoriumOxide

    def setUp(self):
        """Create a full-density ThO2 plus one adjusted to 40% theoretical density."""
        AbstractMaterialTest.setUp(self)
        self.mat = ThoriumOxide()
        self.ThoriumOxide_TD_frac = ThoriumOxide()
        self.ThoriumOxide_TD_frac.applyInputParams(TD_frac=0.4)

    def test_theoretical_pseudoDensity(self):
        # the TD-adjusted material's density is exactly 40% of the full-density one
        fullDensity = self.mat.pseudoDensity(500)
        adjusted = self.ThoriumOxide_TD_frac.pseudoDensity(500)
        self.assertAlmostEqual(fullDensity * 0.4, adjusted)

    def test_linearExpansionPercent(self):
        self.assertAlmostEqual(self.mat.linearExpansionPercent(Tk=500), 0.195334)

    def test_propertyValidTemperature(self):
        # the material must declare at least one valid temperature range
        self.assertGreater(len(self.mat.propertyValidTemperature), 0)
================================================
FILE: armi/materials/tests/test_uZr.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for simplified UZr material."""
import pickle
from unittest import TestCase
from armi.materials.uZr import UZr
class UZR_TestCase(TestCase):
    """Exercise Material base-class behavior via the simplified UZr alloy."""

    MAT_CLASS = UZr

    def setUp(self):
        self.mat = self.MAT_CLASS()

    def test_isPicklable(self):
        """Test that materials are picklable so we can do MPI communication of state.

        .. test:: Test the material base class has temp-dependent thermal conductivity curves.
            :id: T_ARMI_MAT_PROPERTIES0
            :tests: R_ARMI_MAT_PROPERTIES
        """
        roundTripped = pickle.loads(pickle.dumps(self.mat))
        # compare a property that is sometimes interpolated
        self.assertEqual(self.mat.thermalConductivity(500), roundTripped.thermalConductivity(500))

    def test_TD(self):
        """Test the material theoretical density."""
        self.assertEqual(self.mat.getTD(), self.mat.theoreticalDensityFrac)

        self.mat.clearCache()
        self.mat._setCache("dummy", 666)
        self.assertEqual(self.mat.cached, {"dummy": 666})

        # adjusting TD must both set the fraction and invalidate the cache
        self.mat.adjustTD(0.5)
        self.assertEqual(0.5, self.mat.theoreticalDensityFrac)
        self.assertEqual(self.mat.cached, {})

    def test_duplicate(self):
        """Test the material duplication.

        .. test:: Materials shall calc mass fracs at init.
            :id: T_ARMI_MAT_FRACS5
            :tests: R_ARMI_MAT_FRACS
        """
        dup = self.mat.duplicate()

        self.assertEqual(len(dup.massFrac), len(self.mat.massFrac))
        for nuclide in self.mat.massFrac:
            self.assertEqual(dup.massFrac[nuclide], self.mat.massFrac[nuclide])

        self.assertEqual(dup.parent, self.mat.parent)
        self.assertEqual(dup.refDens, self.mat.refDens)
        self.assertEqual(dup.theoreticalDensityFrac, self.mat.theoreticalDensityFrac)

    def test_cache(self):
        """Test the material cache."""
        self.mat.clearCache()
        self.assertEqual(len(self.mat.cached), 0)

        self.mat._setCache("Emmy", "Noether")
        self.assertEqual(len(self.mat.cached), 1)
        self.assertEqual(self.mat._getCached("Emmy"), "Noether")

    def test_densityKgM3(self):
        """Test the density for kg/m^3.

        .. test:: Test the material base class has temp-dependent density.
            :id: T_ARMI_MAT_PROPERTIES2
            :tests: R_ARMI_MAT_PROPERTIES
        """
        densityGramsPerCc = self.mat.density(500)
        self.assertEqual(densityGramsPerCc * 1000.0, self.mat.densityKgM3(500))

    def test_pseudoDensityKgM3(self):
        """Test the pseudo density for kg/m^3.

        .. test:: Test the material base class has temp-dependent 2D density.
            :id: T_ARMI_MAT_PROPERTIES3
            :tests: R_ARMI_MAT_PROPERTIES
        """
        densityGramsPerCc = self.mat.pseudoDensity(500)
        self.assertEqual(densityGramsPerCc * 1000.0, self.mat.pseudoDensityKgM3(500))

    def test_density(self):
        """Test that the material produces a nonzero, physically reasonable density.

        .. test:: Test the material base class has temp-dependent density.
            :id: T_ARMI_MAT_PROPERTIES1
            :tests: R_ARMI_MAT_PROPERTIES
        """
        self.assertNotEqual(self.mat.density(500), 0)

        expected = 15.94  # g/cc at 400 K
        self.assertAlmostEqual(self.mat.density(400), expected, delta=expected * 0.01)

    def test_propertyValidTemperature(self):
        # UZr declares no valid-temperature ranges
        self.assertEqual(len(self.mat.propertyValidTemperature), 0)
================================================
FILE: armi/materials/tests/test_water.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for water materials."""
import unittest
from armi.materials.water import SaturatedSteam, SaturatedWater, Water
class TestWater(unittest.TestCase):
    """Unit tests for water materials.

    The three saturation-line tests compare against the verification tables in the
    IAPWS-IF97 supplementary release (linked in each docstring). Small magnitudes
    are compared directly with ``assertAlmostEqual``; large magnitudes are compared
    as ratios to 1 so the tolerance scales with the value.
    """

    def test_waterAtFreezing(self):
        """
        Reproduce verification results from IAPWS-IF97 for water at 0C.

        http://www.iapws.org/relguide/supsat.pdf

        .. test:: There is a base class for fluid materials.
            :id: T_ARMI_MAT_FLUID0
            :tests: R_ARMI_MAT_FLUID
        """
        water = SaturatedWater()
        steam = SaturatedSteam()

        # triple-point temperature [K]
        Tk = 273.16

        # reference values from the IAPWS verification table at 273.16 K
        ref_vapor_pressure = 611.657
        ref_dp_dT = 44.436693
        ref_saturated_water_rho = 999.789
        ref_saturated_steam_rho = 0.00485426
        ref_alpha = -11.529101
        ref_saturated_water_enthalpy = 0.611786
        ref_saturated_steam_enthalpy = 2500.5e3
        ref_phi = -0.04
        ref_saturated_water_entropy = 0
        ref_saturated_steam_entropy = 9.154e3

        # both phases must reproduce the same saturation pressure and slope
        self.assertAlmostEqual(ref_vapor_pressure, water.vaporPressure(Tk=Tk), 3)
        self.assertAlmostEqual(ref_vapor_pressure, steam.vaporPressure(Tk=Tk), 3)
        self.assertAlmostEqual(ref_dp_dT, water.vaporPressurePrime(Tk=Tk), 3)
        self.assertAlmostEqual(ref_dp_dT, steam.vaporPressurePrime(Tk=Tk), 3)
        self.assertAlmostEqual(ref_saturated_water_rho, water.pseudoDensityKgM3(Tk=Tk), 0)
        self.assertAlmostEqual(ref_saturated_steam_rho, steam.pseudoDensityKgM3(Tk=Tk), 0)
        self.assertAlmostEqual(ref_alpha, water.auxiliaryQuantitySpecificEnthalpy(Tk=Tk), 3)
        self.assertAlmostEqual(ref_alpha, steam.auxiliaryQuantitySpecificEnthalpy(Tk=Tk), 3)
        self.assertAlmostEqual(ref_saturated_water_enthalpy, water.enthalpy(Tk=Tk), 2)
        # ratio comparison: the steam enthalpy is ~2.5e6, too large for direct places
        self.assertAlmostEqual(ref_saturated_steam_enthalpy / steam.enthalpy(Tk=Tk), 1, 2)
        self.assertAlmostEqual(ref_phi, water.auxiliaryQuantitySpecificEntropy(Tk=Tk), 2)
        self.assertAlmostEqual(ref_phi, steam.auxiliaryQuantitySpecificEntropy(Tk=Tk), 2)
        self.assertAlmostEqual(ref_saturated_water_entropy, water.entropy(Tk=Tk), 3)
        self.assertAlmostEqual(ref_saturated_steam_entropy / steam.entropy(Tk=Tk), 1, 3)

    def test_waterAtBoiling(self):
        """
        Reproduce verification results from IAPWS-IF97 for water at 100C.

        http://www.iapws.org/relguide/supsat.pdf
        """
        water = SaturatedWater()
        steam = SaturatedSteam()

        # normal boiling point [K]
        Tk = 373.1243

        # reference values from the IAPWS verification table at the boiling point
        ref_vapor_pressure = 0.101325e6
        ref_dp_dT = 3.616e3
        ref_saturated_water_rho = 958.365
        ref_saturated_steam_rho = 0.597586
        ref_alpha = 417.65e3
        ref_saturated_water_enthalpy = 417.05e3
        ref_saturated_steam_enthalpy = 2675.7e3
        ref_phi = 1.303e3
        ref_saturated_water_entropy = 1.307e3
        ref_saturated_steam_entropy = 7.355e3

        # large magnitudes are compared as ratios to 1
        self.assertAlmostEqual(ref_vapor_pressure / water.vaporPressure(Tk=Tk), 1, 3)
        self.assertAlmostEqual(ref_vapor_pressure / steam.vaporPressure(Tk=Tk), 1, 3)
        self.assertAlmostEqual(ref_dp_dT / water.vaporPressurePrime(Tk=Tk), 1, 3)
        self.assertAlmostEqual(ref_dp_dT / steam.vaporPressurePrime(Tk=Tk), 1, 3)
        self.assertAlmostEqual(ref_saturated_water_rho, water.pseudoDensityKgM3(Tk=Tk), 0)
        self.assertAlmostEqual(ref_saturated_steam_rho, steam.pseudoDensityKgM3(Tk=Tk), 0)
        self.assertAlmostEqual(ref_alpha / water.auxiliaryQuantitySpecificEnthalpy(Tk=Tk), 1, 3)
        self.assertAlmostEqual(ref_alpha / steam.auxiliaryQuantitySpecificEnthalpy(Tk=Tk), 1, 3)
        self.assertAlmostEqual(ref_saturated_water_enthalpy / water.enthalpy(Tk=Tk), 1, 2)
        self.assertAlmostEqual(ref_saturated_steam_enthalpy / steam.enthalpy(Tk=Tk), 1, 2)
        self.assertAlmostEqual(ref_phi / water.auxiliaryQuantitySpecificEntropy(Tk=Tk), 1, 3)
        self.assertAlmostEqual(ref_phi / steam.auxiliaryQuantitySpecificEntropy(Tk=Tk), 1, 3)
        self.assertAlmostEqual(ref_saturated_water_entropy / water.entropy(Tk=Tk), 1, 3)
        self.assertAlmostEqual(ref_saturated_steam_entropy / steam.entropy(Tk=Tk), 1, 3)

    def test_waterAtCritcalPoint(self):
        """
        Reproduce verification results from IAPWS-IF97 for water at 647.096K.

        http://www.iapws.org/relguide/supsat.pdf
        """
        water = SaturatedWater()
        steam = SaturatedSteam()

        # critical-point temperature [K]; liquid and vapor properties coincide here
        Tk = 647.096

        ref_vapor_pressure = 22.064e6
        ref_dp_dT = 268e3
        ref_saturated_water_rho = 322
        ref_saturated_steam_rho = 322
        ref_alpha = 1548e3
        ref_saturated_water_enthalpy = 2086.6e3
        ref_saturated_steam_enthalpy = 2086.6e3
        ref_phi = 3.578e3
        ref_saturated_water_entropy = 4.410e3
        ref_saturated_steam_entropy = 4.410e3

        self.assertAlmostEqual(ref_vapor_pressure / water.vaporPressure(Tk=Tk), 1, 3)
        self.assertAlmostEqual(ref_vapor_pressure / steam.vaporPressure(Tk=Tk), 1, 3)
        self.assertAlmostEqual(ref_dp_dT / water.vaporPressurePrime(Tk=Tk), 1, 3)
        self.assertAlmostEqual(ref_dp_dT / steam.vaporPressurePrime(Tk=Tk), 1, 3)
        self.assertAlmostEqual(ref_saturated_water_rho, water.pseudoDensityKgM3(Tk=Tk), 0)
        self.assertAlmostEqual(ref_saturated_steam_rho, steam.pseudoDensityKgM3(Tk=Tk), 0)
        self.assertAlmostEqual(ref_alpha / water.auxiliaryQuantitySpecificEnthalpy(Tk=Tk), 1, 3)
        self.assertAlmostEqual(ref_alpha / steam.auxiliaryQuantitySpecificEnthalpy(Tk=Tk), 1, 3)
        self.assertAlmostEqual(ref_saturated_water_enthalpy / water.enthalpy(Tk=Tk), 1, 2)
        self.assertAlmostEqual(ref_saturated_steam_enthalpy / steam.enthalpy(Tk=Tk), 1, 2)
        self.assertAlmostEqual(ref_phi / water.auxiliaryQuantitySpecificEntropy(Tk=Tk), 1, 3)
        self.assertAlmostEqual(ref_phi / steam.auxiliaryQuantitySpecificEntropy(Tk=Tk), 1, 3)
        self.assertAlmostEqual(ref_saturated_water_entropy / water.entropy(Tk=Tk), 1, 3)
        self.assertAlmostEqual(ref_saturated_steam_entropy / steam.entropy(Tk=Tk), 1, 3)

    def test_massFrac(self):
        # both phases share the same H2O composition; fractions sum to one
        for water in [SaturatedWater(), SaturatedSteam()]:
            massFracO = water.getMassFrac("O")
            massFracH = water.getMassFrac("H")
            self.assertAlmostEqual(massFracO, 0.888, places=3)
            self.assertAlmostEqual(massFracO + massFracH, 1.0)

    def test_propertyValidTemperature(self):
        # the saturated fluids declare no valid-temperature ranges
        water = SaturatedWater()
        self.assertEqual(len(water.propertyValidTemperature), 0)
        steam = SaturatedSteam()
        self.assertEqual(len(steam.propertyValidTemperature), 0)

    def test_validateNames(self):
        # each class reports its own class name as the material name
        water = Water()
        self.assertEqual(water.name, "Water")
        sat = SaturatedWater()
        self.assertEqual(sat.name, "SaturatedWater")
        steam = SaturatedSteam()
        self.assertEqual(steam.name, "SaturatedSteam")
================================================
FILE: armi/materials/thU.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Thorium Uranium metal.
Data is from [IAEA-TECDOC-1450]_.
The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to
this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data
contained in this file should not be used in production simulations.
"""
from armi import runLog
from armi.materials.material import FuelMaterial
from armi.utils.units import getTk
class ThU(FuelMaterial):
    """Thorium-uranium metallic fuel with adjustable U-233 enrichment.

    Property data are taken from IAEA TE 1450; see the module docstring.
    """

    enrichedNuclide = "U233"

    propertyValidTemperature = {"linear expansion": ((30, 600), "K")}

    def __init__(self):
        FuelMaterial.__init__(self)
        # reference density in g/cc from IAEA TE 1450
        self.refDens = 11.68

    def getEnrichment(self):
        """Return the U-233 mass fraction relative to total heavy metal."""
        u233 = self.getMassFrac("U233")
        th232 = self.getMassFrac("TH232")
        return u233 / (u233 + th232)

    def applyInputParams(self, U233_wt_frac=None, *args, **kwargs):
        """Adjust the U-233 enrichment from user input, then defer to the base class."""
        runLog.warning(
            "Material {} has not yet been tested for accuracy".format("ThU"),
            single=True,
            label="ThU applyInputParams",
        )
        if U233_wt_frac is not None:
            self.adjustMassEnrichment(U233_wt_frac)
        FuelMaterial.applyInputParams(self, *args, **kwargs)

    def setDefaultMassFracs(self):
        """Default composition: pure Th-232 with zero U-233."""
        self.setMassFrac("TH232", 1.0)
        self.setMassFrac("U233", 0.0)

    def linearExpansion(self, Tk=None, Tc=None):
        """Linear expansion in m/m/K from IAEA TE 1450."""
        Tk = getTk(Tc, Tk)
        self.checkPropertyTempRange("linear expansion", Tk)
        return 11.9e-6

    def thermalConductivity(self, Tk=None, Tc=None):
        """Thermal conductivity in W/m-K from IAEA TE 1450."""
        Tk = getTk(Tc, Tk)
        return 43.1

    def meltingPoint(self):
        """Melting point in K from IAEA TE 1450."""
        return 2025.0
================================================
FILE: armi/materials/thorium.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Thorium Metal.
Data is from [IAEA-TECDOC-1450]_.
The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to
this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data
contained in this file should not be used in production simulations.
"""
from armi.materials.material import FuelMaterial
from armi.utils.units import getTk
class Thorium(FuelMaterial):
    """Thorium metal fuel; property data from IAEA TE 1450 (see module docstring)."""

    propertyValidTemperature = {"linear expansion": ((30, 600), "K")}

    def __init__(self):
        FuelMaterial.__init__(self)
        # reference density in g/cc
        self.refDens = 11.68

    def setDefaultMassFracs(self):
        """Default composition: pure Th-232."""
        self.setMassFrac("TH232", 1.0)

    def linearExpansion(self, Tk=None, Tc=None):
        """Linear expansion in m/m/K from IAEA TECDOC 1450."""
        Tk = getTk(Tc, Tk)
        self.checkPropertyTempRange("linear expansion", Tk)
        return 11.9e-6

    def thermalConductivity(self, Tk=None, Tc=None):
        """Thermal conductivity in W/m-K from IAEA TE 1450.

        Constant-valued; the temperature arguments are accepted for interface
        compatibility but not used (no range check is performed).
        """
        return 43.1

    def meltingPoint(self):
        """Melting point in K from IAEA TE 1450."""
        return 2025.0
================================================
FILE: armi/materials/thoriumOxide.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Thorium Oxide solid ceramic.
Data is from [IAEA-TECDOC-1450]_.
.. [IAEA-TECDOC-1450] Thorium fuel cycle -- Potential benefits and challenges, IAEA-TECDOC-1450 (2005).
https://www-pub.iaea.org/mtcd/publications/pdf/te_1450_web.pdf
The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to
this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data
contained in this file should not be used in production simulations.
"""
from armi import runLog
from armi.materials.material import FuelMaterial, Material, SimpleSolid
from armi.utils.units import getTk
class ThoriumOxide(FuelMaterial, SimpleSolid):
    """Thorium oxide (ThO2) ceramic fuel; property data from IAEA TE 1450."""

    propertyValidTemperature = {"linear expansion": ((298, 1223), "K")}

    def __init__(self):
        Material.__init__(self)
        # reference density in g/cc
        self.refDens = 10.00

    def applyInputParams(self, TD_frac=None, *args, **kwargs):
        """Apply a user-supplied theoretical-density fraction, logging on suspicious values.

        The branches below are mutually exclusive, so their order does not matter.
        Note that the fraction is applied via ``adjustTD`` even when a warning or
        error was logged.
        """
        if TD_frac is not None:
            if TD_frac < 0:
                runLog.error(
                    "TD_frac is entered as negative. This is not allowed!",
                    single=True,
                    label="Negative TD_frac",
                )
            elif TD_frac == 0:
                runLog.warning(
                    f"Theoretical density frac for {self} is zero!",
                    single=True,
                    label="Zero theoretical density",
                )
            elif TD_frac > 1.0:
                runLog.warning(
                    f"Theoretical density frac for {self} is {TD_frac}, which is >1",
                    single=True,
                    label="Large theoretical density",
                )
            self.adjustTD(TD_frac)
        FuelMaterial.applyInputParams(self, *args, **kwargs)

    def setDefaultMassFracs(self):
        """Set ThO2 mass fractions assuming pure Th-232.

        Molar masses: Th-232 = 232.030806 g/mol, O = 15.9994 g/mol, with two moles
        of oxygen per mole of thorium:

        * Th-232: 232.030806 g
        * O:      2 * 15.9994 = 31.9988 g
        * total:  264.029606 g

        The mass fractions below follow from this ratio.
        """
        self.setMassFrac("TH232", 0.8788)
        self.setMassFrac("O16", 0.1212)

    def linearExpansion(self, Tk=None, Tc=None):
        """Linear expansion in m/m/K from IAEA TE 1450."""
        Tk = getTk(Tc, Tk)
        self.checkPropertyTempRange("linear expansion", Tk)
        return 9.67e-6

    def linearExpansionPercent(self, Tk=None, Tc=None):
        """Approximate the linear thermal expansion percent from the (constant)
        linear expansion coefficient, with 298 K as the reference temperature.
        """
        Tk = getTk(Tc=Tc, Tk=Tk)
        coefficient = self.linearExpansion(Tk=Tk)
        return 100 * (coefficient * (Tk - 298))

    def thermalConductivity(self, Tk=None, Tc=None):
        """Thermal conductivity in W/m-K from IAEA TE 1450 (constant-valued)."""
        return 6.20

    def meltingPoint(self):
        """Melting point in K from IAEA TE 1450."""
        return 3643.0

    def density(self, Tk=None, Tc=None):
        """Mass density scaled by the current theoretical-density fraction."""
        return self.getTD() * Material.density(self, Tk, Tc)
class ThO2(ThoriumOxide):
    """Alias for :class:`ThoriumOxide` so blueprints may refer to the material as ``ThO2``."""
================================================
FILE: armi/materials/uZr.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Simplified UZr alloy.
This is a notional U-10Zr material based on [Chandrabhanu]_.
The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to
this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data
contained in this file should not be used in production simulations.
"""
from armi.materials import material
from armi.utils import units
class UZr(material.FuelMaterial):
    """
    Simplified U-10Zr fuel alloy.

    .. warning:: This is an academic-quality material.

        Only the 10% Zr-frac properties are present.

        If you use a Zr-frac other than 10%, these properties will be incorrect. Bring
        in user-provided materials via plugins when necessary.

    .. [Chandrabhanu] Chandrabhanu Basak, G.J. Prasad, H.S. Kamath, N. Prabhu,
        An evaluation of the properties of As-cast U-rich UZr alloys,
        Journal of Alloys and Compounds,
        Volume 480, Issue 2,
        2009,
        Pages 857-862,
        ISSN 0925-8388,
        https://doi.org/10.1016/j.jallcom.2009.02.077.
    """

    enrichedNuclide = "U235"

    zrFracDefault = 0.10
    uFracDefault = 1.0 - zrFracDefault

    def __init__(self):
        material.Material.__init__(self)

    def setDefaultMassFracs(self):
        """Set nominal U-10Zr mass fractions at 10% U-235 enrichment."""
        defaultEnrichment = 0.1
        self.setMassFrac("ZR", self.zrFracDefault)
        self.setMassFrac("U235", defaultEnrichment * self.uFracDefault)
        self.setMassFrac("U238", (1.0 - defaultEnrichment) * self.uFracDefault)
        self._calculateReferenceDensity(self.zrFracDefault, self.uFracDefault)

    def applyInputParams(self, U235_wt_frac=None, ZR_wt_frac=None, *args, **kwargs):
        """Apply user input."""
        if ZR_wt_frac is None:
            ZR_wt_frac = self.zrFracDefault
        if U235_wt_frac is None:
            U235_wt_frac = 0.1
        uFrac = 1.0 - ZR_wt_frac
        self.setMassFrac("ZR", ZR_wt_frac)
        self.setMassFrac("U235", U235_wt_frac * uFrac)
        self.setMassFrac("U238", (1.0 - U235_wt_frac) * uFrac)
        self._calculateReferenceDensity(ZR_wt_frac, uFrac)
        material.FuelMaterial.applyInputParams(self, *args, **kwargs)

    def _calculateReferenceDensity(self, zrFrac, uFrac):
        """Set the reference density (g/cc at 293 K) by mixing the U and Zr
        densities by weight fraction with Vegard's law.
        """
        uraniumDensity293 = 19.1  # g/cc
        zirconiumDensity293 = 6.52  # g/cc
        specificVolume = uFrac / uraniumDensity293 + zrFrac / zirconiumDensity293
        self.refDens = 1.0 / specificVolume

    def linearExpansionPercent(self, Tk=None, Tc=None):
        """Gets the linear expansion from eq. 3 in [Chandrabhanu]_ for U-10Zr."""
        kelvin = units.getTk(Tc, Tk)
        # terms kept in the original evaluation order to preserve exact float results
        squared = kelvin * kelvin
        cubed = squared * kelvin
        return -0.73 + 3.489e-3 * kelvin - 5.154e-6 * squared + 4.39e-9 * cubed
================================================
FILE: armi/materials/uranium.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Uranium metal.
Much info is from [AAAFuels]_.
.. [AAAFuels] Kim, Y S, and Hofman, G L. AAA fuels handbook.. United States: N. p., 2003. Web. doi:10.2172/822554.
The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to
this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data
contained in this file should not be used in production simulations.
"""
from numpy import interp
from armi import runLog
from armi.materials.material import FuelMaterial
from armi.utils.units import getTk
class Uranium(FuelMaterial):
    """
    Uranium metal.

    Thermal conductivity and heat capacity are from the AAA Fuels Handbook;
    density and thermal-expansion data are from the Metallic Fuels Handbook
    (see ``references``). Table-backed properties are linearly interpolated.
    Duplicated abscissae near 942 K, 1049 K, and 1408 K in the tables encode
    property discontinuities; 1408 K is the melting point (see ``meltingPoint``).
    """

    # Enrichment adjustments move mass into/out of U235.
    enrichedNuclide = "U235"

    materialIntro = ""
    propertyNotes = {"thermal conductivity": ""}
    propertyRawData = {"thermal conductivity": ""}
    # NOTE(review): heat capacity is declared here as J/kg-K, but the table
    # below is labeled J/K/mol and heatCapacity() returns table values with no
    # conversion — confirm which unit downstream consumers expect.
    propertyUnits = {"thermal conductivity": "W/m-K", "heat capacity": "J/kg-K"}
    propertyEquation = {"thermal conductivity": "21.73 + 0.01591T + 5.907×10-6T2"}

    # Temperature grid (K) for the heat-capacity table (AAA Fuels Handbook, Table 2-14).
    _heatCapacityTableK = [
        298, 300, 400, 500, 600, 700, 800, 900, 941.9, 942,
        1000, 1048.9, 1049, 1100, 1200, 1300, 1400, 1407.9, 1408,
        1500, 1600, 1700, 1800, 1900, 2000, 2100, 2200, 2400,
    ]
    # Heat-capacity values on the grid above; the near-duplicate abscissae
    # (941.9/942, 1048.9/1049, 1407.9/1408) make the interpolation step sharply
    # at the discontinuities.
    _heatCapacityTable = [
        27.665, 27.700, 29.684, 31.997, 34.762, 38.021, 41.791, 46.081, 48.038, 42.928,
        42.928, 42.928, 38.284, 38.284, 38.284, 38.284, 38.284, 38.284, 48.660,
        48.660, 48.660, 48.660, 48.660, 48.660, 48.660, 48.660, 48.660, 48.660,
    ]  # J/K/mol

    # Shared temperature grid (K) for the density and thermal-expansion tables
    # (Metallic Fuels Handbook, Table B.3.3-1).
    _densityTableK = [
        293, 400, 500, 600, 700, 800, 900, 940.9, 941, 1000,
        1047.9, 1048, 1100, 1200, 1400, 1407.9, 1408, 1500, 1600,
    ]
    _densityTable = [
        19.07, 18.98, 18.89, 18.79, 18.68, 18.55, 18.41, 18.39, 18.16, 18.11,
        18.07, 17.94, 17.88, 17.76, 17.53, 17.52, 16.95, 16.84, 16.71,
    ]  # g/cc
    # Accumulated linear expansion on the _densityTableK grid.
    _linearExpansionPercent = [
        0.000, 0.157, 0.315, 0.494, 0.697, 0.924, 1.186, 1.300, 1.635, 1.737,
        1.820, 2.050, 2.168, 2.398, 2.855, 2.866, 4.006, 4.232, 4.502,
    ]  # %
    # Instantaneous linear-expansion coefficient on the _densityTableK grid.
    _linearExpansionTable = [
        13.9, 15.2, 16.9, 19.0, 21.4, 24.3, 27.7, 29.1, 17.3, 17.3,
        17.3, 22.9, 22.9, 22.9, 22.9, 22.9, 25.5, 25.5, 25.5,
    ]  # 1e6/K

    propertyValidTemperature = {
        "thermal conductivity": ((255.4, 1173.2), "K"),
        # Table-backed properties are valid over their table's full span.
        "heat capacity": ((_heatCapacityTableK[0], _heatCapacityTableK[-1]), "K"),
        "density": ((_densityTableK[0], _densityTableK[-1]), "K"),
        "linear expansion": ((_densityTableK[0], _densityTableK[-1]), "K"),
        "linear expansion percent": ((_densityTableK[0], _densityTableK[-1]), "K"),
    }
    references = {
        "thermal conductivity": ["AAA Fuels Handbook by YS Kim and G.L. Hofman, ANL, Section 6.1.1"],
        "heat capacity": ["AAA Fuels Handbook by YS Kim and GL Hofman, Table 2-14"],
        "melting point": ["AAA Fuels Handbook by YS Kim and GL Hofman, Table 2-13"],
        "density": ["Metallic Fuels Handbook, ANL-NSE-3, Table B.3.3-1"],
        "linear expansion": ["Metallic Fuels Handbook, ANL-NSE-3, Table B.3.3-1"],
        "linear expansion percent": ["Metallic Fuels Handbook, ANL-NSE-3, Table B.3.3-1"],
    }

    def thermalConductivity(self, Tk: float = None, Tc: float = None) -> float:
        """Thermal conductivity of pure U in W/m-K (quadratic fit in T[K])."""
        Tk = getTk(Tc, Tk)
        self.checkPropertyTempRange("thermal conductivity", Tk)
        kU = 21.73 + (0.01591 * Tk) + (0.000005907 * Tk**2)
        return kU

    def heatCapacity(self, Tk: float = None, Tc: float = None) -> float:
        """
        Heat capacity, linearly interpolated from the class table.

        NOTE(review): the source table is labeled J/K/mol while ``propertyUnits``
        declares J/kg-K; no conversion is performed here — confirm the intended unit.
        """
        Tk = getTk(Tc, Tk)
        self.checkPropertyTempRange("heat capacity", Tk)
        return interp(Tk, self._heatCapacityTableK, self._heatCapacityTable)

    def setDefaultMassFracs(self) -> None:
        """Set natural-uranium U235/U238 mass fractions (U234 neglected) and the reference density."""
        # Prefer the attached nuclide directory; fall back to hard-coded standard
        # weights/abundance so the material also works without a parent.
        nb = self.parent.nuclideBases if self.parent else None
        if nb is None:
            u235Weight = 235.043929425
            u238Weight = 238.050788298
            u235Abundance = 0.007204
        else:
            # NOTE(review): the sibling UraniumOxide class uses nb.byName for the
            # same lookups — confirm byLabel/byName are interchangeable for U235/U238.
            u235Weight = nb.byLabel["U235"].weight
            u238Weight = nb.byLabel["U238"].weight
            u235Abundance = nb.byLabel["U235"].abundance
        u238Abundance = 1.0 - u235Abundance  # neglect U234 and keep U235 at natural level
        gramsIn1Mol = u235Abundance * u235Weight + u238Abundance * u238Weight
        self.setMassFrac("U235", u235Weight * u235Abundance / gramsIn1Mol)
        self.setMassFrac("U238", u238Weight * u238Abundance / gramsIn1Mol)
        # Reference density (g/cc): the 293 K entry of the density table.
        self.refDens = 19.07

    def applyInputParams(self, U235_wt_frac: float = None, TD_frac: float = None, *args, **kwargs):
        """
        Apply user input: optional U235 enrichment and theoretical-density fraction.

        Suspicious TD fractions (>1 or exactly 0) are warned about (once per
        label) but still applied.
        """
        if U235_wt_frac is not None:
            self.adjustMassEnrichment(U235_wt_frac)
        td = TD_frac
        if td is not None:
            if td > 1.0:
                runLog.warning(
                    f"Theoretical density frac for {self} is {td}, which is >1",
                    single=True,
                    label="Large theoretical density",
                )
            elif td == 0:
                runLog.warning(
                    f"Theoretical density frac for {self} is zero!",
                    single=True,
                    label="Zero theoretical density",
                )
            self.adjustTD(td)
        FuelMaterial.applyInputParams(self, *args, **kwargs)

    def meltingPoint(self):
        """Melting point in K (AAA Fuels Handbook, Table 2-13)."""
        return 1408

    def density(self, Tk: float = None, Tc: float = None) -> float:
        """Density in g/cc, interpolated from the table and scaled by the TD fraction."""
        Tk = getTk(Tc, Tk)
        self.checkPropertyTempRange("density", Tk)
        return interp(Tk, self._densityTableK, self._densityTable) * self.getTD()

    def pseudoDensity(self, Tk: float = None, Tc: float = None) -> float:
        """2D-expanded density in g/cc (base-class value scaled by the TD fraction)."""
        return super().pseudoDensity(Tk=Tk, Tc=Tc) * self.getTD()

    def linearExpansion(self, Tk: float = None, Tc: float = None) -> float:
        """Linear expansion coefficient in 1/K (table stores 1e6/K; divided out here)."""
        Tk = getTk(Tc, Tk)
        self.checkPropertyTempRange("linear expansion", Tk)
        return interp(Tk, self._densityTableK, self._linearExpansionTable) / 1e6

    def linearExpansionPercent(self, Tk: float = None, Tc: float = None) -> float:
        """Accumulated linear expansion (dL/L) in percent, interpolated from the table."""
        Tk = getTk(Tc, Tk)
        self.checkPropertyTempRange("linear expansion percent", Tk)
        return interp(Tk, self._densityTableK, self._linearExpansionPercent)
================================================
FILE: armi/materials/uraniumOxide.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Uranium Oxide properties.
UO2 is a common ceramic nuclear fuel form. Its properties are well known. This mostly
uses data from [#ornltm2000]_.
.. [#ornltm2000] Thermophysical Properties of MOX and UO2 Fuels Including the Effects of Irradiation. S.G. Popov,
et.al. Oak Ridge National Laboratory. ORNL/TM-2000/351 https://rsicc.ornl.gov/fmdp/tm2000-351.pdf
The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to
this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data
contained in this file should not be used in production simulations.
"""
import collections
import math
from numpy import interp
from armi import runLog
from armi.materials import material
from armi.nucDirectory import thermalScattering as tsl
from armi.utils.units import getTk
HeatCapacityConstants = collections.namedtuple("HeatCapacityConstants", ["c1", "c2", "c3", "theta", "Ea"])
class UraniumOxide(material.FuelMaterial, material.SimpleSolid):
    """
    Uranium dioxide (UO2) ceramic fuel.

    Correlations are fit to ORNL/TM-2000/351 except thermal conductivity,
    which is interpolated from Motoyama's molecular-dynamics results
    (see ``references``).
    """

    # Enrichment adjustments move mass into/out of U235.
    enrichedNuclide = "U235"

    # Reference temperature for this material. NOTE(review): presumably degrees C
    # (``__init__`` evaluates the reference density at ``self.refTempK``) — confirm.
    REFERENCE_TEMPERATURE = 27

    # Heat-capacity fit constants from ORNL/TM-2000/351 section 4.3 (eq. 4.2).
    heatCapacityConstants = HeatCapacityConstants(c1=302.27, c2=8.463e-3, c3=8.741e7, theta=548.68, Ea=18531.7)

    # Double leading underscore -> name-mangled, so subclasses (e.g. UO2) cannot
    # accidentally shadow it.
    __meltingPoint = 3123.0

    # NOTE(review): declared J/mol-K here, but ``heatCapacity``'s docstring says
    # J/kg-K — one of the two is wrong; confirm against ORNL/TM-2000/351.
    propertyUnits = {"heat capacity": "J/mol-K"}

    propertyValidTemperature = {
        "density": ((293.15, 3100), "K"),
        "heat capacity": ((298.15, 3120), "K"),
        "linear expansion": ((273, 3120), "K"),
        "linear expansion percent": ((273, __meltingPoint), "K"),
        "thermal conductivity": ((300, 3000), "K"),
    }
    references = {
        "thermal conductivity": "Thermal conductivity of uranium dioxide by nonequilibrium molecular dynamics "
        + "simulation. S. Motoyama. Physical Review B, Volume 60, Number 1, July 1999",
        "linear expansion": "Thermophysical Properties of MOX and UO2 Fuels Including the Effects of Irradiation. "
        + "S.G. Popov, et.al. Oak Ridge National Laboratory. ORNL/TM-2000/351",
        "heat capacity": "ORNL/TM-2000/351",
    }

    # Bound thermal-scattering laws for both constituents of UO2.
    thermalScatteringLaws = (tsl.fromNameAndCompound("U", tsl.UO2), tsl.fromNameAndCompound("O", tsl.UO2))

    # Thermal conductivity vs temperature (K), taken from:
    # Thermal conductivity of uranium dioxide by nonequilibrium molecular dynamics simulation.
    # S. Motoyama. Physical Review B, Volume 60, Number 1, July 1999
    thermalConductivityTableK = [300, 600, 900, 1200, 1500, 1800, 2100, 2400, 2700, 3000]
    thermalConductivityTable = [7.991, 4.864, 3.640, 2.768, 2.567, 2.294, 2.073, 1.891, 1.847, 1.718]

    def __init__(self):
        material.FuelMaterial.__init__(self)
        # Cache the density at the reference temperature as the reference density.
        self.refDens = self.density(Tk=self.refTempK)

    def applyInputParams(self, U235_wt_frac: float = None, TD_frac: float = None, *args, **kwargs) -> None:
        """
        Apply user input: optional U235 enrichment and theoretical-density fraction.

        Suspicious TD fractions (>1 or exactly 0) are warned about (once per
        label) but still applied.
        """
        if U235_wt_frac is not None:
            self.adjustMassEnrichment(U235_wt_frac)
        td = TD_frac
        if td is not None:
            if td > 1.0:
                runLog.warning(
                    "Theoretical density frac for {0} is {1}, which is >1".format(self, td),
                    single=True,
                    label="Large theoretical density",
                )
            elif td == 0:
                runLog.warning(
                    f"Theoretical density frac for {self} is zero!",
                    single=True,
                    label="Zero theoretical density",
                )
            self.adjustTD(td)
        material.FuelMaterial.applyInputParams(self, *args, **kwargs)

    def setDefaultMassFracs(self) -> None:
        """UO2 mass fractions. Using Natural Uranium without U234."""
        # Prefer the attached nuclide directory; fall back to hard-coded standard
        # weights/abundance so the material also works without a parent.
        nb = self.parent.nuclideBases if self.parent else None
        if nb is None:
            u235Weight = 235.043929425
            u238Weight = 238.050788298
            oxygenWeight = 15.999304875697801
            u235Abundance = 0.007204
        else:
            u235Weight = nb.byName["U235"].weight
            u238Weight = nb.byName["U238"].weight
            oxygenWeight = nb.byName["O"].weight
            u235Abundance = nb.byName["U235"].abundance
        u238Abundance = 1.0 - u235Abundance  # neglect U234 and keep U235 at natural level
        # One mole of UO2 = one mole U (natural U235/U238 split) + two moles O.
        gramsIn1Mol = 2 * oxygenWeight + u235Abundance * u235Weight + u238Abundance * u238Weight
        self.setMassFrac("U235", u235Weight * u235Abundance / gramsIn1Mol)
        self.setMassFrac("U238", u238Weight * u238Abundance / gramsIn1Mol)
        self.setMassFrac("O", 2 * oxygenWeight / gramsIn1Mol)

    def meltingPoint(self):
        """Melting point in K, from [#ornltm2000]_."""
        return self.__meltingPoint

    def density(self, Tk: float = None, Tc: float = None) -> float:
        """
        Density in g/cc, scaled by the theoretical-density fraction.

        Quadratic fit to data from [#ornltm2000]_ on page 11.
        """
        Tk = getTk(Tc, Tk)
        self.checkPropertyTempRange("density", Tk)
        return (-1.01147e-7 * Tk**2 - 1.29933e-4 * Tk + 1.09805e1) * self.getTD()

    def heatCapacity(self, Tk: float = None, Tc: float = None) -> float:
        """
        Heat capacity per eq. 4.2, Section 4.3 of [#ornltm2000]_.

        NOTE(review): ``propertyUnits`` declares J/mol-K but this previously
        claimed J/kg-K — confirm the unit against the reference.
        """
        Tk = getTk(Tc, Tk)
        self.checkPropertyTempRange("heat capacity", Tk)
        hcc = self.heatCapacityConstants
        # eq 4.2: a three-term fit (theta/T lattice term, linear term,
        # exponential term with activation energy Ea).
        specificHeatCapacity = (
            hcc.c1 * (hcc.theta / Tk) ** 2 * math.exp(hcc.theta / Tk) / (math.exp(hcc.theta / Tk) - 1.0) ** 2
            + 2 * hcc.c2 * Tk
            + hcc.c3 * hcc.Ea * math.exp(-hcc.Ea / Tk) / Tk**2
        )
        return specificHeatCapacity

    def linearExpansion(self, Tk: float = None, Tc: float = None) -> float:
        """Linear expansion coefficient; curve fit from data in [#ornltm2000]_."""
        Tk = getTk(Tc, Tk)
        self.checkPropertyTempRange("linear expansion", Tk)
        return 1.06817e-12 * Tk**2 - 1.37322e-9 * Tk + 1.02863e-5

    def linearExpansionPercent(self, Tk: float = None, Tc: float = None) -> float:
        """
        Return dL/L, from Section 3.3 of [#ornltm2000]_.

        Two cubic fits: one for 273 K <= T < 923 K and one above.
        """
        Tk = getTk(Tc, Tk)
        self.checkPropertyTempRange("linear expansion percent", Tk)
        if Tk >= 273.0 and Tk < 923.0:
            return (-2.66e-03 + 9.802e-06 * Tk - 2.705e-10 * Tk**2 + 4.391e-13 * Tk**3) * 100.0
        else:
            return (-3.28e-03 + 1.179e-05 * Tk - 2.429e-09 * Tk**2 + 1.219e-12 * Tk**3) * 100.0

    def thermalConductivity(self, Tk: float = None, Tc: float = None) -> float:
        """
        Thermal conductivity, linearly interpolated from the Motoyama (1999) table.

        Ref: Thermal conductivity of uranium dioxide by nonequilibrium molecular
        dynamics simulation. S. Motoyama. Physical Review B, Volume 60, Number 1, July 1999
        """
        Tk = getTk(Tc, Tk)
        self.checkPropertyTempRange("thermal conductivity", Tk)
        return interp(Tk, self.thermalConductivityTableK, self.thermalConductivityTable)
class UO2(UraniumOxide):
    """Alias class so inputs may refer to uranium oxide as ``UO2``."""

    def __init__(self):
        super().__init__()
        # Report the canonical material name regardless of which alias was used.
        self._name = "UraniumOxide"
================================================
FILE: armi/materials/void.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Void material.
Use this to fill empty spaces while maintaining proper volume fractions.
"""
from armi.materials import material
class Void(material.Fluid):
    """
    Bookkeeping material with zero density.

    Filling otherwise-empty space with an explicit zero-density material keeps
    volume-fraction accounting consistent.
    """

    def pseudoDensity(self, Tk: float = None, Tc: float = None) -> float:
        """Return 0.0 g/cc at any temperature — a void carries no mass."""
        return 0.0

    def density(self, Tk: float = None, Tc: float = None) -> float:
        """Return 0.0 g/cc at any temperature — a void carries no mass."""
        return 0.0
================================================
FILE: armi/materials/water.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Basic water material.
The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to
this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data
contained in this file should not be used in production simulations.
"""
import math
from armi.materials.material import Fluid
from armi.nucDirectory import elements
from armi.nucDirectory import thermalScattering as tsl
from armi.utils import units
from armi.utils.units import getTk
_REF_SR1_86 = "IAPWS SR1-86 Revised Supplementary Release on Saturation Properties of Ordinary Water and Steam"
class Water(Fluid):
    """
    Water.

    A good-faith implementation of the Revised Supplementary Release on
    Saturation Properties of Ordinary Water Substance (1992) by IAPWS — the
    International Association for the Properties of Water and Steam
    (http://www.iapws.org/relguide/supsat.pdf).

    This class is abstract with respect to density; use the concrete
    ``SaturatedWater`` and ``SaturatedSteam`` classes, which should be good
    enough for most uses. (IAPWS-IF97 is now the international standard for
    calculations in the steam power industry.)
    """

    thermalScatteringLaws = (tsl.fromNameAndCompound("H", tsl.H2O),)
    references = {
        "vapor pressure": _REF_SR1_86,
        "enthalpy (saturated water)": _REF_SR1_86,
        "enthalpy (saturated steam)": _REF_SR1_86,
        "entropy (saturated water)": _REF_SR1_86,
        "entropy (saturated steam)": _REF_SR1_86,
        "density (saturated water)": _REF_SR1_86,
        "density (saturated steam)": _REF_SR1_86,
    }

    # Critical-point constants of water used to normalize the correlations.
    TEMPERATURE_CRITICAL_K = 647.096
    DENSITY_CRITICAL_KGPERCUBICMETER = 322.0
    DENSITY_CRITICAL_GPERCUBICCENTIMETER = DENSITY_CRITICAL_KGPERCUBICMETER * units.G_PER_KG / units.CM3_PER_M3
    VAPOR_PRESSURE_CRITICAL_MPA = 22.064
    VAPOR_PRESSURE_CRITICAL_PA = VAPOR_PRESSURE_CRITICAL_MPA * 1e6

    # Normalization factors for the auxiliary enthalpy (alpha) and entropy (phi) quantities.
    ALPHA_0 = 1000
    PHI_0 = ALPHA_0 / TEMPERATURE_CRITICAL_K

    # coefficients for auxiliary quantity for enthalpy and entropy; kept named
    # "d" (with mixed int/str keys) to match the original source's notation
    d = {
        1: -5.65134998e-08,
        2: 2690.66631,
        3: 127.287297,
        4: -135.003439,
        5: 0.981825814,
        "alpha": -1135.905627715,
        "phi": 2319.5246,
    }

    def setDefaultMassFracs(self) -> None:
        """Set the H and O mass fractions of pure H2O from standard atomic weights."""
        # ``nb`` only signals whether a nuclide directory is attached; the actual
        # weights come from the elements module (or the hard-coded fallbacks).
        nb = self.parent.nuclideBases if self.parent else None
        if nb is None:
            massHydrogen = 1.007976004510346
            massOxygen = 15.999304715704756
        else:
            massHydrogen = elements.bySymbol["H"].standardWeight
            massOxygen = elements.bySymbol["O"].standardWeight
        totalMass = 2 * massHydrogen + massOxygen
        massFrac = {"H": 2.0 * massHydrogen / totalMass, "O": massOxygen / totalMass}
        for nucName, mfrac in massFrac.items():
            self.setMassFrac(nucName, mfrac)

    def theta(self, Tk: float = None, Tc: float = None) -> float:
        """Return temperature normalized to the critical temperature (T/Tcrit)."""
        return getTk(Tc=Tc, Tk=Tk) / self.TEMPERATURE_CRITICAL_K

    def tau(self, Tc: float = None, Tk: float = None) -> float:
        """
        Return 1 - T/Tcrit.

        Notes
        -----
        The thermophysical correlations below are given in tau rather than Tk or Tc.
        """
        return 1.0 - self.theta(Tc=Tc, Tk=Tk)

    def vaporPressure(self, Tk: float = None, Tc: float = None) -> float:
        """
        Return saturation vapor pressure in Pa.

        Parameters
        ----------
        Tk: float
            temperature in Kelvin
        Tc: float
            temperature in Celsius

        Returns
        -------
        vaporPressure: float
            vapor pressure in Pa

        Notes
        -----
        Correlation from http://www.iapws.org/relguide/supsat.pdf.
        """
        tau = self.tau(Tc=Tc, Tk=Tk)
        T_ratio = self.TEMPERATURE_CRITICAL_K / getTk(Tc=Tc, Tk=Tk)
        a1 = -7.85951783
        a2 = 1.84408259
        a3 = -11.7866497
        a4 = 22.6807411
        a5 = -15.9618719
        a6 = 1.80122502
        sum_coefficients = a1 * tau + a2 * tau**1.5 + a3 * tau**3 + a4 * tau**3.5 + a5 * tau**4 + a6 * tau**7.5
        log_vapor_pressure = T_ratio * sum_coefficients
        vapor_pressure = self.VAPOR_PRESSURE_CRITICAL_PA * math.e ** (log_vapor_pressure)
        # past the supercritical point tau < 0, so the fractional powers above
        # produce complex values; only the real part is physical
        return vapor_pressure.real

    def vaporPressurePrime(self, Tk: float = None, Tc: float = None, dT: float = 1e-6) -> float:
        """
        Approximate d(vapor pressure)/dT via a central finite difference.

        Parameters
        ----------
        Tk: float
            temperature in Kelvin
        Tc: float
            temperature in Celsius
        dT: float
            finite-difference step in K
        """
        Tcold = getTk(Tc=Tc, Tk=Tk) - dT / 2.0
        Thot = Tcold + dT
        dp = self.vaporPressure(Tk=Thot) - self.vaporPressure(Tk=Tcold)
        return dp / dT

    def auxiliaryQuantitySpecificEnthalpy(self, Tk: float = None, Tc: float = None) -> float:
        """
        Return the auxiliary quantity alpha for specific enthalpy, in J/kg.

        Notes
        -----
        From http://www.iapws.org/relguide/supsat.pdf. alpha enters the
        enthalpy relation used in ``enthalpy``: h = alpha + (T/rho) * dp/dT.
        """
        theta = self.theta(Tc=Tc, Tk=Tk)
        normalized_alpha = (
            self.d["alpha"]
            + self.d[1] * theta**-19
            + self.d[2] * theta
            + self.d[3] * theta**4.5
            + self.d[4] * theta**5.0
            + self.d[5] * theta**54.5
        )
        # past the supercritical point fractional powers can produce complex
        # values; only the real part is physical
        return normalized_alpha.real * self.ALPHA_0

    def auxiliaryQuantitySpecificEntropy(self, Tk: float = None, Tc: float = None) -> float:
        """
        Return the auxiliary quantity phi for specific entropy, in J/(kg*K).

        Notes
        -----
        From http://www.iapws.org/relguide/supsat.pdf. phi enters the entropy
        relation used in ``entropy``: s = phi + (1/rho) * dp/dT.
        """
        theta = self.theta(Tc=Tc, Tk=Tk)
        normalized_phi = (
            self.d["phi"]
            + 19.0 / 20.0 * self.d[1] * theta**-20.0
            + self.d[2] * math.log(theta)
            + 9.0 / 7.0 * self.d[3] * theta**3.5
            + 5.0 / 4.0 * self.d[4] * theta**4.0
            + 109.0 / 107.0 * self.d[5] * theta**53.5
        )
        # only the real part is physical (complex above the critical point)
        return normalized_phi.real * self.PHI_0

    def enthalpy(self, Tk: float = None, Tc: float = None) -> float:
        """
        Return enthalpy of saturation in J/kg: h = alpha + (T/rho) * dp/dT.

        The density comes from ``pseudoDensityKgM3``, so the result is for
        saturated water or saturated steam depending on the concrete subclass.

        Notes
        -----
        From http://www.iapws.org/relguide/supsat.pdf.
        """
        alpha = self.auxiliaryQuantitySpecificEnthalpy(Tc=Tc, Tk=Tk)
        T = getTk(Tc=Tc, Tk=Tk)
        rho = self.pseudoDensityKgM3(Tc=Tc, Tk=Tk)
        dp_dT = self.vaporPressurePrime(Tc=Tc, Tk=Tk)
        return alpha + T / rho * dp_dT

    def entropy(self, Tk: float = None, Tc: float = None) -> float:
        """
        Return entropy of saturation in J/(kg*K): s = phi + (1/rho) * dp/dT.

        Notes
        -----
        From http://www.iapws.org/relguide/supsat.pdf.
        """
        phi = self.auxiliaryQuantitySpecificEntropy(Tc=Tc, Tk=Tk)
        rho = self.pseudoDensityKgM3(Tc=Tc, Tk=Tk)
        dp_dT = self.vaporPressurePrime(Tc=Tc, Tk=Tk)
        return phi + 1.0 / rho * dp_dT

    def pseudoDensity(self, Tk=None, Tc=None):
        """
        Density for arbitrary forms of water.

        Notes
        -----
        In ARMI, we define pseudoDensity() and density() as the same for Fluids.
        This base class is abstract here; concrete saturation-curve densities
        live on SaturatedWater and SaturatedSteam.
        """
        raise NotImplementedError("Please use a concrete instance: SaturatedWater or SaturatedSteam.")
class SaturatedWater(Water):
    """
    Saturated liquid water on the IAPWS SR1-86 saturation curve.

    For the vapor side of the curve, see ``SaturatedSteam``.
    """

    def pseudoDensity(self, Tk: float = None, Tc: float = None) -> float:
        """
        Return the saturated-liquid density in g/cc.

        Parameters
        ----------
        Tk: float
            temperature in Kelvin
        Tc: float
            temperature in Celsius

        Returns
        -------
        density: float
            density in g/cc

        Notes
        -----
        In ARMI, we define pseudoDensity() and density() as the same for Fluids.
        Correlation from http://www.iapws.org/relguide/supsat.pdf.
        """
        tau = self.tau(Tc=Tc, Tk=Tk)
        # (coefficient, exponent) pairs of the SR1-86 saturated-liquid fit;
        # the leading 1 below is the normalized critical density.
        terms = (
            (1.99274064, 1.0 / 3.0),
            (1.09965342, 2.0 / 3.0),
            (-0.510839303, 5.0 / 3.0),
            (-1.75493479, 16.0 / 3.0),
            (-45.5170352, 43.0 / 3.0),
            (-6.74694450e5, 111.0 / 3.0),
        )
        normalizedDensity = 1
        for coefficient, exponent in terms:
            normalizedDensity = normalizedDensity + coefficient * tau**exponent
        # past the supercritical point tau < 0 and the fractional powers go
        # complex; only the real part is physical
        return normalizedDensity.real * self.DENSITY_CRITICAL_GPERCUBICCENTIMETER
class SaturatedSteam(Water):
    """
    Saturated steam on the IAPWS SR1-86 saturation curve.

    For the liquid side of the curve, see ``SaturatedWater``.
    """

    def pseudoDensity(self, Tk: float = None, Tc: float = None) -> float:
        """
        Return the saturated-vapor density in g/cc.

        Parameters
        ----------
        Tk: float
            temperature in Kelvin
        Tc: float
            temperature in Celsius

        Returns
        -------
        density: float
            density in g/cc

        Notes
        -----
        In ARMI, we define pseudoDensity() and density() as the same for Fluids.
        Correlation from http://www.iapws.org/relguide/supsat.pdf; the fit is
        for the log of the normalized density.
        """
        tau = self.tau(Tc=Tc, Tk=Tk)
        # (coefficient, exponent) pairs of the SR1-86 saturated-vapor fit.
        terms = (
            (-2.03150240, 2.0 / 6.0),
            (-2.68302940, 4.0 / 6.0),
            (-5.38626492, 8.0 / 6.0),
            (-17.2991605, 18.0 / 6.0),
            (-44.7586581, 37.0 / 6.0),
            (-63.9201063, 71.0 / 6.0),
        )
        logNormalizedDensity = 0
        for coefficient, exponent in terms:
            logNormalizedDensity = logNormalizedDensity + coefficient * tau**exponent
        # past the supercritical point tau < 0 and the fractional powers go
        # complex; only the real part is physical
        return math.e**logNormalizedDensity.real * self.DENSITY_CRITICAL_GPERCUBICCENTIMETER
================================================
FILE: armi/materials/yttriumOxide.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Yttrium Oxide.
The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to
this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data
contained in this file should not be used in production simulations.
"""
from armi.materials.material import Material
from armi.utils.units import getTk
class Y2O3(Material):
    """Yttrium oxide (yttria)."""

    propertyValidTemperature = {"linear expansion percent": ((273.15, 1573.15), "K")}

    def __init__(self):
        Material.__init__(self)
        # Reference density in g/cc.
        self.refDens = 5.03

    def setDefaultMassFracs(self):
        """Set the default Y2O3 mass fractions."""
        for nucName, frac in (("Y89", 0.7875), ("O16", 0.2125)):
            self.setMassFrac(nucName, frac)

    def linearExpansionPercent(self, Tk=None, Tc=None):
        """
        Return the linear expansion percent for Yttrium Oxide (Yttria).

        Notes
        -----
        Quadratic fit from Table 5 of "Thermal Expansion and Phase Inversion of
        Rare-Earth Oxides.
        """
        kelvin = getTk(Tc, Tk)
        self.checkPropertyTempRange("linear expansion percent", kelvin)
        return 1.4922e-07 * kelvin**2 + 6.2448e-04 * kelvin - 1.8414e-01
================================================
FILE: armi/materials/zincOxide.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Zinc Oxide.
The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to
this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data
contained in this file should not be used in production simulations.
"""
from armi.materials.material import Material
from armi.utils.units import getTk
class ZnO(Material):
    """Zinc oxide."""

    propertyValidTemperature = {"linear expansion percent": ((10.12, 1491.28), "K")}

    def setDefaultMassFracs(self):
        """Set the default ZnO mass fractions."""
        for nucName, frac in (("ZN", 0.8034), ("O16", 0.1966)):
            self.setMassFrac(nucName, frac)

    def density(self, Tk=None, Tc=None):
        """Density in g/cc (treated as temperature-independent)."""
        return 5.61

    def linearExpansionPercent(self, Tk=None, Tc=None):
        """
        Return the linear expansion percent for Polycrystalline ZnO.

        Notes
        -----
        Cubic fit digitized from Figure 1.24 of
        Zinc Oxide: Fundamentals, Materials and Device Technology.
        """
        kelvin = getTk(Tc, Tk)
        self.checkPropertyTempRange("linear expansion percent", kelvin)
        return -1.9183e-10 * kelvin**3 + 6.5944e-07 * kelvin**2 + 5.2992e-05 * kelvin - 5.2631e-02
================================================
FILE: armi/materials/zr.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Zirconium metal.
The data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to
this file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data
contained in this file should not be used in production simulations.
"""
from numpy import interp
from armi.materials.material import Material
from armi.utils.units import getTk
class Zr(Material):
    """Metallic zirconium."""

    propertyValidTemperature = {
        "density": ((293, 1800), "K"),
        "linear expansion": ((293, 1800), "K"),
        "linear expansion percent": ((293, 1800), "K"),
        "thermal conductivity": ((298, 2000), "K"),
    }
    references = {
        "density": "AAA Materials Handbook 45803",
        "thermal conductivity": "AAA Fuels handbook. ANL",
        "linear expansion": "Y.S. Touloukian, R.K. Kirby, R.E. Taylor and P.D. Desai, Thermal Expansion, "
        + "Thermophysical Properties of Matter, Vol. 12, IFI/Plenum, New York-Washington (1975)",
        "linear expansion percent": "Y.S. Touloukian, R.K. Kirby, R.E. Taylor and P.D. Desai, Thermal Expansion, "
        + "Thermophysical Properties of Matter, Vol. 12, IFI/Plenum, New York-Washington (1975)",
    }

    # Linear expansion coefficient vs temperature (K); the near-duplicate
    # abscissa at 1136.99999/1137 makes the interpolation step there.
    linearExpansionTableK = [
        293, 400, 500, 600, 700, 800, 900, 1000, 1100, 1136.99999,
        1137, 1200, 1400, 1600, 1800,
    ]
    linearExpansionTable = [
        5.70e-6, 5.90e-6, 6.60e-6, 7.10e-6, 7.60e-6, 7.90e-6, 8.00e-6, 8.20e-6, 8.20e-6, 8.20e-6,
        9.00e-6, 9.10e-6, 9.50e-6, 1.03e-5, 1.13e-5,
    ]

    # Reference temperature (K) at which __init__ evaluates the reference density.
    refTempK = 298.15

    def __init__(self):
        Material.__init__(self)
        self.refDens = self._computeReferenceDensity(Tk=self.refTempK)

    def setDefaultMassFracs(self):
        """Pure elemental zirconium."""
        self.setMassFrac("ZR", 1.0)

    def _computeReferenceDensity(self, Tk=None, Tc=None):
        r"""
        Density correlation in g/cc. AAA Materials Handbook 45803.

        NOTE(review): the branch point here is 1135 K, while the expansion table
        and ``linearExpansionPercent`` break at 1137 K — confirm which transition
        temperature is intended.
        """
        Tk = getTk(Tc, Tk)
        self.checkPropertyTempRange("density", Tk)
        if Tk < 1135:
            return -3.29256e-8 * Tk**2 - 9.67145e-5 * Tk + 6.60176
        else:
            return -2.61683e-8 * Tk**2 - 1.11331e-4 * Tk + 6.63616

    def thermalConductivity(self, Tk=None, Tc=None):
        """
        Thermal conductivity in W/mK.

        Reference: AAA Fuels handbook. ANL.
        """
        Tk = getTk(Tc, Tk)
        self.checkPropertyTempRange("thermal conductivity", Tk)
        return 8.853 + (0.007082 * Tk) + (0.000002533 * Tk**2) + (2992.0 / Tk)

    def linearExpansion(self, Tk=None, Tc=None):
        r"""
        Linear expansion in m/mK, interpolated from the class table.

        Reference: Y.S. Touloukian, R.K. Kirby, R.E. Taylor and P.D. Desai, Thermal Expansion,
        Thermophysical Properties of Matter, Vol. 12, IFI/Plenum, New York-Washington (1975),
        see page 400.
        """
        Tk = getTk(Tc, Tk)
        self.checkPropertyTempRange("linear expansion", Tk)
        return interp(Tk, self.linearExpansionTableK, self.linearExpansionTable)

    def linearExpansionPercent(self, Tk=None, Tc=None):
        r"""
        Linear expansion in dL/L as two cubic fits split at 1137 K.

        Reference: Y.S. Touloukian, R.K. Kirby, R.E. Taylor and P.D. Desai, Thermal Expansion,
        Thermophysical Properties of Matter, Vol. 12, IFI/Plenum, New York-Washington (1975),
        see page 400.
        """
        Tk = getTk(Tc, Tk)
        self.checkPropertyTempRange("linear expansion percent", Tk)
        # NOTE: checkPropertyTempRange takes care of lower/upper limits
        if Tk < 1137:
            return -0.111 + (2.325e-4 * Tk) + (5.595e-7 * Tk**2) - (1.768e-10 * Tk**3)
        else:
            return -0.759 + (1.474e-3 * Tk) - (5.140e-7 * Tk**2) + (1.559e-10 * Tk**3)
================================================
FILE: armi/meta.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metadata describing an ARMI distribution."""
# Resolve the distribution-metadata reader. (The original comments here were
# inverted: importlib.metadata is the NEW stdlib module, the backport is for
# OLD Pythons.)
try:
    # importlib.metadata is in the standard library on Python >= 3.8
    from importlib import metadata
except ImportError:
    # on Python < 3.8, fall back to the third-party importlib_metadata backport
    import importlib_metadata as metadata

# Single source of truth for the ARMI version: the installed package metadata.
__version__ = metadata.version("armi")
================================================
FILE: armi/migration/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Migrate input/output from one version of ARMI to another.
Users want to be able to upgrade to the latest version of the code without having to
invest a bunch of time in updating their previous input and output files. Users have up
to thousands of inputs that they want to keep working. Even more serious, follow-on
analysts who got an output database (including associated inputs) from an ARMI
power-user strongly prefer to be able to migrate old cases. Oftentimes, an output
database can be many GB large and be the result of many CPU-weeks, so there's monetary
and temporal value to be preserved.
Meanwhile, developers want to be able to make upgrades to the input and/or output to fix
bugs, ease the training and cognitive burden of new users, and so on.
Migrations are key to getting both of these big needs.
Migrations should generally happen in the background from the user's perspective, just
like happens in mainstream applications like word processors and spreadsheets.
"""
from armi.migration import m0_1_3, m0_1_6

# Registry of migrations the framework considers active, in the order they
# should be considered when upgrading old inputs.
ACTIVE_MIGRATIONS = [
    m0_1_3.RemoveCentersFromBlueprints,
    m0_1_3.UpdateElementalNuclides,
    m0_1_6.ConvertAlphanumLocationSettingsToNum,
]
================================================
FILE: armi/migration/base.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Base migration classes.
A classic migration takes a file name, reads the file, migrates the
data, and re-writes the file. Some migrations need to happen live
on a stream. For example, if an old/invalid input file is being read
in from an old database. The migration class defined here
chooses this behavior based on whether the ``stream`` or ``path``
variables are given in the constructor.
"""
import os
import shutil
from armi import runLog
from armi.settings import caseSettings
class Migration:
"""Generic migration.
To implement a concrete Migration, one must often only
implement the ``_applyToStream`` method.
"""
fromVersion = "x.x.x"
toVersion = "x.x.x"
def __init__(self, stream=None, path=None):
    """Set up a migration over either an open stream or a file path.

    Parameters
    ----------
    stream : file-like, optional
        Open stream to migrate in-memory (e.g. input read from an old database).
    path : str, optional
        Path of a file to migrate on disk.

    Raises
    ------
    RuntimeError
        If both or neither of ``stream`` and ``path`` are provided; exactly one
        is required.
    """
    # XOR: exactly one of the two inputs must be truthy
    if not (bool(stream) ^ bool(path)):
        # fixed: original message was missing the space in "are mutually"
        raise RuntimeError("Stream and path inputs to migration are mutually exclusive. Choose one or the other.")
    self.stream = stream
    self.path = path
def __repr__(self):
return f"` together. For example:
.. list-table:: Sample MPI Action Workflow
:widths: 5 60 35
:header-rows: 1
* - Step
- Code
- Notes
* - 1
- **primary**: :py:class:`distributeState = DistributeStateAction() `
**worker**: :code:`action = context.MPI_COMM.bcast(None, root=0)`
- **primary**: Initializing a distribute state action.
**worker**: Waiting for something to do, as determined by the primary, this happens within the
worker's :py:meth:`~armi.operators.MpiOperator.workerOperate`.
* - 2
- **primary**: :code:`context.MPI_COMM.bcast(distributeState, root=0)`
**worker**: :code:`action = context.MPI_COMM.bcast(None, root=0)`
- **primary**: Broadcasts a distribute state action to all the worker nodes
**worker**: Receives the action from the primary, which is a
:py:class:`~armi.mpiActions.DistributeStateAction`.
* - 3
- **primary**: :code:`distributeState.invoke(self.o, self.r, self.cs)`
**worker**: :code:`action.invoke(self.o, self.r, self.cs)`
- Both invoke the action, and are in sync. Any broadcast or receive within the action should
also be synced up.
In order to create a new, custom MPI Action, inherit from :py:class:`~armi.mpiActions.MpiAction`,
and override the :py:meth:`~armi.mpiActions.MpiAction.invokeHook` method.
"""
import collections
import gc
import math
import pickle
import timeit
from armi import context, interfaces, runLog, settings, utils
from armi.reactor import reactors
from armi.reactor.parameters import parameterDefinitions
from armi.utils import iterables, tabulate
class MpiAction:
    """Base of all MPI actions.

    MPI Actions are tasks that can be executed without needing lots of other
    information. When a worker node sits in its main loop, and receives an MPI Action, it will
    simply call :py:meth:`~armi.mpiActions.MpiAction.invoke`.
    """

    def __init__(self):
        # operator, reactor, and case settings; attached later by ``invoke``
        self.o = None
        self.r = None
        self.cs = None
        # when True, ``broadcast``/``gather`` become pass-throughs (see runActionsInSerial)
        self.serial = False
        # items can be set to exclusive if they will take considerably longer
        # they will be queued first, and the CPUs for this action will not
        # be used for any other purpose (except when number of exclusive actions > num CPU groups)
        self.runActionExclusive = False
        # lower number is higher; halfway between 1-10.. probably dont need more
        # than 10 priorities but negative nums work too...
        self.priority = 5

    @property
    def parallel(self):
        """True when this action is not running in serial mode."""
        return not self.serial

    @classmethod
    def invokeAsMaster(cls, o, r, cs):
        """Simplified method to call from the primary process.

        This can be used in place of::

            someInstance = MpiAction()
            someInstance = COMM_WORLD.bcast(someInstance, root=0)
            someInstance.invoke(o, r, cs)

        Interestingly, the code above can be used in two ways:

        1. Both the primary and worker can call the above code at the same time, or
        2. the primary can run the above code, which will be handled by the worker's main loop.

        Option number 2 is the most common usage.

        .. warning:: This method will not work if the constructor (i.e. :code:`__init__`) requires
            additional arguments. Since the method body is so simple, it is strongly discouraged to
            add a :code:`*args` or :code:`**kwargs` arguments to this method.

        Parameters
        ----------
        o : :py:class:`armi.operators.Operator`
            If an operator is not necessary, supply :code:`None`.
        r : :py:class:`armi.operators.Reactor`
            If a reactor is not necessary, supply :code:`None`.
        cs : case settings
            Forwarded to :py:meth:`invoke` on the freshly-constructed instance.
        """
        instance = cls()
        instance.broadcast()
        return instance.invoke(o, r, cs)

    def _mpiOperationHelper(self, obj, mpiFunction):
        """Strip the operator, reactor, and cs off this action before transmitting it.

        ``self.o``/``self.r``/``self.cs`` are not picklable/transmittable, so they are
        temporarily set to None around the MPI call and restored in ``finally``.
        """
        if obj is None or obj is self:
            # prevent sending o, r, and cs, they should be handled appropriately by the other nodes
            # reattach with finally
            obj = self
            o, r, cs = self.o, self.r, self.cs
            self.o = self.r = self.cs = None
        try:
            return mpiFunction(obj, root=0)
        except pickle.PicklingError as error:
            runLog.error("Failed to {} {}.".format(mpiFunction.__name__, obj))
            runLog.error(error)
            raise
        finally:
            # restore the stripped attributes on this action, even on failure
            if obj is self:
                self.o, self.r, self.cs = o, r, cs

    def broadcast(self, obj=None):
        """
        A wrapper around ``bcast``, on the primary node can be run with an equals sign, so that it
        can be consistent within both primary and worker nodes.

        Parameters
        ----------
        obj :
            This is any object that can be broadcast, if it is None, then it will broadcast itself,
            which triggers it to run on the workers (assuming the workers are in the worker main loop).

        See Also
        --------
        armi.operators.operator.OperatorMPI.workerOperate : receives this on the workers and calls ``invoke``

        Notes
        -----
        The standard ``bcast`` method creates a new instance even for the root process. Consequently,
        when passing an object, references can be broken to the original object. Therefore, this
        method, returns the original object when called by the primary node, or the broadcasted
        object when called on the worker nodes.
        """
        if self.serial:
            return obj if obj is not None else self
        if context.MPI_SIZE > 1:
            result = self._mpiOperationHelper(obj, context.MPI_COMM.bcast)
        # the following if-branch prevents the creation of duplicate objects on the primary node
        # if the object is large with lots of links, it is prudent to call gc.collect()
        if obj is None and context.MPI_RANK == 0:
            return self
        elif context.MPI_RANK == 0:
            return obj
        else:
            return result

    def gather(self, obj=None):
        """A wrapper around ``MPI_COMM.gather``.

        Parameters
        ----------
        obj :
            This is any object that can be gathered, if it is None, then it will gather itself.

        Notes
        -----
        The returned list will contain a reference to the original gathered object, without making a copy of it.
        """
        if self.serial:
            return [obj if obj is not None else self]
        if context.MPI_SIZE > 1:
            result = self._mpiOperationHelper(obj, context.MPI_COMM.gather)
            if context.MPI_RANK == 0:
                # this cannot be result[0] = obj or self, because 0.0, 0, [] all eval to False
                if obj is None:
                    result[0] = self
                else:
                    result[0] = obj
            else:
                # workers get an empty list; only the root receives the gathered data
                result = []
        else:
            result = [obj if obj is not None else self]
        return result

    def invoke(self, o, r, cs):
        """
        This method is called by worker nodes, and passed the worker node's operator, reactor and
        settings file.

        Parameters
        ----------
        o : :py:class:`armi.operators.operator.Operator`
            the operator for this process
        r : :py:class:`armi.reactor.reactors.Reactor`
            the reactor represented in this process
        cs : :py:class:`armi.settings.caseSettings.Settings`
            the case settings

        Returns
        -------
        result : object
            result from invokeHook
        """
        self.o = o
        self.r = r
        self.cs = cs
        return self.invokeHook()

    @staticmethod
    def mpiFlatten(allCPUResults):
        """
        Flatten results to the same order they were in before making a list of mpiIter results.

        See Also
        --------
        mpiIter : used for distributing objects/tasks
        """
        return iterables.flatten(allCPUResults)

    @staticmethod
    def mpiIter(objectsForAllCoresToIter):
        """
        Generate the subset of objects one node is responsible for in MPI.

        Notes
        -----
        Each CPU will get similar number of objects. E.G. if there are 12 objects and 5
        CPUs, the first 2 CPUs will get 3 objects and the last 3 CPUS will get 2.

        Parameters
        ----------
        objectsForAllCoresToIter: list
            List of all objects that need to have an MPI calculation performed on.
            Note, that since len() is needed this method cannot accept a generator.

        See Also
        --------
        mpiFlatten : used for collecting results
        """
        ntasks = len(objectsForAllCoresToIter)
        # the first ``deficit`` ranks each take one extra object
        numLocalObjects, deficit = divmod(ntasks, context.MPI_SIZE)
        if deficit > context.MPI_RANK:
            numLocalObjects += 1
            first = context.MPI_RANK * numLocalObjects
        else:
            first = context.MPI_RANK * numLocalObjects + deficit
        for objIndex in range(first, first + numLocalObjects):
            yield objectsForAllCoresToIter[objIndex]

    def invokeHook(self):
        """This method must be overridden in sub-classes.

        This method is called by worker nodes, and has access to the worker node's operator, reactor, and settings
        (through :code:`self.o`, :code:`self.r`, and :code:`self.cs`). It must return a boolean value of :code:`True` or
        :code:`False`, otherwise the worker node will raise an exception and terminate execution.

        Returns
        -------
        result : object
            Dependent on implementation
        """
        raise NotImplementedError()
def runActions(o, r, cs, actions, numPerNode=None, serial=False):
    """Run a series of MpiActions in parallel, or in series if :code:`serial=True`.

    Parameters
    ----------
    numPerNode : int, optional
        Maximum number of ranks per node that participate in the computation;
        the extra ranks on each node sit idle. Must be >= 1.
    serial : bool
        When True (or when MPI is not available), fall back to runActionsInSerial.

    Notes
    -----
    The number of actions DOES NOT need to match :code:`context.MPI_SIZE`.

    Calling this method may invoke MPI Split which will change the MPI_SIZE during the action. This allows someone to
    call MPI operations without being blocked by tasks which are not doing the same thing.
    """
    if not context.MPI_DISTRIBUTABLE or serial:
        return runActionsInSerial(o, r, cs, actions)
    useForComputation = [True] * context.MPI_SIZE
    if numPerNode is not None:
        if numPerNode < 1:
            raise ValueError("numPerNode must be >= 1")
        numThisNode = {nodeName: 0 for nodeName in context.MPI_NODENAMES}
        for rank, nodeName in enumerate(context.MPI_NODENAMES):
            # if we have more processors than tasks, disable the extra
            useForComputation[rank] = numThisNode[nodeName] < numPerNode
            numThisNode[nodeName] += 1
    queue, numBatches = _makeQueue(actions, useForComputation)
    runLog.extra(f"Running {len(actions)} MPI actions in parallel over {numBatches} batches")
    results = []
    batchNum = 0
    while queue:
        # one slot per rank; None means that rank idles this batch
        actionsThisRound = []
        batchNum += 1
        runLog.extra(f"MPI actions, batch {batchNum} of {numBatches}:\n")
        for useRank in useForComputation:
            actionsThisRound.append(queue.pop(0) if useRank and queue else None)
        # broadcast then invoke must stay paired so workers stay in sync
        distrib = distributeActions(actionsThisRound, useForComputation)
        distrib.broadcast()
        results.append(distrib.invoke(o, r, cs))
    return results
def runBatchedActions(o, r, cs, actionsByNode, serial=False):
    """Run a series of MpiActions in parallel, or in series if :code:`serial=True`.

    Parameters
    ----------
    actionsByNode : dict
        Maps a node name (as in ``context.MPI_NODENAMES``) to the list of actions
        to run on that node.
    serial : bool
        When True (or when MPI is not available), run all actions in series.

    Notes
    -----
    This method takes a set of actions that have been batched by the user beforehand.
    This is useful for heterogeneous work packages where some tasks have significantly larger
    or smaller memory requirements. The user can place an appropriate amount of work on each node.
    """
    if not context.MPI_DISTRIBUTABLE or serial:
        actions = []
        for _node, nodeActions in actionsByNode.items():
            actions.extend(nodeActions)
        return runActionsInSerial(o, r, cs, actions)
    # count how many actions will run on each node
    nodes = set(context.MPI_NODENAMES)
    numToRunOnThisNode = {nodeName: 0 for nodeName in context.MPI_NODENAMES}
    for nodeName in nodes:
        numToRunOnThisNode[nodeName] = len(actionsByNode.get(nodeName, []))
    # determine which ranks will run the actions
    numAssigned = {nodeName: 0 for nodeName in nodes}
    useForComputation = [True] * len(context.MPI_NODENAMES)
    for rank, nodeName in enumerate(context.MPI_NODENAMES):
        # if we have more processors than tasks, disable the extra
        useForComputation[rank] = numAssigned[nodeName] < numToRunOnThisNode[nodeName]
        if useForComputation[rank]:
            numAssigned[nodeName] += 1
    # check that we do not request more tasks than processors on a node
    for nodeName in nodes:
        if numToRunOnThisNode[nodeName] > numAssigned[nodeName]:
            msg = (
                f"There are more actions ({numToRunOnThisNode[nodeName]}) than ranks available "
                f"({numAssigned[nodeName]}) on {nodeName}!"
            )
            runLog.error(msg)
            raise ValueError(msg)
    totalActions = sum(len(actions) for node, actions in actionsByNode.items())
    runLog.extra(f"Running {totalActions} MPI actions in parallel over {len(actionsByNode)} nodes.")
    results = []
    actionsThisRound = []
    # a single round suffices: the check above guarantees each node has at
    # least one rank per action, so every action fits in this one distribution
    for rank, nodeName in enumerate(context.MPI_NODENAMES):
        queue = actionsByNode.get(nodeName, [])
        actionsThisRound.append(queue.pop(0) if useForComputation[rank] and queue else None)
    distrib = distributeActions(actionsThisRound, useForComputation)
    distrib.broadcast()
    results.append(distrib.invoke(o, r, cs))
    return results
def distributeActions(actionsThisRound, useForComputation):
    """Build a DistributionAction for this round and log the rank/action assignment table."""
    useForComputation = _disableForExclusiveTasks(actionsThisRound, useForComputation)
    assignments = []
    for rank, act in enumerate(actionsThisRound):
        if act is not None:
            assignments.append((context.MPI_NODENAMES[rank], rank, act))
    tableText = tabulate.tabulate(assignments, headers=["Nodename", "Rank", "Action"])
    runLog.extra(f"Distributing {len(assignments)} MPI actions for parallel processing:\n{tableText}")
    return DistributionAction(actionsThisRound)
def _disableForExclusiveTasks(actionsThisRound, useForComputation):
    """Mark ranks running exclusive actions as unavailable for further work.

    Mutates ``useForComputation`` in place (callers rely on this) and also
    returns it for convenience.
    """
    for rank, action in enumerate(actionsThisRound):
        if action is not None and action.runActionExclusive:
            useForComputation[rank] = False
    return useForComputation
def _makeQueue(actions, useForComputation):
    """
    Sort actions by priority in a queue; if there are more exclusive actions than CPUs, make all non-exclusive.

    Notes
    -----
    Exclusive actions always come before non-exclusive ones; within each of those
    two bins, a lower ``priority`` number runs earlier. If more exclusive actions
    are requested than CPUs - 1, every action is demoted to non-exclusive (the
    already-computed order is kept). One CPU is always reserved for non-exclusive
    actions, hence CPUs - 1.
    """

    def _priorityKey(action):
        # exclusive actions sort ahead; ties broken by the action's own priority
        return (1 if action.runActionExclusive else 2, action.priority)

    queue = sorted(actions, key=_priorityKey)
    reservedCPUs = 1  # keep at least one CPU free for non-exclusive work
    numExclusive = sum(1 for action in queue if action.runActionExclusive)
    numAvailable = sum(1 for rank in useForComputation if rank)
    if numExclusive + reservedCPUs > numAvailable:
        # more exclusive tasks than CPU groups: demote everything to
        # non-exclusive and balance the work evenly across all CPUs
        for action in queue:
            action.runActionExclusive = False
        numBatches = int(math.ceil(len(actions) / float(numAvailable)))
    else:
        remainingCPUs = numAvailable - numExclusive
        remainingActions = len(actions) - numExclusive
        numBatches = int(math.ceil(remainingActions / remainingCPUs))
    return queue, numBatches
def runActionsInSerial(o, r, cs, actions):
    """Run a series of MpiActions in serial.

    Notes
    -----
    Each action's `serial` attribute is set to :code:`True` while it runs, so its
    `broadcast` and `gather` methods simply return the value supplied to them;
    `context.MPI_DISTRIBUTABLE` is likewise disabled for the duration of each action.
    """
    runLog.extra("Running {} MPI actions in serial".format(len(actions)))
    total = len(actions)
    results = []
    for index, action in enumerate(actions, start=1):
        wasDistributable = context.MPI_DISTRIBUTABLE
        action.serial = True
        context.MPI_DISTRIBUTABLE = False
        runLog.extra("Running action {} of {}: {}".format(index, total, action))
        results.append(action.invoke(o, r, cs))
        # return to original state
        action.serial = False
        context.MPI_DISTRIBUTABLE = wasDistributable
    return results
class DistributionAction(MpiAction):
    """
    This MpiAction scatters the workload of multiple actions to available resources.

    Notes
    -----
    This currently only works from the root (of COMM_WORLD). Eventually, it would be nice to make
    it possible for sub-tasks to manage their own communicators and spawn their own work within some
    sub-communicator.

    This performs an MPI Split operation and takes over the context.MPI_COMM and associated variables.
    For this reason, it is possible that when someone thinks they have distributed information to all
    nodes, it may only be a subset that was necessary to perform the number of actions needed by this
    DistributionAction.
    """

    def __init__(self, actions):
        MpiAction.__init__(self)
        # list of actions to scatter; one entry per rank, None for idle ranks
        self._actions = actions

    def __reduce__(self):
        """Prevent pickling the contained actions when this object itself is transmitted;
        after all we only want to scatter them.

        Consequently, the worker nodes' ``_actions`` will be None.
        """
        return DistributionAction, (None,)

    def invokeHook(self):
        """
        Overrides invokeHook to distribute work amongst available resources as requested.

        Notes
        -----
        Two things about this method make it non-recursive
        """
        # save the global MPI context so it can be restored in ``finally``
        canDistribute = context.MPI_DISTRIBUTABLE
        mpiComm = context.MPI_COMM
        mpiRank = context.MPI_RANK
        mpiSize = context.MPI_SIZE
        mpiNodeNames = context.MPI_NODENAMES
        if self.cs["verbosity"] == "debug" and mpiRank == 0:
            runLog.debug("Printing diagnostics for MPI actions!")
            objectCountDict = collections.defaultdict(int)
            for debugAction in self._actions:
                utils.classesInHierarchy(debugAction, objectCountDict)
                for objekt, count in objectCountDict.items():
                    runLog.debug("There are {} {} in MPI action {}".format(count, objekt, debugAction))
        actionResult = None
        try:
            action = mpiComm.scatter(self._actions, root=0)
            # create a new communicator that only has these specific processes running
            hasAction = action is not None
            context.MPI_COMM = mpiComm.Split(int(hasAction))
            context.MPI_RANK = context.MPI_COMM.Get_rank()
            context.MPI_SIZE = context.MPI_COMM.Get_size()
            context.MPI_DISTRIBUTABLE = context.MPI_SIZE > 1
            context.MPI_NODENAMES = context.MPI_COMM.allgather(context.MPI_NODENAME)
            if hasAction:
                actionResult = action.invoke(self.o, self.r, self.cs)
        finally:
            # restore the global variables
            context.MPI_DISTRIBUTABLE = canDistribute
            context.MPI_COMM = mpiComm
            context.MPI_RANK = mpiRank
            context.MPI_SIZE = mpiSize
            context.MPI_NODENAMES = mpiNodeNames
        return actionResult
class MpiActionError(Exception):
    """Raised when an error condition occurs during execution of an MpiAction."""
class DistributeStateAction(MpiAction):
    """Sync all MPI nodes with the primary's reactor, settings, and interfaces."""

    def __init__(self, skipInterfaces=False):
        MpiAction.__init__(self)
        # when True, only reattach the operator instead of distributing interfaces
        self._skipInterfaces = skipInterfaces

    def invokeHook(self):
        """Sync up all nodes with the reactor, the cs, and the interfaces.

        Notes
        -----
        This is run by all workers and the primary any time the code needs to sync all processors.
        """
        if context.MPI_SIZE <= 1:
            runLog.extra("Not distributing state because there is only one processor")
            return
        # Detach phase:
        # The Reactor and the interfaces have links to the Operator, which contains Un-MPI-able objects
        # like the MPI Comm and the SQL database connections.
        runLog.info("Distributing State")
        start = timeit.default_timer()
        try:
            cs = self._distributeSettings()
            self._distributeReactor(cs)
            DistributeStateAction._distributeParamAssignments()
            if self._skipInterfaces:
                self.o.reattach(self.r, cs)
            else:
                self._distributeInterfaces()
            # Lastly, make sure the reactor knows it is up to date. The operator/interface
            # attachment may invalidate some of the cache, but since all the underlying data is the
            # same, ultimately all state should be (initially) the same.
            self.r._markSynchronized()
        except (pickle.PicklingError, TypeError) as error:
            runLog.error("Failed to transmit on distribute state root MPI bcast")
            runLog.error(error)
            # workers are still waiting for a reactor object
            if context.MPI_RANK == 0:
                context.MPI_COMM.bcast("quit")  # try to get the workers to quit
            raise
        if context.MPI_RANK != 0:
            self.r.core.regenAssemblyLists()
        # check to make sure that everything has been properly reattached
        if self.r.core.getFirstBlock().core.r is not self.r:
            raise RuntimeError("Block.core.r is not self.r. Reattach the blocks!")
        beforeCollection = timeit.default_timer()
        # force collection; we've just created a bunch of objects that don't need to be used again.
        runLog.debug("Forcing garbage collection.")
        gc.collect()
        stop = timeit.default_timer()
        runLog.extra(
            "Distributed state in {}s, garbage collection took {}s".format(
                beforeCollection - start, stop - beforeCollection
            )
        )

    def _distributeSettings(self):
        """Broadcast the case settings from the primary to all workers.

        Returns
        -------
        cs : the broadcast settings object, also assigned to ``self.cs``.
        """
        if context.MPI_RANK == 0:
            runLog.debug("Sending the settings object")
        self.cs = cs = self.broadcast(self.o.cs)
        if isinstance(cs, settings.Settings):
            # workers log at the (typically quieter) branch verbosity
            runLog.setVerbosity(cs["verbosity"] if context.MPI_RANK == 0 else cs["branchVerbosity"])
            runLog.debug("Received settings object")
        else:
            raise RuntimeError("Failed to transmit settings, received: {}".format(cs))
        if context.MPI_RANK != 0:
            self.o.cs = cs
        return cs

    def _distributeReactor(self, cs):
        """Broadcast the reactor object and wire it to each worker's operator."""
        runLog.debug("Sending the Reactor object")
        r = self.broadcast(self.r)
        if isinstance(r, reactors.Reactor):
            runLog.debug("Received reactor")
        else:
            raise RuntimeError("Failed to transmit reactor, received: {}".format(r))
        if context.MPI_RANK == 0:
            # on the primary node this unfortunately created a __deepcopy__ of the reactor, delete it
            # (the primary keeps its original reactor object)
            del r
        else:
            # workers: adopt the received reactor copy and link it with the operator
            self.r = r
            self.o.r = r
            self.r.o = self.o
        runLog.debug(f"The reactor has {len(self.r.core)} assemblies")
        # attach here so any interface actions use a properly-setup reactor.
        self.o.reattach(self.r, cs)  # sets r and cs

    @staticmethod
    def _distributeParamAssignments():
        """Broadcast each parameter definition's ``assigned`` flag so workers match the primary."""
        data = dict()
        if context.MPI_RANK == 0:
            data = {
                (pName, pdType.__name__): pDef.assigned
                for (
                    pName,
                    pdType,
                ), pDef in parameterDefinitions.ALL_DEFINITIONS.items()
            }
        data = context.MPI_COMM.bcast(data, root=0)
        if context.MPI_RANK != 0:
            for (pName, pdType), pDef in parameterDefinitions.ALL_DEFINITIONS.items():
                pDef.assigned = data[pName, pdType.__name__]

    def _distributeInterfaces(self):
        """
        Distribute the interfaces to all MPI nodes.

        Interface copy description

        Since interfaces store information that can influence a calculation, it is important
        in branch searches to make sure that no information is carried forward from these
        runs on either the primary node or the workers. However, there are interfaces that
        cannot be distributed, making this a challenge. To solve this problem, any interface
        that cannot be distributed is simply re-initialized. If any information needs to be
        given to the worker nodes on a non-distributable interface, additional function definitions
        (and likely soul searching as to why needed distributable information is on a
        non-distributable interface) are required to pass the information around.

        See Also
        --------
        armi.interfaces.Interface.preDistributeState : runs on primary before DS
        armi.interfaces.Interface.postDistributeState : runs on primary after DS
        armi.interfaces.Interface.interactDistributeState : runs on workers after DS
        """
        if context.MPI_RANK == 0:
            # These run on the primary node. (Worker nodes run synchronized code below)
            toRestore = {}
            for i in self.o.getInterfaces():
                if i.distributable() == interfaces.Interface.Distribute.DUPLICATE:
                    runLog.debug("detaching interface {0}".format(i.name))
                    i.detachReactor()
                    toRestore[i] = i.preDistributeState()
            # Verify that the interface stacks are identical.
            runLog.debug("Sending the interface names and flags")
            _dumIList = self.broadcast([(i.name, i.distributable()) for i in self.o.getInterfaces()])
            # transmit interfaces
            for i in self.o.getInterfaces():
                # avoid sending things that don't pickle, like the database.
                if i.distributable() == interfaces.Interface.Distribute.DUPLICATE:
                    runLog.debug("Sending the interface {0}".format(i))
                    _idum = self.broadcast(i)  # don't send the reactor or operator
                    i.postDistributeState(toRestore[i])
                    i.attachReactor(self.o, self.r)
        else:
            # These run on the worker nodes.
            # verify identical interface stack
            # This list is (interfaceName, distributable) tuples)
            interfaceList = self.broadcast(None)
            for iName, distributable in interfaceList:
                iOld = self.o.getInterface(iName)
                if distributable == interfaces.Interface.Distribute.DUPLICATE:
                    # expect a transmission of the interface as a whole.
                    runLog.debug("Receiving new {0}".format(iName))
                    iNew = self.broadcast(None)
                    runLog.debug("Received {0}".format(iNew))
                    if iNew == "quit":
                        # the primary hit a pickling error mid-distribution; stop quietly
                        return
                    self.o.removeInterface(iOld)
                    self.o.addInterface(iNew)
                    iNew.interactDistributeState()
                elif distributable == interfaces.Interface.Distribute.NEW:
                    runLog.debug("Initializing new interface {0}".format(iName))
                    # make a fresh instance of the non-transmittable interface.
                    self.o.removeInterface(iOld)
                    # NOTE(review): if iOld is None (interface missing on the worker),
                    # this constructor call raises TypeError before the ``if not iNew``
                    # guard below can fire — confirm the guard is reachable as intended.
                    iNew = iOld.__class__(self.r, self.cs)
                    if not iNew:
                        for i in self.o.getInterfaces():
                            runLog.warning(i)
                        raise RuntimeError(
                            "Non-distributable interface {0} exists on the primary MPI process "
                            "but not on the workers. "
                            "Cannot distribute state.".format(iName)
                        )
                    self.o.addInterface(iNew)
                    iNew.interactInit()
                    iNew.interactBOL()
                else:
                    runLog.debug("Skipping broadcast of interface {0}".format(iName))
                    if iOld:
                        iOld.interactDistributeState()
================================================
FILE: armi/nucDirectory/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
The nucDirectory module contains tools to access nuclide information in the :py:mod:`~armi.nucDirectory.nuclideBases`
module, and information for :py:mod:`~armi.nucDirectory.nuclide` module.
#. :ref:`Element data ` - name, symbol, atomic number (Z).
#. :ref:`Generic nuclide data ` - this includes mass, atomic number, natural abundance and various
names and labels that are used in ARMI for the nuclide. It also includes decay and transmutation modes.
.. _doc-elements:
Elements
========
:py:class:`Elements ` are simple objects containing minimal information about
atomic elements. This information is loaded from a data file within ARMI; elements.dat.
:py:class:`Elements ` are mainly used as a building block of the nuclide objects,
as discussed below. If you need to grab an element, there are three available dictionaries provided for rapid access.::
>>> r = Reactor("ExampleReactor", bp)
>>> elements = r.nuclideBases.elements
>>> uranium = elements.byZ[92]
>>> uranium.name
'uranium'
>>> uranium.z
92
Likewise, elements can be retrieved by their name or symbol.::
>>> ironFromZ = elements.byZ[26]
>>> ironFromName = elements.byName['iron']
>>> ironFromSymbol = elements.bySymbol['FE']
>>> ironFromZ == ironFromName == ironFromSymbol
True
.. note::
The :py:attr:`~armi.nucDirectory.elements.Elements.byName` and
:py:attr:`~armi.nucDirectory.elements.Elements.bySymbol` are case specific; names are *lower case* and symbols are
*UPPER CASE*.
The elements are truly the *same* :py:class:`~armi.nucDirectory.elements.Element` object. The
:py:mod:`~armi.nucDirectory` makes efficient use of the memory being used by elements and will only ever contain ~118
:py:class:`Elements `.::
>>> id(ironFromZ) == id(ironFromName) == id(ironFromSymbol)
True
.. _doc-nuclide-bases:
Nuclide Bases
=============
The :py:mod:`~armi.nucDirectory` allows ARMI to get information about various nuclides, like U235 or FE56. Often times
you need to look up cross section or densities for nuclides, or you might need the atomic weight or the natural isotopic
distribution. The :py:mod:`~armi.nucDirectory` is here to help.
The fundamental object of nuclide management in ARMI is the :py:class:`~armi.nucDirectory.nuclideBases.INuclide` object.
After construction, they contain basic information, such as Z, A, and atomic weight (if known). Similar to
:py:class:`Elements `, the information is loaded from a series of data files within
ARMI. The data is originally from [NIST]_::
>>> r = Reactor("ExampleReactor", bp)
>>> u235= r.nuclideBases.byName['U235']
>>> u235.z
92
>>> u235.weight
235.0439299
>>> u235.a
235
.. [NIST] http://physics.nist.gov/cgi-bin/Compositions/stand_alone.pl
Upon creating a Reactor, a fully fledged ``NuclideBases`` object will be created. Inside that will be a fully
instantiated ``Elements`` object and a list called :py:data:`nuclideBases.instances `. The ``instances`` will
be filled with nuclide base objects. Nuclide bases contain a lot of basic information about a nuclide, such as the
atomic mass, atomic number (Z), the mass number (A), and the natural abundance.
Nuclide names, labels, and IDs
------------------------------
Nuclides have names, labels and IDs.
:py:attr:`INuclide.name `
The nuclide name is what *should* be used within ARMI and ARMI-based appliations. This is a human readable name such
as, ``U235`` or ``FE``. The names contain **only** capital letters and numbers, made up from the corresponding
element symbol and mass number (A).
:py:attr:`INuclide.label `
The nuclide label is a unique 4 character name which identifies the nuclide from all others. The label is fixed to
4 characters to conform with the CCCC standard files, which traditionally only allow for a maximum of 6 character
labels in legacy nuclear codes. Of the 6 allowable characters, 4 are reserved for the unique identifier of the
nuclide and 2 characters are reserved for cross section labels (i.e., AA, AB, ZA, etc.). The cross section labels
are based on the cross section group manager implementation within the framework. These labels are not necessarily
human readable/interpretable, but are generally the nuclide symbol followed by the last two digits of the mass
number (A), so the nuclide for U235 has the label ``U235``, but PU239 has the label ``PU39``.
For reference, the data used to build the nuclide bases in ARMI comes from a file called ``nuclides.dat``.
Indices - rapid access
----------------------
There are three main ways to retrieve a nuclide, which are provided depending on what information you have about a
nuclide. For example, if you know a nuclide name, use ``NuclideBases.byName`` dictionary. There are also dictionaries
available for retrieving by the label, ``NuclideBases.byLabel``, and by other software-specific IDs (i.e., MCNP,
MC2-2, and MC2-3). The software-specific labels are incorporated into the framework to support plugin developments and
may be extended as needed by end-users as needs arise.
>>> r = Reactor("testReactor", bp)
>>> pu239 = r.nuclideBases.byName["PU239"]
>>> pu239.z
94
Just like with elements, the item retrieved from the various dictionaries are the same object.
>>> tinFromName = r.nuclideBases.byName["SN112"]
>>> tinFromLabel = r.nuclideBases.byLabel["SN112"]
>>> tinFromMcc2Id = r.nuclideBases.byName["SN1125"]
>>> tinFromMcc3Id = r.nuclideBases.byLabel["SN1127"]
>>> tinFromName == tinFromLabel == tinFromMcc2Id == tinFromMcc3Id
True
>>> id(tinFromName) == id(tinFromLabel) == id(tinFromMcc2Id) == id(tinFromMcc3Id)
True
"""
================================================
FILE: armi/nucDirectory/elements.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module provides fundamental element information to be used throughout the framework and applications.
.. impl:: A tool for querying basic data for elements of the periodic table.
:id: I_ARMI_ND_ELEMENTS0
:implements: R_ARMI_ND_ELEMENTS
The :py:mod:`elements ` module defines the
:py:class:`Element ` class which acts as a data structure for organizing
information about an individual element, including number of protons, name, chemical symbol, phase (at STP),
periodic table group, standard weight, and a list of isotope
:py:class:`nuclideBase ` instances. The module includes a factory that
generates the :py:class:`Element ` instances by reading from the
``elements.dat`` file stored in the ARMI resources folder. When an
:py:class:`Element ` instance is initialized, it is added to a set of global
dictionaries that are keyed by number of protons, element name, and element symbol. The module includes several
helper functions for querying these global dictionaries.
The element class structure is outlined :ref:`here `.
.. _elements-class-diagram:
.. pyreverse:: armi.nucDirectory.elements
:align: center
:width: 75%
Examples
--------
>>> elements.byZ[92]
>>> elements.bySymbol["U"]
>>> elements.byName["Uranium"]
Retrieve gaseous elements at Standard Temperature and Pressure (STP):
>>> elements.getElementsByChemicalPhase(elements.ChemicalPhase.GAS)
[,
,
,
,
,
,
,
,
,
,
,
]
Retrieve elements that are classified as actinides:
>>> elements.getElementsByChemicalGroup(elements.ChemicalGroup.ACTINIDE)
[,
,
,
,
,
,
,
,
,
,
,
,
,
,
]
.. only:: html
For specific data on nuclides within each element, refer to the
:ref:`nuclide bases summary table `.
.. exec::
from armi.nucDirectory.elements import Elements
from armi.utils.tabulate import tabulate
from dochelpers import createTable
attributes = ['z',
'name',
'symbol',
'phase',
'group',
'is naturally occurring?',
'is heavy metal?',
'num. nuclides',]
def getAttributes(element):
return [
f'``{element.z}``',
f'``{element.name}``',
f'``{element.symbol}``',
f'``{element.phase}``',
f'``{element.group}``',
f'``{element.isNaturallyOccurring()}``',
f'``{element.isHeavyMetal()}``',
f'``{len(element.nuclides)}``',
]
elements = Elements()
elements.factory()
sortedElements = sorted(elements.byZ.values())
return createTable(tabulate(data=[getAttributes(elem) for elem in sortedElements],
headers=attributes,
tableFmt='rst'),
caption='List of elements',
label='nuclide-bases-table')
Notes
-----
Currently, this module contains a lot of data in the global scope. But ARMI is in the process of encapsulating this
data, moving it out of the global scope, making it part of the reactor data model, and making it configurable via
Settings. Pardon the mess during this transition.
"""
import os
from enum import Enum
from typing import List
from armi import context
from armi.utils.units import HEAVY_METAL_CUTOFF_Z
elements = None
byZ = None
byName = None
bySymbol = None
class ChemicalPhase(Enum):
GAS = 1
LIQUID = 2
SOLID = 3
UNKNOWN = 4
class ChemicalGroup(Enum):
ALKALI_METAL = 1
ALKALINE_EARTH_METAL = 2
NONMETAL = 3
TRANSITION_METAL = 4
POST_TRANSITION_METAL = 5
METALLOID = 6
HALOGEN = 7
NOBLE_GAS = 8
LANTHANIDE = 9
ACTINIDE = 10
UNKNOWN = 11
class Element:
"""Represents an element defined on the Periodic Table."""
def __init__(self, z, symbol, name, phase="UNKNOWN", group="UNKNOWN"):
"""
Creates an instance of an Element.
.. impl:: An element of the periodic table.
:id: I_ARMI_ND_ELEMENTS1
:implements: R_ARMI_ND_ELEMENTS
The :py:class:`Element ` class acts as a data structure for organizing
information about an individual element, including number of protons, name, chemical symbol, phase (at STP),
periodic table group, standard weight, and a list of isotope
:py:class:`nuclideBase ` instances.
The :py:class:`Element ` class has a few methods for appending
additional isotopes, checking whether an isotope is naturally occurring, retrieving the natural isotopic
abundance, or whether the element is a heavy metal.
Parameters
----------
z : int
atomic number, number of protons
symbol : str
element symbol
name: str
element name
phase : str
Chemical phase of the element at standard temperature and pressure (e.g., gas, liquid, solid).
group : str
Chemical group of the element.
"""
self.z = z
self.symbol = symbol
self.name = name
self.phase = ChemicalPhase[phase]
self.group = ChemicalGroup[group]
self.standardWeight = None
self.nuclides = []
def __repr__(self):
return f"3s} (Z={self.z}), {self.name}, {self.group}, {self.phase}>"
def __hash__(self):
return hash((self.name, self.z, self.symbol, self.phase, self.group, len(self.nuclides)))
def __lt__(self, other):
return self.z < other.z
def __eq__(self, other):
return hash(self) == hash(other)
def __iter__(self):
for nuc in sorted(self.nuclides):
yield nuc
def append(self, nuclide):
"""Assigns and sorts the nuclide to the element and ensures no duplicates."""
if nuclide in self.nuclides:
return
self.nuclides.append(nuclide)
self.nuclides = sorted(self.nuclides)
def isNaturallyOccurring(self):
"""Return True if the element is occurs in nature."""
return any([nuc.abundance > 0.0 for nuc in self.nuclides])
def getNaturalIsotopics(self):
"""
Return a list of nuclides that are naturally occurring for this element.
Notes
-----
This method will filter out any NaturalNuclideBases from the `nuclides` attribute.
"""
return [nuc for nuc in self.nuclides if nuc.abundance > 0.0 and nuc.a > 0]
def isHeavyMetal(self):
"""
Return True if all nuclides belonging to the element are heavy metals.
Notes
-----
Heavy metal in this instance is not related to an exact weight or density cut-off, but rather is designated for
nuclear fuel burn-up evaluations, where the initial heavy metal mass within a component should be tracked. It is
typical to include any element/nuclide above Actinium.
"""
return self.z > HEAVY_METAL_CUTOFF_Z
def getElementsByChemicalPhase(phase: ChemicalPhase) -> List[Element]:
"""Pass through to Elements.getElementsByChemicalPhase() for the global Elements object."""
global elements
return elements.getElementsByChemicalPhase(phase)
def getElementsByChemicalGroup(group: ChemicalGroup) -> List[Element]:
"""Pass through to Elements.getElementsByChemicalGroup() for the global Elements object."""
global elements
return elements.getElementsByChemicalGroup(group)
def getName(z: int = None, symbol: str = None) -> str:
"""Pass through to Elements.getName() for the global Elements object."""
global elements
return elements.getName(z, symbol)
def getSymbol(z: int = None, name: str = None) -> str:
"""Pass through to Elements.getSymbol() for the global Elements object."""
global elements
return elements.getSymbol(z, name)
def getElementZ(symbol: str = None, name: str = None) -> int:
"""Pass through to Elements.getElementZ() for the global Elements object."""
global elements
return elements.getElementZ(symbol, name)
def factory(elementsFile: str = None):
"""Pass through to Elements.factory() for the global Elements object."""
global elements
global byZ
global byName
global bySymbol
elements = Elements()
elements.factory(elementsFile)
byZ = elements.byZ
byName = elements.byName
bySymbol = elements.bySymbol
def addGlobalElement(element: Element):
"""Pass through to Elements.addElement() for the global Elements object."""
global elements
elements.addElement(element)
def destroyGlobalElements():
"""Pass through to Elements.clear() for the global Elements object."""
global elements
elements.clear()
class Elements:
"""
A container for all the atomics elements information in the simulation.
By design, you would only expect to have one instance of this object in memory during a simulation.
Attributes
----------
byZ: dict[int, Element]
A dictionary to find Element objects by atomic number (integer Z).
byName: dict[str, Element]
A dictionary to find Element objects by unique string identifier ("C", "PU239", "U235", etc).
bySymbol: dict[str, Element]
A dictionary to find Element objects by atomic symbol ("C", "N", "PU", etc).
elementsFile: str
File path to the custom ARMI "elements.dat" file.
"""
DEFAULT_ELEMENTS_FILE = os.path.join(context.RES, "elements.dat")
def __init__(self, elementsFile: str = None):
self.byZ: dict[int, Element] = {}
self.byName: dict[str, Element] = {}
self.bySymbol: dict[str, Element] = {}
self.elementsFile: str = elementsFile if elementsFile else self.DEFAULT_ELEMENTS_FILE
def clear(self):
"""Empty all the data in this collection."""
self.byZ.clear()
self.byName.clear()
self.bySymbol.clear()
def addElement(self, element: Element):
"""Add an element to this collection.
Raises
------
ValueError
If the element already exists in the collection.
"""
if element.z in self.byZ or element.name in self.byName or element.symbol in self.bySymbol:
raise ValueError(f"{element} has already been added and cannot be duplicated.")
self.byZ[element.z] = element
self.byName[element.name] = element
self.bySymbol[element.symbol] = element
def factory(self, elementsFile: str = None):
"""Generate the :class:`Elements ` instances."""
self.clear()
# If an input file is provided, use it, otherwise there is a class default.
if elementsFile:
self.elementsFile = elementsFile
with open(self.elementsFile, "r") as f:
for line in f:
# Skip header lines
if line.startswith("#") or line.startswith("Z"):
continue
# read z, symbol, name, phase, and chemical group
lineData = line.split()
z = int(lineData[0])
sym = lineData[1].upper()
name = lineData[2]
phase = lineData[3]
group = lineData[4]
standardWeight = lineData[5]
e = Element(z, sym, name, phase, group)
if standardWeight != "Derived":
e.standardWeight = float(standardWeight)
self.addElement(e)
def getElementsByChemicalPhase(self, phase: ChemicalPhase) -> List[Element]:
"""
Returns all elements that are of the given chemical phase.
Parameters
----------
phase: ChemicalPhase
This should be one of the valid options from the `ChemicalPhase` class.
Returns
-------
elems : List[Element]
A list of elements that are associated with the given chemical phase.
"""
elems = []
if not isinstance(phase, ChemicalPhase):
raise TypeError(f"{phase} is not an instance of {ChemicalPhase}")
for element in self.byName.values():
if element.phase == phase:
elems.append(element)
return elems
def getElementsByChemicalGroup(self, group: ChemicalGroup) -> List[Element]:
"""
Returns all elements that are of the given chemical group.
Parameters
----------
group: ChemicalGroup
This should be one of the valid options from the `ChemicalGroup` class.
Returns
-------
elems : List[Element]
A list of elements that are associated with the given chemical group.
"""
elems = []
if not isinstance(group, ChemicalGroup):
raise ValueError(f"{group} is not an instance of {ChemicalGroup}")
for element in self.byName.values():
if element.group == group:
elems.append(element)
return elems
def getName(self, z: int = None, symbol: str = None) -> str:
r"""
Returns element name.
Parameters
----------
z : int
Atomic number
symbol : str
Element abbreviation e.g. 'Zr'
Examples
--------
>>> elements.getName(10)
'Neon'
>>> elements.getName(symbol="Ne")
'Neon'
"""
element = None
if z:
element = self.byZ[z]
else:
element = self.byName[symbol.upper()]
return element.name
def getSymbol(self, z: int = None, name: str = None) -> str:
r"""
Returns element abbreviation given atomic number Z.
Parameters
----------
z : int
Atomic number
name : str
Element name E.g. Zirconium
Examples
--------
>>> elements.getSymbol(10)
'Ne'
>>> elements.getSymbol(name="Neon")
'Ne'
"""
element = None
if z:
element = self.byZ[z]
else:
element = self.byName[name.lower()]
return element.symbol
def getElementZ(self, symbol: str = None, name: str = None) -> int:
"""
Get element atomic number given a symbol or name.
Parameters
----------
symbol : str
Element symbol e.g. 'Zr'
name : str
Element name e.g. 'Zirconium'
Examples
--------
>>> elements.getZ("Zr")
40
>>> elements.getZ(name="Zirconium")
40
Notes
-----
Element Z is stored in elementZBySymbol, indexed by upper-case element symbol.
"""
if not symbol and not name:
return None
element = None
if symbol:
element = self.bySymbol[symbol.upper()]
else:
element = self.byName[name.lower()]
return element.z
factory()
================================================
FILE: armi/nucDirectory/nucDir.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Some original nuclide directory code.
Notes
-----
This may be deprecated. Consider using the appropriate instance methods available through the
:py:class:`armi.nucDirectory.nuclideBases.INuclide` objects and/or the
:py:mod:`armi.nucDirectory.nuclideBases` module.
"""
import re
from armi.nucDirectory import elements, nuclideBases
nuclidePattern = re.compile(r"([A-Za-z]+)-?(\d{0,3})(\d*)(\S*)")
zaPat = re.compile(r"([A-Za-z]+)-?([0-9]+)")
# Partially from table 2.2 in Was
# See also: Table 2.4 in Primary Radiation Damage in Materials
# https://www.oecd-nea.org/science/docs/2015/nsc-doc2015-9.pdf
eDisplacement = {
"H": 10.0,
"C": 31.0,
"N": 30.0,
"NA": 25.0,
"SI": 25.0,
"V": 40.0,
"CR": 40.0,
"MN": 40.0,
"NI": 40.0,
"MO": 60.0,
"FE": 40.0,
"W": 90.0,
"TI": 30.0,
"NB": 60.0,
"ZR": 40.0,
"CU": 30.0,
"CO": 40.0,
"AL": 25.0,
"PB": 25.0,
"TA": 90.0,
}
def getNuclideFromName(name):
actualName = name
if "-" in name:
actualName = name.replace("-", "")
if "_" in name:
actualName = name.replace("_", "")
return nuclideBases.byName[actualName]
def getNaturalIsotopics(elementSymbol=None, z=None):
"""
Determines the atom fractions of all natural isotopes.
Parameters
----------
elementSymbol : str, optional
The element symbol, e.g. Zr, U
z : int, optional
The atomic number of the element
Returns
-------
abundances : list
A list of (A,fraction) tuples where A is the mass number of the isotopes
"""
element = None
if z:
element = elements.byZ[z]
else:
element = elements.bySymbol[elementSymbol]
return [(nn.a, nn.abundance) for nn in element.getNaturalIsotopics()]
def getNaturalMassIsotopics(elementSymbol=None, z=None):
"""Return mass fractions of all natural isotopes.
To convert number fractions to mass fractions, we multiply by A.
"""
numIso = getNaturalIsotopics(elementSymbol, z)
terms = []
for a, frac in numIso:
terms.append(a * frac)
s = sum(terms)
massIso = []
for i, (a, frac) in enumerate(numIso):
massIso.append((a, terms[i] / s))
return massIso
def getMc2Label(name):
"""
Return a MC2 prefix label without a xstype suffix.
MC**2 has labels and library names. The labels are like
U235IA, ZIRCFB, etc. and the library names are references
to specific data sets on the MC**2 libraries (e.g. U-2355, etc.)
This method returns the labels without the xstype suffixes (IA, FB).
Rather than maintaining a lookup table, this simply converts
the ARMI nuclide names to MC**2 names.
Parameters
----------
name : str
ARMI nuclide name of the nuclide
Returns
-------
mc2LibLabel : str
The MC**2 prefix for this nuclide.
Examples
--------
>>> nucDir.getMc2Label("U235")
'U235'
>> nucDir.getMc2Label('FE')
'FE'
>>> nucDir.getMc2Label("IRON")
'FE'
>>> nucDir.getMc2Label("AM242")
A242
"""
# First translate to the proper nuclide. CARB->C
nuc = getNuclide(name)
return nuc.label
def getElementName(z=None, symbol=None):
"""
Returns element name.
Parameters
----------
z : int
Atomic number
symbol : str
Element abbreviation e.g. 'Zr'
Examples
--------
>>> nucDir.getElementName(10)
'Neon'
>>> nucDir.getElementName(symbol="Zr")
'Neon'
"""
element = None
if z:
element = elements.byZ[z]
else:
element = elements.byName[symbol.upper()]
return element.name
def getElementSymbol(z=None, name=None):
"""
Returns element abbreviation given atomic number Z.
Parameters
----------
z : int
Atomic number
name : str
Element name E.g. Zirconium
Examples
--------
>>> nucDir.getElementSymbol(10)
'Ne'
>>> nucDir.getElementSymbol(name="Neon")
'Ne'
"""
element = None
if z:
element = elements.byZ[z]
else:
element = elements.byName[name.lower()]
return element.symbol
def getNuclide(nucName):
"""
Looks up the ARMI nuclide object that has this name.
Parameters
----------
nucName : str
A nuclide name like U-235 or AM241, AM242M, AM242M
Returns
-------
nuc : Nuclide
An armi nuclide object.
"""
nuc = nuclideBases.byName.get(nucName, None)
if nucName and not nuc:
nuc = getNuclideFromName(nucName)
if not nuc:
raise KeyError(f"Nuclide name {nucName} is invalid.")
return nuc
def getNuclides(nucName=None, elementSymbol=None):
"""
Returns a list of nuclide names in a particular nuclide or element.
If no arguments, returns all nuclideBases in the directory
Used to convert things to DB name, to adjustNuclides, etc.
Parameters
----------
nucName : str
ARMI nuclide label
elementSymbol : str
Element symbol e.g. 'Zr'
"""
if nucName:
# just spit back the nuclide if it's in here. Useful when iterating over the result.
nucList = [getNuclide(nucName)]
elif elementSymbol:
nucList = elements.bySymbol[elementSymbol].nuclides
else:
# all nuclideBases, including shortcut nuclideBases ('CARB')
nucList = [nuc for nuc in nuclideBases.instances if nuc.getMcc2Id() is not None]
return nucList
def getNuclideNames(nucName=None, elementSymbol=None):
"""
Returns a list of nuclide names in a particular nuclide or element.
If no arguments, returns all nuclideBases in the directory.
.. warning:: You will get both isotopes and NaturalNuclideBases for each element.
Parameters
----------
nucName : str
ARMI nuclide label
elementSymbol : str
Element symbol e.g. 'Zr'
"""
nucList = getNuclides(nucName, elementSymbol)
return [nn.name for nn in nucList]
def getAtomicWeight(lab=None, z=None, a=None):
"""
Returns atomic weight in g/mole.
Parameters
----------
lab : str, optional
nuclide label, like U235
z : int, optional
atomic number
a : int, optional
mass number
Returns
-------
aMass : float
Atomic weight in grams /mole from NIST, or just mass number if not in library (U239 gives 239)
Examples
--------
>>> from armi.nucDirectory import nucDir
>>> nucDir.getAtomicWeight("U235")
235.0439299
>>> nucDir.getAtomicWeight("U239")
239
>>> nucDir.getAtomicWeight("U238")
238.0507882
>>> nucDir.getAtomicWeight(z=94, a=239)
239.0521634
"""
if lab:
nuclide = None
if lab in nuclideBases.byLabel:
nuclide = nuclideBases.byLabel[lab]
elif lab in nuclideBases.byMcc3Id:
nuclide = nuclideBases.byMcc3Id[lab]
else:
nuclide = getNuclideFromName(lab)
return nuclide.weight
elif z == 0 and a == 0:
return 0.0
if a == 0 and z:
element = elements.byZ[z]
return element.standardWeight
else:
nuclide = nuclideBases.single(lambda nn: nn.a == a and nn.z == z)
return nuclide.weight
def isHeavyMetal(name):
try:
return getNuclide(name).isHeavyMetal()
except AttributeError:
raise AttributeError("The nuclide {0} is not found in the nuclide directory".format(name))
def isFissile(name):
try:
return getNuclide(name).isFissile()
except AttributeError:
raise AttributeError("The nuclide {0} is not found in the nuclide directory".format(name))
def getThresholdDisplacementEnergy(nuc):
"""
Return the Lindhard cutoff; the energy required to displace an atom.
From SPECTER.pdf Table II
Greenwood, "SPECTER: Neutron Damage Calculations for Materials Irradiations",
ANL.FPP/TM-197, Argonne National Lab., (1985).
Parameters
----------
nuc : str
nuclide name
Returns
-------
Ed : float
The cutoff energy in eV
"""
nuc = getNuclide(nuc)
el = elements.byZ[nuc.z]
try:
ed = eDisplacement[el.symbol]
except KeyError:
print(
"The element {0} of nuclide {1} does not have a displacement energy in the library. Please add one.".format(
el, nuc
)
)
raise
return ed
================================================
FILE: armi/nucDirectory/nuclideBases.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
This module provides access to fundamental nuclide information to be used throughout the framework and applications.
.. impl:: Isotopes and isomers can be queried by name, label, MC2-3 ID, MCNP ID, and AAAZZZS ID.
:id: I_ARMI_ND_ISOTOPES0
:implements: R_ARMI_ND_ISOTOPES
The :py:mod:`nuclideBases ` module defines the
:py:class:`NuclideBase ` class which is used to organize and store
metadata about each nuclide. The metadata is read from a provided ``nuclides.dat`` file, which contains metadata for
thousands of isotopes. The module also contains classes for special types of nuclides, including
:py:class:`DummyNuclideBase ` for dummy nuclides,
:py:class:`LumpNuclideBase `, for lumped fission product nuclides,
and :py:class:`NaturalNuclideBase ` for when data is given
collectively for an element at natural abundance rather than for individual isotopes.
The :py:class:`NuclideBase ` provides a data structure for information
about a single nuclide, including the atom number, atomic weight, element, isomeric state, half-life, and name.
The :py:mod:`nuclideBases ` module provides a factory and associated functions for
instantiating the :py:class:`NuclideBase ` objects. It is expected that
during a simulation, the ``Reactor`` will contain an instance of ``NuclideBases`` to handle building the nuclide
data dictionaries, including:
* ``elements`` (collection of Element objects)
* ``instances`` (list of INuclide objects)
* ``byName`` (keyed by name, e.g., ``U235``)
* ``byDBName`` (keyed by database name, e.g., ``nU235``)
* ``byLabel`` (keyed by label, e.g., ``U235``)
* ``byMcc2Id`` (keyed by MC\ :sup:`2`-2 ID, e.g., ``U-2355``)
* ``byMcc3Id`` (keyed by MC\ :sup:`2`-3 ID, e.g., ``U235_7``)
* ``byMcc3IdEndfbVII0`` (keyed by MC\ :sup:`2`-3 ID, e.g., ``U235_7``)
* ``byMcc3IdEndfbVII1`` (keyed by MC\ :sup:`2`-3 ID, e.g., ``U235_7``)
* ``byMcnpId`` (keyed by MCNP ID, e.g., ``92235``)
* ``byAAAZZZSId`` (keyed by AAAZZZS, e.g., ``2350920``)
The nuclide class structure is outlined :ref:`here `.
.. _nuclide-bases-class-diagram:
.. pyreverse:: armi.nucDirectory.nuclideBases
:align: center
:width: 75%
Class inheritance diagram for :py:class:`INuclide`.
Examples
--------
>>> r = Reactor("ExampleReactor", bp)
>>> r.nuclideBases.byName["U235"]
, HL:2.22160758861e+16, Abund:7.204000e-03>
>>> r.nuclideBases.byLabel["U235"]
, HL:2.22160758861e+16, Abund:7.204000e-03>
Retrieve U-235 by the MC2-2 ID:
>>> r.nuclideBases.byMcc2Id["U-2355"]
, HL:2.22160758861e+16, Abund:7.204000e-03>
Retrieve U-235 by the MC2-3 ID:
>>> r.nuclideBases.byMcc3IdEndfVII0["U235_7"]
, HL:2.22160758861e+16, Abund:7.204000e-03>
Retrieve U-235 by the MCNP ID:
>>> r.nuclideBases.byMcnpId["92235"]
, HL:2.22160758861e+16, Abund:7.204000e-03>
Retrieve U-235 by the AAAZZZS ID:
>>> r.nuclideBases.byAAAZZZSId["2350920"]
, HL:2.22160758861e+16, Abund:7.204000e-03>
Notes
-----
Currently, this module contains a lot of data in the global scope. But ARMI is in the process of encapsulating this
data, moving it out of the global scope, making it part of the reactor data model, and making it configurable via
Settings. Pardon the mess during this transition.
"""
import os
import numpy as np
from ruamel.yaml import YAML
from armi import context, runLog
from armi.nucDirectory import elements, transmutations
from armi.utils.units import HEAVY_METAL_CUTOFF_Z
# Global nuclide and nuclideBases data
nuclideBases = None
instances = []
burnChainImposed = False
byName = None
byDBName = None
byLabel = None
byMcc2Id = None
byMcc3Id = None # for backwards compatibility. Identical to byMcc3IdEndfbVII1
byMcc3IdEndfbVII0 = None
byMcc3IdEndfbVII1 = None
byMcnpId = None
byAAAZZZSId = None
# lookup table from https://t2.lanl.gov/nis/data/endf/endfvii-n.html
BASE_ENDFB7_MAT_NUM = {
"PM": 139,
"RA": 223,
"AC": 225,
"TH": 227,
"PA": 229,
"NP": 230,
"PU": 235,
"AM": 235,
"CM": 240,
"BK": 240,
"CF": 240,
"TC": 99,
}
class NuclideInterface:
"""An abstract nuclide implementation which defining various methods required for a nuclide object."""
def getDatabaseName(self):
"""Return the the nuclide label for the ARMI database (i.e. "nPu239")."""
raise NotImplementedError
def getDecay(self, decayType):
"""
Return a :py:class:`~armi.nucDirectory.transmutations.DecayMode` object.
Parameters
----------
decType: str
Name of decay mode, e.g. 'sf', 'alpha'
Returns
-------
decay : :py:class:`DecayModes `
"""
raise NotImplementedError
def getMcc2Id(self):
"""Return the MC2-2 nuclide identification label based on the ENDF/B-V.2 cross section library."""
raise NotImplementedError
def getMcc3Id(self):
"""Return the MC2-3 nuclide identification label based on the ENDF/B-VII.1 cross section library."""
raise NotImplementedError
def getMcc3IdEndfbVII0(self):
"""Return the MC2-3 nuclide identification label based on the ENDF/B-VII.0 cross section library."""
raise NotImplementedError
def getMcc3IdEndfbVII1(self):
"""Return the MC2-3 nuclide identification label based on the ENDF/B-VII.1 cross section library."""
raise NotImplementedError
def getSerpentId(self):
"""Get the Serpent nuclide identification label."""
raise NotImplementedError
def getNaturalIsotopics(self):
"""Return the natural isotopics root :py:class:`~elements.Element`."""
raise NotImplementedError
def isFissile(self):
"""Return boolean value indicating whether this nuclide is fissile."""
raise NotImplementedError
def isHeavyMetal(self):
"""Return boolean value indicating whether this nuclide is a heavy metal."""
raise NotImplementedError
class NuclideWrapper(NuclideInterface):
"""A nuclide wrapper class, used as a base class for nuclear data file nuclides."""
def __init__(self, container, key):
self._base = None
self.container = container
self.containerKey = key
self.nucLabel = key[:-2]
def __repr__(self):
return f"<{self.__class__.__name__} {self.containerKey}>"
def __format__(self, format_spec):
return format_spec.format(repr(self))
@property
def name(self):
"""
Return the underlying nuclide's name (i.e. "PU239").
Notes
-----
The nuclide name consists of the capitalized 2 character element symbol and atomic mass number.
"""
return self._base.name
@property
def weight(self):
"""Get the underlying nuclide's weight."""
return self._base.weight
def getDatabaseName(self):
"""Get the database name of the underlying nuclide (i.e. "nPu239")."""
return self._base.getDatabaseName()
def getDecay(self, decayType):
"""
Return a :py:class:`~armi.nucDirectory.transmutations.DecayMode` object.
Parameters
----------
decType: str
Name of decay mode, e.g. 'sf', 'alpha'
Returns
-------
decay : :py:class:`DecayModes `
"""
return self._base.getDecay(decayType)
def getMcc2Id(self):
"""Return the MC2-2 nuclide based on the ENDF/B-V.2 cross section library."""
return self._base.getMcc2Id()
def getMcc3Id(self):
"""Return the MC2-3 nuclide based on the ENDF/B-VII.1 cross section library."""
return self.getMcc3IdEndfbVII1()
def getMcc3IdEndfbVII0(self):
"""Return the MC2-3 nuclide based on the ENDF/B-VII.0 cross section library."""
return self._base.getMcc3IdEndfbVII0()
def getMcc3IdEndfbVII1(self):
"""Return the MC2-3 nuclide based on the ENDF/B-VII.1 cross section library."""
return self._base.getMcc3IdEndfbVII1()
def getNaturalIsotopics(self):
"""Return the natural isotopics root :py:class:`~elements.Element`."""
return self._base.getNaturalIsotopics()
def isFissile(self):
"""Return boolean indicating whether or not the underlying nuclide is fissle."""
return self._base.isFissile()
def isHeavyMetal(self):
"""Return boolean indicating whether or not the underlying nuclide is a heavy metal."""
return self._base.isHeavyMetal()
class INuclide(NuclideInterface):
    """
    Nuclide interface, the base of all nuclide objects.

    Attributes
    ----------
    z : int
        Number of protons.
    a : int
        Number of nucleons.
    state : int
        Indicates excitement, 1 is more excited than 0.
    abundance : float
        Isotopic fraction of a naturally occurring nuclide. The sum of all nuclide abundances for a naturally occurring
        element should be 1.0. This is atom fraction, not mass fraction.
    name : str
        ARMI's unique name for the given nuclide.
    label : str
        ARMI's unique 4 character label for the nuclide. These are not human readable, but do not lose any information.
        The label is effectively the :py:attr:`Element.symbol ` padded to two
        characters, plus the mass number (A) in base-26 (0-9, A-Z). Additional support for meta-states is provided by
        adding 100 * the state to the mass number (A).
    nuSF : float
        Neutrons released per spontaneous fission. This should probably be moved at some point.
    """

    # Names of the nuclides treated as fissile by isFissile().
    fissile = ["U235", "PU239", "PU241", "AM242M", "CM244", "U233"]
    # Recognized keys in burn-chain YAML entries; see _processBurnData().
    TRANSMUTATION = "transmutation"
    DECAY = "decay"
    SPONTANEOUS_FISSION = "nuSF"

    def __init__(
        self,
        element,
        a,
        state,
        weight,
        abundance,
        halflife,
        name,
        label,
        mcc2id=None,
        mcc3idEndfbVII0=None,
        mcc3idEndfbVII1=None,
    ):
        """
        Create an instance of an INuclide.

        Parameters
        ----------
        element
            The element this nuclide belongs to. This constructor registers the new
            nuclide on it via ``element.append(self)``.
        a : int
            Mass number (number of nucleons).
        state : int
            Isomeric/excitation state; 0 is the ground state. Must be non-negative.
        weight : float
            Atomic weight.
        abundance : float
            Natural isotopic atom fraction.
        halflife : float
            Half-life; must be non-negative (``inf`` allowed for stable nuclides).
        name : str
            ARMI's unique name for the nuclide.
        label : str
            ARMI's unique 4-character label for the nuclide.
        mcc2id, mcc3idEndfbVII0, mcc3idEndfbVII1 : str, optional
            Cross-section library IDs; stored as empty strings when not provided.

        Raises
        ------
        ValueError
            If ``state`` is negative or ``halflife`` is negative.

        Warning
        -------
        Do not call this constructor directly; use the factory instead.
        """
        if state < 0:
            # NOTE(review): the message says "positive integer" but state == 0 (ground state)
            # is accepted; the check actually enforces non-negativity.
            raise ValueError(
                f"Error in initializing nuclide {name}. An invalid state {state} is provided. The state must be a "
                "positive integer."
            )
        if halflife < 0.0:
            raise ValueError(f"Error in initializing nuclide {name}. The halflife must be a positive value.")
        self.element = element
        self.z = element.z
        self.a = a
        self.state = state
        self.decays = []  # DecayMode objects; populated by _processBurnData
        self.trans = []  # Transmutation objects; populated by _processBurnData
        self.weight = weight
        self.abundance = abundance
        self.halflife = halflife
        self.name = name
        self.label = label
        self.nuSF = 0.0  # neutrons per spontaneous fission; may be overridden by burn-chain data
        self.mcc2id = mcc2id or ""
        self.mcc3idEndfbVII0 = mcc3idEndfbVII0 or ""
        self.mcc3idEndfbVII1 = mcc3idEndfbVII1 or ""
        # Side effect: register this nuclide on its element so the element's nuclide list stays complete.
        self.element.append(self)

    def __hash__(self):
        # A nuclide's identity is its (mass number, proton number, isomeric state).
        return hash((self.a, self.z, self.state))

    def __reduce__(self):
        # Pickle by name only; unpickling re-resolves the shared global instance via the
        # module-level fromName() rather than duplicating the object.
        return fromName, (self.name,)

    def __lt__(self, other):
        return (self.z, self.a, self.state) < (other.z, other.a, other.state)

    def __eq__(self, other):
        # NOTE(review): equality compares hashes of (a, z, state); a hash collision between
        # distinct nuclides would make them compare equal -- confirm this is acceptable.
        return hash(self) == hash(other)

    def _processBurnData(self, burnInfo):
        """
        Process YAML burn transmutation, decay, and spontaneous fission data for this nuclide.

        This clears out any existing transmutation/decay information before processing.

        Parameters
        ----------
        burnInfo: list
            List of dictionaries containing burn information for the current nuclide
        """
        self.decays = []
        self.trans = []
        for nuclideBurnCategory in burnInfo:
            # Check that the burn category has only one defined burn type
            if len(nuclideBurnCategory) > 1:
                raise ValueError(
                    f"Improperly defined ``burn-chain`` of {self}. {nuclideBurnCategory.keys()} should be a single "
                    "burn type."
                )
            nuclideBurnType = list(nuclideBurnCategory.keys())[0]
            if nuclideBurnType == self.TRANSMUTATION:
                self.trans.append(transmutations.Transmutation(self, nuclideBurnCategory[nuclideBurnType]))
            elif nuclideBurnType == self.DECAY:
                self.decays.append(transmutations.DecayMode(self, nuclideBurnCategory[nuclideBurnType]))
            elif nuclideBurnType == self.SPONTANEOUS_FISSION:
                userSpontaneousFissionYield = nuclideBurnCategory.get(nuclideBurnType, None)
                # Check for user-defined value of nuSF within the burn-chain data. If this is updated then prefer the
                # user change and then note this to the user. Otherwise, maintain the default loaded from the nuclide
                # bases.
                if userSpontaneousFissionYield:
                    if userSpontaneousFissionYield != self.nuSF:
                        runLog.info(
                            f"nuSF provided for {self} will be updated from {self.nuSF:<8.6e} to "
                            f"{userSpontaneousFissionYield:<8.6e} based on user provided burn-chain data."
                        )
                        self.nuSF = userSpontaneousFissionYield
            else:
                raise Exception(
                    f"Undefined Burn Data {nuclideBurnType} for {self}. Expected {self.TRANSMUTATION}, {self.DECAY}, "
                    f"or {self.SPONTANEOUS_FISSION}."
                )

    def getDecay(self, decayType):
        """Get a :py:class:`~armi.nucDirectory.transmutations.DecayMode`.

        Retrieve the first :py:class:`~armi.nucDirectory.transmutations.DecayMode` matching the specified decType.
        Returns ``None`` when no decay mode of that type exists.

        Parameters
        ----------
        decType: str
            Name of decay mode e.g. 'sf', 'alpha'

        Returns
        -------
        decay : :py:class:`DecayModes `
        """
        for d in self.decays:
            if d.type == decayType:
                return d
        return None

    def isFissile(self):
        """Determine if the nuclide is fissile.

        Returns
        -------
        answer: bool
            True if the :py:class:`INuclide` is fissile, otherwise False.
        """
        # Membership test against the class-level ``fissile`` name list.
        return self.name in self.fissile

    def getNaturalIsotopics(self):
        r"""Gets the naturally occurring nuclides for this nuclide.

        Abstract method, see concrete types for implementation.

        Returns
        -------
        nuclides: list
            List of :py:class:`INuclides `

        See Also
        --------
        :meth:`NuclideBase.getNaturalIsotopics`
        :meth:`NaturalNuclideBase.getNaturalIsotopics`
        :meth:`LumpNuclideBase.getNaturalIsotopics`
        :meth:`DummyNuclideBase.getNaturalIsotopics`
        """
        raise NotImplementedError

    def getDatabaseName(self):
        """Get the name of the nuclide used in the database (i.e. "nPu239")."""
        return f"n{self.name.capitalize()}"

    def isHeavyMetal(self):
        # Heavy metal means beyond the module-level atomic-number cutoff (actinides and above).
        return self.z > HEAVY_METAL_CUTOFF_Z
class IMcnpNuclide:
    """Interface for nuclides that can be written to MCNP material cards.

    Concrete subclasses must supply both MCNP-facing identifier formats.
    """

    def getMcnpId(self):
        """Return the nuclide label used on an MCNP material card."""
        raise NotImplementedError

    def getAAAZZZSId(self):
        """Return an ID ordered by mass number (A), atomic number (Z), and isomeric state (S)."""
        raise NotImplementedError
class NuclideBase(INuclide, IMcnpNuclide):
    r"""Represents an individual nuclide/isotope.

    .. impl:: Isotopes and isomers can be queried by name and label.
        :id: I_ARMI_ND_ISOTOPES1
        :implements: R_ARMI_ND_ISOTOPES

        The :py:class:`NuclideBase ` class provides a data structure for
        information about a single nuclide, including the atom number, atomic weight, element, isomeric state,
        half-life, and name. The class contains static methods for creating an internal ARMI name or label for a
        nuclide. There are instance methods for generating the nuclide ID for external codes, e.g. MCNP or Serpent, and
        retrieving the nuclide ID for MC\ :sup:`2`-2 or MC\ :sup:`2`-3. There are also instance methods for generating
        an AAAZZZS ID and an ENDF MAT number.
    """

    def __init__(self, element, a, weight, abundance, state, halflife):
        IMcnpNuclide.__init__(self)
        INuclide.__init__(
            self,
            element=element,
            a=a,
            state=state,
            weight=weight,
            abundance=abundance,
            halflife=halflife,
            name=NuclideBase._createName(element, a, state),
            label=NuclideBase._createLabel(element, a, state),
        )

    def __repr__(self):
        # Fix: the closing ">" previously appeared after Label AND at the end, producing a
        # malformed "<... Label:X>, ...>" representation.
        return (
            f"<{self.__class__.__name__} {self.name}: Z:{self.z}, A:{self.a}, S:{self.state}, "
            + f"W:{self.weight:<12.6e}, Label:{self.label}, HL:{self.halflife:<15.11e}, "
            + f"Abund:{self.abundance:<8.6e}>"
        )

    @staticmethod
    def _createName(element, a, state):
        """Return ARMI's unique name for a nuclide, e.g. ``U235`` or ``AM242M``."""
        metaChar = ["", "M", "M2", "M3"]
        # Valid states are 0..len(metaChar)-1. Use >= so state == len(metaChar) raises a clear
        # ValueError here instead of an IndexError on the lookup below (the old "> len" check
        # let state == 4 through).
        if state >= len(metaChar):
            raise ValueError(f"The state of NuclideBase is not valid and must be less than {len(metaChar)}.")
        return f"{element.symbol}{a}{metaChar[state]}"

    @staticmethod
    def _createLabel(element, a, state):
        """
        Make label for nuclide base.

        The logic causes labels for things with A<10 to be zero padded like H03 or tritium instead of H3. This avoids
        the metastable tritium collision which would look like elemental HE. It also allows things like MO100 to be held
        within 4 characters, which is a constraint of the ISOTXS format if we append 2 characters for XS type.
        """
        # len(e.symbol) is 1 or 2 => a % (either 1000 or 100)
        # => gives exact a, or last two digits.
        # the division by 10 removes the last digit.
        firstTwoDigits = (a % (10 ** (4 - len(element.symbol)))) // 10
        # the last digit is either 0-9 if state=0, or A-J if state=1, or K-T if state=2, or U-d if state=3
        lastDigit = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcd"[(a % 10) + state * 10]
        return f"{element.symbol}{firstTwoDigits}{lastDigit}"

    def getNaturalIsotopics(self):
        """Gets the natural isotopics root :py:class:`~elements.Element`.

        Gets the naturally occurring nuclides for this nuclide.

        Returns
        -------
        nuclides: list
            List of :py:class:`INuclides `

        See Also
        --------
        :meth:`INuclide.getNaturalIsotopics`
        """
        return self.element.getNaturalIsotopics()

    def getMcc2Id(self):
        """Return the MC2-2 nuclide identification label based on the ENDF/B-V.2 cross section library.

        .. impl:: Isotopes and isomers can be queried by MC2-2 ID.
            :id: I_ARMI_ND_ISOTOPES2
            :implements: R_ARMI_ND_ISOTOPES

            This method returns the ``mcc2id`` attribute of a
            :py:class:`NuclideBase ` instance. This attribute is initially
            populated by reading from the mcc-nuclides.yaml file in the ARMI resources folder.
        """
        return self.mcc2id

    def getMcc3Id(self):
        """Return the MC2-3 nuclide identification label based on the ENDF/B-VII.1 cross section library."""
        return self.getMcc3IdEndfbVII1()

    def getMcc3IdEndfbVII0(self):
        """Return the MC2-3 nuclide identification label based on the ENDF/B-VII.0 cross section library.

        .. impl:: Isotopes and isomers can be queried by MC2-3 ENDF/B-VII.0 ID.
            :id: I_ARMI_ND_ISOTOPES3
            :implements: R_ARMI_ND_ISOTOPES

            This method returns the ``mcc3idEndfbVII0`` attribute of a
            :py:class:`NuclideBase `
            instance. This attribute is initially populated by reading from the
            mcc-nuclides.yaml file in the ARMI resources folder.
        """
        return self.mcc3idEndfbVII0

    def getMcc3IdEndfbVII1(self):
        """Return the MC2-3 nuclide identification label based on the ENDF/B-VII.1 cross section library.

        .. impl:: Isotopes and isomers can be queried by MC2-3 ENDF/B-VII.1 ID.
            :id: I_ARMI_ND_ISOTOPES7
            :implements: R_ARMI_ND_ISOTOPES

            This method returns the ``mcc3idEndfbVII1`` attribute of a
            :py:class:`NuclideBase `
            instance. This attribute is initially populated by reading from the
            mcc-nuclides.yaml file in the ARMI resources folder.
        """
        return self.mcc3idEndfbVII1

    def getMcnpId(self):
        """
        Gets the MCNP label for this nuclide.

        .. impl:: Isotopes and isomers can be queried by MCNP ID.
            :id: I_ARMI_ND_ISOTOPES4
            :implements: R_ARMI_ND_ISOTOPES

            This method generates the MCNP ID for an isotope using the standard
            MCNP format based on the atomic number A, number of protons Z, and
            excited state. The implementation includes the special rule for
            Am-242m, which is 95242. 95642 is used for the less common ground
            state Am-242.

        Returns
        -------
        id : str
            The MCNP ID e.g. ``92235``, ``94239``, ``6000``
        """
        z, a = self.z, self.a
        if z == 95 and a == 242:
            # Am242 has special rules
            if self.state != 1:
                # MCNP uses base state for the common metastable state AM242M, so AM242M is just 95242
                # AM242 base state is called 95642 (+400) in mcnp.
                # see https://mcnp.lanl.gov/pdf_files/la-ur-08-1999.pdf
                # New ACE-Formatted Neutron and Proton Libraries Based on ENDF/B-VII.0
                a += 300 + 100 * max(self.state, 1)
        elif self.state > 0:
            # in general mcnp adds 300 + 100*m to the Z number for metastables. see above source
            a += 300 + 100 * self.state
        return f"{z:d}{a:03d}"

    def getAAAZZZSId(self):
        """
        Return a string that is ordered by the mass number, A, the atomic number, Z, and the isomeric state, S.

        .. impl:: Isotopes and isomers can be queried by AAAZZZS ID.
            :id: I_ARMI_ND_ISOTOPES5
            :implements: R_ARMI_ND_ISOTOPES

            This method generates the AAAZZZS format ID for an isotope. Where
            AAA is the mass number, ZZZ is the atomic number, and S is the
            isomeric state. This is a general format independent of any code that
            precisely defines an isotope or isomer.

        Notes
        -----
        An example would be for U235, where A=235, Z=92, and S=0, returning ``2350920``.
        """
        return f"{self.a}{self.z:>03d}{self.state}"

    def getSerpentId(self):
        """
        Returns the SERPENT style ID for this nuclide.

        Returns
        -------
        id: str
            The ID of this nuclide based on it's elemental name, weight,
            and state, eg ``U-235``, ``Te-129m``.
        """
        symbol = self.element.symbol.capitalize()
        return f"{symbol}-{self.a}{'m' if self.state else ''}"

    def getEndfMatNum(self):
        """
        Gets the ENDF MAT number.

        MAT numbers are defined as described in section 0.4.1 of the NJOY manual. Basically, it's Z * 100 + I where I is
        an isotope number. I=25 is defined as the lightest known stable isotope of element Z, so for Uranium, Z=92 and
        I=25 refers to U234. The values of I go up by 3 for each mass number, so U235 is 9228. This leaves room for
        three isomeric states of each nuclide.

        Returns
        -------
        id : str
            The MAT number e.g. ``9237`` for U238
        """
        z, a = self.z, self.a
        if self.element.symbol in BASE_ENDFB7_MAT_NUM:
            # no stable isotopes (or other special case). Use lookup table
            smallestStableA = BASE_ENDFB7_MAT_NUM[self.element.symbol]
        else:
            naturalIsotopes = self.getNaturalIsotopics()
            if naturalIsotopes:
                smallestStableA = min(ni.a for ni in naturalIsotopes)  # no guarantee they were sorted
            else:
                raise KeyError(f"Nuclide {self} is unknown in the MAT number lookup")
        isotopeNum = (a - smallestStableA) * 3 + self.state + 25
        mat = z * 100 + isotopeNum
        return str(mat)
class NaturalNuclideBase(INuclide, IMcnpNuclide):
    """
    Represents an element's natural isotopic mixture as a single nuclide-like object.

    Notes
    -----
    This is meant to represent the combination of all naturally occurring nuclides within an element. The abundance is
    forced to zero here so that it does not have any interactions with the NuclideBase objects.
    """

    def __init__(self, name, element):
        INuclide.__init__(
            self,
            element=element,
            a=0,
            state=0,
            # abundance-weighted mean of the element's natural isotopes
            weight=sum([nn.weight * nn.abundance for nn in element.getNaturalIsotopics()]),
            abundance=0.0,
            halflife=np.inf,
            name=name,
            label=name,
        )

    def __repr__(self):
        return f"<{self.__class__.__name__} {self.name}: Z:{self.z}, W:{self.weight:<12.6e}, Label:{self.label}>"

    def getNaturalIsotopics(self):
        """Gets the natural isotopics root :py:class:`~elements.Element`.

        Gets the naturally occurring nuclides for this nuclide.

        Returns
        -------
        nuclides: list
            List of :py:class:`INuclides `.

        See Also
        --------
        :meth:`INuclide.getNaturalIsotopics`
        """
        return self.element.getNaturalIsotopics()

    def getMcnpId(self):
        """Gets the MCNP ID for this element.

        Returns
        -------
        id : str
            The MCNP ID e.g. ``1000``, ``92000``. Not zero-padded on the left.
        """
        return "{0:d}000".format(self.z)

    def getMcc2Id(self):
        """Return the MC2-2 nuclide identification label based on the ENDF/B-V.2 cross section library."""
        return self.mcc2id

    def getMcc3Id(self):
        """Return the MC2-3 nuclide identification label based on the ENDF/B-VII.1 cross section library."""
        return self.getMcc3IdEndfbVII1()

    def getMcc3IdEndfbVII0(self):
        """Return the MC2-3 nuclide identification label based on the ENDF/B-VII.0 cross section library."""
        return self.mcc3idEndfbVII0

    def getMcc3IdEndfbVII1(self):
        """Return the MC2-3 nuclide identification label based on the ENDF/B-VII.1 cross section library."""
        return self.mcc3idEndfbVII1

    def getSerpentId(self):
        """Gets the SERPENT ID for this natural nuclide.

        Returns
        -------
        id: str
            SERPENT ID: ``C-nat``, ``Fe-nat``
        """
        return f"{self.element.symbol.capitalize()}-nat"

    def getEndfMatNum(self):
        """Get the ENDF mat number for this element."""
        if self.z != 6:
            # Fix: a space was missing between the two concatenated message fragments,
            # which rendered as "...and" + "will..." -> "andwill".
            runLog.warning(
                f"The only elemental in ENDF/B VII.1 is carbon. ENDF mat num was requested for the elemental {self} "
                "and will not be helpful for working with ENDF/B VII.1. Try to expandElementalsToIsotopics"
            )
        return str(self.z * 100)
class DummyNuclideBase(INuclide):
    """
    A placeholder nuclide used to park mass within the system.

    Notes
    -----
    This may be used to store mass from a depletion calculation, specifically in the instances where the burn chain is
    truncated.
    """

    def __init__(self, element, name, weight):
        # Dummies carry no mass number, state, natural abundance, or finite half-life.
        INuclide.__init__(
            self,
            element=element,
            a=0,
            state=0,
            weight=weight,
            abundance=0.0,
            halflife=np.inf,
            name=name,
            # label keeps the 5th character of the name, presumably names are "DUMPn" -> "DMPn"
            label="DMP" + name[4],
        )

    def __repr__(self):
        return f"<{self.__class__.__name__} {self.name}: W:{self.weight:<12.6e}, Label:{self.label}>"

    def __hash__(self):
        # Unlike INuclide, the weight participates in identity for dummies.
        identity = (self.a, self.z, self.state, self.weight)
        return hash(identity)

    def __lt__(self, other):
        mine = (self.z, self.a, self.state, self.weight)
        theirs = (other.z, other.a, other.state, other.weight)
        return mine < theirs

    def getNaturalIsotopics(self):
        """Gets the natural isotopics, an empty iterator.

        Dummy nuclides have no naturally occurring isotopes.

        Returns
        -------
        empty: iterator
            An empty generator

        See Also
        --------
        :meth:`INuclide.getNaturalIsotopics`
        """
        yield from ()

    def isHeavyMetal(self):
        """Dummy nuclides are never counted as heavy metal."""
        return False

    def getMcc2Id(self):
        """Return the MC2-2 nuclide identification label based on the ENDF/B-V.2 cross section library."""
        return self.mcc2id

    def getMcc3Id(self):
        """Return the MC2-3 nuclide identification label based on the ENDF/B-VII.1 cross section library."""
        return self.getMcc3IdEndfbVII1()

    def getMcc3IdEndfbVII0(self):
        """Return the MC2-3 nuclide identification label based on the ENDF/B-VII.0 cross section library."""
        return self.mcc3idEndfbVII0

    def getMcc3IdEndfbVII1(self):
        """Return the MC2-3 nuclide identification label based on the ENDF/B-VII.1 cross section library."""
        return self.mcc3idEndfbVII1
class LumpNuclideBase(INuclide):
    """
    Represents a combination of many nuclides from `NuclideBases` into a single lumped nuclide.

    See Also
    --------
    armi.physics.neutronics.fissionProduct model:
        Describes what nuclides LumpNuclideBase is expanded to.
    """

    def __init__(self, element, name, weight):
        # Lumps carry no mass number, state, natural abundance, or finite half-life.
        INuclide.__init__(
            self,
            element=element,
            a=0,
            state=0,
            weight=weight,
            abundance=0.0,
            halflife=np.inf,
            name=name,
            # label drops the leading character of the name
            label=name[1:],
        )

    def __repr__(self):
        return f"<{self.__class__.__name__} {self.name}: W:{self.weight:<12.6e}, Label:{self.label}>"

    def __hash__(self):
        # Unlike INuclide, the weight participates in identity for lumps.
        identity = (self.a, self.z, self.state, self.weight)
        return hash(identity)

    def __lt__(self, other):
        mine = (self.z, self.a, self.state, self.weight)
        theirs = (other.z, other.a, other.state, other.weight)
        return mine < theirs

    def getNaturalIsotopics(self):
        """Gets the natural isotopics, an empty iterator.

        Lumped nuclides have no naturally occurring isotopes.

        Returns
        -------
        empty: iterator
            An empty generator

        See Also
        --------
        :meth:`INuclide.getNaturalIsotopics`
        """
        yield from ()

    def isHeavyMetal(self):
        """Lumped nuclides are never counted as heavy metal."""
        return False

    def getMcc2Id(self):
        """Return the MC2-2 nuclide identification label based on the ENDF/B-V.2 cross section library."""
        return self.mcc2id

    def getMcc3Id(self):
        """Return the MC2-3 nuclide identification label based on the ENDF/B-VII.1 cross section library."""
        return self.getMcc3IdEndfbVII1()

    def getMcc3IdEndfbVII0(self):
        """Return the MC2-3 nuclide identification label based on the ENDF/B-VII.0 cross section library."""
        return self.mcc3idEndfbVII0

    def getMcc3IdEndfbVII1(self):
        """Return the MC2-3 nuclide identification label based on the ENDF/B-VII.1 cross section library."""
        return self.mcc3idEndfbVII1
def initReachableActiveNuclidesThroughBurnChain(nuclides, numberDensities, activeNuclides):
    """Delegate to :meth:`NuclideBases.initReachableActiveNuclidesThroughBurnChain` on the module-global instance."""
    # read-only access to the module global; no ``global`` statement needed
    return nuclideBases.initReachableActiveNuclidesThroughBurnChain(nuclides, numberDensities, activeNuclides)
def getIsotopics(nucName):
    """Delegate to :meth:`NuclideBases.getIsotopics` on the module-global instance."""
    return nuclideBases.getIsotopics(nucName)
def fromName(name):
    """Delegate to :meth:`NuclideBases.fromName` on the module-global instance."""
    return nuclideBases.fromName(name)
def isMonoIsotopicElement(name):
    """Delegate to :meth:`NuclideBases.isMonoIsotopicElement` on the module-global instance."""
    return nuclideBases.isMonoIsotopicElement(name)
def where(predicate):
    """Delegate to :meth:`NuclideBases.where` on the module-global instance."""
    return nuclideBases.where(predicate)
def single(predicate):
    """Delegate to :meth:`NuclideBases.single` on the module-global instance."""
    return nuclideBases.single(predicate)
def changeLabel(nuclideBase, newLabel):
    """Delegate to :meth:`NuclideBases.changeLabel` on the module-global instance."""
    nuclideBases.changeLabel(nuclideBase, newLabel)
def getDepletableNuclides(activeNuclides, obj):
    """Return the sorted intersection of ``obj``'s nuclides with the burn-chain's active nuclides."""
    present = set(obj.getNuclides())
    return sorted(present.intersection(activeNuclides))
def imposeBurnChain(burnChainStream):
    """Delegate to :meth:`NuclideBases.imposeBurnChain` on the module-global instance."""
    nuclideBases.imposeBurnChain(burnChainStream)
def factory():
    """Create the module-global :class:`NuclideBases` object and refresh all module-level aliases."""
    global nuclideBases, burnChainImposed, instances, byName, byDBName, byLabel
    global byMcc2Id, byMcc3Id, byMcc3IdEndfbVII0, byMcc3IdEndfbVII1, byMcnpId, byAAAZZZSId

    nuclideBases = NuclideBases()

    # Re-point every module-level alias at the freshly built containers.
    instances = nuclideBases.instances
    burnChainImposed = nuclideBases.burnChainImposed
    byName = nuclideBases.byName
    byDBName = nuclideBases.byDBName
    byLabel = nuclideBases.byLabel
    byMcc2Id = nuclideBases.byMcc2Id
    # byMcc3Id exists for backwards compatibility; identical to byMcc3IdEndfbVII1
    byMcc3Id = nuclideBases.byMcc3Id
    byMcc3IdEndfbVII0 = nuclideBases.byMcc3IdEndfbVII0
    byMcc3IdEndfbVII1 = nuclideBases.byMcc3IdEndfbVII1
    byMcnpId = nuclideBases.byMcnpId
    byAAAZZZSId = nuclideBases.byAAAZZZSId
def addNuclideBases():
    """Pass through to NuclideBases.addNuclideBases() for the global NuclideBases object.

    Notes
    -----
    The instance method requires a ``nuclidesFile`` positional argument; calling it with no
    arguments (as this wrapper previously did) raised a TypeError. The instance's configured
    file is forwarded instead.
    """
    nuclideBases.addNuclideBases(nuclideBases.nuclidesFile)
def readMCCNuclideData():
    """Pass through to NuclideBases.readMCCNuclideData() for the global NuclideBases object.

    Notes
    -----
    ``NuclideBases.factory`` calls the instance method with an explicit file argument
    (``self.readMCCNuclideData(self.mccNuclidesFile)``), so the instance's configured file is
    forwarded here as well; the previous zero-argument call presumably failed for the same
    reason ``addNuclideBases()`` did -- confirm against the method signature.
    """
    nuclideBases.readMCCNuclideData(nuclideBases.mccNuclidesFile)
def updateNuclideBasesForSpecialCases():
    """Delegate to :meth:`NuclideBases.updateNuclideBasesForSpecialCases` on the module-global instance."""
    nuclideBases.updateNuclideBasesForSpecialCases()
def addGlobalNuclide(nuclide: NuclideBase):
    """Delegate to :meth:`NuclideBases.addNuclide` on the module-global instance."""
    nuclideBases.addNuclide(nuclide)
def destroyGlobalNuclides():
    """Delegate to :meth:`NuclideBases.clear` on the module-global instance."""
    nuclideBases.clear()
class NuclideBases:
    """
    A container for all the nuclide information in the simulation.

    By design, you would only expect to have one instance of this object in memory during a simulation.

    Attributes
    ----------
    burnChainImposed: bool
        Have we applied transmutation and decay data to each nuclide?
    instances: list[INuclide]
        A simple list of the nuclides in this class.
    byName: dict[str, INuclide]
        A dictionary of the nuclides in this class, keyed by name, e.g., "U235".
    byDBName: dict[str, INuclide]
        A dictionary of the nuclides in this class, keyed by database name, e.g., "nU235".
    byLabel: dict[str, INuclide]
        A dictionary of the nuclides in this class, keyed by label, e.g., "U235".
    byMcc2Id: dict[str, INuclide]
        A dictionary of the nuclides in this class, keyed by MC2-2 ID, e.g., "U-2355".
    byMcc3Id: dict[str, INuclide]
        A dictionary of the nuclides in this class, keyed by MC2-3 ID, e.g., "U235_7".
        (This exists for backwards compat. Identical to byMcc3IdEndfbVII1.)
    byMcc3IdEndfbVII0: dict[str, INuclide]
        A dictionary of the nuclides in this class, keyed by MC2-3 ID, e.g., "U235_7".
    byMcc3IdEndfbVII1: dict[str, INuclide]
        A dictionary of the nuclides in this class, keyed by MC2-3 ID, e.g., "U235_7".
    byMcnpId: dict[str, INuclide]
        A dictionary of the nuclides in this class, keyed by MCNP ID, e.g., 92235.
    byAAAZZZSId: dict[int, INuclide]
        A dictionary of the nuclides in this class, keyed by AAAZZZS, e.g., 2350920.
    elements: Elements
        A container for all the atomic elements information in the simulation.
    nuclidesFile: str
        File path to the custom ARMI "nuclides.dat" file, containing a plain text description of all the nuclides to be
        modeled including: Z, number of neutrons, mass number, amu, natural abundance, half life and nu-bar and more.
    mccNuclidesFile: str
        File path to the "mcc-nuclides.yaml" file, containing nuclides defined by the MC2-2 and MC2-3 codes, with
        various ENDF/B-V mappings.
    """

    # Default data files shipped in the ARMI resources directory.
    DEFAULT_NUCLIDES_FILE = os.path.join(context.RES, "nuclides.dat")
    DEFAULT_MCC_NUCLIDES_FILE = os.path.join(context.RES, "mcc-nuclides.yaml")
def __init__(self, nuclidesFile=None, mccNuclidesFile=None):
self.burnChainImposed: bool = False
self.elements = None
self.instances: list[INuclide] = []
self.byName: dict[str, INuclide] = {}
self.byDBName: dict[str, INuclide] = {}
self.byLabel: dict[str, INuclide] = {}
self.byMcc2Id: dict[str, INuclide] = {}
self.byMcc3Id: dict[str, INuclide] = {}
self.byMcc3IdEndfbVII0: dict[str, INuclide] = {}
self.byMcc3IdEndfbVII1: dict[str, INuclide] = {}
self.byMcnpId: dict[str, INuclide] = {}
self.byAAAZZZSId: dict[int, INuclide] = {}
self.nuclidesFile: str = nuclidesFile if nuclidesFile else self.DEFAULT_NUCLIDES_FILE
self.mccNuclidesFile: str = mccNuclidesFile if mccNuclidesFile else self.DEFAULT_MCC_NUCLIDES_FILE
self.factory()
def clear(self):
"""Empty all the data containers in this object."""
# grab all the globals
global burnChainImposed
global instances
global byName
global byDBName
global byLabel
global byMcc2Id
global byMcc3Id
global byMcc3IdEndfbVII0
global byMcc3IdEndfbVII1
global byMcnpId
global byAAAZZZSId
# reset the class attributes
self.burnChainImposed = False
self.elements = None
self.instances = []
self.byName = {}
self.byDBName = {}
self.byLabel = {}
self.byMcc2Id = {}
self.byMcc3Id = {}
self.byMcc3IdEndfbVII0 = {}
self.byMcc3IdEndfbVII1 = {}
self.byMcnpId = {}
self.byAAAZZZSId = {}
# reset the globals
instances = self.instances
burnChainImposed = self.burnChainImposed
byName = self.byName
byDBName = self.byDBName
byLabel = self.byLabel
byMcc2Id = self.byMcc2Id
byMcc3Id = self.byMcc3Id
byMcc3IdEndfbVII0 = self.byMcc3IdEndfbVII0
byMcc3IdEndfbVII1 = self.byMcc3IdEndfbVII1
byMcnpId = self.byMcnpId
byAAAZZZSId = self.byAAAZZZSId
def addNuclide(self, nuclide: INuclide):
"""Add an element to the dictionaries in this class."""
if nuclide.name in self.byName or nuclide.getDatabaseName() in self.byDBName or nuclide.label in self.byLabel:
raise ValueError(f"{nuclide} has already been added.")
self.instances.append(nuclide)
self.byName[nuclide.name] = nuclide
self.byDBName[nuclide.getDatabaseName()] = nuclide
self.byLabel[nuclide.label] = nuclide
# Add look-up based on the MCNP nuclide ID
if isinstance(nuclide, IMcnpNuclide):
if nuclide.getMcnpId() in self.byMcnpId:
raise ValueError(f"{nuclide} with McnpId {nuclide.getMcnpId()} has already been added.")
self.byMcnpId[nuclide.getMcnpId()] = nuclide
if not isinstance(nuclide, (NaturalNuclideBase, LumpNuclideBase, DummyNuclideBase)):
self.byAAAZZZSId[nuclide.getAAAZZZSId()] = nuclide
    def factory(self, nuclidesFile: str = None, mccNuclidesFile: str = None, elementsFile: str = None):
        """
        Reads data files to instantiate the :py:class:`INuclides `.

        Reads NIST, MC**2 and burn chain data files to instantiate the :py:class:`INuclides `. Also clears and
        fills in the class attributes: instances, byName, byLabel, byMcc3IdEndfbVII0, and byMcc3IdEndfbVII1. This method
        is automatically run upon initializing the class, hence it is not usually necessary to re-run it unless there is
        a change to the data files, which should not happen during run time, or a *bad* :py:class:`INuclide` is created.

        Parameters
        ----------
        nuclidesFile: str
            File path to the custom ARMI "nuclides.dat" file, containing a plain text description of all nuclides to be
            modeled including: Z, number of neutrons, mass number, AMU, natural abundance, half life, nu-bar and more.
        mccNuclidesFile: str
            File path to the "mcc-nuclides.yaml" file, containing nuclides defined by the MC2-2 and MC2-3 codes, with
            various ENDF/B-V mappings.
        elementsFile: str
            File path to the custom ARMI "elements.dat" file.

        Notes
        -----
        This cannot be run more than once. NuclideBase instances are used throughout the ARMI ecosystem and are even
        class attributes in some cases. Re-instantiating them would orphan any existing ones and break everything.
        """
        # Guard against double-initialization; see Notes above.
        if len(self.instances) != 0:
            raise RuntimeError(
                "Nuclides are already initialized and cannot be re-initialized unless `nuclideBases.clear()` is called "
                "first."
            )
        # If an input file is provided, use it, otherwise there is a class default.
        if nuclidesFile:
            self.nuclidesFile = nuclidesFile
        if mccNuclidesFile:
            self.mccNuclidesFile = mccNuclidesFile
        # load the fundamental elements library
        elements.factory(elementsFile)
        self.elements = elements.elements
        # load the isotopes and isomers library
        # NOTE: the order of these calls matters -- later steps build on nuclides/elements
        # created by earlier ones.
        self.addNuclideBases(self.nuclidesFile)
        self.__addNaturalNuclideBases()
        self.__addDummyNuclideBases()
        self.__addLumpedFissionProductNuclideBases()
        self.updateNuclideBasesForSpecialCases()
        self.readMCCNuclideData(self.mccNuclidesFile)
        self.__renormalizeNuclideToElementRelationship()
        self.__deriveElementalWeightsByNaturalNuclideAbundances()
    def initReachableActiveNuclidesThroughBurnChain(self, nuclides, numberDensities, activeNuclides):
        """
        March through the depletion chain and find all nuclides that can be reached by depleting nuclides passed in.

        This limits depletion to the smallest set of nuclides that matters.

        Parameters
        ----------
        nuclides : np.array, dtype="S6"
            Starting array of nuclide names
        numberDensities : np.array, dtype=np.float64
            Starting array of number densities
        activeNuclides : OrderedSet
            Active nuclides defined on the reactor blueprints object. See: armi.reactor.blueprints.py

        Returns
        -------
        tuple
            The (possibly extended) ``nuclides`` and ``numberDensities`` arrays; new products
            are appended with zero number density.
        """
        # Without a burn chain there is nothing to march through.
        if not self.burnChainImposed:
            return nuclides, numberDensities

        missingActiveNuclides = set()
        memo = set()  # nuclides already visited, so the fixed-point loop terminates
        nucNames = [nucName.decode() for nucName in nuclides]
        difference = set(nucNames).difference(memo)
        # Fixed-point iteration: pop one unvisited nuclide per pass, append any newly reachable
        # products, and recompute the frontier until nothing unvisited remains.
        while any(difference):
            newNucs = set()
            nuclide = difference.pop()
            memo.add(nuclide)
            # Skip the nuclide if it is not `active` in the burn-chain
            if nuclide not in activeNuclides:
                continue
            nuclideObj = self.byName[nuclide]
            for interaction in nuclideObj.trans + nuclideObj.decays:
                try:
                    # Interaction nuclides can only be added to the number density dictionary if they are a part of the
                    # user-defined active nuclides
                    productNuclide = interaction.getPreferredProduct(activeNuclides)
                    if productNuclide not in nucNames:
                        newNucs.add(productNuclide.encode())
                except KeyError:
                    # Keep track of the first production nuclide
                    # (presumably ``productNuclides`` is a hashable tuple of candidates -- confirm
                    # against armi.nucDirectory.transmutations)
                    missingActiveNuclides.add(interaction.productNuclides)
            # add the new nuclides to the number density arrays (new products start at zero density)
            newNDens = np.zeros(len(newNucs), dtype=np.float64)
            nuclides = np.append(nuclides, list(newNucs))
            numberDensities = np.append(numberDensities, newNDens)
            nucNames = [nucName.decode() for nucName in nuclides]
            difference = set(nucNames).difference(memo)
        if self.burnChainImposed and missingActiveNuclides:
            self._failOnMissingActiveNuclides(missingActiveNuclides)
        return nuclides, numberDensities
def _failOnMissingActiveNuclides(self, missingActiveNuclides):
"""Raise ValueError with notification of which nuclides to include in the burn-chain."""
msg = "Missing active nuclides in loading file. Add the following nuclides:"
for i, nucList in enumerate(missingActiveNuclides, 1):
msg += f"\n {i} - " # Index of
for j, nuc in enumerate(nucList, 1):
delimiter = " or " if j < len(nucList) else ""
msg += f"{nuc}{delimiter}"
raise ValueError(msg)
def getIsotopics(self, nucName):
"""Expand elemental nuc name to isotopic nuc bases."""
nb = self.byName[nucName]
if isinstance(nb, (LumpNuclideBase, DummyNuclideBase)):
# skip lumped fission products or dumps
return []
elif isinstance(nb, NaturalNuclideBase):
isotopics = nb.getNaturalIsotopics()
else:
isotopics = [nb]
return isotopics
def fromName(self, name):
"""Return a nuclide from its name."""
matches = [nn for nn in self.instances if nn.name == name]
if len(matches) != 1:
raise Exception(f"Too many or too few ({len(matches)}) matches for {name}")
return matches[0]
def isMonoIsotopicElement(self, name):
"""Return true if this is the only naturally occurring isotope of its element."""
base = self.byName[name]
return base.abundance > 0 and len([e for e in base.element.nuclides if e.abundance > 0]) == 1
def where(self, predicate):
"""
Return all :py:class:`INuclides ` objects matching a condition.
Returns an iterator of :py:class:`INuclides ` matching the specified condition.
Parameters
----------
predicate: lambda
A lambda, or function, accepting a :py:class:`INuclide` as a parameter
Examples
--------
>>> from armi.nucDirectory.nuclideBases import NuclideBases
>>> nuclideBases = NuclideBases()
>>> [nn.name for nn in nuclideBases.where(lambda nb: "Z" in nb.name)]
['ZN64', 'ZN66', 'ZN67', 'ZN68', 'ZN70', 'ZR90', 'ZR91', 'ZR92', 'ZR94', 'ZR96', 'ZR93', 'ZR95', 'ZR']
>>> # in order to get length, convert to list
>>> isomers90 = list(nuclideBases.where(lambda nb: nb.a == 95))
>>> len(isomers90)
3
>>> for iso in isomers:
... print(iso)
"""
return filter(predicate, self.instances)
def single(self, predicate):
"""
Return a single :py:class:`INuclide` object meeting the specified condition.
Similar to :py:func:`where`, this function uses a lambda input to filter the
:py:attr:`INuclide instances `. If there is not 1 and only 1 match for the specified condition, an
exception is raised.
Examples
--------
>>> from armi.nucDirectory import nuclideBases
>>> nuclideBases.single(lambda nb: nb.name == "C")
>>> nuclideBases.single(lambda nb: nb.z == 95 and nb.a == 242 and nb.state == 1)
"""
matches = [nuc for nuc in self.instances if predicate(nuc)]
if len(matches) != 1:
raise IndexError(
"Expected single match, but got {} matches:\n {}".format(
len(matches), "\n ".join(str(mo) for mo in matches)
)
)
return matches[0]
def changeLabel(self, nuclideBase, newLabel):
"""
Updates a nuclide label and modifies the ``byLabel`` look-up dictionary.
Notes
-----
Since nuclide objects are defined and stored globally, any change to the attributes will be maintained.
"""
nuclideBase.label = newLabel
self.byLabel[newLabel] = nuclideBase
def imposeBurnChain(self, burnChainStream):
    """
    Apply transmutation and decay information to each nuclide.

    Parameters
    ----------
    burnChainStream : file-like
        Open stream of burn-chain YAML data keyed by nuclide name.

    Notes
    -----
    You cannot impose a burn chain twice. Doing so would require that you clean out the transmutations and decays
    from all the module-level nuclide bases, which generally requires that you rebuild them. But rebuilding those is
    not an option because some of them get set as class-level attributes and would be orphaned. If a need to change
    burn chains mid-run re-arises, then a better nuclideBase-level burnchain cleanup should be implemented so the
    objects don't have to change identity.

    See Also
    --------
    armi.nucDirectory.transmutations : describes file format
    """
    if self.burnChainImposed:
        # The only time this should happen is if in a unit test that has already processed conftest.py and is now
        # building a Case that also imposes this.
        runLog.warning("Burn chain already imposed. Skipping reimposition.")
        return

    # Record imposition both on this instance and at module scope.
    global burnChainImposed
    self.burnChainImposed = True
    burnChainImposed = True

    reader = YAML(typ="rt")
    reader.allow_duplicate_keys = False
    for name, info in reader.load(burnChainStream).items():
        # Protected call is "module level protection" rather than class-level.
        self.byName[name]._processBurnData(info)
def addNuclideBases(self, nuclidesFile: str):
    """
    Read natural abundances of any natural nuclides.

    This adjusts already-existing NuclideBases and Elements with the new information.

    .. impl:: Separating natural abundance data from code.
        :id: I_ARMI_ND_DATA0
        :implements: R_ARMI_ND_DATA

        This function reads the ``nuclides.dat`` file from the ARMI resources folder. This file contains metadata
        for 4,614 nuclides, including number of protons, number of neutrons, atomic number, excited state, element
        symbol, atomic mass, natural abundance, half-life, and spontaneous fission yield. The data in
        ``nuclides.dat`` have been collected from multiple different sources; the references are given in comments
        at the top of that file.

    Parameters
    ----------
    nuclidesFile: str
        File path to the custom ARMI "nuclides.dat" file, containing a plain text description of all nuclides to be
        modeled including: Z, number of neutrons, mass number, AMU, natural abundance, half life, nu-bar and more.
    """
    with open(nuclidesFile, "r") as stream:
        for rawLine in stream:
            # Skip header/comment rows ("#" comments and the "Z..." column-label line).
            if rawLine.startswith(("#", "Z")):
                continue
            fields = rawLine.split()
            _protons = int(fields[0])
            _neutrons = int(fields[1])
            massNumber = int(fields[2])
            excitedState = int(fields[3])
            symbol = fields[4].upper()
            atomicMass = float(fields[5])
            abundance = float(fields[6])
            # "inf" marks stable nuclides.
            halflife = np.inf if fields[7] == "inf" else float(fields[7])
            nuSF = float(fields[8])
            newNuclide = NuclideBase(
                self.elements.bySymbol[symbol], massNumber, atomicMass, abundance, excitedState, halflife
            )
            newNuclide.nuSF = nuSF
            self.addNuclide(newNuclide)
def __addNaturalNuclideBases(self):
    """Create a natural-abundance nuclide base for every naturally occurring element not already present."""
    for element in self.elements.byZ.values():
        if element.isNaturallyOccurring() and element.symbol not in self.byName:
            self.addNuclide(NaturalNuclideBase(element.symbol, element))
def __addDummyNuclideBases(self):
    """
    Generate the set of dummy nuclides.

    Notes
    -----
    These nuclides can be used to truncate a depletion / burn-up chain within the MC2 program.
    """
    dummyElement = self.elements.byName["Dummy"]
    for dumpName, dumpWeight in (("DUMP1", 10.0), ("DUMP2", 240.0)):
        self.addNuclide(DummyNuclideBase(element=dummyElement, name=dumpName, weight=dumpWeight))
def __addLumpedFissionProductNuclideBases(self):
    """Generate the lumped fission product (LFP) nuclides and the lumped-region placeholder."""
    lfpElement = self.elements.byName["LumpedFissionProduct"]
    # (name, weight) pairs for each lump; LREGN is the lumped-region placeholder.
    lumpData = (
        ("LFP35", 233.273),
        ("LFP38", 235.78),
        ("LFP39", 236.898),
        ("LFP40", 237.7),
        ("LFP41", 238.812),
        ("LREGN", 1.0),
    )
    for lumpName, lumpWeight in lumpData:
        self.addNuclide(LumpNuclideBase(element=lfpElement, name=lumpName, weight=lumpWeight))
def readMCCNuclideData(self, mccNuclidesFile):
    r"""Read in the label data for the MC2-2 and MC2-3 cross section codes to the nuclide bases.

    .. impl:: Separating MCC data from code.
        :id: I_ARMI_ND_DATA1
        :implements: R_ARMI_ND_DATA

        This function reads the mcc-nuclides.yaml file from the ARMI resources folder. This file contains the
        MC\ :sup:`2`-2 ID (from ENDF/B-V.2) and MC\ :sup:`2`-3 ID (from ENDF/B-VII.0) for all nuclides in
        MC\ :sup:`2`. The ``mcc2id``, ``mcc3idEndfVII0``, and ``mcc3idEndfVII1`` attributes of each
        :py:class:`NuclideBase ` instance are updated as the data is
        read, and the global dictionaries ``byMcc2Id`` ``byMcc3IdEndfVII0`` and ``byMcc3IdEndfVII1`` are populated
        with the nuclide bases keyed by their corresponding ID for each code.
    """
    reader = YAML(typ="rt")
    with open(mccNuclidesFile, "r") as stream:
        nuclides = reader.load(stream)

    for name, ids in nuclides.items():
        nb = self.byName[name]
        mcc2id = ids["ENDF/B-V.2"]
        mcc3idEndfbVII0 = ids["ENDF/B-VII.0"]
        mcc3idEndfbVII1 = ids["ENDF/B-VII.1"]
        # A null entry means the nuclide has no ID in that library/code version.
        if mcc2id is not None:
            nb.mcc2id = mcc2id
            self.byMcc2Id[nb.getMcc2Id()] = nb
        if mcc3idEndfbVII0 is not None:
            nb.mcc3idEndfbVII0 = mcc3idEndfbVII0
            self.byMcc3IdEndfbVII0[nb.getMcc3IdEndfbVII0()] = nb
        if mcc3idEndfbVII1 is not None:
            nb.mcc3idEndfbVII1 = mcc3idEndfbVII1
            self.byMcc3IdEndfbVII1[nb.getMcc3IdEndfbVII1()] = nb

    # The generic MC2-3 lookup points at the ENDF/B-VII.1 mapping.
    self.byMcc3Id = self.byMcc3IdEndfbVII1
def updateNuclideBasesForSpecialCases(self):
"""
Update the nuclide bases for special case name changes.
.. impl:: The special case name Am242g is supported.
:id: I_ARMI_ND_ISOTOPES6
:implements: R_ARMI_ND_ISOTOPES
This function updates the keys for the :py:class:`NuclideBase `
instances for Am-242m and Am-242 in the ``byName`` and ``byDBName`` global dictionaries. This function
associates the more common isomer Am-242m with the name "AM242", and uses "AM242G" to denote the ground
state.
Notes
-----
This function is specifically added to change the definition of `AM242` to refer to its metastable isomer,
`AM242M` by default. `AM242M` is most common isomer of `AM242` and is typically the desired isomer when being
requested rather than than the ground state (i.e., S=0) of `AM242`.
"""
# Change the name of `AM242` to specific represent its ground state.
am242g = self.byName["AM242"]
am242g.name = "AM242G"
self.byName["AM242G"] = am242g
self.byDBName[self.byName["AM242G"].getDatabaseName()] = am242g
# Update the pointer of `AM242` to refer to `AM242M`.
am242m = self.byName["AM242M"]
self.byName["AM242"] = am242m
self.byDBName["nAm242"] = am242m
self.byDBName[self.byName["AM242"].getDatabaseName()] = am242m
def __renormalizeNuclideToElementRelationship(self):
"""Fill in the missing element data for each nuclide."""
for nuc in self.instances:
if nuc.element is None:
nuc.element = self.elements.byZ[nuc.z]
nuc.element.append(nuc)
def __deriveElementalWeightsByNaturalNuclideAbundances(self):
"""Derives and sets the standard atomic weights for each element that has naturally occurring nuclides."""
for element in self.elements.byName.values():
numer = 0.0
denom = 0.0
for nb in element.getNaturalIsotopics():
numer += nb.weight * nb.abundance
denom += nb.abundance
if denom:
element.standardWeight = numer / denom
# Build the nuclide/element directory at import time -- presumably populates the
# module-level singletons used throughout ARMI (factory() is defined earlier in
# this file, outside this view).
factory()
================================================
FILE: armi/nucDirectory/tests/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os import path

# Absolute directory of this test package; lets tests locate fixture files next to them.
NUCDIRECTORY_TESTS_DEFAULT_DIR_PATH = path.dirname(__file__)
================================================
FILE: armi/nucDirectory/tests/test_elements.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Elements."""
import unittest
from armi.nucDirectory.elements import Element, Elements
class TestElements(unittest.TestCase):
    """Tests of the Element directory: lookups, natural abundance, and classification."""

    def setUp(self):
        # Fresh element directory for every test.
        self.elements = Elements()

    def test_elements_elementBulkProperties(self):
        # All three lookup tables must index the same number of elements.
        numElements = len(self.elements.byZ)
        self.assertEqual(numElements, len(self.elements.byZ.values()))
        self.assertEqual(numElements, len(self.elements.byName))
        self.assertEqual(numElements, len(self.elements.bySymbol))

    def test_element_elementByNameReturnsElement(self):
        """Get elements by name.

        .. test:: Get elements by name.
            :id: T_ARMI_ND_ELEMENTS0
            :tests: R_ARMI_ND_ELEMENTS
        """
        for ee in self.elements.byZ.values():
            self.assertIs(ee, self.elements.byName[ee.name])

    def test_element_elementByZReturnsElement(self):
        """Get elements by Z.

        .. test:: Get elements by Z.
            :id: T_ARMI_ND_ELEMENTS1
            :tests: R_ARMI_ND_ELEMENTS
        """
        for ee in self.elements.byZ.values():
            self.assertIs(ee, self.elements.byZ[ee.z])

    def test_element_elementBySymbolReturnsElement(self):
        """Get elements by symbol.

        .. test:: Get elements by symbol.
            :id: T_ARMI_ND_ELEMENTS2
            :tests: R_ARMI_ND_ELEMENTS
        """
        for ee in self.elements.byZ.values():
            self.assertIs(ee, self.elements.bySymbol[ee.symbol])

    def test_element_addExistingElementFails(self):
        # NOTE(review): this calls ``self.elements.Element`` (an attribute on the
        # Elements instance) rather than the imported ``Element`` class --
        # confirm Elements really exposes such a constructor/validator.
        for ee in self.elements.byZ.values():
            with self.assertRaises(ValueError):
                self.elements.Element(ee.z, ee.symbol, ee.name)

    def test_addedElementAppearsInElementList(self):
        # A newly added element must show up in all three lookup tables.
        self.assertNotIn("bacon", self.elements.byName)
        self.assertNotIn(999, self.elements.byZ)
        self.assertNotIn("BZ", self.elements.bySymbol)
        self.elements.addElement(Element(999, "BZ", "bacon"))
        self.assertIn("bacon", self.elements.byName)
        self.assertIn(999, self.elements.byZ)
        self.assertIn("BZ", self.elements.bySymbol)

    def test_elementGetNatIsosOnlyRetrievesAbund(self):
        # Natural isotopics must all carry positive abundance and mass number.
        for ee in self.elements.byZ.values():
            if not ee.isNaturallyOccurring():
                continue
            for nuc in ee.getNaturalIsotopics():
                self.assertGreater(nuc.abundance, 0.0)
                self.assertGreater(nuc.a, 0)

    def test_elementIsNatOccurring(self):
        """
        Test isNaturallyOccurring method by manually testing all elements.

        Uses RIPL definitions of naturally occurring. Protactinium is debated as naturally occurring. Yeah it
        exists as a U235 decay product but it's kind of pseudo-natural.

        .. test:: Get elements by Z to show if they are naturally occurring.
            :id: T_ARMI_ND_ELEMENTS3
            :tests: R_ARMI_ND_ELEMENTS
        """
        for ee in self.elements.byZ.values():
            # Tc (43), Pm (61), Po-Ac (84-89), and Np and above (>=93) are non-natural.
            if ee.z == 43 or ee.z == 61 or 84 <= ee.z <= 89 or ee.z >= 93:
                self.assertFalse(ee.isNaturallyOccurring())
            else:
                nat = ee.isNaturallyOccurring()
                self.assertTrue(nat)

    def test_abundancesAddToOne(self):
        for ee in self.elements.byZ.values():
            if not ee.isNaturallyOccurring():
                continue
            totAbund = sum([iso.abundance for iso in ee.nuclides])
            self.assertAlmostEqual(
                totAbund,
                1.0,
                places=4,
            )

    def test_isHeavyMetal(self):
        """Get elements by Z.

        .. test:: Get elements by Z to show if they are heavy metals.
            :id: T_ARMI_ND_ELEMENTS4
            :tests: R_ARMI_ND_ELEMENTS
        """
        for ee in self.elements.byZ.values():
            # Heavy metal is defined here as Z > 89 (beyond actinium).
            if ee.z > 89:
                self.assertTrue(ee.isHeavyMetal())
            else:
                self.assertFalse(ee.isHeavyMetal())
================================================
FILE: armi/nucDirectory/tests/test_nucDirectory.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests nuclide directory."""
import unittest
from armi.nucDirectory import nucDir
from armi.nucDirectory.nuclideBases import NuclideBases
class TestNucDirectory(unittest.TestCase):
    """Tests for the nucDir name-lookup helpers."""

    def test_nucDir_getNameForOldDashedNames(self):
        """Legacy dash-separated names still resolve to nuclides."""
        legacyNames = (
            "U-232", "U-233", "U-234", "U-235", "U-236", "U-238",
            "B-10", "B-11", "BE-9", "F-19", "LI-6", "LI-7",
            "W-182", "W-183", "W-184", "W-186", "S-32", "O-16",
        )
        for legacyName in legacyNames:
            self.assertIsNotNone(nucDir.getNuclideFromName(legacyName))

    def test_nucDir_getNucFromNucNameReturnsNuc(self):
        """Every known nuclide resolves back to itself by its own name."""
        for nuclide in NuclideBases().instances:
            self.assertEqual(nuclide, nucDir.getNuclideFromName(nuclide.name))

    def test_nucDir_getNuclidesFromForBadName(self):
        """An unknown nuclide name raises."""
        with self.assertRaises(Exception):
            nucDir.getNuclideFromName("Charlie")

    def test_getDisplacementEnergy(self):
        """Test getting the displacement energy for a given nuclide."""
        self.assertEqual(nucDir.getThresholdDisplacementEnergy("H1"), 10.0)
        with self.assertRaises(KeyError):
            nucDir.getThresholdDisplacementEnergy("fail")
================================================
FILE: armi/nucDirectory/tests/test_nuclideBases.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for NuclideBases."""
import math
import os
import random
import unittest
from ruamel.yaml import YAML
from armi.context import RES
from armi.nucDirectory.nuclideBases import (
DummyNuclideBase,
LumpNuclideBase,
NaturalNuclideBase,
NuclideBases,
)
from armi.nucDirectory.tests import NUCDIRECTORY_TESTS_DEFAULT_DIR_PATH
from armi.utils.units import AVOGADROS_NUMBER, CURIE_PER_BECQUEREL, SECONDS_PER_HOUR
class TestNuclideBases(unittest.TestCase):
    """Bulk consistency tests over the full nuclide directory, with the burn chain imposed."""

    @classmethod
    def setUpClass(cls):
        cls.nucDirectoryTestsPath = NUCDIRECTORY_TESTS_DEFAULT_DIR_PATH
        cls.nuclideBases = NuclideBases()
        # Ensure that the burn chain data is initialized before running these tests.
        cls.nuclideBases.burnChainImposed = False
        with open(os.path.join(RES, "burn-chain.yaml"), "r") as burnChainStream:
            cls.nuclideBases.imposeBurnChain(burnChainStream)

    def test_nucBases_fromNameBadNameRaisesException(self):
        with self.assertRaises(KeyError):
            self.nuclideBases.byName["Cat"]

    def test_nucBase_AllAbundancesAddToOne(self):
        for zz in range(1, 102):
            nuclides = self.nuclideBases.elements.byZ[zz].nuclides
            # We only process nuclides with measured masses. Some are purely theoretical, mostly over z=100
            self.assertGreater(len(nuclides), 0, msg=f"z={zz} unexpectedly has no nuclides")
            total = sum([nn.abundance for nn in nuclides if nn.a > 0])
            # NOTE(review): ``any(...)`` evaluates to True/False (1/0), so this asserts
            # total ~= 1.0 when the element has any natural abundance data and
            # total ~= 0.0 otherwise -- confirm this comparison was intended.
            self.assertAlmostEqual(
                any([nn.abundance > 0 for nn in nuclides]),
                total,
                delta=1e-4,
                msg="Abundance ({}) not 1.0 for nuclideBases:\n {}".format(
                    total, "\n ".join(repr(nn) for nn in nuclides)
                ),
            )

    def test_nucBases_AllLabelsAreUnique(self):
        labels = []
        for nn in self.nuclideBases.instances:
            self.assertNotIn(nn.label, labels, f"Label already exists: {nn.label}")
            labels.append(nn.label)

    def test_nucBases_NegativeZRaisesException(self):
        for _ in range(0, 5):
            with self.assertRaises(Exception):
                self.nuclideBases.isotopes(random.randint(-1000, -1))

    def test_nucBases_Z295RaisesException(self):
        with self.assertRaises(Exception):
            self.nuclideBases.isotopes(295)

    def test_nucBases_Mc2Elementals(self):
        # NOTE(review): the ``where`` filter already restricts to LumpNuclideBase,
        # so the else branch can only pass if no lump falls outside notElemental.
        notElemental = [
            "LFP35",
            "LFP38",
            "LFP39",
            "LFP40",
            "LFP41",
            "DUMMY",
            "DUMP1",
            "DUMP2",
            "LREGN",
        ]
        for lump in self.nuclideBases.where(lambda nn: isinstance(nn, LumpNuclideBase)):
            if lump.name in notElemental:
                self.assertIsInstance(lump, LumpNuclideBase)
            else:
                self.assertIsInstance(lump, NaturalNuclideBase)

    def test_LumpNucBaseGetNatIsotopDoesNotFail(self):
        # Lumps with z == 0 have no natural isotopics.
        for nuc in self.nuclideBases.where(lambda nn: isinstance(nn, LumpNuclideBase) and nn.z == 0):
            self.assertEqual(0, len(list(nuc.getNaturalIsotopics())), nuc)

    def test_NaturalNuclideBase_getNatrualIsotpics(self):
        for nuc in self.nuclideBases.where(lambda nn: isinstance(nn, NaturalNuclideBase)):
            numNaturals = len(list(nuc.getNaturalIsotopics()))
            self.assertGreaterEqual(len(nuc.element.nuclides) - 1, numNaturals)

    def test_nucBases_singleFailsWithMultipleMatches(self):
        with self.assertRaises(Exception):
            self.nuclideBases.single(lambda nuc: nuc.z == 92)

    def test_nucBases_singleFailsWithNoMatches(self):
        with self.assertRaises(Exception):
            self.nuclideBases.single(lambda nuc: nuc.z == 1000)

    def test_nucBases_singleIsPrettySpecific(self):
        u235 = self.nuclideBases.single(lambda nuc: nuc.name == "U235")
        self.assertEqual(235, u235.a)
        self.assertEqual(92, u235.z)

    def test_natNucStomicWgtIsAvgOfNatIsotopes(self):
        # Natural-nuclide weight equals the abundance-weighted sum of its isotopes.
        for natNuk in self.nuclideBases.where(lambda nn: isinstance(nn, NaturalNuclideBase)):
            atomicMass = 0.0
            for natIso in natNuk.getNaturalIsotopics():
                atomicMass += natIso.abundance * natIso.weight
            self.assertAlmostEqual(atomicMass, natNuk.weight, delta=0.000001)

    def test_nucBasesLabelAndNameCollsAreForSameNuc(self):
        """The names and labels are correct for nuclides.

        .. test:: Validate the name, label, and DB name are accessible for nuclides.
            :id: T_ARMI_ND_ISOTOPES0
            :tests: R_ARMI_ND_ISOTOPES
        """
        count = 0
        for nuc in self.nuclideBases.where(lambda nn: nn.name == nn.label):
            count += 1
            self.assertIs(nuc, self.nuclideBases.byName[nuc.name])
            self.assertIs(nuc, self.nuclideBases.byDBName[nuc.getDatabaseName()])
            self.assertIs(nuc, self.nuclideBases.byLabel[nuc.label])
        self.assertGreater(count, 10)

    def test_nucBases_imposeBurnChainDecayBulkStats(self):
        """Test must be updated manually when burn chain is modified."""
        decayers = list(self.nuclideBases.where(lambda nn: len(nn.decays) > 0))
        self.assertTrue(decayers)
        for nuc in decayers:
            # These nuclides have deliberately incomplete decay branches; skip.
            if nuc.name in [
                "U238",
                "PU240",
                "PU242",
                "CM242",
                "CM244",
                "CM246",
                "CF250",
                "CF252",
            ]:
                continue
            self.assertAlmostEqual(1.0, sum(dd.branch for dd in nuc.decays))

    def test_nucBasesImposeBurnChainTransmBulkStats(self):
        """
        Make sure all branches are equal to 1 for every transmutation type.

        Exception: We allow 3e-4 threshold to account for ternary fissions, which are usually < 2e-4 per fission.
        """
        trasmuters = self.nuclideBases.where(lambda nn: len(nn.trans) > 0)
        self.assertTrue(trasmuters)
        for nuc in trasmuters:
            expected = len(set(tt.type for tt in nuc.trans))
            self.assertTrue(all(0.0 <= tt.branch <= 1.0 for tt in nuc.trans))
            actual = sum(tt.branch for tt in nuc.trans)
            # ternary fission
            self.assertAlmostEqual(
                expected,
                actual,
                msg=f"{nuc} has {expected} transmutation but the branches add up to {actual}",
                delta=3e-4,
            )

    def test_nucBases_imposeBurn_nuSF(self):
        """Test the nuclide data from file (specifically neutrons / spontaneous fission).

        .. test:: Test that nuclide data was read from file instead of code.
            :id: T_ARMI_ND_DATA0
            :tests: R_ARMI_ND_DATA
        """
        actual = {nn.name: nn.nuSF for nn in self.nuclideBases.where(lambda nn: nn.nuSF > 0.0)}
        expected = {
            "CM248": 3.1610,
            "BK249": 3.4000,
            "CF249": 3.4000,
            "CF250": 3.5200,
            "CF252": 3.7676,
            "U232": 1.710000,
            "U234": 1.8000,
            "U235": 1.8700,
            "U236": 1.900,
            "U238": 2.000,
            "PU236": 2.1200,
            "PU238": 2.2100,
            "PU239": 2.3200,
            "PU240": 2.1510,
            "PU242": 2.1410,
            "CM242": 2.5280,
            "CM243": 0.0000,
            "CM244": 2.6875,
            "CM245": 0.0000,
            "CM246": 2.9480,
            "TH230": 1.390000,
            "TH232": 1.5,
            "NP237": 2.05,
            "PA231": 1.710000,
            "PU241": 2.25,
            "PU244": 2.290000,
            "U233": 1.76,
            "AM241": 2.5,
            "AM242M": 2.56,
            "AM243": 2.61,
            "ES253": 4.700000,
        }
        for key, val in actual.items():
            self.assertEqual(val, expected[key])

    def test_nucBases_databaseNamesStartWith_n(self):
        for nb in self.nuclideBases.instances:
            self.assertEqual("n", nb.getDatabaseName()[0])

    def test_nucBases_AllDatabaseNamesAreUnique(self):
        self.assertEqual(
            len(self.nuclideBases.instances),
            len(set(nb.getDatabaseName() for nb in self.nuclideBases.instances)),
        )

    def test_nucBases_Am242m(self):
        """Test the correct am242g and am242m abbreviations are supported.

        .. test:: Specifically test for Am242 and Am242g because it is a special case.
            :id: T_ARMI_ND_ISOTOPES1
            :tests: R_ARMI_ND_ISOTOPES
        """
        # "AM242" is aliased to the metastable isomer (see updateNuclideBasesForSpecialCases).
        am242m = self.nuclideBases.byName["AM242"]
        self.assertIs(am242m, self.nuclideBases.byName["AM242M"])
        self.assertEqual("nAm242m", am242m.getDatabaseName())
        self.assertIs(am242m, self.nuclideBases.byDBName["nAm242"])
        self.assertAlmostEqual(am242m.weight, 242.059601666)

        am242g = self.nuclideBases.byName["AM242G"]
        self.assertIs(am242g, self.nuclideBases.byName["AM242G"])
        self.assertEqual("nAm242g", am242g.getDatabaseName())
        self.assertIs(am242g, self.nuclideBases.byDBName["nAm242g"])

    def test_nucBases_isHeavyMetal(self):
        for nb in self.nuclideBases.where(lambda nn: nn.z <= 89):
            self.assertFalse(nb.isHeavyMetal())
        for nb in self.nuclideBases.where(lambda nn: nn.z > 89):
            # Dummy and lump placeholders are never heavy metal regardless of Z.
            if isinstance(nb, (DummyNuclideBase, LumpNuclideBase)):
                self.assertFalse(nb.isHeavyMetal())
            else:
                self.assertTrue(nb.isHeavyMetal())

    def test_getDecay(self):
        nb = list(self.nuclideBases.where(lambda nn: nn.z == 89))[0]
        # This test is a bit boring, because the test nuclide library is a bit boring.
        self.assertIsNone(nb.getDecay("sf"))

    def test_getEndfMatNum(self):
        """Test get nuclides by name.

        .. test:: Test get nuclides by name.
            :id: T_ARMI_ND_ISOTOPES2
            :tests: R_ARMI_ND_ISOTOPES
        """
        self.assertEqual(self.nuclideBases.byName["U235"].getEndfMatNum(), "9228")
        self.assertEqual(self.nuclideBases.byName["U238"].getEndfMatNum(), "9237")
        self.assertEqual(self.nuclideBases.byName["PU239"].getEndfMatNum(), "9437")
        self.assertEqual(self.nuclideBases.byName["TC99"].getEndfMatNum(), "4325")
        self.assertEqual(self.nuclideBases.byName["AM242"].getEndfMatNum(), "9547")  # meta 1
        self.assertEqual(self.nuclideBases.byName["CF252"].getEndfMatNum(), "9861")
        self.assertEqual(self.nuclideBases.byName["NP237"].getEndfMatNum(), "9346")
        self.assertEqual(self.nuclideBases.byName["PM151"].getEndfMatNum(), "6161")
        self.assertEqual(self.nuclideBases.byName["PA231"].getEndfMatNum(), "9131")

    def test_NonMc2Nuclide(self):
        """Make sure nuclides that aren't in MC2 still get nuclide bases."""
        nuc = self.nuclideBases.byName["YB154"]
        self.assertEqual(nuc.a, 154)

    def test_kryptonDecayConstants(self):
        """Tests that the nuclides data contains the expected decay constants."""
        # hand calculated reference data includes stable isotopes, radioactive
        # isotopes, metastable isotopes and exercises metastable minimum halflife
        REF_KR_DECAY_CONSTANTS = [
            ("KR69", 24.755256448569472),
            ("KR70", 17.3286795139986),
            ("KR71", 6.93147180559945),
            ("KR72", 0.04053492283976288),
            ("KR73", 0.0253900066139174),
            ("KR74", 0.0010045611312463),
            ("KR75", 0.00251140282811574),
            ("KR76", 0.0000130095191546536),
            ("KR77", 0.000162139691359051),
            ("KR78", 0),
            ("KR79", 5.49488822742219e-06),
            ("KR79M", 0.0138629436111989),
            ("KR80", 0),
            ("KR81", 9.591693391393433e-14),
            ("KR81M", 0.0529119985160263),
            ("KR82", 0),
            ("KR83", 0),
            ("KR83M", math.log(2) / (1.83 * SECONDS_PER_HOUR)),
            ("KR84", 0),
            ("KR85", 2.0453466678736843e-09),
            ("KR85M", 4.29725468419061e-05),
            ("KR86", 0),
            ("KR87", 0.000151408296321526),
            ("KR88", 0.0000681560649518136),
            ("KR89", 0.00366744539978807),
            ("KR90", 0.021446385537127),
            ("KR91", 0.0808806511738559),
            ("KR92", 0.376710424217362),
            ("KR93", 0.538994697169475),
            ("KR94", 3.26956217245257),
            ("KR95", 6.08023842596443),
            ("KR96", 8.66433975699932),
            ("KR97", 11.0023361993642),
            ("KR98", 16.1197018734871),
            ("KR99", 53.3190138892265),
            ("KR100", 99.0210257942778),
            ("KR101", 1091570.36308652),
        ]
        for nucName, refDecayConstant in REF_KR_DECAY_CONSTANTS:
            refNb = self.nuclideBases.byName[nucName]
            decayConstantNb = math.log(2) / refNb.halflife
            try:
                # Relative comparison; ZeroDivisionError falls back to exact (stable isotopes).
                self.assertAlmostEqual((refDecayConstant - decayConstantNb) / refDecayConstant, 0, 6)
            except ZeroDivisionError:
                self.assertEqual(refDecayConstant, decayConstantNb)
            except AssertionError:
                errorMessage = (
                    f"{nucName} reference decay constant {refDecayConstant} ARMI decay constant {decayConstantNb}"
                )
                raise AssertionError(errorMessage)
        # Effectively-stable nuclides should have a near-zero decay constant.
        for nucName in ["XE134", "XE136", "EU151"]:
            nb = self.nuclideBases.byName[nucName]
            decayConstantNb = math.log(2) / nb.halflife
            self.assertAlmostEqual(decayConstantNb, 0, places=3)

    def test_curieDefinitionWithRa226(self):
        """
        Tests that the decay constant of Ra-226 is close to 1 Ci.

        Notes
        -----
        The original definition of 1 Ci was based on the half-life of Ra-226 for 1 gram. The latest evaluations show
        that 1 gram is defined as 0.988 Ci.
        """
        ra226 = self.nuclideBases.byName["RA226"]
        decayConstantRa226 = math.log(2) / ra226.halflife
        weight = ra226.weight
        mass = 1  # gram
        activity = mass * AVOGADROS_NUMBER / weight * decayConstantRa226  # 1 gram
        activity = activity * CURIE_PER_BECQUEREL
        self.assertAlmostEqual(activity, 0.9885593, places=6)

    def test_loadMcc2Data(self):
        """Tests consistency with the `mcc-nuclides.yaml` input and the ENDF/B-V.2 nuclides in the data model.

        .. test:: Test that MCC v2 ENDF/B-V.2 IDs can be queried by nuclides.
            :id: T_ARMI_ND_ISOTOPES3
            :tests: R_ARMI_ND_ISOTOPES
        """
        with open(os.path.join(RES, "mcc-nuclides.yaml")) as f:
            yaml = YAML(typ="rt")
            data = yaml.load(f)
            expectedNuclides = set([nuc for nuc in data.keys() if data[nuc]["ENDF/B-V.2"] is not None])
        for nuc, nb in self.nuclideBases.byMcc2Id.items():
            self.assertIn(nb.name, expectedNuclides)
            self.assertEqual(nb.getMcc2Id(), nb.mcc2id)
            self.assertEqual(nb.getMcc2Id(), nuc)
        self.assertEqual(len(self.nuclideBases.byMcc2Id), len(expectedNuclides))

    def test_loadMcc3EndfVII0Data(self):
        """Tests consistency with the `mcc-nuclides.yaml` input and the ENDF/B-VII.0 nuclides in the data model.

        .. test:: Test that MCC v3 ENDF/B-VII.0 IDs can be queried by nuclides.
            :id: T_ARMI_ND_ISOTOPES4
            :tests: R_ARMI_ND_ISOTOPES

        .. test:: Test the MCC ENDF/B-VII.0 nuclide data that was read from file instead of code.
            :id: T_ARMI_ND_DATA1
            :tests: R_ARMI_ND_DATA
        """
        with open(os.path.join(RES, "mcc-nuclides.yaml")) as f:
            yaml = YAML(typ="rt")
            data = yaml.load(f)
            expectedNuclides = set([nuc for nuc in data.keys() if data[nuc]["ENDF/B-VII.0"] is not None])
        for nuc, nb in self.nuclideBases.byMcc3IdEndfbVII0.items():
            self.assertIn(nb.name, expectedNuclides)
            self.assertEqual(nb.getMcc3IdEndfbVII0(), nb.mcc3idEndfbVII0)
            self.assertEqual(nb.getMcc3IdEndfbVII0(), nuc)
        # Subtract 1 nuclide due to DUMP2.
        self.assertEqual(len(self.nuclideBases.byMcc3IdEndfbVII0), len(expectedNuclides) - 1)

    def test_loadMcc3EndfVII1Data(self):
        """Tests consistency with the `mcc-nuclides.yaml` input and the ENDF/B-VII.1 nuclides in the data model.

        .. test:: Test that MCC v3 ENDF/B-VII.1 IDs can be queried by nuclides.
            :id: T_ARMI_ND_ISOTOPES6
            :tests: R_ARMI_ND_ISOTOPES

        .. test:: Test the MCC ENDF/B-VII.1 nuclide data that was read from file instead of code.
            :id: T_ARMI_ND_DATA2
            :tests: R_ARMI_ND_DATA
        """
        with open(os.path.join(RES, "mcc-nuclides.yaml")) as f:
            yaml = YAML(typ="rt")
            data = yaml.load(f)
            expectedNuclides = set([nuc for nuc in data.keys() if data[nuc]["ENDF/B-VII.1"] is not None])
        for nuc, nb in self.nuclideBases.byMcc3IdEndfbVII1.items():
            self.assertIn(nb.name, expectedNuclides)
            self.assertEqual(nb.getMcc3IdEndfbVII1(), nb.mcc3idEndfbVII1)
            self.assertEqual(nb.getMcc3IdEndfbVII1(), nuc)
            # The generic MC2-3 ID is defined to be the ENDF/B-VII.1 ID.
            self.assertEqual(nb.getMcc3Id(), nb.mcc3idEndfbVII1)
            self.assertEqual(nb.getMcc3Id(), nuc)
        # Subtract 1 nuclide due to DUMP2
        self.assertEqual(len(self.nuclideBases.byMcc3IdEndfbVII1), len(expectedNuclides) - 1)
class TestAAAZZZSId(unittest.TestCase):
    def test_AAAZZZSNameGenerator(self):
        """Test that AAAZZS ID name generator.

        .. test:: Query the AAAZZS IDs can be retrieved for nuclides.
            :id: T_ARMI_ND_ISOTOPES5
            :tests: R_ARMI_ND_ISOTOPES
        """
        nucBases = NuclideBases()
        for nucName, expectedId in (("C12", "120060"), ("U235", "2350920"), ("AM242M", "2420951")):
            if not expectedId:
                continue
            self.assertEqual(expectedId, nucBases.byName[nucName].getAAAZZZSId())
================================================
FILE: armi/nucDirectory/tests/test_thermalScattering.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the composite pattern."""
import unittest
from armi.nucDirectory import thermalScattering as ts
class TestThermalScattering(unittest.TestCase):
    """Tests for the raw thermal scattering data and the name/compound lookups."""

    def test_dataValidity(self):
        """Ensure that over time the raw thermal scattering data in ARMI remains valid."""
        for key, val in ts.BY_NAME_AND_COMPOUND.items():
            # Nuclide name: non-empty string.
            self.assertIsInstance(key[0], str)
            self.assertGreater(len(key[0]), 0)
            # Compound CAN be None, but otherwise must be a non-empty string.
            if key[1] is not None:
                self.assertIsInstance(key[1], str)
                self.assertGreater(len(key[1]), 0)
            # ENDF/B-VIII label and ACE label: non-empty strings.
            for label in (val[0], val[1]):
                self.assertIsInstance(label, str)
                self.assertGreater(len(label), 0)

    def test_fromNameCompInvalid(self):
        """If the name/compound inputs aren't valid, we should get a ValueError."""
        badInputs = (("hi", "mom"), ("C", None), ("O", None), ("FE56", "FE56"))
        for badName, badCompound in badInputs:
            with self.assertRaises(ValueError):
                ts.fromNameAndCompound(badName, badCompound)

    def test_fromNameCompSpotCheck(self):
        """Spot check some examples that should work."""
        spotChecks = (
            ("FE56", None, "tsl-026_Fe_056.endf", "fe-56"),
            ("H", ts.H2O, "tsl-HinH2O.endf", "h-h2o"),
            ("O", ts.D2O, f"tsl-Oin{ts.D2O}.endf", "o-d2o"),
            ("U", ts.UO2, "tsl-UinUO2.endf", "u-uo2"),
        )
        for nucName, compound, endfLabel, aceLabel in spotChecks:
            tsl = ts.fromNameAndCompound(nucName, compound)
            self.assertIsInstance(tsl, ts.ThermalScatteringLabels)
            self.assertEqual(tsl.endf8Label, endfLabel)
            self.assertEqual(tsl.aceLabel, aceLabel)
================================================
FILE: armi/nucDirectory/tests/test_transmutations.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for transmutations."""
import random
import string
import unittest
from armi.nucDirectory import transmutations
from armi.nucDirectory.nuclideBases import NuclideBases
def randomString(length):
    """Return a random string of ``length`` lowercase ASCII letters."""
    letters = [random.choice(string.ascii_lowercase) for _ in range(length)]
    return "".join(letters)
class TransmutationTests(unittest.TestCase):
    """Tests of Transmutation construction and validation."""

    @classmethod
    def setUpClass(cls):
        cls.nuclideBases = NuclideBases()

    def test_Transmutation_validReactionTypes(self):
        """Every supported transmutation type constructs and records its product particle."""
        for reactionType in transmutations.TRANSMUTATION_TYPES:
            data = {"products": [""], "type": reactionType}
            trans = transmutations.Transmutation(self.nuclideBases.byName["AM242M"], data)
            self.assertEqual(trans.type, reactionType)
            self.assertEqual(trans.productParticle, transmutations.PRODUCT_PARTICLES.get(reactionType))

    def test_Transmutation_productParticle(self):
        """An (n,alpha) reaction emits an HE4 particle."""
        data = {"products": [""], "type": "nalph"}
        trans = transmutations.Transmutation(self.nuclideBases.byName["AM242M"], data)
        self.assertEqual(trans.productParticle, "HE4")

    def test_Transmutation_invalidReactionTypes(self):
        """Unrecognized reaction type strings raise KeyError."""
        data = {"products": [""], "branch": 1.0}
        errorCount = 0
        for _ in range(5):
            data["type"] = randomString(3)
            if data["type"] in transmutations.TRANSMUTATION_TYPES:
                self.assertIsNotNone(transmutations.Transmutation(self.nuclideBases.byName["AM242M"], data))
            else:
                errorCount += 1
                with self.assertRaises(KeyError):
                    transmutations.Transmutation(self.nuclideBases.byName["AM242M"], data)
        # a random 3-letter string essentially never matches a real type
        self.assertGreater(errorCount, 2)
class DecayModeTests(unittest.TestCase):
    """Tests of DecayMode construction and validation."""

    @classmethod
    def setUpClass(cls):
        cls.nuclideBases = NuclideBases()

    def test_DecayMode_validReactionTypes(self):
        """Every supported decay mode constructs successfully."""
        data = {"products": [""], "branch": 1.0, "halfLifeInSeconds": 1.0}
        for mode in transmutations.DECAY_MODES:
            data["type"] = mode
            decay = transmutations.DecayMode(self.nuclideBases.byName["AM242M"], data)
            self.assertEqual(decay.type, mode)

    def test_DecayMode_invalidReactionTypes(self):
        """Unrecognized decay mode strings raise KeyError."""
        data = {"products": [""], "branch": 1.0, "halfLifeInSeconds": 1.0}
        for _ in range(25):
            data["type"] = randomString(3)
            if data["type"] in transmutations.DECAY_MODES:
                self.assertIsNotNone(transmutations.DecayMode(self.nuclideBases.byName["AM242M"], data))
            else:
                with self.assertRaises(KeyError):
                    transmutations.DecayMode(self.nuclideBases.byName["AM242M"], data)
================================================
FILE: armi/nucDirectory/thermalScattering.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Handle awareness of Thermal Scattering labels for ENDF/B-VIII and ACE.
The information below is based on Parsons, LA-UR-18-25096, https://mcnp.lanl.gov/pdf_files/la-ur-18-25096.pdf
Scattering law labels are currently available for a variety of classifications:
* Element in Compound (H in H2O, Be in BeO)
* Element in structure (C in Graphite, Be in metal)
* Can be separated as crystalline, 30% porous, 10% porous, etc.
* Element in spin isomer (para H, ortho H, para D, ortho D, etc.)
* Compound in phase (solid CH4, liquid CH4, SiO2-alpha, SiO2-beta).
* Just compound (benzene)
* Just isotope (Fe56, Al27)
The labels for these vary across evaluations (e.g. ENDF/B-VII, ENDF/B-VIII, etc.). We provide ENDF/B-VIII.0 and ACE
labels. Other physics kernels will have to derive their own labels as appropriate in client code.
"""
from dataclasses import dataclass
# strings that users might want to reference downstream
BE_METAL = "Be-metal"
BEO = "BeO"
CRYSTALLINE_GRAPHITE = "crystalline-graphite"
D2O = "D2O"
GRAPHITE_10P = "reactor-graphite-10P"
GRAPHITE_30P = "reactor-graphite-30P"
H2O = "H2O"
SIC = "SiC"
UN = "UN"
UO2 = "UO2"
ZRH = "ZrH"

# thermal scattering label data
# Maps (nuclide name, compound) -> (ENDF/B-VIII file label, ACE label).
# A compound of None means the labels apply to the bare isotope (e.g. FE56, AL27).
BY_NAME_AND_COMPOUND = {
    ("AL27", None): ("tsl-013_Al_027.endf", "al-27"),
    ("BE", BE_METAL): (f"tsl-{BE_METAL}.endf", "be-met"),
    # NOTE(review): this ENDF label ("BeO") breaks the "tsl-*.endf" pattern used by
    # every other entry -- confirm it should not be e.g. "tsl-BeinBeO.endf".
    ("BE", BEO): (BEO, "be-beo"),
    ("C", CRYSTALLINE_GRAPHITE): (f"tsl-{CRYSTALLINE_GRAPHITE}.endf", "grph"),
    ("C", GRAPHITE_10P): (f"tsl-{GRAPHITE_10P}.endf", "grph10"),
    ("C", GRAPHITE_30P): (f"tsl-{GRAPHITE_30P}.endf", "grph30"),
    ("C", SIC): ("tsl-CinSiC.endf", "c-sic"),
    ("FE56", None): ("tsl-026_Fe_056.endf", "fe-56"),
    ("H", H2O): ("tsl-HinH2O.endf", "h-h2o"),
    ("H", ZRH): ("tsl-HinZrH.endf", "h-zrh"),
    # "H2" is deuterium; the file labels use "D"
    ("H2", D2O): (f"tsl-Din{D2O}.endf", "d-d2o"),
    ("N", UN): ("tsl-NinUN.endf", "n-un"),
    ("O", BEO): ("tsl-OinBeO.endf", "o-beo"),
    ("O", D2O): (f"tsl-Oin{D2O}.endf", "o-d2o"),
    ("O", UO2): ("tsl-OinUO2.endf", "o-uo2"),
    # NOTE(review): "SI" is upper-cased here unlike the "Cin"/"Oin" entries -- confirm
    # against the ENDF/B-VIII.0 file naming (LA-UR-18-25096).
    ("SI", SIC): ("tsl-SIinSiC.endf", "si-sic"),
    ("U", UN): ("tsl-UinUN.endf", "u-un"),
    ("U", UO2): ("tsl-UinUO2.endf", "u-uo2"),
    ("ZR", ZRH): ("tsl-ZRinZrH.endf", "zr-zrh"),
}
@dataclass(frozen=True)
class ThermalScatteringLabels:
    """Container for the labels for a particular nuclide/compound combination.

    Attributes
    ----------
    name: str
        Name of the nuclide. This should match the string in the "byName" field in nuclideBases.
    compound: str
        Label indicating what the subjects are in (e.g. ``"Graphite"`` or ``"H2O"``). Can be left off for, e.g. Fe56.
    endf8Label: str
        Label for ENDF/B-VIII evaluation.
    aceLabel: str
        Label for ACE.
    """

    name: str
    compound: str
    endf8Label: str
    aceLabel: str
def fromNameAndCompound(name: str, compound: str):
    """
    Look up the ENDF/B-VIII and ACE thermal scattering labels for a nuclide.

    Parameters
    ----------
    name: str
        Name of the nuclide.
    compound: str
        Name of the compound (can be None).

    Returns
    -------
    ThermalScatteringLabels
        Data class holding the ENDF/ACE labels for this nuclide/compound combination.

    Raises
    ------
    ValueError
        ARMI only stores a small set of labels; requesting an unknown combination fails.
    """
    try:
        endfLabel, aceLabel = BY_NAME_AND_COMPOUND[(name, compound)]
    except KeyError:
        raise ValueError(f"No thermal scattering labels are known for name/compound: {name}/{compound}") from None
    return ThermalScatteringLabels(name, compound, endfLabel, aceLabel)
================================================
FILE: armi/nucDirectory/transmutations.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the definition of :py:class:`~Transmutation` and :py:class:`~Decay` classes.
.. inheritance-diagram::
Transmutation DecayMode
The mappings between active nuclides during transmutation and decay are described in a
``burn-chain.yaml`` file pointed to by the ``burnChainFileName``
setting. This file contains one entry per nuclide that can transmute or decay that
look similar to the example below::
U238:
- nuSF: 2.0000
- transmutation:
branch: 1.0
products:
- NP237
type: n2n
- transmutation:
branch: 1.0
products:
- LFP38
type: fission
- transmutation:
branch: 1.0
products:
- NP239
- PU239
type: nGamma
- decay:
branch: 5.45000e-07
halfLifeInSeconds: 1.4099935680e+17
products:
- LFP38
type: sf
This example defines 3 transmutations (an ``(n,2n)`` reaction, an ``(n,fission)`` reaction, an
``(n,gamma)`` reaction), and a spontaneous fission decay reaction with a very low branching
ratio. Valid reaction ``type`` values are listed in :py:class:`~armi.nucDirectory.transmutations.Transmutation`
and :py:class:`~armi.nucDirectory.transmutations.DecayMode`.
The ``branch`` entry determines the fraction of the products of a given reaction that will end up
in a particular product. The branches must never sum up to anything other than 1.0.
The ``products`` entry is a list, but only one entry will be the actual product. The list defines
a preference order. For example, if ``NP239`` is being tracked as an active nuclide in the problem
it will be the product of the ``nGamma`` reaction above. Otherwise, ``U238`` will transmute directly
to the alternate product, ``PU239``.
.. warning:: If you track very short-lived decays explicitly then the burn matrix becomes very
ill-conditioned and numerical solver issues can result. Specialized matrix
exponential solvers (e.g. CRAM [1]) are required to get adequate solutions in these cases [2].
The example above also defines a ``nuSF`` item, which is how many neutrons are emitted per spontaneous
fission. This is used for intrinsic source term calculations.
[1] Pusa, Maria, and Jaakko Leppanen. "Computing the matrix exponential in burnup calculations."
Nuclear science and engineering 164.2 (2010): 140-150.
[2] Moler, Cleve, and Charles Van Loan. "Nineteen dubious ways to compute the exponential of a matrix."
SIAM review 20.4 (1978): 801-836.
"""
import math
from armi import runLog
from armi.utils import iterables
# Natural log of 2, used to convert half-lives into decay constants.
LN2 = math.log(2)
# Neutron-induced reactions the burn chain can represent.
TRANSMUTATION_TYPES = ["n2n", "fission", "nGamma", "nalph", "np", "nd", "nt"]
# Spontaneous decay modes the burn chain can represent.
DECAY_MODES = [
    "bmd",  # beta minus
    "bpd",  # beta plus
    "ad",  # alpha decay
    "ec",  # electron capture
    "sf",  # spontaneous fission
]
# Outgoing particle emitted by each reaction/decay type (types not listed emit None).
PRODUCT_PARTICLES = {"nalph": "HE4", "np": "H1", "nd": "H2", "nt": "H3", "ad": "HE4"}
class Transmutable:
    """
    Base class linking a parent nuclide to its reaction or decay products.

    Attributes
    ----------
    parent : NuclideBase
        The parent nuclide in this reaction.
    type : str
        The type name of reaction (e.g. ``n2n``, ``fission``, etc.)
    productNuclides : tuple
        Names of potential product nuclides of this reaction, in order of preference.
        Multiple options let the library transmute to a preferred nuclide when the user
        models it, with fallbacks when they do not. Only one product is created.
    productParticle : str
        The outgoing particle of this reaction (e.g. HE4 for n,alpha). Default is None.
    branch : float
        Fraction of the time this transmutation occurs; between 0 and 1, and less than 1
        when a decay or reaction branches between multiple productNuclides. Do not set
        this above 1 to get extra products: it scales the reaction cross section and
        would double-deplete the parent.

    Notes
    -----
    These link two :py:class:`~armi.nucDirectory.nuclideBases.NuclideBase` objects through
    transmutation or decay.

    See Also
    --------
    Transmutation
    DecayMode
    """

    def __init__(self, parent, dataDict):
        self.parent = parent
        self.type = dataDict["type"]
        self.productNuclides = tuple(dataDict["products"])
        # Default to the generic outgoing particle for this reaction type (may be None).
        self.productParticle = dataDict.get("productParticle", PRODUCT_PARTICLES.get(self.type))
        self.branch = dataDict.get("branch")
        if self.branch is None:
            self.branch = 1.0
            runLog.info(f"The branching ratio for {self} was not defined and is assumed to be 1.0.")

    def getPreferredProduct(self, libraryNucNames):
        """
        Return the most preferred transmutation product/decay daughter in ``libraryNucNames``.

        Notes
        -----
        The ARMI burn chain is not a full burn chain: it short circuits shorter half-lives
        and uses lumped nuclides as catch-all objects, so the "preferred" product/daughter
        may not be the actual physical product/daughter.
        """
        for candidate in self.productNuclides:
            if candidate in libraryNucNames:
                return candidate
        # No candidate was available; build a readable, chunked listing for the error.
        groupedNames = iterables.split(libraryNucNames, max(1, int(len(libraryNucNames) / 10)))
        msg = "Could not find suitable product/daughter for {}.\nThe available options were:\n {}".format(
            self, ",\n ".join(", ".join(chunk) for chunk in groupedNames)
        )
        raise KeyError(msg)
class Transmutation(Transmutable):
    r"""
    A transmutation from one nuclide to another.

    Notes
    -----
    The supported transmutation types include:

    * :math:`n,2n`
    * :math:`n,fission`
    * :math:`n,\gamma` (``nGamma``)
    * :math:`n,\alpha` (``nalph``)
    * :math:`n,p` (proton) (``np``)
    * :math:`n,d` (deuteron) (``nd``)
    * :math:`n,t` (triton) (``nt``)
    """

    def __init__(self, parent, dataDict):
        Transmutable.__init__(self, parent, dataDict)
        # Validate after base-class init so self.type is populated.
        if self.type not in TRANSMUTATION_TYPES:
            raise KeyError("{} not in {}".format(self.type, TRANSMUTATION_TYPES))

    def __repr__(self):
        # Bug fix: the format string was empty (""), so repr() always returned the
        # empty string and silently discarded its four arguments.
        return "<Transmutation type: {}, parent: {}, products: {}, branch: {}>".format(
            self.type, self.parent.name, self.productNuclides, self.branch
        )
class DecayMode(Transmutable):
    r"""Defines a decay from one nuclide to another.

    Notes
    -----
    The supported decay types are also all transmutations, and include:

    * :math:`\beta^-` (``bmd``)
    * :math:`\beta^+` (``bpd``)
    * :math:`\alpha` (``ad``)
    * Electron capture (``ec``)
    * Spontaneous fission (``sf``)

    Of note, the following are not supported:

    * Internal conversion
    * Gamma decay
    """

    def __init__(self, parent, dataDict):
        Transmutable.__init__(self, parent, dataDict)
        self.halfLifeInSeconds = parent.halflife
        # Check for user-defined value of half-life within the burn-chain data. If this is
        # updated then prefer the user change and then note this to the user. Otherwise,
        # maintain the default loaded from the nuclide bases.
        userHalfLife = dataDict.get("halfLifeInSeconds", None)
        # NOTE(review): truthiness also skips an explicit 0.0 half-life -- presumably
        # intentional, since a zero half-life would divide by zero below; confirm.
        if userHalfLife:
            if userHalfLife != parent.halflife:
                runLog.info(
                    f"Half-life provided for {self} will be updated from "
                    f"{parent.halflife:<15.11e} to {userHalfLife:<15.11e} seconds based on "
                    "user provided burn-chain data."
                )
            self.halfLifeInSeconds = userHalfLife
        self.decay = LN2 / self.halfLifeInSeconds * self.branch  # decay constant, reduced by branch to make it accurate
        if self.type not in DECAY_MODES:
            raise KeyError("{} is not in {}".format(self.type, DECAY_MODES))

    def __repr__(self):
        # Bug fix: the format string was empty (""), so repr() always returned the
        # empty string and silently discarded its four arguments.
        return "<DecayMode type: {}, parent: {}, products: {}, halfLife: {}>".format(
            self.type,
            self.parent.name,
            self.productNuclides,
            self.halfLifeInSeconds,
        )
================================================
FILE: armi/nuclearDataIO/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Read and/or write data files associated with nuclear data and reactor physics data."""
# ruff: noqa: F401
# Export the cccc modules here for backward compatibility, though prefer full imports in new code.
from armi.nuclearDataIO.cccc import (
compxs,
dif3d,
dlayxs,
fixsrc,
gamiso,
geodst,
isotxs,
labels,
nhflux,
pmatrx,
pwdint,
rtflux,
rzflux,
)
from armi.physics import neutronics
def getExpectedISOTXSFileName(cycle=None, node=None, suffix=None, xsID=None):
    """
    Return the ISOTXS file name that matches either the current cycle or xsID with a suffix.

    Parameters
    ----------
    cycle : int, optional
        Cycle number; mutually exclusive with ``xsID`` and ``suffix``.
    node : int, optional
        Time node within ``cycle``.
    suffix : str, optional
        Suffix appended to an ``xsID``-style name.
    xsID : str, optional
        Cross section ID (e.g. ``AA``); mutually exclusive with ``cycle``.

    Raises
    ------
    ValueError
        If mutually exclusive keywords are combined.

    See Also
    --------
    getExpectedCOMPXSFileName
    getExpectedGAMISOFileName
    getExpectedPMATRXFileName
    """
    if xsID is not None and cycle is not None:
        raise ValueError("Both `xsID` and `cycle` cannot be specified together.")
    if suffix is not None and cycle is not None:
        # Bug fix: this message previously had mismatched backticks ("``cycle").
        raise ValueError("Both `suffix` and `cycle` cannot be specified together.")
    if xsID is not None:
        # xsID-style names use only the first three characters of "ISOTXS" (i.e. "ISO").
        neutronFileName = neutronics.ISOTXS[:3]
    else:
        neutronFileName = neutronics.ISOTXS
    return _findExpectedNeutronFileName(neutronFileName, _getNeutronKeywords(cycle, node, suffix, xsID))
def getExpectedCOMPXSFileName(cycle=None, node=None):
    """
    Return the COMPXS file name for the given cycle (and optional node).

    See Also
    --------
    getExpectedISOTXSFileName
    getExpectedGAMISOFileName
    getExpectedPMATRXFileName
    """
    keywords = _getNeutronKeywords(cycle, node, suffix=None, xsID=None)
    return _findExpectedNeutronFileName(neutronics.COMPXS, keywords)
def _findExpectedNeutronFileName(fileType, fileNameKeywords):
return fileType + "".join(fileNameKeywords)
def _getNeutronKeywords(cycle, node, suffix, xsID):
if cycle is not None and xsID is not None:
raise ValueError("Keywords are over-specified. Choose `cycle` or `xsID` only")
# If neither cycle or xsID are provided there are no additional keywords to add to the file name
if cycle is None and xsID is None:
keywords = []
else:
# example: ISOTXS-c0
if cycle is not None:
keywords = [f"-c{cycle}n{node}"] if node is not None else ["-c", str(cycle)]
# example: ISOAA-test
elif xsID is not None:
keywords = [xsID]
if suffix not in [None, ""]:
keywords.append("-" + suffix)
return keywords
def getExpectedGAMISOFileName(cycle=None, node=None, suffix=None, xsID=None):
    """
    Return the GAMISO file name matching either the ``cycle`` or the ``xsID``/``suffix``.

    Examples: a ``cycle`` of 0 yields ``cycle0.gamiso``; an ``xsID`` of ``AA`` with a
    ``suffix`` of ``test`` yields ``AA-test.gamiso``.

    See Also
    --------
    getExpectedISOTXSFileName
    getExpectedCOMPXSFileName
    getExpectedPMATRXFileName
    """
    if all(arg is None for arg in (cycle, suffix, xsID)):
        # the bare GAMISO file name is upper case
        baseName = neutronics.GAMISO
    else:
        # qualified names use the lower-case file path extension
        baseName = neutronics.GAMISO_EXT
    return _findExpectedGammaFileName(baseName, _getGammaKeywords(cycle, node, suffix, xsID))
def getExpectedPMATRXFileName(cycle=None, node=None, suffix=None, xsID=None):
    """
    Return the PMATRX file name matching either the ``cycle`` or the ``xsID``/``suffix``.

    Examples: a ``cycle`` of 0 yields ``cycle0.pmatrx``; an ``xsID`` of ``AA`` with a
    ``suffix`` of ``test`` yields ``AA-test.pmatrx``.

    See Also
    --------
    getExpectedISOTXSFileName
    getExpectedCOMPXSFileName
    getExpectedGAMISOFileName
    """
    if all(arg is None for arg in (cycle, suffix, xsID)):
        # the bare PMATRX file name is upper case
        baseName = neutronics.PMATRX
    else:
        # qualified names use the lower-case file path extension
        baseName = neutronics.PMATRX_EXT
    return _findExpectedGammaFileName(baseName, _getGammaKeywords(cycle, node, suffix, xsID))
def _findExpectedGammaFileName(fileType, fileNameKeywords):
return "".join(fileNameKeywords) + fileType
def _getGammaKeywords(cycle, node, suffix, xsID):
if cycle is not None and xsID is not None:
raise ValueError("Keywords are over-specified. Choose `cycle` or `xsID` only")
# If neither cycle or xsID are provided there are no additional keywords to add
# to the file name
if cycle is None and xsID is None:
keywords = []
else:
# example: cycle0.gamiso
if cycle is not None:
keywords = [f"cycle{cycle}node{node}"] if node is not None else [f"cycle{cycle}"]
elif xsID is not None:
keywords = [xsID]
if suffix not in [None, ""]:
if not suffix.startswith("-"):
suffix = "-" + suffix
keywords.append(suffix)
else:
raise ValueError("The cycle or XS ID must be specified.")
keywords.append(".")
return keywords
================================================
FILE: armi/nuclearDataIO/cccc/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This subpackage reads and writes CCCC standard interface files for reactor physics codes.
Starting in the late 1960s, the computational nuclear analysis community recognized a need to
establish some standard file formats to exchange reactor descriptions and reactor physics
quantities. They formed the Committee on Computer Code Coordination (CCCC) and issued
several versions of their standards. The latest was issued in 1977 as [CCCC-IV]_. Many
reactor codes to this day use these files. This package provides a Python abstraction to
read many (though not necessarily all) of these files, manipulate the data, and
write them back out to disk.
Section IV of [CCCC-IV]_ defines the standard interface files that were created by the
CCCC. In addition to the standard files listed in this document, software like DIF3D,
PARTISN, and other reactor physics codes may have their own code-dependent interface files.
In most cases, they follow a similar structure and definition as the standardized formats,
but were not general enough to be used and implemented across all codes. The following
are listed as the standard interface files:
* ISOTXS (:py:mod:`armi.nuclearDataIO.cccc.isotxs`) - Nuclide (isotope) - ordered, multigroup
neutron cross section data
* GRUPXS - Group-ordered, isotopic, multigroup neutron cross section data.
* BRKOXS - Bondarenko (Russian format) self-shielding data
* DLAYXS (:py:mod:`armi.nuclearDataIO.cccc.dlayxs`) - Delayed neutron precursor data
* ISOGXS (:py:mod:`armi.nuclearDataIO.cccc.gamiso`) - Nuclide (isotope) - ordered, multigroup
gamma cross section data
* GEODST (:py:mod:`armi.nuclearDataIO.cccc.geodst`) - Geometry description
* NDXSRF - Nuclear density and cross section referencing data
* ZNATDN - Zone and subzone atomic densities
* SEARCH - Criticality search data
* SNCON - Sn (Discrete Ordinates) constants
* FIXSRC (:py:mod:`armi.nuclearDataIO.cccc.fixsrc`) - Distributed and surface fixed sources
* RTFLUX (:py:mod:`armi.nuclearDataIO.cccc.rtflux`) - Regular total (scalar) neutron flux
* ATFLUX (:py:mod:`armi.nuclearDataIO.cccc.rtflux`) - Adjoint total (scalar) neutron flux
* RCURNT - Regular neutron current
* ACURNT - Adjoint neutron current
* RAFLUX - Regular angular neutron flux
* AAFLUX - Adjoint angular neutron flux
* RZFLUX (:py:mod:`armi.nuclearDataIO.cccc.rzflux`) - Regular, zone-averaged flux by neutron group
* PWDINT (:py:mod:`armi.nuclearDataIO.cccc.pwdint`) - Power density by mesh interval
* WORTHS - Reactivity (per cc) by mesh interval
Other code-dependent interface files may also be included in this package but should be
documented which software they are created from and used for. The file structures should
also be provided in the module-level docstrings.
.. [CCCC-IV] R. Douglas O'Dell, "Standard Interface Files and Procedures for Reactor Physics
Codes, Version IV," LA-6941-MS, Los Alamos National Laboratory (September 1977).
Web. doi:10.2172/5369298. (`OSTI `__)
Using the system
----------------
Most supported files are in their own module. Each has their own :py:class:`cccc.DataContainer` to
hold the data and one or more :py:class:`cccc.Stream` objects representing different I/O formats.
The general pattern is to use any of the following methods on a ``Stream`` object:
* :py:meth:`cccc.Stream.readBinary`
* :py:meth:`cccc.Stream.readAscii`
* :py:meth:`cccc.Stream.writeBinary`
* :py:meth:`cccc.Stream.writeAscii`
For example, to get an RTFLUX data structure from a binary file named ``RTFLUX``, you run::
>>> from armi.nuclearDataIO.cccc import rtflux
>>> rtfluxData = rtflux.RtfluxStream.readBinary("RTFLUX")
Then if you want to write that data to an ASCII file named ``rtflux.ascii``, you run:
>>> rtflux.RtfluxStream.writeAscii(rtfluxData, "rtflux.ascii")
Implementation details
----------------------
We have come up with a powerful but somewhat confusing-at-first implementation that allows
us to define the structure of the files in code just once, in a way that can both read and write
the files. Many methods start with the prefix ``rw`` to indicate that they are used
during both reading and writing.
Normal users of this code do not need to know the implementation details.
Discussion
----------
While loading from stream classmethods is explicit and nice and all, there has been some
talk about moving the read/write ascii/binary methods to the data classes for
implementations that use data structures. This would hide the Stream subclasses from
users, which may be appropriate. On the other hand, logic to select which stream
subclass to user (e.g. adjoint vs. real) will have to be moved into the
data classes.
Notes
-----
A CCCC record consists of a leading and ending integer, which indicates the size of the record in
bytes. (This is actually just FORTRAN unformatted sequential files are written, see e.g.
https://gcc.gnu.org/onlinedocs/gfortran/File-format-of-unformatted-sequential-files.html)
As a result, it is possible to perform a check when reading in a record to determine if it
was read correctly, by making sure the record size at the beginning and ending of a record are
always equal.
There are similarities between this code and that in the PyNE cccc subpackage.
This is the original source of the code. TerraPower authorized the publication
of some of the CCCC code to the PyNE project way back in the 2011 era. This code
has since been updated significantly to both read and write the files.
This was originally inspired by Prof. James Paul Holloway's alpha
release of ccccutils written in c++ from 2001.
"""
from armi.nuclearDataIO.cccc.cccc import * # noqa: F403
================================================
FILE: armi/nuclearDataIO/cccc/cccc.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Defines containers for the reading and writing standard interface files for reactor physics codes.
.. impl:: Generic tool for reading and writing Committee on Computer Code Coordination (CCCC) format
files for reactor physics codes
:id: I_ARMI_NUCDATA
:implements: R_ARMI_NUCDATA_ISOTXS,
R_ARMI_NUCDATA_GAMISO,
R_ARMI_NUCDATA_GEODST,
R_ARMI_NUCDATA_DIF3D,
R_ARMI_NUCDATA_PMATRX,
R_ARMI_NUCDATA_DLAYXS
This module provides a number of base classes that implement general capabilities for binary and
ASCII file I/O. The :py:class:`IORecord` serves as an abstract base class that instantiates a
number of methods that the binary and ASCII children classes are meant to implement. These
methods, prefixed with ``rw``, are meant to convert literal data types, e.g. float or int, to
either binary or ASCII. This base class does its own conversion for container data types, e.g.
list or matrix, relying on the child implementation of the literal types that the container
possesses. The binary conversion is implemented in :py:class:`BinaryRecordReader` and
:py:class:`BinaryRecordWriter`. The ASCII conversion is implemented in
:py:class:`AsciiRecordReader` and :py:class:`AsciiRecordWriter`.
These :py:class:`IORecord` classes are used within :py:class:`Stream` objects for the data
conversion. :py:class:`Stream` is a context manager that opens a file for reading or writing on
the ``__enter__`` and closes that file upon ``__exit__``. :py:class:`Stream` is an abstract base
class that is subclassed for each CCCC file. It is subclassed directly for the CCCC files that
contain cross-section data:
* :py:class:`ISOTXS `
* :py:mod:`GAMISO `
* :py:class:`PMATRX `
* :py:class:`DLAYXS `
* :py:mod:`COMPXS `
For the CCCC file types that are outputs from a flux solver such as DIF3D (e.g., GEODST, DIF3D,
NHFLUX) the streams are subclassed from :py:class:`StreamWithDataContainer`, which is a special
abstract subclass of :py:class:`Stream` that implements a common pattern used for these file
types. In a :py:class:`StreamWithDataContainer`, the data is directly read to or written from a
specialized data container.
The data container structure for each type of CCCC file is implemented in the module for that
file, as a subclass of :py:class:`DataContainer`. The subclasses for each CCCC file type define
standard attribute names for the data that will be read from or written to the CCCC file. CCCC
file types that follow this pattern include:
* :py:class:`GEODST `
* :py:class:`DIF3D `
* :py:class:`NHFLUX ` (and multiple sub-classes)
* :py:class:`LABELS `
* :py:class:`PWDINT `
* :py:class:`RTFLUX `
* :py:class:`RZFLUX `
* :py:class:`RTFLUX `
The logic to parse or write each specific file format is contained within the
:py:meth:`Stream.readWrite` implementations of the respective subclasses.
"""
import io
import itertools
import os
import struct
from copy import deepcopy
from typing import List
import numpy as np
from armi import runLog
from armi.nuclearDataIO import nuclearFileMetadata
IMPLICIT_INT = "IJKLMN"
"""Letters that trigger implicit integer types in old FORTRAN 77 codes."""
class IORecord:
    """
    A single CCCC record.

    Reads or writes information to or from a stream.

    Parameters
    ----------
    stream
        A collection of data to be read or written
    hasRecordBoundaries : bool
        A True value means the fortran file was written using access='sequential' and contains
        a 4 byte int count at the beginning and end of each record. Otherwise, if False the
        fortran file was written using access='direct'.

    Notes
    -----
    The methods in this object often have `rw` prefixes, meaning the same method
    can be used for both reading and writing. We consider this a significant
    achievement that enforces consistency between the code for reading and writing
    CCCC records. The tradeoff is that it's a bit challenging to comprehend at first.
    """

    # native struct sizes of the primitive types as laid out in the binary files
    _intSize = struct.calcsize("i")
    _longSize = struct.calcsize("q")
    maxsize = len(str(2**31 - 1))  # limit to max short even though Python3 can go bigger.
    # fixed-width ASCII field formats used by the text representation of records
    _intFormat = " {{:>+{}}}".format(maxsize)
    _intLength = maxsize + 1
    _floatSize = struct.calcsize("f")
    _floatFormat = " {:+.16E}"
    # space + sign + "0." prefix + 16 mantissa digits + exponent field
    _floatLength = 2 + 2 + 16 + 4
    _characterSize = struct.calcsize("c")
    # running total of IORecord instances ever created (diagnostic aid)
    count = 0
def __init__(self, stream, hasRecordBoundaries=True):
    # Track how many records have ever been created (class-level diagnostic counter).
    IORecord.count += 1
    self._stream = stream
    # numBytes: declared size of the record; byteCount: bytes processed so far.
    self.numBytes = 0
    self.byteCount = 0
    self._hasRecordBoundaries = hasRecordBoundaries
def __enter__(self):
    """Open the stream for reading/writing and return :code:`self`.

    See Also
    --------
    armi.nuclearDataIO.cccc.IORecord.open
    """
    # delegate to the subclass-specific open() implementation
    self.open()
    return self
def __exit__(self, exc_type, exc_value, traceback):
    """Close the record on clean exit; wrap close failures in a BufferError."""
    # If an exception is already propagating out of the with-block, do not mask
    # it with errors from close(); let the original exception continue.
    if exc_type is not None:
        return
    try:
        self.close()
    except Exception as ee:
        runLog.error("Failed to close CCCC record.")
        runLog.error(ee)
        # A failed close usually means the record was over-read past its boundary.
        raise BufferError(
            "Failed to close record, {}.\n{}\n"
            "It is possible too much data was read from the "
            "record, and the end of the stream was reached.\n"
            "".format(self, ee)
        )
def open(self):
    """Abstract method for opening the stream.

    Raises
    ------
    NotImplementedError
        Subclasses must implement stream opening.
    """
    raise NotImplementedError()
def close(self):
    """Abstract method for closing the stream.

    Raises
    ------
    NotImplementedError
        Subclasses must implement stream closing.
    """
    raise NotImplementedError()
def rwInt(self, val):
    """Abstract method for reading or writing an integer.

    Notes
    -----
    The method has a seemingly odd signature, because it is used for both reading and writing.
    When writing, the :code:`val` should have value, but when the record is being read,
    :code:`val` can be :code:`None` or anything else; it is ignored.

    Raises
    ------
    NotImplementedError
        Subclasses must implement integer I/O.
    """
    raise NotImplementedError()
def rwBool(self, val):
    """Read or write a boolean value, stored in the file as an integer."""
    # Anything that is not already a bool (e.g. None during a read) is coerced to False.
    if not isinstance(val, bool):
        val = False
    return bool(self.rwInt(int(val)))
def rwFloat(self, val):
    """Abstract method for reading or writing a floating point (single precision) value.

    Notes
    -----
    The method has a seemingly odd signature, because it is used for both reading and writing.
    When writing, the :code:`val` should have value, but when the record is being read,
    :code:`val` can be :code:`None` or anything else; it is ignored.

    Raises
    ------
    NotImplementedError
        Subclasses must implement single-precision float I/O.
    """
    raise NotImplementedError()
def rwDouble(self, val):
    """Abstract method for reading or writing a floating point (double precision) value.

    Notes
    -----
    The method has a seemingly odd signature, because it is used for both reading and writing.
    When writing, the :code:`val` should have value, but when the record is being read,
    :code:`val` can be :code:`None` or anything else; it is ignored.
    """
    raise NotImplementedError()
def rwString(self, val, length):
    """Abstract method for reading or writing a fixed-width string.

    Notes
    -----
    The method has a seemingly odd signature, because it is used for both reading and writing.
    When writing, the :code:`val` should have value, but when the record is being read,
    :code:`val` can be :code:`None` or anything else; it is ignored.
    """
    raise NotImplementedError()
def rwList(self, contents, containedType, length, strLength=0):
    """
    Read or write an array of items of a uniform type.

    Parameters
    ----------
    contents : sequence or None
        Values to write; may be ``None`` or empty when reading (it is ignored).
    containedType : str
        One of ``"int"``, ``"float"``, ``"double"``, or ``"string"``.
    length : int
        Number of items to read or write.
    strLength : int
        Fixed width of each string; only used when ``containedType`` is ``"string"``.

    Returns
    -------
    np.ndarray
        The values read (or the values echoed back when writing).

    Raises
    ------
    ValueError
        If ``containedType`` is not one of the supported type names.

    Warning
    -------
    If a :code:`contents` evaluates to :code:`True`, the array must be the same size as
    :code:`length`.
    """
    actions = {
        "int": self.rwInt,
        "float": self.rwFloat,
        "string": lambda val: self.rwString(val, strLength),
        "double": self.rwDouble,
    }
    action = actions.get(containedType)
    if action is None:
        # ValueError (still an Exception subclass, so existing handlers work)
        # pinpoints the bad argument rather than raising a bare Exception
        raise ValueError('Cannot pack or unpack the type "{}".'.format(containedType))
    # this little trick will make this work for both reading and writing, yay!
    if contents is None or len(contents) == 0:
        contents = [None for _ in range(length)]
    return np.array([action(contents[ii]) for ii in range(length)])
def rwMatrix(self, contents, *shape):
    """Read or write a matrix of single-precision floating point values.

    Notes
    -----
    The method has a seemingly odd signature, because it is used for both reading and writing.
    When writing, the :code:`contents` should have value, but when the record is being read,
    :code:`contents` can be :code:`None` or anything else; it is ignored.

    Warning
    -------
    If a :code:`contents` is not :code:`None`, the array must be the same shape as
    :code:`*shape`.
    """
    return self._rwMatrix(contents, self.rwFloat, *shape)
def rwDoubleMatrix(self, contents, *shape):
    """Read or write a matrix of double-precision floating point values.

    Notes
    -----
    The method has a seemingly odd signature, because it is used for both reading and writing.
    When writing, the :code:`contents` should have value, but when the record is being read,
    :code:`contents` can be :code:`None` or anything else; it is ignored.

    Warning
    -------
    If a :code:`contents` is not :code:`None`, the array must be the same shape as
    :code:`*shape`.
    """
    return self._rwMatrix(contents, self.rwDouble, *shape)
def rwIntMatrix(self, contents, *shape):
    """Read or write a matrix of int values (delegates to :meth:`_rwMatrix`)."""
    return self._rwMatrix(contents, self.rwInt, *shape)
@staticmethod
def _rwMatrix(contents, func, *shape):
"""
Read/write a matrix.
Notes
-----
This can be important for performance when reading large matrices (e.g. scatter
matrices). It may be worth investigating ``np.frombuffer`` on read and
something similar on write.
With shape, the first shape argument should be the outermost loop because
these are stored in column major order (the FORTRAN way).
Note that np.ndarrays can be built with ``order="F"`` to have column-major ordering.
So if you have ``((MR(I,J),I=1,NCINTI),J=1,NCINTJ)`` you would pass in
the shape as (NCINTJ, NCINTI).
"""
fortranShape = list(reversed(shape))
if contents is None or contents.size == 0:
contents = np.empty(fortranShape)
for index in itertools.product(*[range(ii) for ii in shape]):
fortranIndex = tuple(reversed(index))
contents[fortranIndex] = func(contents[fortranIndex])
return contents
def rwImplicitlyTypedMap(self, keys: List[str], contents) -> dict:
    """
    Read/write a dict of floats and/or ints using FORTRAN77-style implicit typing.

    The number of entries processed is determined by the length of ``keys``.
    Keys whose first letter falls in the implicit-integer range are handled
    as ints; everything else as floats.
    """
    for key in keys:
        # ready for some implicit madness from the FORTRAN 77 days?
        handler = self.rwInt if key[0].upper() in IMPLICIT_INT else self.rwFloat
        contents[key] = handler(contents[key])
    return contents
class BinaryRecordReader(IORecord):
    """
    Reads a single CCCC record in binary format.

    Notes
    -----
    This class reads a single CCCC record in binary format. A CCCC record consists of a leading and
    ending integer indicating how many bytes the record is. The data contained within the record may
    be integer, float, double, or string.

    (The class docstring previously said "Writes" — it was swapped with
    :py:class:`BinaryRecordWriter`'s.)
    """

    def open(self):
        """Open the record by reading its leading byte count.

        The value is used by :meth:`close` to ensure the entire record was read.
        """
        if not self._hasRecordBoundaries:
            return
        self.numBytes = self.rwInt(None)
        # the boundary integer is framing, not payload, so back it out
        self.byteCount -= 4

    def close(self):
        """Close the record by reading the byte count from the end of the record.

        Raises
        ------
        BufferError
            If the trailing count disagrees with the leading one, which usually
            means the wrong amount of data was read from the record.
        """
        if not self._hasRecordBoundaries:
            return
        # now read end of record
        numBytes2 = self.rwInt(None)
        self.byteCount -= 4
        if numBytes2 != self.numBytes:
            raise BufferError(
                "Number of bytes specified at end the of record, {}, "
                "does not match the originally specified number, {}.\n"
                "Read {} bytes.".format(numBytes2, self.numBytes, self.byteCount)
            )

    def rwInt(self, val):
        """Read an integer value from the binary stream (``val`` is ignored)."""
        self.byteCount += self._intSize
        (i,) = struct.unpack("i", self._stream.read(self._intSize))
        return i

    def rwBool(self, val):
        """Read a boolean value stored as an integer."""
        return IORecord.rwBool(self, val)

    def rwLong(self, val):
        """Read a long (64-bit) integer value from the binary stream."""
        self.byteCount += self._longSize
        (ll,) = struct.unpack("q", self._stream.read(self._longSize))
        return ll

    def rwFloat(self, val):
        """Read a single precision floating point value from the binary stream."""
        self.byteCount += self._floatSize
        (f,) = struct.unpack("f", self._stream.read(self._floatSize))
        return f

    def rwDouble(self, val):
        """Read a double precision floating point value from the binary stream."""
        self.byteCount += self._floatSize * 2
        (d,) = struct.unpack("d", self._stream.read(self._floatSize * 2))
        return d

    def rwString(self, val, length):
        """Read a string of specified length from the binary stream."""
        self.byteCount += length
        (s,) = struct.unpack("%ds" % length, self._stream.read(length))
        return s.rstrip().decode()  # convert bytes to string on reading.
class BinaryRecordWriter(IORecord):
    """
    Writes a single CCCC record in binary format.

    Notes
    -----
    Packed bytes are buffered in memory and flushed to the stream when the
    record is closed, framed by leading/trailing byte-count integers when
    ``hasRecordBoundaries`` is True. (The docstring previously said "Reads" —
    it was swapped with :py:class:`BinaryRecordReader`'s.)
    """

    def __init__(self, stream, hasRecordBoundaries=True):
        IORecord.__init__(self, stream, hasRecordBoundaries)
        # list of packed byte chunks, accumulated until close()
        self.data = None

    def open(self):
        self.data = []

    def close(self):
        if self._hasRecordBoundaries:
            packedNumBytes = self._getPackedNumBytes()
            self._stream.write(packedNumBytes)
        # flush the buffered chunks in buffer-sized batches
        for i in range(0, len(self.data) + 1, io.DEFAULT_BUFFER_SIZE):
            self._write_buffer_to_stream(i)
        if self._hasRecordBoundaries:
            self._stream.write(packedNumBytes)
        self.data = None

    def _getPackedNumBytes(self):
        """Pack the record's payload size as a 4-byte integer."""
        return struct.pack("i", self.numBytes)

    def _write_buffer_to_stream(self, i):
        self._stream.write(b"".join(self.data[i : i + io.DEFAULT_BUFFER_SIZE]))

    def rwInt(self, val):
        self.numBytes += self._intSize
        self.data.append(struct.pack("i", val))
        return val

    def rwBool(self, val):
        """Write a boolean value as an integer."""
        return IORecord.rwBool(self, val)

    def rwLong(self, val):
        """Write a long (64-bit) integer value to the binary stream."""
        # BUGFIX: count longs toward numBytes (the payload size written in the
        # record boundary markers), not byteCount (the readers' progress
        # counter). Previously this incremented byteCount, so any record
        # containing longs was framed with an incorrect byte count.
        self.numBytes += self._longSize
        self.data.append(struct.pack("q", val))
        return val

    def rwFloat(self, val):
        self.numBytes += self._floatSize
        self.data.append(struct.pack("f", val))
        return val

    def rwDouble(self, val):
        self.numBytes += self._floatSize * 2
        self.data.append(struct.pack("d", val))
        return val

    def rwString(self, val, length):
        self.numBytes += length * self._characterSize
        self.data.append(struct.pack("%ds" % length, val.ljust(length).encode("utf-8")))
        return val
class AsciiRecordReader(BinaryRecordReader):
    """
    Reads a single CCCC record in ASCII format.

    See Also
    --------
    AsciiRecordWriter
    """

    def close(self):
        BinaryRecordReader.close(self)
        # read one extra character for the new line \n... python somehow correctly figures out
        # that on windows \r\n is really just a \n... no idea how.
        self._stream.read(1)

    # NOTE(review): the two writer helpers below are only ever invoked from
    # BinaryRecordWriter.close, which a reader never runs — they appear to be
    # vestigial copy-paste from a writer class; confirm before removing.
    def _getPackedNumBytes(self):
        return self.numBytes

    def _write_buffer_to_stream(self, i):
        self._stream.write("".join(self.data[i : i + io.DEFAULT_BUFFER_SIZE]))

    def rwInt(self, val):
        # fixed-width field: the digits plus the leading space/sign
        return int(self._stream.read(self._intLength))

    def rwFloat(self, val):
        return float(self._stream.read(self._floatLength))

    def rwDouble(self, val):
        # ASCII stores doubles in the same 16-digit format as floats
        return self.rwFloat(val)

    def rwString(self, val, length):
        # read one space
        self._stream.read(1)
        return self._stream.read(length).rstrip()
class AsciiRecordWriter(IORecord):
    r"""
    Writes a single CCCC record in ASCII format.

    Since there is no specific format of an ASCII CCCC record, the format is roughly the same as
    the :py:class:`BinaryRecordWriter`, except that the :class:`AsciiRecordWriter` puts a space in
    front of all values (ints, floats, and strings), and puts a newline character :code:`\\n` at the
    end of all records.
    """

    def __init__(self, stream, hasRecordBoundaries=True):
        IORecord.__init__(self, stream, hasRecordBoundaries)
        # list of formatted string fragments, accumulated until close()
        self.data = None
        self.numBytes = 0

    def open(self):
        self.data = []

    def close(self):
        # BUGFIX: honor hasRecordBoundaries the way BinaryRecordWriter does;
        # previously the byte-count markers were always written, so a record
        # written with hasRecordBoundaries=False could not be read back by
        # the (boundary-skipping) reader.
        if self._hasRecordBoundaries:
            self._stream.write(self._intFormat.format(self.numBytes))
        self._stream.write("".join(self.data))
        if self._hasRecordBoundaries:
            self._stream.write(self._intFormat.format(self.numBytes))
        self._stream.write("\n")
        self.data = None

    def rwInt(self, val):
        """Write a signed integer in the fixed-width ASCII field."""
        self.numBytes += self._intSize
        self.data.append(self._intFormat.format(val))
        return val

    def rwFloat(self, val):
        """Write a single-precision value in signed scientific notation."""
        self.numBytes += self._floatSize
        self.data.append(self._floatFormat.format(val))
        return val

    def rwDouble(self, val):
        """Write a double-precision value; same ASCII format as floats."""
        self.numBytes += self._floatSize * 2
        self.data.append(self._floatFormat.format(val))
        return val

    def rwString(self, val, length):
        """Write a left-justified, fixed-width string preceded by a space."""
        self.numBytes += length * self._characterSize
        self.data.append(" {value:<{length}}".format(length=length, value=val))
        return val
class DataContainer:
    """
    Data representation that can be read/written to/from with a cccc.Stream.

    This is an optional convenience class expected to be used in
    concert with :py:class:`StreamWithDataStructure`.
    """

    def __init__(self):
        # Need Metadata subclass for default keys
        self.metadata = nuclearFileMetadata._Metadata()
class Stream:
    """
    An abstract CCCC IO stream.

    Warning
    -------
    This is more of a stream Parser/Serializer than an actual stream.

    Notes
    -----
    A concrete instance of this class should implement the
    :py:meth:`~armi.nuclearDataIO.cccc.Stream.readWrite` method.
    """

    # maps a file mode to the record reader/writer class that handles it
    _fileModes = {
        "rb": BinaryRecordReader,
        "wb": BinaryRecordWriter,
        "r": AsciiRecordReader,
        "w": AsciiRecordWriter,
    }

    def __init__(self, fileName, fileMode):
        """
        Create an instance of a :py:class:`~armi.nuclearDataIO.cccc.Stream`.

        Parameters
        ----------
        fileName : str
            name of the file to be read

        fileMode : str
            the file mode, i.e. 'w' for writing ASCII, 'r' for reading ASCII, 'wb' for writing
            binary, and 'rb' for reading binary.

        Raises
        ------
        KeyError
            If ``fileMode`` is not one of the supported modes.
        """
        self._fileName = fileName
        self._fileMode = fileMode
        self._stream = None

        if fileMode not in self._fileModes:
            # BUGFIX: report the actual offending mode; previously this
            # formatted the literal string "fileMode" instead of the value.
            raise KeyError("{} not in {}".format(fileMode, list(self._fileModes.keys())))

    def __deepcopy__(self, memo):
        """Open file objects can't be deepcopied so we clear them before copying."""
        cls = self.__class__
        result = cls.__new__(cls)
        result._stream = None
        memo[id(self)] = result
        for k, v in self.__dict__.items():
            if k != "_stream":
                setattr(result, k, deepcopy(v, memo))
        return result

    def __repr__(self):
        return "<{} {}>".format(self.__class__.__name__, self._fileName)

    def __enter__(self):
        """At the inception of a with command, open up the file for a read/write."""
        try:
            self._stream = open(self._fileName, self._fileMode)
        except IOError:
            runLog.error("Cannot find {} in {}".format(self._fileName, os.getcwd()))
            raise
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """At the termination of a with command, close the file."""
        self._stream.close()

    def readWrite(self):
        """This method should be implemented on any sub-classes to specify the order of records."""
        raise NotImplementedError()

    def createRecord(self, hasRecordBoundaries=True):
        """Instantiate the record reader/writer matching this stream's file mode."""
        recordClass = self._fileModes[self._fileMode]
        return recordClass(self._stream, hasRecordBoundaries)

    @classmethod
    def readBinary(cls, fileName: str):
        """Read data from a binary file into a data structure."""
        return cls._read(fileName, "rb")

    @classmethod
    def readAscii(cls, fileName: str):
        """Read data from an ASCII file into a data structure."""
        return cls._read(fileName, "r")

    @classmethod
    def _read(cls, fileName, fileMode):
        raise NotImplementedError()

    @classmethod
    def writeBinary(cls, data: DataContainer, fileName: str):
        """Write the contents of a data container to a binary file."""
        return cls._write(data, fileName, "wb")

    @classmethod
    def writeAscii(cls, data: DataContainer, fileName: str):
        """Write the contents of a data container to an ASCII file."""
        return cls._write(data, fileName, "w")

    @classmethod
    def _write(cls, lib, fileName, fileMode):
        raise NotImplementedError()
class StreamWithDataContainer(Stream):
    """
    A cccc.Stream that reads/writes to a specialized data container.

    This handles some of the boilerplate for the fairly common pattern of
    parsing a whole file into (or serializing from) one data structure.

    Warning
    -------
    This is more of a stream Parser/Serializer than an actual stream.

    Notes
    -----
    It should be possible to fully merge this with ``Stream``, which may make
    this a little less confusing.
    """

    def __init__(self, data: DataContainer, fileName: str, fileMode: str):
        Stream.__init__(self, fileName, fileMode)
        self._data = data
        # cache the container's metadata for convenient access by subclasses
        self._metadata = self._data.metadata

    @staticmethod
    def _getDataContainer() -> DataContainer:
        """Subclasses return a fresh, empty container to parse into."""
        raise NotImplementedError()

    @classmethod
    def _read(cls, fileName: str, fileMode: str):
        # build an empty container, then fill it from the file
        emptyData = cls._getDataContainer()
        return cls._readWrite(emptyData, fileName, fileMode)

    @classmethod
    def _write(cls, data: DataContainer, fileName: str, fileMode: str):
        return cls._readWrite(data, fileName, fileMode)

    @classmethod
    def _readWrite(cls, data: DataContainer, fileName: str, fileMode: str):
        with cls(data, fileName, fileMode) as rw:
            rw.readWrite()
        return data
def getBlockBandwidth(m, nintj, nblok):
    """
    Return the zero-based block bandwidth (JL, JU) for CCCC interface files.

    It is common for CCCC files to block data in various records with a
    description along the lines of::

        WITH M AS THE BLOCK INDEX, JL=(M-1)*((NINTJ-1)/NBLOK +1)+1
        AND JU=MIN0(NINTJ,JUP) WHERE JUP=M*((NINTJ-1)/NBLOK +1)

    This function computes those JL and JU, then shifts them to zero-based
    indices as almost always wanted when dealing with python/numpy matrices.

    The term *bandwidth* comes from sparse-matrix storage: some rows only
    carry columns JL to JU rather than 0 to JMAX, and that non-zero band is
    what is being described here.
    """
    blockSize = (nintj - 1) // nblok + 1
    jUp = m * blockSize
    jLow = jUp - blockSize + 1
    jHigh = jUp if jUp < nintj else nintj
    return jLow - 1, jHigh - 1
================================================
FILE: armi/nuclearDataIO/cccc/compxs.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
COMPXS is a binary file that contains multigroup macroscopic cross sections for homogenized
regions in a full core. The file format can be found in [DIF3D]_.
.. [DIF3D] Derstine, K. L. DIF3D: A Code to Solve One-, Two-, and
Three-Dimensional Finite-Difference Diffusion Theory Problems,
report, April 1984; Argonne, Illinois.
(https://digital.library.unt.edu/ark:/67531/metadc283553/:
accessed October 17, 2019), University of North Texas Libraries,
Digital Library, https://digital.library.unt.edu; crediting UNT
Libraries Government Documents Department.
The file structure is listed here ::
RECORD TYPE PRESENT IF
=================================== ==========
SPECIFICATIONS ALWAYS
COMPOSITION INDEPENDENT DATA ALWAYS
********* (REPEAT FOR ALL COMPOSITIONS)
* COMPOSITION SPECIFICATIONS ALWAYS
* ****** (REPEAT FOR ALL ENERGY GROUPS
* * IN THE ORDER OF DECREASING
* * ENERGY)
* * COMPOSITION MACROSCOPIC GROUP ALWAYS
* * CROSS SECTIONS
*********
POWER CONVERSION FACTORS ALWAYS
See Also
--------
:py:mod:`armi.nuclearDataIO.cccc.isotxs`
Examples
--------
>>> from armi.nuclearDataIO import compxs
>>> lib = compxs.readBinary("COMPXS")
>>> r0 = lib.regions[0]
>>> r0.macros.fission
# returns fission XS for this region
>>> r0.macros.higherOrderScatter[1]
# returns P1 scattering matrix
>>> r0.macros.higherOrderScatter[5] *= 0 # zero out P5 scattering matrix
>>> compxs.writeBinary(lib, "COMPXS2")
Notes
-----
Power conversion factors are used by some codes to determine how to scale the flux
in a region to a desired power based on either fissions/watt-second or
captures/watt-second. If the user does not plan on using these values, the COMPXS
format indicates the values should be set to ``-1E+20``.
The value of ``powerConvMult`` "times the group J integrated flux for the regions
containing the current composition yields the total power in those regions and
energy group J due to fissions and non-fission absorptions."
The ``d<1,2,3>Multiplier`` values are the first, second, and third dimension
directional diffusion coefficient multipliers, respectively. Similarly, the ``d<1,2,3>Additive``
values are the first, second, and third dimension directional diffusion coefficient
additive terms, respectively.
"""
from traceback import format_exc
import numpy as np
from scipy.sparse import csc_matrix
from armi import runLog
from armi.nuclearDataIO import cccc
from armi.nuclearDataIO.nuclearFileMetadata import (
COMPXS_POWER_CONVERSION_FACTORS,
REGIONXS_POWER_CONVERT_DIRECTIONAL_DIFF,
RegionXSMetadata,
)
from armi.nuclearDataIO.xsCollections import XSCollection
from armi.utils.properties import lockImmutableProperties, unlockImmutableProperties
def _getRegionIO():
    # indirection point so alternate formats/subclasses can substitute a
    # different per-region reader/writer class
    return _CompxsRegionIO
def _flattenScatteringVector(colVector, group, numUpScatter, numDownScatter):
flatVector = colVector[group - numDownScatter : group + numUpScatter + 1].toarray().flatten()
return list(reversed(flatVector))
def compare(lib1, lib2, tolerance=0.0, verbose=False):
    """
    Compare two COMPXS libraries and return True if equal, or False if not equal.

    Parameters
    ----------
    lib1: XSLibrary
        first library
    lib2: XSLibrary
        second library
    tolerance: float
        Disregard errors that are less than tolerance.
    verbose: bool
        show the macroscopic cross sections that are not equal

    Returns
    -------
    equals: bool
        True if libraries are equal, else false
    """
    from armi.nuclearDataIO.xsLibraries import compareLibraryNeutronEnergies

    equals = True
    equals &= compareLibraryNeutronEnergies(lib1, lib2, tolerance)
    equals &= lib1.compxsMetadata.compare(lib2.compxsMetadata, lib1, lib2, tolerance)
    for regionName in set(lib1.regionLabels + lib2.regionLabels):
        region1, region2 = lib1[regionName], lib2[regionName]
        if region1 is not None and region2 is not None:
            equals &= _compareRegionXS(region1, region2, tolerance, verbose)
            continue
        # region missing from one library: warn about whichever side has it
        warning = "Region {} is not in library {} and cannot be compared"
        if region1:
            runLog.warning(warning.format(region1, 2))
        if region2:
            runLog.warning(warning.format(region2, 1))
        equals = False
    return equals
def _compareRegionXS(region1, region2, tolerance, verbose):
    """Compare the macroscopic cross sections between two homogenized regions."""
    return region1.macros.compare(region2.macros, None, tolerance, verbose)
class _CompxsIO(cccc.Stream):
    """Semi-abstract stream used for reading to/writing from a COMPXS file.

    Parameters
    ----------
    fileName: str
        path to compxs file
    lib: armi.nuclearDataIO.xsLibrary.CompxsLibrary
        Compxs library that is being written to or read from `fileName`
    fileMode: str
        string indicating if ``fileName`` is being read or written, and
        in ascii or binary format
    getRegionFunc: function
        function that returns a :py:class:`CompxsRegion` object given the name of
        the region.

    See Also
    --------
    armi.nuclearDataIO.cccc.isotxs.IsotxsIO
    """

    # 1D-record metadata entries, in the order they appear in the file
    _METADATA_TAGS = (
        "numComps",
        "numGroups",
        "fileWideChiFlag",
        "numFissComps",
        "maxUpScatterGroups",
        "maxDownScatterGroups",
        "numDelayedFam",
        "maxScatteringOrder",
    )

    def __init__(self, fileName, lib, fileMode, getRegionFunc):
        cccc.Stream.__init__(self, fileName, fileMode)
        self._lib = lib
        self._metadata = self._getFileMetadata()
        self._metadata.fileNames.append(fileName)
        self._getRegion = getRegionFunc
        self._isReading = "r" in self._fileMode

    def _getFileMetadata(self):
        return self._lib.compxsMetadata

    def isReadingCompxs(self):
        """True when this stream was opened in a read mode."""
        return self._isReading

    def fileMode(self):
        return self._fileMode

    @classmethod
    def _read(cls, fileName, fileMode):
        from armi.nuclearDataIO.xsLibraries import CompxsLibrary

        lib = CompxsLibrary()
        return cls._readWrite(
            lib,
            fileName,
            fileMode,
            lambda containerKey: CompxsRegion(lib, containerKey),
        )

    @classmethod
    def _write(cls, lib, fileName, fileMode):
        return cls._readWrite(lib, fileName, fileMode, lambda containerKey: lib[containerKey])

    @classmethod
    def _readWrite(cls, lib, fileName, fileMode, getRegionFunc):
        # use cls rather than hard-coding _CompxsIO so subclasses are honored
        with cls(fileName, lib, fileMode, getRegionFunc) as rw:
            rw.readWrite()
        return lib

    def readWrite(self):
        """
        Read from or write to the COMPXS file.

        See Also
        --------
        armi.nuclearDataIO.cccc.isotxs.IsotxsIO.readWrite : reading/writing ISOTXS files
        """
        runLog.info("{} macroscopic cross library {}".format("Reading" if self._isReading else "Writing", self))
        unlockImmutableProperties(self._lib)
        try:
            regNames = self._rw1DRecord(self._lib.regionLabels)
            self._rw2DRecord()
            for regLabel in regNames:
                region = self._getRegion(regLabel)
                regionIO = _getRegionIO()(region, self, self._lib)
                regionIO.rwRegionData()
            self._rw5DRecord()
        except Exception:
            raise OSError("Failed to {} {} \n\n\n{}".format("read" if self._isReading else "write", self, format_exc()))
        finally:
            lockImmutableProperties(self._lib)

    def _rw1DRecord(self, regNames):
        """Read/write the specifications block."""
        with self.createRecord() as record:
            for datum in self._METADATA_TAGS:
                self._metadata[datum] = record.rwInt(self._metadata[datum])
            self._metadata["reservedFlag1"] = record.rwInt(self._metadata["reservedFlag1"])
            self._metadata["reservedFlag2"] = record.rwInt(self._metadata["reservedFlag2"])
        # region "names" are just the composition indices 0..numComps-1
        regNames = list(range(self._metadata["numComps"]))
        return regNames

    def _rw2DRecord(self):
        """Read/write the composition independent data block."""
        with self.createRecord() as record:
            if self._metadata["fileWideChiFlag"]:
                # BUGFIX: IORecord.rwMatrix takes the dimensions as *shape;
                # previously a single tuple was passed, which made the
                # element loop inside _rwMatrix fail (range() over a tuple).
                self._metadata["fileWideChi"] = record.rwMatrix(
                    self._metadata["fileWideChi"],
                    self._metadata["fileWideChiFlag"],
                    self._metadata["numGroups"],
                )
            self._rwLibraryEnergies(record)
            self._metadata["minimumNeutronEnergy"] = record.rwDouble(self._metadata["minimumNeutronEnergy"])
            self._rwDelayedProperties(record, self._metadata["numDelayedFam"])

    def _rwLibraryEnergies(self, record):
        """Read/write the group velocities and upper energy bounds."""
        self._lib.neutronVelocity = record.rwList(self._lib.neutronVelocity, "double", self._metadata["numGroups"])
        self._lib.neutronEnergyUpperBounds = record.rwList(
            self._lib.neutronEnergyUpperBounds, "double", self._metadata["numGroups"]
        )

    def _rwDelayedProperties(self, record, numDelayedFam):
        """Read/write delayed-neutron data; present only when numDelayedFam is nonzero."""
        if numDelayedFam:
            # BUGFIX: dimensions unpacked for rwMatrix (see _rw2DRecord)
            self._metadata["delayedChi"] = record.rwMatrix(
                self._metadata["delayedChi"],
                self._metadata["numGroups"],
                numDelayedFam,
            )
            self._metadata["delayedDecayConstant"] = record.rwList(
                self._metadata["delayedDecayConstant"], "double", numDelayedFam
            )
            self._metadata["compFamiliesWithPrecursors"] = record.rwList(
                self._metadata["compFamiliesWithPrecursors"],
                "int",
                self._metadata["numComps"],
            )

    def _rw5DRecord(self):
        """Read/write power conversion factors."""
        numComps = self._getFileMetadata()["numComps"]
        with self.createRecord() as record:
            for factor in COMPXS_POWER_CONVERSION_FACTORS:
                self._getFileMetadata()[factor] = record.rwList(self._getFileMetadata()[factor], "double", numComps)
# Module-level convenience API, mirroring the other cccc file-format modules.
readBinary = _CompxsIO.readBinary
readAscii = _CompxsIO.readAscii
writeBinary = _CompxsIO.writeBinary
writeAscii = _CompxsIO.writeAscii
class _CompxsRegionIO:
    """
    Specific object assigned a single region to read/write composition information.

    Used with _COMPXS object to read/write 3D and 4D records -
    composition specifications and composition macroscopic cross sections.
    Cross sections are read/written in order of decreasing energy.

    This differs from the _COMPXS object, as this object acts on a single region, but
    uses the file mode and file path from the _COMPXS region that instantiated this object.
    """

    # primary cross sections, in the order they appear within each group block
    _ORDERED_PRIMARY_XS = ("absorption", "total", "removal", "transport")

    def __init__(self, region, compxsIO, lib):
        self._lib = lib
        self._compxsIO = compxsIO
        self._region = region
        self._numGroups = self._getFileMetadata()["numGroups"]
        self._fileMode = compxsIO.fileMode()
        self._isReading = compxsIO.isReadingCompxs()

    def _getRegionMetadata(self):
        # metadata specific to this composition/region
        return self._region.metadata

    def _getFileMetadata(self):
        # file-wide metadata shared by all regions
        return self._lib.compxsMetadata

    def rwRegionData(self):
        """Read/write the region specific information for this composition."""
        self._rw3DRecord()
        self._rw4DRecord()

    def _rw3DRecord(self):
        r"""Read/write the composition specifications block."""
        with self._compxsIO.createRecord() as record:
            self._getRegionMetadata()["chiFlag"] = record.rwInt(self._getRegionMetadata()["chiFlag"])
            self._getRegionMetadata()["numUpScatterGroups"] = record.rwList(
                self._getRegionMetadata()["numUpScatterGroups"], "int", self._numGroups
            )
            self._getRegionMetadata()["numDownScatterGroups"] = record.rwList(
                self._getRegionMetadata()["numDownScatterGroups"],
                "int",
                self._numGroups,
            )
            if self._getRegionMetadata()["numPrecursorFamilies"]:
                self._getRegionMetadata()["numFamI"] = record.rwList(
                    self._getRegionMetadata()["numFamI"],
                    "int",
                    self._getRegionMetadata()["numPrecursorFamilies"],
                )

    def _rw4DRecord(self):
        r"""Read/write the composition macroscopic cross sections, one record per group."""
        if self._isReading:
            # allocate destination arrays before the group-by-group fill
            self._region.allocateXS(self._getFileMetadata()["numGroups"])
        for group in range(self._getFileMetadata()["numGroups"]):
            with self._compxsIO.createRecord() as record:
                self._rwGroup4DRecord(record, group, self._region.macros)
        if self._isReading:
            # scattering columns were staged; assemble the sparse matrices now
            self._region.makeScatteringMatrices()

    def _rwGroup4DRecord(self, record, group, macros):
        # ordering within a group block: primary XS, P0 scatter, power/diffusion
        # factors, precursor counts, n2n, then higher-order scatter
        self._rwPrimaryXS(record, group, macros)
        self._rwScatteringMatrix(record, group, macros, 0)
        for datum in REGIONXS_POWER_CONVERT_DIRECTIONAL_DIFF:
            self._getRegionMetadata()[datum][group] = record.rwDouble(self._getRegionMetadata()[datum][group])
        if self._getRegionMetadata()["numPrecursorFamilies"]:
            # NOTE: this metadata is keyed by a (name, group) tuple
            self._getRegionMetadata()["numPrecursorsProduced", group] = record.rwList(
                self._getRegionMetadata()["numPrecursorsProduced", group],
                "int",
                self._getRegionMetadata()["numPrecursorFamilies"],
            )
        macros.n2n[group] = record.rwDouble(macros.n2n[group])
        for higherOrder in range(1, self._getFileMetadata()["maxScatteringOrder"] + 1):
            self._rwScatteringMatrix(record, group, macros, higherOrder)

    def _rwPrimaryXS(self, record, group, macros):
        # fission data are present only for fissile compositions (chiFlag set)
        for xs in self._ORDERED_PRIMARY_XS:
            macros[xs][group] = record.rwDouble(macros[xs][group])
        if self._getRegionMetadata()["chiFlag"]:
            macros["fission"][group] = record.rwDouble(macros["fission"][group])
            macros["nuSigF"][group] = record.rwDouble(macros["nuSigF"][group])
            macros["chi"][group] = record.rwList(macros["chi"][group], "double", self._getRegionMetadata()["chiFlag"])

    def _rwScatteringMatrix(self, record, group, macros, order):
        # one column of the (sparse) scattering matrix: only the band of
        # groups that actually scatter into `group` is stored
        numUpScatter = self._getRegionMetadata()["numUpScatterGroups"][group]
        numDownScatter = self._getRegionMetadata()["numDownScatterGroups"][group]
        sparseMat = macros.higherOrderScatter[order] if order else macros.totalScatter
        dataj = (
            None
            if self._isReading
            else _flattenScatteringVector(sparseMat[:, group], group, numUpScatter, numDownScatter)
        )
        dataj = record.rwList(dataj, "double", numUpScatter + 1 + numDownScatter)
        indicesj = list(reversed(range(group - numDownScatter, group + numUpScatter + 1)))
        if self._isReading:
            sparseMat.addColumnData(dataj, indicesj)
class _CompxsScatterMatrix:
"""When reading COMPXS scattering blocks, store the data here and then reconstruct after."""
def __init__(self, shape):
self.data = []
self.indices = []
self.indptr = [0]
self.shape = shape
def addColumnData(self, dataj, indicesj):
self.data.extend(dataj)
self.indices.extend(indicesj)
self.indptr.append(len(dataj) + self.indptr[-1])
def makeSparse(self, sparseFunc=csc_matrix):
self.data = np.array(self.data, dtype="d")
self.indices = np.array(self.indices, dtype="d")
self.indptr = np.array(self.indptr, dtype="d")
return sparseFunc((self.data, self.indices, self.indptr), shape=self.shape)
class CompxsRegion:
"""
Class for creating/tracking homogenized region information.
Notes
-----
Region objects are created from reading COMPXS files through
:py:meth:`~_CompxsIO.readWrite` and connected to the resulting library,
similar to instances of :py:class:`~armi.nuclearDataIO.xsNuclides.XSNuclide`. This allows instances
of :py:class:`~armi.nuclearDataIO.xsLibraries.CompxsLibrary` to read from and write to ``COMPXS`` files,
access region information by name, and plot macroscopic cross sections from the homogenized regions.
The main attributes for an instance of `Region` are the macroscopic cross sections,
``macros``, and the metadata. The metadata deals primarily with delayed neutron information
and use of the ``fileWideChi``, if that option is set.
See Also
--------
armi.nuclearDataIO.xsNuclides.XSNuclide
Examples
--------
>>> lib = compxs.readBinary("COMPXS")
>>> lib.regions
...
>>> r0 = lib.regions[0]
>>> r10 = lib.regions[10]
>>> r0.isFissile
False
>>> r10.isFissile
True
>>> r10.macros.fission
array([0.01147095, 0.01006284, 0.0065597, 0.00660079, 0.005587,
...
0.08920149, 0.13035864, 0.16192732]
"""
_primaryXS = ("absorption", "total", "removal", "transport", "n2n")
def __init__(self, lib, regionNumber):
    """Create a region, registering it with the parent library.

    Parameters
    ----------
    lib : CompxsLibrary
        The parent library; this region inserts itself under ``regionNumber``.
    regionNumber : int
        Zero-based composition index of this region within the file.
    """
    self.container = lib
    # register with the library immediately so lib[regionNumber] resolves
    lib[regionNumber] = self
    self.regionNumber = regionNumber
    self.macros = XSCollection(parent=self)
    # built last: _getMetadata reads self.container and self.regionNumber
    self.metadata = self._getMetadata()
def __repr__(self):
return "<{} {}>".format(self.__class__.__name__, self.regionNumber)
def _getFileMetadata(self):
    """Shortcut to the file-wide (library-level) COMPXS metadata."""
    return self.container.compxsMetadata
def _getMetadata(self):
    """Build this region's metadata from the file-wide metadata.

    Copies the file-wide chi flag/vector down to the region, and looks up the
    number of delayed-neutron precursor families for this composition.
    """
    specs = RegionXSMetadata()
    chiFlag = specs["fileWideChiFlag"] = self._getFileMetadata()["fileWideChiFlag"]
    if chiFlag:
        self.macros.chi = specs["fileWideChi"] = self._getFileMetadata()["fileWideChi"]
    # assumes compFamiliesWithPrecursors is a numpy array (has .size) — None
    # or empty means the file carried no precursor data; TODO confirm
    compFamiliesWithPrecursors = self._getFileMetadata()["compFamiliesWithPrecursors"]
    if compFamiliesWithPrecursors is not None and compFamiliesWithPrecursors.size:
        specs["numPrecursorFamilies"] = compFamiliesWithPrecursors[self.regionNumber]
    else:
        specs["numPrecursorFamilies"] = 0
    return specs
def initMetadata(self, groups):
    """Initialize the metadata for this region.

    Diffusion-coefficient additive terms default to 0.0, multipliers to 1.0,
    one entry per energy group; power conversion factors default to 1.0.
    """
    self.metadata = self._getMetadata()
    for datum in REGIONXS_POWER_CONVERT_DIRECTIONAL_DIFF:
        fillValue = 0.0 if "Additive" in datum else 1.0
        self.metadata[datum] = [fillValue] * groups
    for datum in COMPXS_POWER_CONVERSION_FACTORS:
        self.metadata[datum] = 1.0
@property
def isFissile(self):
    """True when fission cross sections exist for this region."""
    return self.macros.fission is not None
    def allocateXS(self, numGroups):
        r"""
        Allocate the cross section arrays.

        When reading in the cross sections from a COMPXS file, the cross sections are read
        for each energy group, i.e. ..math::

            \Sigma_{a,1},\Sigma_{t,1},\Sigma_{rem,1}, \cdots,
            \Sigma_{a,2},\Sigma_{t,2},\Sigma_{rem,2}, \cdots,
            \Sigma_{a,G},\Sigma_{t,G},\Sigma_{rem,G}

        Since the cross sections can not be read in with a single read command, the
        arrays are allocated here to be populated later.

        Scattering matrices are read in as columns of a sparse scattering matrix and
        reconstructed after all energy groups have been read in.

        Parameters
        ----------
        numGroups : int
            Number of energy groups used to size the arrays.

        See Also
        --------
        :py:meth:`makeScatteringMatrices`
        """
        for xs in self._primaryXS:
            self.macros[xs] = np.zeros(numGroups)
        self.macros.totalScatter = _CompxsScatterMatrix((numGroups, numGroups))
        # NOTE(review): this reads "chiFlag" while _getMetadata stores
        # "fileWideChiFlag"; presumably RegionXSMetadata supplies "chiFlag"
        # from the region-level records -- confirm.
        if self.metadata["chiFlag"]:
            self.macros.fission = np.zeros(numGroups)
            self.macros.nuSigF = np.zeros(numGroups)
            self.macros.chi = np.zeros((numGroups, self.metadata["chiFlag"]))
        if self._getFileMetadata()["maxScatteringOrder"]:
            # one extra scatter matrix per Legendre order above zero
            for scatterOrder in range(1, self._getFileMetadata()["maxScatteringOrder"] + 1):
                self.macros.higherOrderScatter[scatterOrder] = _CompxsScatterMatrix((numGroups, numGroups))
        for datum in REGIONXS_POWER_CONVERT_DIRECTIONAL_DIFF:
            # additive quantities default to 0.0 per group, multiplicative to 1.0
            self.metadata[datum] = (np.zeros(numGroups) if "Additive" in datum else np.ones(numGroups)).tolist()
    def makeScatteringMatrices(self):
        r"""
        Create the sparse scattering matrix from components.

        The scattering matrix :math:`S_{i,j}=\Sigma_{s,i\rightarrow j}` is read in
        from the COMPXS as segments on each column in three parts: ..math::

            XSCATU_J = \lbrace S_{g', J}\vert g'=J+NUP(J), J+NUP(J)-1, \cdots, J+1\rbrace
            XSCATJ_J = S_{J,J}
            XSCATD_J = \lbrace S_{g', J}\vert g'=J-1, J-2, \cdots, J-NDN(J) \rbrace

        where :math:`NUP(J)` and :math:`NDN(J)` are the numbers of groups that upscatter and
        downscatter into energy group :math:`J`.

        See Also
        --------
        :py:class:`scipy.sparse.csc_matrix`
        """
        # convert the accumulated column segments into true sparse matrices
        self.macros.totalScatter = self.macros.totalScatter.makeSparse()
        self.macros.totalScatter.eliminate_zeros()
        if self._getFileMetadata()["maxScatteringOrder"]:
            for sctOrdr, sctObj in self.macros.higherOrderScatter.items():
                self.macros.higherOrderScatter[sctOrdr] = sctObj.makeSparse()
                self.macros.higherOrderScatter[sctOrdr].eliminate_zeros()
    def getXS(self, interaction):
        """
        Get the macroscopic cross sections for a specific interaction.

        Parameters
        ----------
        interaction : str
            Interaction name, e.g. one of ``_primaryXS`` or ``"fission"``.

        See Also
        --------
        :py:meth:`armi.nuclearDataIO.xsNuclides.XSNuclide.getXS`
        """
        return self.macros[interaction]
    def merge(self, other):
        """Merge attributes of two homogenized Regions.

        Conflicting metadata raises ``OSError`` (propagated from the metadata
        merge); macroscopic cross sections are merged in place.
        """
        self.metadata = self.metadata.merge(other.metadata, self, other, "COMPXS", OSError)
        self.macros.merge(other.macros)
================================================
FILE: armi/nuclearDataIO/cccc/dif3d.py
================================================
# Copyright 2023 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module for reading from and writing to DIF3D files, which are module dependent
binary inputs for the DIF3D code.
"""
from armi import runLog
from armi.nuclearDataIO import cccc
# Integer control parameters stored on the DIF3D 2D record, in file order.
# See the file specification distributed with the DIF3D software.
FILE_SPEC_2D_PARAMS = (
    [
        "IPROBT",
        "ISOLNT",
        "IXTRAP",
        "MINBSZ",
        "NOUTMX",
        "IRSTRT",
        "LIMTIM",
        "NUPMAX",
        "IOSAVE",
        "IOMEG1",
        "INRMAX",
        "NUMORP",
        "IRETRN",
    ]
    + [f"IEDF{e}" for e in range(1, 11)]  # the ten edit flags IEDF1..IEDF10
    + [
        "NOUTBQ",
        "I0FLUX",
        "NOEDIT",
        "NOD3ED",
        "ISRHED",
        "NSN",
        "NSWMAX",
        "NAPRX",
        "NAPRXZ",
        "NFMCMX",
        "NXYSWP",
        "NZSWP",
        "ISYMF",
        "NCMRZS",
        "ISEXTR",
        "NPNO",
        "NXTR",
        "IOMEG2",
        "IFULL",
        "NVFLAG",
        "ISIMPL",
        "IWNHFL",
        "IPERT",
        "IHARM",
    ]
)
# Floating-point parameters on the DIF3D 3D record (convergence criteria,
# k-effective, etc.), padded with the twenty dummy slots DUM1..DUM20.
FILE_SPEC_3D_PARAMS = [
    "EPS1",
    "EPS2",
    "EPS3",
    "EFFK",
    "FISMIN",
    "PSINRM",
    "POWIN",
    "SIGBAR",
    "EFFKQ",
    "EPSWP",
] + [f"DUM{e}" for e in range(1, 21)]
# Number of 8-character words making up the problem title on the 1D record.
TITLE_RANGE = 11
class Dif3dData(cccc.DataContainer):
    """Container for the data held on the records of a DIF3D file."""

    def __init__(self):
        cccc.DataContainer.__init__(self)
        # 2D/3D records hold fixed parameter sets; values start unset (None)
        self.twoD = dict.fromkeys(FILE_SPEC_2D_PARAMS)
        self.threeD = dict.fromkeys(FILE_SPEC_3D_PARAMS)
        # the optional 4D/5D records are sized from 2D data at read time
        self.fourD = None
        self.fiveD = None
class Dif3dStream(cccc.StreamWithDataContainer):
    """Tool to read and write DIF3D files."""

    @staticmethod
    def _getDataContainer() -> Dif3dData:
        # fresh container to read into (or to be populated before writing)
        return Dif3dData()

    def _rwFileID(self) -> None:
        """
        Record for file identification information.

        The parameters are stored as a dictionary under the attribute `metadata`.
        """
        with self.createRecord() as record:
            # three 8-character Hollerith words, then the file version number
            for param in ["HNAME", "HUSE1", "HUSE2"]:
                self._metadata[param] = record.rwString(self._metadata[param], 8)
            self._metadata["VERSION"] = record.rwInt(self._metadata["VERSION"])

    def _rw1DRecord(self) -> None:
        """
        Record for problem title, storage, and dump specifications.

        The parameters are stored as a dictionary under the attribute `metadata`.
        """
        with self.createRecord() as record:
            # the title is stored as TITLE_RANGE separate 8-character words
            for i in range(TITLE_RANGE):
                param = f"TITLE{i}"
                self._metadata[param] = record.rwString(self._metadata[param], 8)
            self._metadata["MAXSIZ"] = record.rwInt(self._metadata["MAXSIZ"])
            self._metadata["MAXBLK"] = record.rwInt(self._metadata["MAXBLK"])
            self._metadata["IPRINT"] = record.rwInt(self._metadata["IPRINT"])

    def _rw2DRecord(self) -> None:
        """
        Record for DIF3D integer control parameters.

        The parameters are stored as a dictionary under the attribute `twoD`.
        """
        with self.createRecord() as record:
            for param in FILE_SPEC_2D_PARAMS:
                self._data.twoD[param] = record.rwInt(self._data.twoD[param])

    def _rw3DRecord(self) -> None:
        """
        Record for convergence criteria and other sundry floating point data (such as
        k-effective).

        The parameters are stored as a dictionary under the attribute `threeD`.
        """
        with self.createRecord() as record:
            for param in FILE_SPEC_3D_PARAMS:
                self._data.threeD[param] = record.rwDouble(self._data.threeD[param])

    def _rw4DRecord(self) -> None:
        """
        Record for the optimum overrelaxation factors. This record is only present when
        using DIF3D-FD and if `NUMORP` is greater than 0.

        The parameters are stored as a dictionary under the attribute `fourD`. This
        could be changed into a list in the future since this record represents groupwise
        data.
        """
        if self._data.twoD["NUMORP"] != 0:
            # one overrelaxation factor per group: OMEGA1..OMEGA<NUMORP>
            omegaParams = [f"OMEGA{e}" for e in range(1, self._data.twoD["NUMORP"] + 1)]
            with self.createRecord() as record:
                # Initialize the record if we're reading
                if self._data.fourD is None:
                    self._data.fourD = {omegaParam: None for omegaParam in omegaParams}
                for omegaParam in omegaParams:
                    self._data.fourD[omegaParam] = record.rwDouble(self._data.fourD[omegaParam])

    def _rw5DRecord(self) -> None:
        """
        Record for the axial coarse mesh rebalancing boundaries. Coarse mesh balancing is
        disabled in DIF3D-VARIANT, so this record is only relevant for DIF3D-Nodal. This
        record is only present if `NCMRZS` is greater than 0.

        The parameters are stored as a dictionary under the attribute `fiveD`.
        """
        if self._data.twoD["NCMRZS"] != 0:
            # rebalance boundaries (doubles) and fine-interval counts (ints)
            zcmrcParams = [f"ZCMRC{e}" for e in range(1, self._data.twoD["NCMRZS"] + 1)]
            nzintsParams = [f"NZINTS{e}" for e in range(1, self._data.twoD["NCMRZS"] + 1)]
            with self.createRecord() as record:
                # Initialize the record if we're reading
                if self._data.fiveD is None:
                    self._data.fiveD = {zcmrcParam: None for zcmrcParam in zcmrcParams}
                    self._data.fiveD.update({nzintsParam: None for nzintsParam in nzintsParams})
                for zcmrcParam in zcmrcParams:
                    self._data.fiveD[zcmrcParam] = record.rwDouble(self._data.fiveD[zcmrcParam])
                for nzintsParam in nzintsParams:
                    self._data.fiveD[nzintsParam] = record.rwInt(self._data.fiveD[nzintsParam])

    def readWrite(self):
        """Reads or writes metadata and data from the five records of the DIF3D binary file.

        .. impl:: Tool to read and write DIF3D files.
            :id: I_ARMI_NUCDATA_DIF3D
            :implements: R_ARMI_NUCDATA_DIF3D

            The reading and writing of the DIF3D binary file is performed using
            :py:class:`StreamWithDataContainer <.cccc.StreamWithDataContainer>`
            from the :py:mod:`~armi.nuclearDataIO.cccc` package. This class
            allows for the reading and writing of CCCC binary files, processing
            one record at a time using subclasses of the :py:class:`IORecord
            <.cccc.IORecord>`. Each record in a CCCC binary file consists of
            words that represent integers (short or long), floating-point
            numbers (single or double precision), or strings of data. One or
            more of these words are parsed one at a time by the reader. Multiple
            words processed together have meaning, such as groupwise
            overrelaxation factors. While reading, the data is stored in a
            Python dictionary as an attribute on the object, one for each
            record. The keys in each dictionary represent the parsed grouping of
            words in the records; for example, for the 4D record (stored as the
            attribute ``fourD``), each groupwise overrelaxation factor is stored
            as the key ``OMEGA{i}``, where ``i`` is the group number. See
            :need:`I_ARMI_NUCDATA` for more details on the general
            implementation.

            Each record is also embedded with the record size at the beginning
            and end of the record (always assumed to be present), which is used
            for error checking at the end of processing each record.

            The DIF3D reader processes the file identification record (stored as
            the attribute ``_metadata``) and the five data records for the DIF3D
            file, as defined in the specification for the file distributed with
            the DIF3D software.

            This class can also read and write an ASCII version of the DIF3D
            file. While this format is not used by the DIF3D software, it can be
            a useful representation for users to access the file in a
            human-readable format.
        """
        msg = f"{'Reading' if 'r' in self._fileMode else 'Writing'} DIF3D binary data {self}"
        runLog.info(msg)
        self._rwFileID()
        self._rw1DRecord()
        self._rw2DRecord()
        self._rw3DRecord()
        # the 4D and 5D records guard themselves on NUMORP/NCMRZS
        self._rw4DRecord()
        self._rw5DRecord()
# Module-level convenience read/write functions, mirroring the other CCCC modules.
readBinary = Dif3dStream.readBinary
readAscii = Dif3dStream.readAscii
writeBinary = Dif3dStream.writeBinary
writeAscii = Dif3dStream.writeAscii
================================================
FILE: armi/nuclearDataIO/cccc/fixsrc.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
FIXSRC is a CCCC standard data file for storing multigroup fixed sources on a triangular mesh.
Currently, the FIXSRC writing capability assumes a gamma (not neutron) fixed source problem.
This enables photon transport problems. [CCCC-IV]_
"""
import collections
import numpy as np
from armi import runLog
from armi.nuclearDataIO import cccc
def readBinary(fileName):
    """Read a binary FIXSRC file and return its fixed-source array."""
    placeholder = np.zeros((0, 0, 0, 0))
    with FIXSRC(fileName, "rb", placeholder) as stream:
        stream.readWrite()
    return stream.fixSrc
def writeBinary(fileName, fixSrcArray):
    """Write the given fixed-source array to a binary FIXSRC file."""
    stream = FIXSRC(fileName, "wb", fixSrcArray)
    with stream:
        stream.readWrite()
class FIXSRC(cccc.Stream):
    """Read or write a binary FIXSRC file from DIF3D fixed source input."""

    def __init__(self, fileName, fileMode, fixSrc):
        """
        Initialize a gamma FIXSRC class for reading or writing a binary FIXSRC file for DIF3D gamma
        fixed source input.

        If the intent is to write a gamma FIXSRC file, the variable FIXSRC.fixSrc, which contains
        to-be-written core-wide multigroup gamma fixed source data, is constructed from an existing
        neutron RTFLUX file.

        Parameters
        ----------
        fileName : str
            The name of the FIXSRC binary file to be read or written.
        fileMode : str
            If 'wb', this class writes a FIXSRC binary file.
            If 'rb', this class reads a preexisting FIXSRC binary file.
        fixSrc : np.ndarray
            Core-wide multigroup gamma fixed-source data, indexed as
            (i, j, z, group). Pass an empty (0, 0, 0, 0) array when reading.
        """
        cccc.Stream.__init__(self, fileName, fileMode)
        # copied from a sample FIXSRC output from "type 19" DIF3D input
        self.label = "FIXSRC "
        self.fileId = 1
        self.fixSrc = fixSrc
        # dimensions come from the supplied array (all zero when reading)
        ni, nj, nz, ng = self.fixSrc.shape
        # 1D "file control" record parameters, in file order
        self.fc = collections.OrderedDict(
            [
                ("itype", 0),
                ("ndim", 3),
                ("ngroup", ng),
                ("ninti", ni),
                ("nintj", nj),
                ("nintk", nz),
                ("idists", 1),
                ("ndcomp", 1),
                ("nscomp", 0),
                ("nedgi", 0),
                ("nedgj", 0),
                # NOTE(review): "nedjk" looks like a typo for "nedgk"; it is only an
                # internal key (values are written positionally), so it is kept as-is.
                ("nedjk", 0),
                ("nblok", 1),
            ]
        )

    def readWrite(self):
        """Read or write a binary FIXSRC file for DIF3D fixed source input."""
        runLog.info("{} gamma fixed source file {}".format("Reading" if "r" in self._fileMode else "Writing", self))
        self._rwFileID()
        self._rw1DRecord()
        # one 3D record per (group, axial plane) pair
        ng = self.fc["ngroup"]
        nz = self.fc["nintk"]
        for g in range(ng):
            for z in range(nz):
                self._rw3DRecord(g, z)

    def _rwFileID(self):
        """Read/write the file identification record (24-character label and file ID)."""
        with self.createRecord() as fileIdRecord:
            self.label = fileIdRecord.rwString(self.label, 24)
            self.fileId = fileIdRecord.rwInt(self.fileId)

    def _rw1DRecord(self):
        """Read/write parameters from/to the FIXSRC 1D block (file control)."""
        with self.createRecord() as record:
            for var in self.fc.keys():
                self.fc[var] = record.rwInt(self.fc[var])

    def _rw3DRecord(self, g, z):
        """
        Read/write fixed source data from 3D block records.

        Parameters
        ----------
        g : int
            The gamma energy group index.

        z : int
            The DIF3D axial node index.
        """
        with self.createRecord() as record:
            ni = self.fc["ninti"]
            nj = self.fc["nintj"]
            # one value per (i, j) mesh point on this plane for this group
            for j in range(nj):
                for i in range(ni):
                    self.fixSrc[i, j, z, g] = record.rwDouble(self.fixSrc[i, j, z, g])
================================================
FILE: armi/nuclearDataIO/cccc/gamiso.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module for reading GAMISO files which contains gamma cross section data.
GAMISO is a binary file created by MC**2-v3 that contains multigroup microscopic gamma cross
sections. GAMISO data is contained within a :py:class:`~armi.nuclearDataIO.xsLibraries.XSLibrary`.
.. impl:: Tool to read and write GAMISO files.
:id: I_ARMI_NUCDATA_GAMISO
:implements: R_ARMI_NUCDATA_GAMISO
The majority of the functionality in this module is inherited from the
:py:mod:`~armi.nuclearDataIO.cccc.isotxs` module. See
:py:class:`~armi.nuclearDataIO.cccc.isotxs.IsotxsIO` and its associated
implementation :need:`I_ARMI_NUCDATA_ISOTXS` for more information. The only
difference from ISOTXS neutron data is a special treatment for gamma
velocities, which is done by overriding ``_rwLibraryEnergies``.
See [GAMSOR]_.
.. [GAMSOR] Smith, M. A., Lee, C. H., and Hill, R. N. GAMSOR: Gamma Source Preparation and DIF3D
Flux Solution. United States: N. p., 2016. Web. doi:10.2172/1343095. `On OSTI
<https://www.osti.gov/biblio/1343095>`__
"""
from armi import runLog
from armi.nuclearDataIO import xsLibraries, xsNuclides
from armi.nuclearDataIO.cccc import isotxs
def compare(lib1, lib2):
    """
    Compare two XSLibraries, and return True if equal, or False if not.

    Notes
    -----
    A nuclide present in only one of the libraries is reported and makes the
    libraries unequal, mirroring :py:func:`armi.nuclearDataIO.cccc.isotxs.compare`.
    Previously such one-sided nuclides were skipped silently, so libraries with
    different nuclide sets could compare as equal.
    """
    equal = True
    # first check the lib properties (also need to unlock to prevent from getting an exception).
    equal &= xsLibraries.compareXSLibraryAttribute(lib1, lib2, "gammaEnergyUpperBounds")
    # compare the meta data
    equal &= lib1.gamisoMetadata.compare(lib2.gamisoMetadata, lib1, lib2)
    # check the nuclides
    for nucName in set(lib1.nuclideLabels + lib2.nuclideLabels):
        nuc1 = lib1.get(nucName, None)
        nuc2 = lib2.get(nucName, None)
        if nuc1 is None or nuc2 is None:
            # one-sided nuclide: warn and flag the libraries as unequal
            warning = "Nuclide {:>20} in library {} is not present in library {} and cannot be compared"
            if nuc1:
                runLog.warning(warning.format(nuc1, 1, 2))
            if nuc2:
                runLog.warning(warning.format(nuc2, 2, 1))
            equal = False
            continue
        equal &= compareNuclideXS(nuc1, nuc2)
    return equal
def compareNuclideXS(nuc1, nuc2):
    """Return True when two nuclides' GAMISO metadata and gamma cross sections match."""
    metadataMatch = nuc1.gamisoMetadata.compare(nuc2.gamisoMetadata, nuc1.container, nuc2.container)
    gammaXSMatch = nuc1.gammaXS.compare(nuc2.gammaXS, [])
    return metadataMatch & gammaXSMatch
def addDummyNuclidesToLibrary(lib, dummyNuclides):
    """
    Copy DUMMY nuclide data into the given GAMISO library.

    Parameters
    ----------
    lib : obj
        GAMISO library object
    dummyNuclides: list
        List of DUMMY nuclide objects that will be copied and added to the GAMISO file

    Returns
    -------
    bool
        True when at least one dummy nuclide was added.

    Notes
    -----
    Since MC2-3 does not write DUMMY nuclide information for GAMISO files, this is necessary to
    provide a consistent set of nuclide-level data across all the nuclides in a
    :py:class:`~armi.nuclearDataIO.xsLibraries.XSLibrary`.
    """
    if not dummyNuclides:
        runLog.important("No dummy nuclide data provided to be added to {}".format(lib))
        return False
    elif len(lib.xsIDs) > 1:
        runLog.warning(
            "Cannot add dummy nuclide data to GAMISO library {} containing data for more than 1 XS ID.".format(lib)
        )
        return False

    addedKeys = []
    for dummy in dummyNuclides:
        key = dummy.nucLabel
        if len(lib.xsIDs):
            # suffix the library's XS ID so the key matches the other nuclides
            key += lib.xsIDs[0]
        if key in lib:
            continue

        runLog.debug("Adding {} nuclide data to {}".format(key, lib))
        newNuclide = xsNuclides.XSNuclide(lib, key)
        # Carry the isotxs metadata of the dummy nuclide over as gamiso metadata,
        # rebuilding the scattering band maps on the gamma group structure.
        for name, value in dummy.isotxsMetadata.items():
            if name in ["jj", "jband"]:
                newNuclide.gamisoMetadata[name] = {}
                for groupIdx in range(lib.gamisoMetadata["numGroups"]):
                    for blockIdx in range(lib.gamisoMetadata["maxScatteringBlocks"]):
                        newNuclide.gamisoMetadata[name][(groupIdx, blockIdx)] = 1
            else:
                newNuclide.gamisoMetadata[name] = value
        lib[key] = newNuclide
        addedKeys.append(key)

    return any(addedKeys)
class _GamisoIO(isotxs.IsotxsIO):
    """
    A reader/writer for GAMISO data files.

    Notes
    -----
    The GAMISO file format is identical to ISOTXS.
    """

    def _getFileMetadata(self):
        # GAMISO data lives on its own metadata attribute of the library
        return self._lib.gamisoMetadata

    def _getNuclideIO(self):
        return _GamisoNuclideIO

    def _rwMessage(self):
        runLog.debug("{} GAMISO data {}".format("Reading" if "r" in self._fileMode else "Writing", self))

    def _rwLibraryEnergies(self, record):
        # gamma velocity (cm/s) -- this slot holds the neutron velocity in the
        # ISOTXS layout; the "..NOT" key presumably flags it as not meaningful
        # for gamma libraries, but it must still be read/written positionally.
        metadata = self._getFileMetadata()
        metadata["gammaVelocity..NOT"] = record.rwList(
            metadata["gammaVelocity..NOT"], "float", self._metadata["numGroups"]
        )
        # read emax for each group in descending eV.
        self._lib.gammaEnergyUpperBounds = record.rwMatrix(
            self._lib.gammaEnergyUpperBounds, self._metadata["numGroups"]
        )
# Module-level convenience read/write functions, mirroring the other CCCC modules.
readBinary = _GamisoIO.readBinary
readAscii = _GamisoIO.readAscii
writeBinary = _GamisoIO.writeBinary
writeAscii = _GamisoIO.writeAscii
class _GamisoNuclideIO(isotxs._IsotxsNuclideIO):
    """
    A reader/writer for GAMISO nuclides.

    Notes
    -----
    The GAMISO file format is identical to ISOTXS; only the metadata/XS
    attributes that back the base-class hooks differ.
    """

    _FILE_LABEL = "GAMISO"

    def _getFileMetadata(self):
        # library-level GAMISO metadata
        return self._lib.gamisoMetadata

    def _getNuclideMetadata(self):
        # nuclide-level GAMISO metadata
        return self._nuclide.gamisoMetadata

    def _getMicros(self):
        # gamma cross sections take the place of the neutron micros
        return self._nuclide.gammaXS
================================================
FILE: armi/nuclearDataIO/cccc/geodst.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Read/write a CCCC GEODST geometry definition file.
GEODST files define fine and coarse meshes and mappings between
region numbers and mesh indices. They also store some zone
information.
File format definition is from [CCCC-IV]_.
Examples
--------
>>> geo = geodst.readBinary("GEODST")
>>> print(geo.xmesh)
>>> geo.zmesh[-1] *= 2 # make a modification to data
>>> geodst.writeBinary(geo, "GEODST2")
"""
import numpy as np
from armi.nuclearDataIO import cccc
# Canonical GEODST file name.
GEODST = "GEODST"

# Integer keys of the 1D "file specifications" record, in file order.
# See CCCC-IV documentation for definitions
FILE_SPEC_1D_KEYS = (
    "IGOM",
    "NZONE",
    "NREG",
    "NZCL",
    "NCINTI",
    "NCINTJ",
    "NCINTK",
    "NINTI",
    "NINTJ",
    "NINTK",
    "IMB1",
    "IMB2",
    "JMB1",
    "JMB2",
    "KMB1",
    "KMB2",
    "NBS",
    "NBCS",
    "NIBCS",
    "NZWBB",
    "NTRIAG",
    "NRASS",
    "NTHPT",
    "NGOP1",
    "NGOP2",
    "NGOP3",
    "NGOP4",
)
class GeodstData(cccc.DataContainer):
    """
    Data representation that can be read from or written to a GEODST file.

    The region numbers in this data structure START AT 1, not zero! Thus
    you must always remember the off-by-one conversion when comparing
    with list or matrix indices.

    Notes
    -----
    Analogous to a IsotxsLibrary for ISOTXS files.
    """

    def __init__(self):
        cccc.DataContainer.__init__(self)

        # 4D record data: coarse mesh boundaries and fine mesh interval counts
        self.xmesh = None
        self.ymesh = None
        self.zmesh = None
        self.iintervals = None
        self.jintervals = None
        self.kintervals = None

        # 5D record data: volumes, bucklings, boundary constants, zone info
        self.regionVolumes = None
        self.bucklings = None
        self.boundaryConstants = None
        self.internalBlackBoundaryConstants = None
        self.zonesWithBlackAbs = None
        self.zoneClassifications = None
        self.regionZoneNumber = None

        # 6D record data: region assigned to each coarse mesh interval
        self.coarseMeshRegions = None

        # 7D record data: region assigned to each fine mesh interval
        self.fineMeshRegions = None
class GeodstStream(cccc.StreamWithDataContainer):
    """
    Stream for reading to/writing from with GEODST data.

    Parameters
    ----------
    geom : GeodstData
        Data structure
    fileName: str
        path to geodst file
    fileMode: str
        string indicating if ``fileName`` is being read or written, and
        in ascii or binary format
    """

    @staticmethod
    def _getDataContainer() -> GeodstData:
        # fresh container to read into (or to be populated before writing)
        return GeodstData()

    def readWrite(self):
        """
        Step through the structure of a GEODST file and read/write it.

        Logic to control which records will be present is here, which
        comes directly off the File specification.

        .. impl:: Tool to read and write GEODST files.
            :id: I_ARMI_NUCDATA_GEODST
            :implements: R_ARMI_NUCDATA_GEODST

            Reading and writing GEODST files is performed using the general
            nuclear data I/O functionalities described in
            :need:`I_ARMI_NUCDATA`. Reading/writing a GEODST file is performed
            through the following steps:

            #. Read/write file ID record
            #. Read/write file specifications on 1D record.
            #. Based on the geometry type (``IGOM``), one of following records
               are read/written:

               * Slab (1), cylinder (2), or sphere (3): Read/write 1-D coarse
                 mesh boundaries and fine mesh intervals.
               * X-Y (6), R-Z (7), Theta-R (8), uniform triangular (9),
                 hexagonal (10), or R-Theta (11): Read/write 2-D coarse mesh
                 boundaries and fine mesh intervals.
               * R-Theta-Z (12, 15), R-Theta-Alpha (13, 16), X-Y-Z (14),
                 uniform triangular-Z (17), hexagonal-Z(18): Read/write 3-D
                 coarse mesh boundaries and fine mesh intervals.

            #. If the geometry is not zero-dimensional (``IGOM`` > 0) and
               buckling values are specified (``NBS`` > 0): Read/write geometry
               data from 5D record.
            #. If the geometry is not zero-dimensional (``IGOM`` > 0) and region
               assignments are coarse-mesh-based (``NRASS`` = 0): Read/write
               region assignments to coarse mesh interval.
            #. If the geometry is not zero-dimensional (``IGOM`` > 0) and region
               assignments are fine-mesh-based (``NRASS`` = 1): Read/write
               region assignments to fine mesh interval.
        """
        self._rwFileID()
        self._rw1DRecord()
        geomType = self._metadata["IGOM"]
        # BUGFIX: this condition used to be `0 > geomType >= 3`, which can never
        # be True (a value cannot be both negative and >= 3), so the 1-D record
        # for slab/cylinder/sphere geometries (IGOM 1-3) was silently skipped.
        if 0 < geomType <= 3:
            self._rw2DRecord()
        elif 6 <= geomType <= 11:
            self._rw3DRecord()
        elif geomType >= 12:
            self._rw4DRecord()
        # NOTE(review): the docstring above says IGOM > 0 *and* NBS > 0, but the
        # code uses `or`; kept as-is pending confirmation against the CCCC-IV spec.
        if geomType > 0 or self._metadata["NBS"] > 0:
            self._rw5DRecord()
        if geomType > 0:
            if self._metadata["NRASS"] == 0:
                self._rw6DRecord()
            elif self._metadata["NRASS"] == 1:
                self._rw7DRecord()

    def _rwFileID(self):
        """
        Read/write file id record.

        Notes
        -----
        The number 28 was actually obtained from
        a hex editor and may be code specific.
        """
        with self.createRecord() as record:
            self._metadata["label"] = record.rwString(self._metadata["label"], 28)

    def _rw1DRecord(self):
        """
        Read/write File specifications on 1D record.

        This record contains 27 integers.
        """
        with self.createRecord() as record:
            for key in FILE_SPEC_1D_KEYS:
                self._metadata[key] = record.rwInt(self._metadata[key])

    def _rw2DRecord(self):
        """Read/write 1-D coarse mesh boundaries and fine mesh intervals."""
        with self.createRecord() as record:
            # N coarse intervals have N+1 boundaries
            self._data.xmesh = record.rwList(self._data.xmesh, "double", self._metadata["NCINTI"] + 1)
            self._data.iintervals = record.rwList(self._data.iintervals, "int", self._metadata["NCINTI"])

    def _rw3DRecord(self):
        """Read/write 2-D coarse mesh boundaries and fine mesh intervals."""
        with self.createRecord() as record:
            self._data.xmesh = record.rwList(self._data.xmesh, "double", self._metadata["NCINTI"] + 1)
            self._data.ymesh = record.rwList(self._data.ymesh, "double", self._metadata["NCINTJ"] + 1)
            self._data.iintervals = record.rwList(self._data.iintervals, "int", self._metadata["NCINTI"])
            self._data.jintervals = record.rwList(self._data.jintervals, "int", self._metadata["NCINTJ"])

    def _rw4DRecord(self):
        """Read/write 3-D coarse mesh boundaries and fine mesh intervals."""
        with self.createRecord() as record:
            self._data.xmesh = record.rwList(self._data.xmesh, "double", self._metadata["NCINTI"] + 1)
            self._data.ymesh = record.rwList(self._data.ymesh, "double", self._metadata["NCINTJ"] + 1)
            self._data.zmesh = record.rwList(self._data.zmesh, "double", self._metadata["NCINTK"] + 1)
            self._data.iintervals = record.rwList(self._data.iintervals, "int", self._metadata["NCINTI"])
            self._data.jintervals = record.rwList(self._data.jintervals, "int", self._metadata["NCINTJ"])
            self._data.kintervals = record.rwList(self._data.kintervals, "int", self._metadata["NCINTK"])

    def _rw5DRecord(self):
        """Read/write Geometry data from 5D record."""
        with self.createRecord() as record:
            self._data.regionVolumes = record.rwList(self._data.regionVolumes, "float", self._metadata["NREG"])
            self._data.bucklings = record.rwList(self._data.bucklings, "float", self._metadata["NBS"])
            self._data.boundaryConstants = record.rwList(self._data.boundaryConstants, "float", self._metadata["NBCS"])
            self._data.internalBlackBoundaryConstants = record.rwList(
                self._data.internalBlackBoundaryConstants,
                "float",
                self._metadata["NIBCS"],
            )
            self._data.zonesWithBlackAbs = record.rwList(self._data.zonesWithBlackAbs, "int", self._metadata["NZWBB"])
            self._data.zoneClassifications = record.rwList(
                self._data.zoneClassifications, "int", self._metadata["NZONE"]
            )
            self._data.regionZoneNumber = record.rwList(self._data.regionZoneNumber, "int", self._metadata["NREG"])

    def _rw6DRecord(self):
        """Read/write region assignments to coarse mesh interval."""
        if self._data.coarseMeshRegions is None:
            # initialize all-zeros here before reading now that we
            # have the matrix dimension metadata available.
            self._data.coarseMeshRegions = np.zeros(
                (
                    self._metadata["NCINTI"],
                    self._metadata["NCINTJ"],
                    self._metadata["NCINTK"],
                ),
                dtype=np.int32,
            )
        # one record per axial (k) plane
        for ki in range(self._metadata["NCINTK"]):
            with self.createRecord() as record:
                self._data.coarseMeshRegions[:, :, ki] = record.rwIntMatrix(
                    self._data.coarseMeshRegions[:, :, ki],
                    self._metadata["NCINTJ"],
                    self._metadata["NCINTI"],
                )

    def _rw7DRecord(self):
        """Read/write region assignments to fine mesh interval."""
        if self._data.fineMeshRegions is None:
            # initialize all-zeros here before reading now that we
            # have the matrix dimension metadata available.
            self._data.fineMeshRegions = np.zeros(
                (
                    self._metadata["NINTI"],
                    self._metadata["NINTJ"],
                    self._metadata["NINTK"],
                ),
                dtype=np.int16,
            )
        # one record per axial (k) plane
        for ki in range(self._metadata["NINTK"]):
            with self.createRecord() as record:
                self._data.fineMeshRegions[:, :, ki] = record.rwIntMatrix(
                    self._data.fineMeshRegions[:, :, ki],
                    self._metadata["NINTJ"],
                    self._metadata["NINTI"],
                )
# Module-level convenience read/write functions, mirroring the other CCCC modules.
readBinary = GeodstStream.readBinary
readAscii = GeodstStream.readAscii
writeBinary = GeodstStream.writeBinary
writeAscii = GeodstStream.writeAscii
================================================
FILE: armi/nuclearDataIO/cccc/isotxs.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module reads and writes ISOTXS files.
ISOTXS is a binary file that contains multigroup microscopic cross sections.
ISOTXS stands for *Isotope Cross Sections*.
ISOTXS files are often created by a lattice physics code such as MC2 or DRAGON and
used as input to a global flux solver such as DIF3D.
This module implements reading and writing of the
ISOTXS file format, consistent with [CCCC-IV]_.
Examples
--------
>>> from armi.nuclearDataIO.cccc import isotxs
>>> myLib = isotxs.readBinary("ISOTXS-ref")
>>> nuc = myLib.getNuclide("U235", "AA")
>>> fis5 = nuc.micros.fission[5]
>>> scat = nuc.micros.scatter[(0, 5, 6, 1)] # 1st order elastic scatter from group 5->6
>>> nuc.micros.fission[7] = fis5 * 1.01 # you can modify the isotxs too.
>>> captureEnergy = nuc.isotxsMetadata["ecapt"]
>>> isotxs.writeBinary(myLib, "ISOTXS-modified")
"""
import itertools
import traceback
import numpy as np
from scipy import sparse
from armi import runLog
from armi.nuclearDataIO import cccc, xsLibraries, xsNuclides
from armi.utils import properties
# scattering block definitions from ISOTXS
# The definition is: block number = base constant + NN, where NN is the Legendre order
TOTAL_SCATTER = 0  # 000 + NN = total scattering for Legendre Order NN
ELASTIC_SCATTER = 100  # 100 + NN, ELASTIC SCATTERING
INELASTIC_SCATTER = 200  # 200 + NN, INELASTIC SCATTERING
N2N_SCATTER = 300  # 300 + NN, (N,2N) SCATTERING
def compareSet(fileNames, tolerance=0.0, verbose=False):
    """
    Read every named ISOTXS binary and compare all pairs, logging which match.

    Notes
    -----
    Useful for finding mcc bugs when you want to compare a series of very
    similar isotxs outputs. ``verbose`` output gets VERY long.
    """
    libraries = [readBinary(name) for name in fileNames]
    equalPairs = []
    for libA, libB in itertools.combinations(libraries, 2):
        # all unique combinations with 2 items
        runLog.info("\n*****\n*****comparing {} and {}\n*****".format(libA, libB))
        if compare(libA, libB, tolerance, verbose):
            equalPairs.append((libA, libB))
    sameFileNames = "\n" + "".join("\t{} and {}\n".format(a, b) for a, b in equalPairs)
    if not equalPairs:
        sameFileNames += "None were the same"
    runLog.info("the following libraries are the same within the specified tolerance:{}".format(sameFileNames))
def compare(lib1, lib2, tolerance=0.0, verbose=False):
    """
    Compare two XSLibraries, and return True if equal, or False if not.

    Notes
    -----
    ``tolerance`` lets the user ignore small differences caused by slightly
    different libraries or floating point arithmetic; the closer to zero, the
    more differences are reported (10**-5 is a good non-default value).
    ``verbose`` shows the XS matrices that are not equal.
    """
    equal = True
    # library-level properties first (also need to unlock to prevent from getting an exception).
    equal &= xsLibraries.compareLibraryNeutronEnergies(lib1, lib2, tolerance)
    # then the metadata
    equal &= lib1.isotxsMetadata.compare(lib2.isotxsMetadata, lib1, lib2)
    # finally every nuclide present in either library
    for nucName in set(lib1.nuclideLabels + lib2.nuclideLabels):
        nuc1 = lib1.get(nucName, None)
        nuc2 = lib2.get(nucName, None)
        if nuc1 is not None and nuc2 is not None:
            equal &= compareNuclideXS(nuc1, nuc2, tolerance, verbose, nucName=nucName)
            continue
        # a one-sided nuclide cannot be compared and makes the libraries unequal
        warning = "Nuclide {:>20} in library {} is not present in library {} and cannot be compared"
        if nuc1:
            runLog.warning(warning.format(nuc1, 1, 2))
        if nuc2:
            runLog.warning(warning.format(nuc2, 2, 1))
        equal = False
    return equal
def compareNuclideXS(nuc1, nuc2, tolerance=0.0, verbose=False, nucName=""):
    """Return True when both the metadata and the microscopic XS of two nuclides agree within tolerance."""
    # Run both comparisons unconditionally so all differences get logged.
    metadataMatch = nuc1.isotxsMetadata.compare(nuc2.isotxsMetadata, nuc1, nuc2)
    microsMatch = nuc1.micros.compare(nuc2.micros, [], tolerance, verbose, nucName=nucName)
    return metadataMatch & microsMatch
def addDummyNuclidesToLibrary(lib, dummyNuclides):
    """
    Add DUMMY nuclides to the current ISOTXS library.

    Parameters
    ----------
    lib : obj
        ISOTXS library object
    dummyNuclides: list
        List of DUMMY nuclide objects that will be copied and added to the GAMISO file

    Returns
    -------
    bool
        True when at least one dummy nuclide was added to the library.

    Notes
    -----
    Since MC2-3 does not write DUMMY nuclide information for GAMISO files, this is necessary to
    provide a consistent set of nuclide-level data across all the nuclides in a
    :py:class:`~armi.nuclearDataIO.xsLibraries.XSLibrary`.
    """
    if not dummyNuclides:
        runLog.important("No dummy nuclide data provided to be added to {}".format(lib))
        return False
    elif len(lib.xsIDs) > 1:
        runLog.warning(
            "Cannot add dummy nuclide data to ISOTXS library {} containing data for more than 1 XS ID.".format(lib)
        )
        return False
    # At most one XS ID at this point; every dummy key gets the same suffix.
    keySuffix = lib.xsIDs[0] if len(lib.xsIDs) else ""
    addedKeys = []
    for donor in dummyNuclides:
        candidateKey = donor.nucLabel + keySuffix
        if candidateKey in lib:
            # Library already carries this dummy; nothing to copy.
            continue
        newNuclide = xsNuclides.XSNuclide(lib, candidateKey)
        newNuclide.micros = donor.micros
        # Copy isotxs metadata from the isotxs metadata of the given dummy nuclide.
        # The band-structure entries (jj/jband) are replaced with trivial all-1 maps.
        for key, value in donor.isotxsMetadata.items():
            if key in ["jj", "jband"]:
                newNuclide.isotxsMetadata[key] = {mm: 1 for mm in value}
            else:
                newNuclide.isotxsMetadata[key] = value
        lib[candidateKey] = newNuclide
        addedKeys.append(candidateKey)
    return any(addedKeys)
class IsotxsIO(cccc.Stream):
    """
    A semi-abstract stream for reading and writing to a :py:class:`~armi.nuclearDataIO.isotxs.Isotxs`.

    Notes
    -----
    This is a bit of a special case compared to most other CCCC files because of the special
    nuclide-level container in addition to the XSLibrary container.
    The :py:meth:`~armi.nuclearDataIO.isotxs.IsotxsIO.readWrite` defines the ISOTXS file structure as
    specified in http://t2.lanl.gov/codes/transx-hyper/isotxs.html.
    """

    # Expected file label; stored on the file as a 24-character string in the file ID record.
    _FILE_LABEL = "ISOTXS"

    def __init__(self, fileName, lib, fileMode, getNuclideFunc):
        """
        Parameters
        ----------
        fileName : str
            Path of the ISOTXS file being read or written.
        lib : IsotxsLibrary
            Library that is the destination (read) or source (write) of the data.
        fileMode : str
            File mode; a mode containing ``"r"`` is treated as reading.
        getNuclideFunc : callable
            Maps a nuclide container key to the nuclide object to read into / write from.
        """
        cccc.Stream.__init__(self, fileName, fileMode)
        self._lib = lib
        self._metadata = self._getFileMetadata()
        self._metadata.fileNames.append(fileName)
        self._getNuclide = getNuclideFunc

    def _getFileMetadata(self):
        """Return the file-level metadata container (the library's ISOTXS metadata)."""
        return self._lib.isotxsMetadata

    def _getNuclideIO(self):
        """Return the class used to read/write individual nuclide records."""
        return _IsotxsNuclideIO

    @classmethod
    def _read(cls, fileName, fileMode):
        # Reading: start from an empty library and create nuclide containers on demand.
        lib = xsLibraries.IsotxsLibrary()
        return cls._readWrite(
            lib,
            fileName,
            fileMode,
            lambda containerKey: xsNuclides.XSNuclide(lib, containerKey),
        )

    @classmethod
    def _write(cls, lib, fileName, fileMode):
        # Writing: nuclide containers already exist in the library; just look them up.
        return cls._readWrite(lib, fileName, fileMode, lambda containerKey: lib[containerKey])

    @classmethod
    def _readWrite(cls, lib, fileName, fileMode, getNuclideFunc):
        """Open the stream as a context manager and run the record-by-record read/write."""
        with cls(fileName, lib, fileMode, getNuclideFunc) as rw:
            rw.readWrite()
        return lib

    def _rwMessage(self):
        """Emit a debug message stating whether this stream is reading or writing."""
        runLog.debug("{} ISOTXS data {}".format("Reading" if "r" in self._fileMode else "Writing", self))

    def _updateFileLabel(self):
        """
        Update the file label when reading in the ISOTXS-like file if it differs from its expected value.

        Notes
        -----
        This occurs when MC2-3 is preparing GAMISO files.
        The merging of ISOTXS-like files fail if the labels are not unique (i.e. merging ISOTXS into GAMISO with
        each file having a file label of `ISOTXS`.
        """
        if self._metadata["label"] != self._FILE_LABEL:
            runLog.debug(
                "File label in {} is not the expected type. Updating the label from {} to {}".format(
                    self, self._metadata["label"], self._FILE_LABEL
                )
            )
            self._metadata["label"] = self._FILE_LABEL

    def readWrite(self):
        """Read and write ISOTXS file.

        .. impl:: Tool to read and write ISOTXS files.
            :id: I_ARMI_NUCDATA_ISOTXS
            :implements: R_ARMI_NUCDATA_ISOTXS

            Reading and writing ISOTXS files is performed using the general
            nuclear data I/O functionalities described in
            :need:`I_ARMI_NUCDATA`. Reading/writing a ISOTXS file is performed
            through the following steps:

            #. Read/write file ID record
            #. Read/write file 1D record, which includes:

                * Number of energy groups (``NGROUP``)
                * Maximum number of up-scatter groups (``MAXUP``)
                * Maximum number of down-scatter groups (``MAXDN``)
                * Maximum scattering order (``MAXORD``)
                * File-wide specification on fission spectrum type, i.e. vector
                  or matrix (``ICHIST``)
                * Maximum number of blocks of scattering data (``MSCMAX``)
                * Subblocking control for scatter matrices (``NSBLOK``)

            #. Read/write file 2D record, which includes:

                * Library IDs for each isotope (``HSETID(I)``)
                * Isotope names (``HISONM(I)``)
                * Global fission spectrum (``CHI(J)``) if file-wide spectrum is
                  specified (``ICHIST`` = 1)
                * Energy group structure (``EMAX(J)`` and ``EMIN``)
                * Locations of each nuclide record in the file (``LOCA(I)``)

                .. note::
                    The offset data is not read from the binary file because
                    the ISOTXS reader can dynamically calculate the offset
                    itself. Therefore, during a read operation, this data is
                    ignored.

            #. Read/write file 4D record for each nuclide, which includes
               isotope-dependent, group-independent data.
            #. Read/write file 5D record for each nuclide, which includes
               principal cross sections.
            #. Read/write file 6D record for each nuclide, which includes
               fission spectrum if it is flagged as a matrix (``ICHI`` > 1).
            #. Read/write file 7D record for each nuclide, which includes the
               scattering matrices.
        """
        self._rwMessage()
        # Unlock so immutable library properties (e.g. energy bounds) can be assigned while reading.
        properties.unlockImmutableProperties(self._lib)
        try:
            self._fileID()
            numNucs = self._rw1DRecord(len(self._lib))
            nucNames = self._rw2DRecord(numNucs, self._lib.nuclideLabels)
            if self._metadata["fileWideChiFlag"] > 1:
                # file-wide chi matrix (3D record); currently raises NotImplementedError
                self._rw3DRecord()
            for nucLabel in nucNames:
                # read nuclide name, other global stuff from the ISOTXS library
                nuc = self._getNuclide(nucLabel)
                if "r" in self._fileMode:
                    # only add nuclides to the library when reading
                    self._lib[nucLabel] = nuc
                nuclideIO = self._getNuclideIO()(nuc, self, self._lib)
                nuclideIO.rwNuclide()
        except Exception:
            # Re-raise with file context; the original traceback text is embedded in the message.
            raise OSError("Failed to read/write {} \n\n\n{}".format(self, traceback.format_exc()))
        finally:
            properties.lockImmutableProperties(self._lib)

    def _fileID(self):
        """Read/write the file identification record (24-character label + integer file ID)."""
        with self.createRecord() as record:
            self._metadata["label"] = record.rwString(self._metadata["label"], 24)
            self._metadata["fileId"] = record.rwInt(self._metadata["fileId"])
        self._updateFileLabel()

    def _rw1DRecord(self, numNucs):
        """Read/write the file control (1D) record and return the number of nuclides on the file."""
        with self.createRecord() as record:
            self._metadata["numGroups"] = record.rwInt(self._metadata["numGroups"])
            numNucs = record.rwInt(numNucs)
            self._metadata["maxUpScatterGroups"] = record.rwInt(self._metadata["maxUpScatterGroups"])
            self._metadata["maxDownScatterGroups"] = record.rwInt(self._metadata["maxDownScatterGroups"])
            self._metadata["maxScatteringOrder"] = record.rwInt(self._metadata["maxScatteringOrder"])
            self._metadata["fileWideChiFlag"] = record.rwInt(self._metadata["fileWideChiFlag"])
            self._metadata["maxScatteringBlocks"] = record.rwInt(self._metadata["maxScatteringBlocks"])
            self._metadata["subblockingControl"] = record.rwInt(self._metadata["subblockingControl"])
        return numNucs

    def _rw2DRecord(self, numNucs, nucNames):
        """
        Read 2D ISOTXS record.

        Notes
        -----
        Contains isotope names, global chi distribution, energy group structure, and locations of
        each nuclide record in the file
        """
        with self.createRecord() as record:
            # skip "merger test..." string
            self._metadata["libraryLabel"] = record.rwString(self._metadata["libraryLabel"], 12 * 8)
            nucNames = record.rwList(nucNames, "string", numNucs, 8)
            if self._metadata["fileWideChiFlag"] == 1:
                # file-wide chi distribution vector listed here.
                self._metadata["chi"] = record.rwMatrix(self._metadata["chi"], self._metadata["numGroups"])
            self._rwLibraryEnergies(record)
            self._metadata["minimumNeutronEnergy"] = record.rwFloat(self._metadata["minimumNeutronEnergy"])
            # nuclide record offsets are recomputed, never read (see _computeNuclideRecordOffset)
            record.rwList(self._computeNuclideRecordOffset(), "int", numNucs)
        return nucNames

    def _rwLibraryEnergies(self, record):
        """Read/write the group velocities and upper energy bounds onto the library itself."""
        # neutron velocity (cm/s)
        self._lib.neutronVelocity = record.rwMatrix(self._lib.neutronVelocity, self._metadata["numGroups"])
        # read emax for each group in descending eV.
        self._lib.neutronEnergyUpperBounds = record.rwMatrix(
            self._lib.neutronEnergyUpperBounds, self._metadata["numGroups"]
        )

    def _rw3DRecord(self):
        """Read file-wide chi-distribution matrix."""
        raise NotImplementedError

    def _computeNuclideRecordOffset(self):
        """
        Compute the record offset of each nuclide.

        Notes
        -----
        The offset data is not read from the binary file because the ISOTXS
        reader can dynamically calculate the offset itself. Therefore, during a
        read operation, this data is ignored.
        """
        recordsPerNuclide = [self._computeNumIsotxsRecords(nuc) for nuc in self._lib.nuclides]
        # cumulative sum: offset of nuclide ii is the record count of all preceding nuclides
        return [sum(recordsPerNuclide[0:ii]) for ii in range(len(self._lib))]

    def _computeNumIsotxsRecords(self, nuclide):
        """Compute the number of ISOTXS records for a specific nuclide."""
        # every nuclide carries the 4D (control) and 5D (principal XS) records
        numRecords = 2
        metadata = self._getNuclideIO()(nuclide, self, self._lib)._getNuclideMetadata()
        if metadata["chiFlag"] > 1:
            # a chi matrix (6D record) is present
            numRecords += 1
        # one scatter-matrix (7D) record per scattering block with a nonzero order count
        numRecords += sum(1 for _ord in metadata["ords"] if _ord > 0)
        return numRecords
# Module-level convenience aliases so callers can use e.g. isotxs.readBinary(...)
# without referencing the IsotxsIO class directly.
readBinary = IsotxsIO.readBinary
readAscii = IsotxsIO.readAscii
writeBinary = IsotxsIO.writeBinary
writeAscii = IsotxsIO.writeAscii
class _IsotxsNuclideIO:
    """
    A reader/writer class for ISOTXS nuclides.

    Notes
    -----
    This is to be used in conjunction with an IsotxsIO object.
    """

    def __init__(self, nuclide, isotxsIO, lib):
        """
        Parameters
        ----------
        nuclide : XSNuclide
            The nuclide container to read into / write from.
        isotxsIO : IsotxsIO
            The parent stream; used to create file records.
        lib : IsotxsLibrary
            The library, whose file-level metadata supplies group and scattering-block info.
        """
        self._nuclide = nuclide
        self._metadata = self._getNuclideMetadata()
        self._isotxsIO = isotxsIO
        self._lib = lib
        # cache frequently-used file-level metadata values
        self._fileWideChiFlag = self._getFileMetadata()["fileWideChiFlag"]
        self._fileWideChi = self._getFileMetadata()["chi"]
        self._numGroups = self._getFileMetadata()["numGroups"]
        self._maxScatteringBlocks = self._getFileMetadata()["maxScatteringBlocks"]
        self._subblockingControl = self._getFileMetadata()["subblockingControl"]

    def _getFileMetadata(self):
        """Return the file-level (library) ISOTXS metadata."""
        return self._lib.isotxsMetadata

    def _getNuclideMetadata(self):
        """Return this nuclide's ISOTXS metadata."""
        return self._nuclide.isotxsMetadata

    def _getMicros(self):
        """Return this nuclide's microscopic cross section container."""
        return self._nuclide.micros

    def rwNuclide(self):
        """Read nuclide name, other global stuff from the ISOTXS library."""
        properties.unlockImmutableProperties(self._nuclide)
        try:
            self._rw4DRecord()
            self._nuclide.updateBaseNuclide()
            self._rw5DRecord()
            if self._metadata["chiFlag"] > 1:
                # nuclide-level chi matrix (6D record); currently raises NotImplementedError
                self._rw6DRecord()
            # get scatter matrix
            for blockNumIndex in range(self._maxScatteringBlocks):
                for subBlock in range(self._subblockingControl):
                    if self._metadata["ords"][blockNumIndex] > 0:
                        # a nonzero order count means this type of scattering exists on this nuclide
                        self._rw7DRecord(blockNumIndex, subBlock)
        finally:
            properties.lockImmutableProperties(self._nuclide)

    def _rw4DRecord(self):
        """
        Read 4D ISOTXS record.

        Notes
        -----
        Read the following individual nuclide XS record. Load data into nuc.
        This record contains non-mg data like atomic mass, temperature, and some flags.
        """
        with self._isotxsIO.createRecord() as nucRecord:
            # read string data
            for datum in ["nuclideId", "libName", "isoIdent"]:
                self._metadata[datum] = nucRecord.rwString(self._metadata[datum], 8)
            # read float data
            for datum in ["amass", "efiss", "ecapt", "temp", "sigPot", "adens"]:
                self._metadata[datum] = nucRecord.rwFloat(self._metadata[datum])
            # read integer data
            for datum in [
                "classif",
                "chiFlag",
                "fisFlag",
                "nalph",
                "np",
                "n2n",
                "nd",
                "nt",
                "ltot",
                "ltrn",
                "strpd",
            ]:
                self._metadata[datum] = nucRecord.rwInt(self._metadata[datum])
            # defines what kind of scattering block each block is; total, inelastic, elastic, n2n
            self._metadata["scatFlag"] = nucRecord.rwList(self._metadata["scatFlag"], "int", self._maxScatteringBlocks)
            # number of scattering orders in this block. if 0, this block isn't present.
            self._metadata["ords"] = nucRecord.rwList(self._metadata["ords"], "int", self._maxScatteringBlocks)
            # bandwidth of this block: number of groups that scatter into this group, including this one.
            # stored as a dict keyed on (group, block) tuples
            jband = self._metadata["jband"] or {}
            for n in range(self._maxScatteringBlocks):
                for j in range(self._numGroups):
                    jband[j, n] = nucRecord.rwInt(jband.get((j, n), None))
            self._metadata["jband"] = jband
            # position of in-group scattering for scattering data in group j
            jj = self._metadata["jj"] or {}
            # Some mcc**2 cases seem to just have a bunch of 1's listed here.
            # does this mean we never have upscatter? possibly.
            for n in range(self._maxScatteringBlocks):
                for j in range(self._numGroups):
                    jj[j, n] = nucRecord.rwInt(jj.get((j, n), None))
            self._metadata["jj"] = jj

    def _rw5DRecord(self):
        """Read principal microscopic MG XS data for a nuclide."""
        with self._isotxsIO.createRecord() as record:
            micros = self._getMicros()
            nuc = self._nuclide
            numGroups = self._numGroups
            micros.transport = record.rwMatrix(micros.transport, self._metadata["ltrn"], numGroups)
            micros.total = record.rwMatrix(micros.total, self._metadata["ltot"], numGroups)
            micros.nGamma = record.rwMatrix(micros.nGamma, numGroups)
            if self._metadata["fisFlag"] > 0:
                micros.fission = record.rwMatrix(micros.fission, numGroups)
                micros.neutronsPerFission = record.rwMatrix(micros.neutronsPerFission, numGroups)
            else:
                # non-fissile: fill with default (zero) XS rather than leaving unset
                micros.fission = micros.getDefaultXs(numGroups)
                micros.neutronsPerFission = micros.getDefaultXs(numGroups)
            if self._metadata["chiFlag"] == 1:
                # nuclide-level chi vector
                micros.chi = record.rwMatrix(micros.chi, numGroups)
            elif self._metadata["fisFlag"] > 0:
                # fissile with no nuclide-level chi: must fall back to the file-wide chi vector
                if self._fileWideChiFlag != 1:
                    raise OSError("Fissile nuclide {} in library but no individual or global chi!".format(nuc))
                micros.chi = self._fileWideChi
            else:
                micros.chi = micros.getDefaultXs(numGroups)
            # read some other important XS, if they exist
            for xstype in ["nalph", "np", "n2n", "nd", "nt"]:
                if self._metadata[xstype]:
                    micros.__dict__[xstype] = record.rwMatrix(micros.__dict__[xstype], numGroups)
                else:
                    micros.__dict__[xstype] = micros.getDefaultXs(numGroups)
            # coordinate direction transport cross section (for various coordinate directions)
            if self._metadata["strpd"] > 0:
                micros.strpd = record.rwMatrix(micros.strpd, self._metadata["strpd"], numGroups)
            else:
                micros.strpd = micros.getDefaultXs(numGroups)

    def _rw6DRecord(self):
        """Reads nuclide-level chi dist."""
        raise NotImplementedError

    def _rw7DRecord(self, blockNumIndex, subBlock):
        """
        Read scatter matrix.

        Parameters
        ----------
        blockNumIndex : int
            Index of the scattering block (aka type of scattering) in this nuclide

        subBlock : int
            Index-tracking integer. Since neutrons don't scatter to and from all energies,
            there is a bandwidth defined to save on storage.

        Notes
        -----
        The data is stored as a giant array, and read in as a CSR matrix. The below matrix is
        lower triangular, where periods are non-zero.

        . 0 0 0 0 0
        . . 0 0 0 0
        . . . 0 0 0
        . . . . 0 0
        . . . . . 0
        . . . . . .

        The data is read in rows starting at the top and going to the bottom.
        Per row, there are JBAND non-zero entries. Per row, there are JJ non-zero entries on or
        beyond the diagonal.

        . 0 0 0 0 0
        - - - - - -
        - - - - - -
        - - - - - -
        - - - - - -
        - - - - - -

        Additionally, the data is reversed for whatever reason. So, let's say we are reading the
        third row in our fictitious matrix. JBAND is 2, JJ is 1. We will read "1" first, and then
        "2" from the ISOTXS. Since they are backwards, we need to reverse the numbers before
        putting them into the matrix.

        . 0 0 0 0 0
        . . - - - -
        . 2 1 - - -
        - - - - - -
        - - - - - -
        - - - - - -

        However, since we are reading a CSR, we can just add the indices in reverse (this is fast)
        and read the data in as is (which is a bit slower). Then we will allow the CSR matrix to
        fix the order later on, if necessary.
        """
        # scatter is None while reading (nothing stored yet); dense array while writing
        scatter = self._getScatterMatrix(blockNumIndex)
        if scatter is not None:
            scatter = scatter.toarray()
        with self._isotxsIO.createRecord() as record:
            ng = self._numGroups
            nsblok = self._subblockingControl
            m = subBlock + 1  # fix starting at zero problem and use same indices as CCCC specification
            # be careful with starting indices at 0 here!!
            lordn = self._metadata["ords"][blockNumIndex]
            # this is basically how many scattering cross sections there are for this scatter type for this nuclide
            jl = (m - 1) * ((ng - 1) // nsblok + 1) + 1
            jup = m * ((ng - 1) // nsblok + 1)
            ju = min(ng, jup)
            metadata = self._metadata
            # CSR construction arrays (only used when reading)
            indptr = [0]
            indices = []
            dataVals = []
            for _scatterLoopOrder in range(lordn):
                for g in range(jl - 1, ju):
                    # band of source groups for destination group g: [jdown, jup)
                    jup = g + metadata["jj"][g, blockNumIndex]
                    bandWidth = metadata["jband"][g, blockNumIndex]
                    jdown = jup - bandWidth
                    if scatter is None:
                        indptr.append(len(indices) + bandWidth)
                        # add the indices in reverse
                        indices.extend(range(jup - 1, jdown - 1, -1))
                        # read the data as-is
                        for _ in range(bandWidth):
                            dataVals.append(record.rwFloat(0.0))
                    else:
                        # writing: emit the row band in reversed order to match the file layout
                        for xs in reversed(scatter[g, jdown:jup].tolist()):
                            record.rwFloat(xs)
            if scatter is None:
                # we're reading.
                scatter = sparse.csr_matrix((np.array(dataVals), indices, indptr), shape=(ng, ng))
                scatter.eliminate_zeros()
                self._setScatterMatrix(blockNumIndex, scatter)

    def _getScatterBlockNum(self, scatterType):
        """
        Determine the index of the scattering block with the given scatter-type flag.

        This information is stored in the scatFlag libparam and is
        possibly different for each nuclide (e.g. C, B-10, etc.)

        Parameters
        ----------
        scatterType : int
            ISOTXS-defined special int flag for a scatter type (100 for elastic, etc.)

        Returns
        -------
        blockNum : int or None
            An index of the scatter matrix, or None when this nuclide has no such block.
        """
        try:
            return np.where(self._metadata["scatFlag"] == scatterType)[0][0]
        except IndexError:
            # scatterType not present on this nuclide
            return None

    def _getElasticScatterBlockNumIndex(self, legendreOrder=0):
        """Return the block index for elastic scattering of the given Legendre order."""
        return self._getScatterBlockNum(ELASTIC_SCATTER + legendreOrder)

    def _getInelasticScatterBlockNumIndex(self):
        """Return the block index for inelastic scattering."""
        return self._getScatterBlockNum(INELASTIC_SCATTER)

    def _getN2nScatterBlockNumIndex(self):
        """Return the block index for (n,2n) scattering."""
        return self._getScatterBlockNum(N2N_SCATTER)

    def _getTotalScatterBlockNumIndex(self):
        """Return the block index for total scattering."""
        return self._getScatterBlockNum(TOTAL_SCATTER)

    def _setScatterMatrix(self, blockNumIndex, scatterMatrix):
        """
        Sets scatter matrix data to the proper ``scatterMatrix`` for this ``blockNum``.

        blockNumIndex : int
            Index of a scattering block.
        """
        if blockNumIndex == self._getElasticScatterBlockNumIndex():
            self._getMicros().elasticScatter = scatterMatrix
        elif blockNumIndex == self._getInelasticScatterBlockNumIndex():
            self._getMicros().inelasticScatter = scatterMatrix
        elif blockNumIndex == self._getN2nScatterBlockNumIndex():
            self._getMicros().n2nScatter = scatterMatrix
        elif blockNumIndex == self._getTotalScatterBlockNumIndex():
            self._getMicros().totalScatter = scatterMatrix
        elif blockNumIndex == self._getElasticScatterBlockNumIndex(1):
            self._getMicros().elasticScatter1stOrder = scatterMatrix
        else:
            # anything unrecognized is kept in the higher-order scatter map, keyed by block index
            self._getMicros().higherOrderScatter[blockNumIndex] = scatterMatrix

    def _getScatterMatrix(self, blockNumIndex):
        """
        Get the scatter matrix for a particular blockNum.

        Notes
        -----
        This logic could be combined with _setScatterMatrix.
        """
        if blockNumIndex == self._getElasticScatterBlockNumIndex():
            scatterMatrix = self._getMicros().elasticScatter
        elif blockNumIndex == self._getInelasticScatterBlockNumIndex():
            scatterMatrix = self._getMicros().inelasticScatter
        elif blockNumIndex == self._getN2nScatterBlockNumIndex():
            scatterMatrix = self._getMicros().n2nScatter
        elif blockNumIndex == self._getTotalScatterBlockNumIndex():
            scatterMatrix = self._getMicros().totalScatter
        elif blockNumIndex == self._getElasticScatterBlockNumIndex(1):
            scatterMatrix = self._getMicros().elasticScatter1stOrder
        else:
            scatterMatrix = self._getMicros().higherOrderScatter.get(blockNumIndex, None)
        return scatterMatrix
================================================
FILE: armi/nuclearDataIO/cccc/labels.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Reads and writes region and composition label data from a LABELS interface file.
LABELS files are produced by DIF3D/VARIANT. They are very similar in structure
and format to CCCC files but are not officially in the CCCC documents.
The file structure is listed here::
RECORD TYPE PRESENT IF
=============================== ================
FILE IDENTIFICATION ALWAYS
SPECIFICATIONS ALWAYS
LABEL AND AREA DATA ALWAYS
FINITE-GEOMETRY TRANSVERSE NHTS1.GT.0 OR
DISTANCES NGTS2.GT.0
NUCLIDE SET LABELS NSETS.GT.1
ALIAS ZONE LABELS NALIAS.GT.0
GENERAL CONTROL-ROD MODEL DATA NBANKS.GT.0
***********(REPEAT FOR ALL BANKS)
* CONTROL-ROD BANK DATA NBANKS.GT.0
*
* *******(REPEAT FOR ALL RODS IN BANK)
* * CONTROL-ROD CHANNEL DATA (LLCHN+LLROD+MMESH).GT.0
**********
BURNUP DEPENDENT CROSS SECTION NVARY.GT.0
SPECIFICATIONS
BURNUP DEPENDENT GROUPS MAXBRN.GT.0
BURNUP DEPENDENT FITTING MAXORD.GT.0
COEFFICIENTS
Reference: [DIF3D]_.
Examples
--------
>>> labelData = LabelsStream.readBinary("LABELS")
"""
from armi import runLog
from armi.nuclearDataIO import cccc
# Canonical name of the LABELS interface file.
LABELS = "LABELS"

# Keys of the LABELS file-specification (1D) record, in the order they appear on the file.
FILE_SPEC_1D_KEYS = [
    "numZones",
    "numRegions",
    "numAreas",
    "numRegionAreaAssignments",
    "numHalfHeightsDirection1",
    "numHalfHeightsDirection2",
    "numNuclideSets",
    "numZoneAliases",
    "numTrianglesPerHex",
    "numHexagonalRings",
    "numControlRodChannels",
    "numControlRodBanks",
    "numAxialFineMeshBins",
    "maxControlRodBankTimes",
    "maxControlRodsPerBank",
    "maxControlRodsMeshes",
    "maxControlRodPieces",
    "maxControlRodChannels",
    "numBurnupDependentIsotopes",
    "maxBurnupDependentGroups",
    "maxBurnupPolynomialOrder",
    "modelDimensions",
]
class LabelsData(cccc.DataContainer):
    """
    Data structure containing various region, zone, area, nuclide labels.

    This is the data structure that is read from or written to a LABELS file.
    """

    def __init__(self):
        cccc.DataContainer.__init__(self)
        # Every label/geometry collection starts out empty; LabelsStream populates them.
        for attrName in (
            "regionLabels",
            "zoneLabels",
            "areaLabels",
            "regionAreaAssignments",
            "halfHeightsDirection1",
            "halfHeightsDirection2",
            "extrapolationDistance1",
            "extrapolationDistance2",
            "nuclideSetLabels",
            "aliasZoneLabels",
        ):
            setattr(self, attrName, [])
class LabelsStream(cccc.StreamWithDataContainer):
    """
    Class for reading and writing the LABELS interface file produced by DIF3D/VARIANT.

    Notes
    -----
    Contains region and composition labels, area data, half heights, nuclide set labels, alias zone labels,
    control-rod model data, and burnup dependent cross section data.

    See Also
    --------
    armi.nuclearDataIO.cccc.compxs
    """

    @staticmethod
    def _getDataContainer() -> LabelsData:
        """Return a fresh LabelsData container for this stream to fill or drain."""
        return LabelsData()

    def readWrite(self):
        """Read or write all LABELS records; optional records are gated by the 1D-record counts."""
        runLog.info("{} LABELS data {}".format("Reading" if "r" in self._fileMode else "Writing", self))
        self._rwFileID()
        self._rw1DRecord()
        self._rw2DRecord()
        # finite-geometry transverse distances: present only with half-height data
        if self._metadata["numHalfHeightsDirection1"] > 0 or self._metadata["numHalfHeightsDirection2"] > 0:
            self._rw3DRecord()
        if self._metadata["numNuclideSets"] > 1:
            self._rw4DRecord()
        if self._metadata["numZoneAliases"] > 0:
            self._rw5DRecord()
        # control-rod records (6D-8D): currently unimplemented and will raise
        if self._metadata["numControlRodBanks"] > 0:
            self._rw6DRecord()
            self._rw7DRecord()
            self._rw8DRecord()
        # burnup-dependent XS records (9D-11D): currently unimplemented and will raise
        if self._metadata["numBurnupDependentIsotopes"] > 0:
            self._rw9DRecord()
        if self._metadata["maxBurnupDependentGroups"] > 0:
            self._rw10DRecord()
        if self._metadata["maxBurnupPolynomialOrder"] > 0:
            self._rw11DRecord()

    def _rwFileID(self):
        """Read/write the file identification record (three 8-char strings + version int)."""
        with self.createRecord() as record:
            for name in ["hname", "huse", "huse2"]:
                self._metadata[name] = record.rwString(self._metadata[name], 8)
            self._metadata["version"] = record.rwInt(self._metadata["version"])

    def _rw1DRecord(self):
        """Read/write the file specification data."""
        with self.createRecord() as record:
            for param in FILE_SPEC_1D_KEYS:
                self._metadata[param] = record.rwInt(self._metadata[param])
            # two unused trailing integers on the record
            self._metadata["dummy"] = record.rwList(self._metadata["dummy"], "int", 2)

    def _rw2DRecord(self):
        """Read/write the label and area data."""
        with self.createRecord() as record:
            self._data.zoneLabels = record.rwList(self._data.zoneLabels, "string", self._metadata["numZones"], 8)
            self._data.regionLabels = record.rwList(
                self._data.regionLabels,
                "string",
                self._metadata["numRegions"],
                8,
            )
            self._data.areaLabels = record.rwList(self._data.areaLabels, "string", self._metadata["numAreas"], 8)
            self._data.regionAreaAssignments = record.rwList(
                self._data.regionAreaAssignments,
                "string",
                self._metadata["numRegionAreaAssignments"],
                8,
            )

    def _rw3DRecord(self):
        """Read/write the finite-geometry transverse distances."""
        with self.createRecord() as record:
            self._data.halfHeightsDirection1 = record.rwList(
                self._data.halfHeightsDirection1,
                "float",
                self._metadata["numHalfHeightsDirection1"],
            )
            self._data.extrapolationDistance1 = record.rwList(
                self._data.extrapolationDistance1,
                "float",
                self._metadata["numHalfHeightsDirection1"],
            )
            self._data.halfHeightsDirection2 = record.rwList(
                self._data.halfHeightsDirection2,
                "float",
                self._metadata["numHalfHeightsDirection2"],
            )
            self._data.extrapolationDistance2 = record.rwList(
                self._data.extrapolationDistance2,
                "float",
                self._metadata["numHalfHeightsDirection2"],
            )

    def _rw4DRecord(self):
        """Read/write the nuclide labels."""
        with self.createRecord() as record:
            self._data.nuclideSetLabels = record.rwList(
                self._data.nuclideSetLabels,
                "string",
                self._metadata["numNuclideSets"],
                8,
            )

    def _rw5DRecord(self):
        """Read/write the zone aliases."""
        with self.createRecord() as record:
            self._data.aliasZoneLabels = record.rwList(
                self._data.aliasZoneLabels,
                "string",
                self._metadata["numZoneAliases"],
                8,
            )

    def _rw6DRecord(self):
        """Read/write the general control-rod model data."""
        raise NotImplementedError("Control rod data not implemented")

    def _rw7DRecord(self):
        """Read/write the control-rod bank data."""
        raise NotImplementedError("Control rod data not implemented")

    def _rw8DRecord(self):
        """Read/write the control-rod channel data."""
        raise NotImplementedError("Control rod data not implemented")

    def _rw9DRecord(self):
        """Read/write the burnup-dependent cross section specifications."""
        raise NotImplementedError("BU dependent XS data not implemented")

    def _rw10DRecord(self):
        """Read/write the burnup-dependent group data."""
        raise NotImplementedError("BU dependent XS data not implemented")

    def _rw11DRecord(self):
        """Read/write the burnup-dependent fitting coefficient data."""
        raise NotImplementedError("BU dependent XS data not implemented")
# Module-level convenience aliases so callers can use e.g. labels.readBinary(...)
# without referencing the LabelsStream class directly.
readBinary = LabelsStream.readBinary
readAscii = LabelsStream.readAscii
writeBinary = LabelsStream.writeBinary
writeAscii = LabelsStream.writeAscii
================================================
FILE: armi/nuclearDataIO/cccc/nhflux.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
NHFLUX is a CCCC interface file that stores flux moments and partial currents from
DIF3D-Nodal and DIF3D-VARIANT.
Examples
--------
>>> nhfluxData = NhfluxStream.readBinary("NHFLUX")
>>> NhfluxStream.writeAscii(nhfluxData, "nhflux.ascii")
"""
import numpy as np
from armi.nuclearDataIO import cccc
# Keys of the NHFLUX file-specification (1D) record common to DIF3D-Nodal and
# DIF3D-VARIANT, in the order they appear on the file.
FILE_SPEC_1D_KEYS = (
    "ndim",
    "ngroup",
    "ninti",
    "nintj",
    "nintk",
    "iter",
    "effk",
    "power",
    "nSurf",
    "nMom",
    "nintxy",
    "npcxy",
    "nscoef",
    "itrord",
    "iaprx",
    "ileak",
    "iaprxz",
    "ileakz",
    "iorder",
)

# Additional 1D-record keys present only on DIF3D-VARIANT (v11.0) files.
FILE_SPEC_1D_KEYS_VARIANT11 = (
    "npcbdy",
    "npcsym",
    "npcsec",
    "iwnhfl",
    "nMoms",
)
class NHFLUX(cccc.DataContainer):
"""
An abstraction of a NHFLUX file. This format is defined in the DIF3D manual. Note that the
format for DIF3D-Nodal and DIF3D-VARIANT are not the same. The VARIANT NHFLUX format has
recently changed, so this reader is only compatible with files produced by v11.0 of the solver.
Attributes
----------
metadata : file control
The NHFLUX file control info (sort of global for this library). This is the contents of the
1D data block on the file.
incomingPointersToAllAssemblies: 2-D list of floats
This is an index map for the "internal surfaces" between DIF3D nodal indexing and DIF3D
GEODST indexing. It can be used to process incoming partial currents. This uses the same
ordering as the geodstCoordMap attribute.
externalCurrentPointers : list of ints
This is an index map for the "external surfaces" between DIF3D nodal indexing and DIF3D
GEODST indexing. "External surfaces" are important because they contain the INCOMING partial
currents from the outer reactor boundary. This uses the same ordering as geodstCoordMap,
except that each assembly now has multiple subsequent indices. For example, for a hexagonal
core, if hex of index n (0 to N-1) has a surface of index k (0 to 5) that lies on the vacuum
boundary, then the index of that surface is N*6 + k + 1.
geodstCoordMap : list of ints
This is an index map between DIF3D nodal and DIF3D GEODST. It is necessary for interpreting
the ordering of flux and partial current data in the NHFLUX file. Note that this mapping
between DIF3D-Nodal and DIF3D-VARIANT is not the same.
    outgoingPCSymSecPointers: list of ints
        This is an index map for the outgoing partial currents on the symmetric and sector lateral
        boundary. It is only present for DIF3D-VARIANT for hexagonal cores.
    ingoingPCSymSecPointers: list of ints
        This is an index map for the ingoing (or incoming) partial currents on the symmetric and
        sector lateral boundary. It is only present for DIF3D-VARIANT for hexagonal cores.
fluxMomentsAll : 4-D list of floats
This contains all the flux moments for all core assemblies. The jth planar flux moment of
assembly i in group g in axial node k is fluxMoments[i][k][j][g]. The assemblies are ordered
according to the geodstCoordMap attribute. For DIF3D-VARIANT, this includes both even and
odd parity moments.
partialCurrentsHexAll : 5-D list of floats
This contains all the OUTGOING partial currents for all core assemblies. The OUTGOING
partial current on surface j in assembly i in axial node k in group g is
partialCurrentsHex[i][k][j][g][m], where m=0. The assemblies are ordered according to the
geodstCoordMap attribute. For DIF3D-VARIANT, higher-order data is available for the m axis.
partialCurrentsHex_extAll : 4-D list of floats
This contains all the INCOMING partial currents on "external surfaces", which are adjacent
to the reactor outer boundary (usually vacuum). Internal reflective surfaces are NOT
included in this! These "external surfaces" are ordered according to
externalCurrentPointers. For DIF3D-VARIANT, higher-order data is available for the last
axis.
partialCurrentsZAll : 5-D list of floats
This contains all the upward and downward partial currents for all core assemblies. The
assemblies are ordered according to the geodstCoordMap attribute. For DIF3D-VARIANT, higher-
order data is available for the last axis.
Warning
-------
DIF3D outputs NHFLUX at every time node, but REBUS outputs NHFLUX only at every cycle.
See Also
--------
[VARIANT-95]_ and [VARIANT-2014]_.
.. [VARIANT-95] G. Palmiotti, E. E. Lewis, and C. B. Carrico, VARIANT: VARIational Anisotropic
Nodal Transport for Multidimensional Cartesian and Hexagonal Geometry Calculation, ANL-95/40,
Argonne National Laboratory, Argonne, IL (October 1995).
.. [VARIANT-2014] Smith, M. A., Lewis, E. E., and Shemon, E. R. DIF3D-VARIANT 11.0: A Decade of
Updates. United States: N. p., 2014. Web. doi:10.2172/1127298.
https://publications.anl.gov/anlpubs/2014/04/78313.pdf
"""
def __init__(self, fName="NHFLUX", variant=False, numDataSetsToRead=1):
"""
Initialize the NHFLUX or NAFLUX reader object.
Parameters
----------
fName : str, optional
Filename of the NHFLUX binary file to be read.
variant : bool, optional
Whether or not this NHFLUX/NAFLUX file has the DIF3D-VARIANT output format, which is
different than the DIF3D-Nodal format.
"""
cccc.DataContainer.__init__(self)
self.metadata["variantFlag"] = variant
self.metadata["numDataSetsToRead"] = numDataSetsToRead
# Initialize instance array variables
self.incomingPointersToAllAssemblies: np.ndarray = np.array([])
self.externalCurrentPointers: np.ndarray = np.array([])
self.geodstCoordMap: np.ndarray = np.array([])
if self.metadata["variantFlag"]:
self.outgoingPCSymSecPointers: np.ndarray = np.array([])
self.ingoingPCSymSecPointers: np.ndarray = np.array([])
self.fluxMomentsAll: np.ndarray = np.array([])
self.partialCurrentsHexAll: np.ndarray = np.array([])
self.partialCurrentsHex_extAll: np.ndarray = np.array([])
self.partialCurrentsZAll: np.ndarray = np.array([])
@property
def fluxMoments(self):
"""
For DIF3D-Nodal, this property is equivalent to the attribute `fluxMomentsAll`. For
DIF3D-VARIANT, this property represents the even-parity flux moments.
Read-only property (there is no setter).
"""
nMom = self.metadata["nMom"]
return self.fluxMomentsAll[..., :nMom, :]
@property
def partialCurrentsHex(self):
"""
For DIF3D-Nodal, this property is almost always equivalent to the attribute
``partialCurrentsHex``. For DIF3D-VARIANT, this property returns the zeroth-order moment of
the outgoing radial currents.
Read-only property (there is no setter).
"""
return self.partialCurrentsHexAll[..., 0]
@property
def partialCurrentsHex_ext(self):
"""
For DIF3D-Nodal, this property is almost always equivalent to the attribute
`partialCurrentsHex_ext`. For DIF3D-VARIANT, this property returns the zeroth-order
moment of the incoming/ingoing radial currents.
Read-only property (there is no setter).
"""
return self.partialCurrentsHex_extAll[..., 0]
@property
def partialCurrentsZ(self):
"""
For DIF3D-Nodal, this property is almost always equivalent to the attribute
`partialCurrentsZ`. For DIF3D-VARIANT, this property returns the zeroth-order
moment of the axial currents.
Read-only property (there is no setter).
"""
return self.partialCurrentsZAll[..., 0]
class NhfluxStream(cccc.StreamWithDataContainer):
    """CCCC stream that reads/writes a DIF3D-Nodal (or DIF3D-VARIANT) NHFLUX file into an NHFLUX data container."""

    @staticmethod
    def _getDataContainer() -> NHFLUX:
        # Default container is the DIF3D-Nodal flavor (variant=False).
        return NHFLUX()

    def readWrite(self):
        """
        Read (or write) everything from (to) the DIF3D binary file NHFLUX.

        Read all surface-averaged partial currents, all planar moments, and the DIF3D nodal
        coordinate mapping system.

        Notes
        -----
        This method should be private but conflicts with ``_readWrite`` so we need a
        better name.

        The number of flux data sets processed is controlled by the
        ``numDataSetsToRead`` entry of the container metadata, not by a method
        parameter. Some NHFLUX/NAFLUX files, such as NAFLUX files written by
        SASSYS/DIF3D-K, contain more than one flux data set. Each data set
        overwrites the previous one on the NHFLUX class object, which will contain only the
        ``numDataSetsToRead-th`` data set. The first numDataSetsToRead-1 data sets are
        essentially skipped over.
        """
        self._rwFileID()
        self._rwBasicFileData1D()

        # This control info only exists for VARIANT. We can only process entries with 0 or 1.
        if self._metadata["variantFlag"] and self._metadata["iwnhfl"] == 2:
            msg = (
                "This reader can only read VARIANT NHFLUX files where 'iwnhfl'=0 (both "
                "fluxes and currents are present) or 'iwnhfl'=1 (only fluxes are present). "
            )
            raise ValueError(msg)

        # Read the hex ordering map between DIF3D nodal and DIF3D GEODST. Also read index
        # pointers to incoming partial currents on outer reactor surface (these don't
        # belong to any assembly). Incoming partial currents are non-zero due to flux
        # extrapolation
        self._rwGeodstCoordMap2D()

        # Number of energy groups
        ng = self._metadata["ngroup"]
        # Number of axial nodes (same for each assembly in DIF3D)
        nz = self._metadata["nintk"]
        # Number of XY partial currents on the boundary. Note that for the same model, this
        # number is not the same between Nodal and VARIANT; VARIANT has more.
        numPartialCurrentsHex_ext = self._metadata["npcxy"] - self._metadata["nintxy"] * self._metadata["nSurf"]

        # Typically, flux and current data has units of n/cm^2/s. However, when reading
        # an NHFLUX file produced by VARPOW (where 'iwnhfl'=1), the flux-only data has units
        # of W/cc (there is no current data written to the file).
        if self._data.fluxMomentsAll.size == 0:
            # Initialize using metadata info for reading
            totalMoments = (
                self._metadata["nMom"]
                if not self._metadata["variantFlag"]
                else (self._metadata["nMom"] + self._metadata["nMoms"])
            )
            self._data.fluxMomentsAll = np.zeros((self._metadata["nintxy"], nz, totalMoments, ng))
            if self._metadata["iwnhfl"] != 1:
                self._data.partialCurrentsHexAll = np.zeros(
                    (
                        self._metadata["nintxy"],
                        nz,
                        self._metadata["nSurf"],
                        ng,
                        self._metadata["nscoef"],
                    )
                )
                self._data.partialCurrentsHex_extAll = np.zeros(
                    (numPartialCurrentsHex_ext, nz, ng, self._metadata["nscoef"])
                )
                self._data.partialCurrentsZAll = np.zeros(
                    (self._metadata["nintxy"], nz + 1, 2, ng, self._metadata["nscoef"])
                )

        for _n in range(self._metadata["numDataSetsToRead"]):
            # Each record contains nodal data for ONE energy group in ONE axial core slice.
            # Must loop through all energy groups and all axial core slices.
            # The axial surface partial currents are indexed by axial surface (NOT by axial node),
            # so there are nz+1 records for z-surface currents

            # Loop through all energy groups: high-to-low for forward flux, low-to-high for
            # adjoint flux
            for g in range(ng):
                gEff = self._getEnergyGroupIndex(g)

                # Loop through axial nodes
                for z in range(nz):
                    # Process flux moments
                    self._data.fluxMomentsAll[:, z, :, gEff] = self._rwFluxMoments3D(
                        self._data.fluxMomentsAll[:, z, :, gEff]
                    )

                # Process currents
                if self._metadata["iwnhfl"] != 1:
                    # Loop through axial nodes
                    for z in range(nz):
                        (
                            self._data.partialCurrentsHexAll[:, z, :, gEff, :],
                            self._data.partialCurrentsHex_extAll[:, z, gEff, :],
                        ) = self._rwHexPartialCurrents4D(
                            self._data.partialCurrentsHexAll[:, z, :, gEff, :],
                            self._data.partialCurrentsHex_extAll[:, z, gEff, :],
                        )

                    # Loop through axial surfaces (NOT axial nodes, because there is a "+1")
                    for z in range(nz + 1):
                        self._data.partialCurrentsZAll[:, z, :, gEff, :] = self._rwZPartialCurrents5D(
                            self._data.partialCurrentsZAll[:, z, :, gEff, :]
                        )

    def _getNumOuterSurfacesHex(self):
        """
        Return the number of lateral hex surfaces on the outer core boundary.

        The word "outer" in the method name means along the outside of the core. Thus, this
        is the number of lateral hex surfaces on the outer core boundary (usually vacuum...internal
        reflective boundaries do NOT count).
        """
        # Both Nodal and VARIANT files should return the same number, but they are calculated
        # differently between the two codes
        if self._metadata["variantFlag"]:
            numOuterSurfacesHex = self._metadata["npcbdy"]
        else:
            # Nodal does not have an "npcbdy" metadata parameter, so numOuterSurfacesHex
            # must be calculated differently. Performing the same calculation below in VARIANT,
            # which is possible to do, can return a different number, so that is why
            # we cannot use the same calculation for both codes.
            numOuterSurfacesHex = self._metadata["npcxy"] - self._metadata["nintxy"] * self._metadata["nSurf"]
        return numOuterSurfacesHex

    def _rwFileID(self):
        """
        Read/write file id record.

        Notes
        -----
        The username, version, etc are embedded in this string but it's
        usually blank.
        """
        with self.createRecord() as record:
            self._metadata["label"] = record.rwString(self._metadata["label"], 28)

    def _rwBasicFileData1D(self):
        """Read basic data parameters (number of energy groups, assemblies, axial nodes, etc.)."""
        # Dummy values are stored because sometimes they get assigned
        # unexpected values anyway, and so we still want to preserve those values anyway
        if self._metadata["variantFlag"]:
            keys = FILE_SPEC_1D_KEYS + FILE_SPEC_1D_KEYS_VARIANT11 + tuple(f"IDUM{e:>02}" for e in range(1, 7))
        else:
            # NOTE: the inner tuple() wrapper is redundant (tuple of a tuple) but harmless.
            keys = FILE_SPEC_1D_KEYS + tuple(tuple(f"IDUM{e:>02}" for e in range(1, 12)))
        with self.createRecord() as record:
            self._metadata.update(record.rwImplicitlyTypedMap(keys, self._metadata))

    def _rwGeodstCoordMap2D(self):
        """
        Read/write core geometry indexing from the NHFLUX 2D block.

        This reads the 2-D (x,y) indexing for assemblies. geodstCoordMap maps DIF3D
        nodal hex indexing to DIF3D GEODST indexing.
        This DIF3D GEODST indexing is different than (but similar to) the MCNP GEODST ordering.

        For Nodal, let N be the number of assemblies. Let M be the number of
        "external hex surfaces" exposed to the outer reactor boundary (usually vacuum). M
        does NOT include reflective surfaces!

        N = self._metadata['nintxy']
        M = self._metadata['npcxy'] - self._metadata['nintxy']*6
        N*6 + M = self._metadata['npcxy']

        For VARIANT in hexagonal geometry, there are two additional datasets for outgoing
        and ingoing partial currents on the symmetric and sector xy-plane boundary.

        Examples
        --------
        geodstCoordMap[NodalIndex] = geodstIndex

        See Also
        --------
        nuclearDataIO.NHFLUX.__init__
        nuclearDataIO.NHFLUX._rwHexPartialCurrents4D
        nuclearDataIO.ISOTXS.read2D
        nuclearDataIO.SPECTR.read2D
        """
        with self.createRecord() as record:
            # Number of unique assemblies - this is N in the comments above
            nAssem = self._metadata["nintxy"]
            # Number of lateral surfaces per assembly (this is 6 for hexagonal cores)
            nSurf = self._metadata["nSurf"]
            numExternalSurfaces = self._getNumOuterSurfacesHex()

            # Initialize np arrays to store all node ordering (and node surface ordering)
            # data. We don't actually use incomingPointersToAllAssemblies (basically
            # equivalent to nearest neighbors indices), but it's here in case someone
            # needs it in the future.
            # Initialize data size when reading
            if self._data.incomingPointersToAllAssemblies.size == 0:
                # Index pointers to INCOMING partial currents on assemblies
                self._data.incomingPointersToAllAssemblies = np.zeros((nSurf, nAssem), dtype=int)
                # Index pointers to OUTGOING partial currents on core outer boundary
                self._data.externalCurrentPointers = np.zeros((numExternalSurfaces), dtype=int)
                # Index pointers to DIF3D GEODST ordering of each assembly
                self._data.geodstCoordMap = np.zeros(nAssem, dtype=int)

            self._data.incomingPointersToAllAssemblies = record.rwIntMatrix(
                self._data.incomingPointersToAllAssemblies, nAssem, nSurf
            )
            self._data.externalCurrentPointers = record.rwList(
                self._data.externalCurrentPointers, "int", numExternalSurfaces
            )
            self._data.geodstCoordMap = record.rwList(self._data.geodstCoordMap, "int", nAssem)

            # There is additional data to process for VARIANT
            if self._metadata["variantFlag"]:
                # Number of symmetry and sector surface pointers
                npcsto = self._metadata["npcsym"] + self._metadata["npcsec"]
                if self._data.outgoingPCSymSecPointers.size == 0:
                    self._data.outgoingPCSymSecPointers = np.zeros(npcsto, dtype=int)
                    self._data.ingoingPCSymSecPointers = np.zeros(npcsto, dtype=int)
                self._data.outgoingPCSymSecPointers = record.rwList(self._data.outgoingPCSymSecPointers, "int", npcsto)
                self._data.ingoingPCSymSecPointers = record.rwList(self._data.ingoingPCSymSecPointers, "int", npcsto)

    def _rwFluxMoments3D(self, contents):
        r"""
        Read/write multigroup flux moments from the NHFLUX 3D block.

        This reads/writes the planar moments for each DIF3D node on ONE x,y plane. The
        planar moments for DIF3D nodes on different x,y planes (different axial slices) are
        in a different 3D record, so this method must be repeatedly executed in order to
        process them all.

        Format is ``((FLUX(I,J),I=1,NMOM),J=1,NINTXY)`` so we must pass in ``NINTXY`` as
        the first item in the shape. However, the caller of this method wants the shape
        to be (nintxy, nMom) so we actually have to transpose it on the way in/out.
        nMom can also be nMoms when reading/writing for VARIANT.
        """
        nMom = self._metadata["nMom"]
        with self.createRecord() as record:
            result = record.rwDoubleMatrix(
                contents[:, :nMom].T,
                self._metadata["nintxy"],
                nMom,
            )
            contents[:, :nMom] = result.T

            # If we have VARIANT data, then we also need to process the odd-parity moments.
            if self._metadata["variantFlag"] and self._metadata["nMoms"] > 0:
                result = record.rwDoubleMatrix(
                    contents[:, nMom:].T,
                    self._metadata["nintxy"],
                    self._metadata["nMoms"],
                )
                contents[:, nMom:] = result.T
        return contents

    def _rwHexPartialCurrents4D(self, surfCurrents, externalSurfCurrents):
        r"""
        Read/write multigroup lateral partial OUTGOING currents from the NHFLUX 4D block.

        This reads all OUTGOING partial currents for all assembly block lateral surfaces
        at a fixed axial position. For a hexagonal core, there are 6 surfaces per assembly
        axial block. The data for the 2 axial surfaces of each block are in the 5D records.

        Each 4D record contains all the surface partial currents on ONE x,y plane. The
        surface data on different x,y planes (different axial slices) are in a different
        4D record, so this method must be repeatedly executed in order to process them all.

        If the reactor contains N assemblies and M exterior surfaces (surfaces adjacent to
        vacuum boundary), this record will contain N*6 + M partial currents. The N*6
        assembly OUTGOING partial currents are listed first, followed by the M INCOMING
        partial currents from the outer reactor edge.

        N = self._metadata['nintxy']
        M = self._metadata['npcxy'] - self._metadata['nintxy']*6
        N*6 + M = self._metadata['npcxy']

        Notes
        -----
        These data are harder to read with rwMatrix, though it could be done if we
        discarded the unwanted data at another level if that is much faster.
        """
        with self.createRecord() as record:
            nAssem = self._metadata["nintxy"]
            nSurf = self._metadata["nSurf"]
            # This is equal to one for Nodal diffusion theory, but greater than one for
            # VARIANT.
            nscoef = self._metadata["nscoef"]
            numPartialCurrentsHex_ext = self._metadata["npcxy"] - self._metadata["nintxy"] * self._metadata["nSurf"]

            # Loop through all lateral surfaces of all assemblies
            for i in range(nAssem):
                for j in range(nSurf):
                    for m in range(nscoef):
                        # OUTGOING partial currents on each lateral surface in each assembly.
                        # If m > 0, other NSCOEF options (i.e., half-angle integrated
                        # flux when reading DIF3D-Nodal data, and higher current moments
                        # when reading DIF3D-VARIANT data) are processed.
                        surfCurrents[i, j, m] = record.rwDouble(surfCurrents[i, j, m])

            for j in range(numPartialCurrentsHex_ext):
                for m in range(nscoef):
                    # INCOMING current at each surface of outer core boundary. If m > 0,
                    # other NSCOEF options (i.e., half-angle integrated flux when
                    # reading DIF3D-Nodal data, and higher current moments when reading
                    # DIF3D-VARIANT data) are processed.
                    externalSurfCurrents[j, m] = record.rwDouble(externalSurfCurrents[j, m])
        return surfCurrents, externalSurfCurrents

    def _rwZPartialCurrents5D(self, surfCurrents):
        """
        Read/write multigroup axial partial currents from the NHFLUX 5D block.

        Most other NHFLUX data is indexed by DIF3D node (each axial core slice in its own record).
        HOWEVER, "top" and "bottom" surfaces of each DIF3D node are instead indexed by axial
        surface. If there are Z axial nodes, then there are Z+1 axial surfaces. Thus, there
        are Z+1 5D records, while there are only Z 3D and Z 4D records.

        Each 5D record (each axial surface) contains two partial currents for each assembly position.
        The first is the UPWARD partial current, while the second is the DOWNWARD partial current.

        Returns
        -------
        surfCurrents : 3-D list of floats
            This contains all the upward and downward partial currents in all assemblies
            on ONE whole-core axial slice. The assemblies are ordered according to
            self.geodstCoordMap.

        See Also
        --------
        nuclearDataIO.NHFLUX._rwBasicFileData1D
        nuclearDataIO.NHFLUX._rwGeodstCoordMap2D
        """
        with self.createRecord() as record:
            nAssem = self._metadata["nintxy"]
            nSurf = 2
            nscoef = self._metadata["nscoef"]

            # Loop through all (up and down) partial currents on all hexes
            # These loops are in a different order than in the 4D record above!!!
            # Here we loop through surface FIRST and assemblies SECOND!!!
            for j in range(nSurf):
                for i in range(nAssem):
                    for m in range(nscoef):
                        # Outward partial current. For m > 0, other NSCOEF options
                        # (i.e., half-angle integrated flux when reading DIF3D-Nodal
                        # data, and higher current moments when reading DIF3D-VARIANT
                        # data) are processed.
                        surfCurrents[i, j, m] = record.rwDouble(surfCurrents[i, j, m])
        return surfCurrents

    def _getEnergyGroupIndex(self, g):
        """
        Real fluxes stored in NHFLUX have "normal" (or "forward") energy groups. Also see the
        subclass method NAFLUX.getEnergyGroupIndex().
        """
        return g
class NafluxStream(NhfluxStream):
    """
    NAFLUX is similar in format to the NHFLUX, but contains adjoint flux.

    It has reversed energy group ordering.
    """

    def _getEnergyGroupIndex(self, g):
        """Map group ``g`` to its reversed ("backward") position, since NAFLUX stores adjoint fluxes."""
        numGroups = self._metadata["ngroup"]
        return numGroups - 1 - g
class NhfluxStreamVariant(NhfluxStream):
    """
    Stream for the DIF3D-VARIANT version of NHFLUX.

    Notes
    -----
    Can be deleted after have the NHFLUX data container be the public interface.
    """

    @staticmethod
    def _getDataContainer() -> NHFLUX:
        # Identical to NhfluxStream except the container is flagged as VARIANT.
        container = NHFLUX(variant=True)
        return container
class NafluxStreamVariant(NafluxStream):
    """
    Stream for the DIF3D-VARIANT version of NAFLUX.

    Notes
    -----
    Can be deleted after have the NHFLUX data container be the public interface.
    """

    @staticmethod
    def _getDataContainer() -> NHFLUX:
        # Identical to NafluxStream except the container is flagged as VARIANT.
        container = NHFLUX(variant=True)
        return container
def getNhfluxReader(adjointFlag, variantFlag):
    """
    Return the appropriate DIF3D nodal flux binary file reader class, either NHFLUX (real) or
    NAFLUX (adjoint).

    Parameters
    ----------
    adjointFlag : bool
        True selects the adjoint-flux (NAFLUX) stream; False the real-flux (NHFLUX) one.
    variantFlag : bool
        True selects the DIF3D-VARIANT format stream; False the DIF3D-Nodal one.
    """
    readersByFlags = {
        (False, False): NhfluxStream,
        (False, True): NhfluxStreamVariant,
        (True, False): NafluxStream,
        (True, True): NafluxStreamVariant,
    }
    return readersByFlags[(bool(adjointFlag), bool(variantFlag))]
================================================
FILE: armi/nuclearDataIO/cccc/pmatrx.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module for reading PMATRX files which contain gamma productions from fission reactions.
See [GAMSOR]_ and [MC23]_.
.. [MC23] Lee, Changho, Jung, Yeon Sang, and Yang, Won Sik. MC2-3: Multigroup Cross Section
Generation Code for Fast Reactor Analysis Nuclear. United States: N. p., 2018. Web.
doi:10.2172/1483949. (`OSTI
<https://www.osti.gov/biblio/1483949>`__)
"""
import traceback
from armi import runLog
from armi.nuclearDataIO import cccc, xsLibraries, xsNuclides
from armi.utils import properties
def compare(lib1, lib2):
    """Compare two XSLibraries, and return True if equal, or False if not."""
    equal = True
    # Library-level properties first. We deliberately use `&=` (not `and`) so every
    # comparison runs and reports, even after the first mismatch. The attribute
    # comparison helper also unlocks immutable properties to avoid exceptions.
    for attrName in (
        "neutronEnergyUpperBounds",
        "gammaEnergyUpperBounds",
        "neutronDoseConversionFactors",
        "gammaDoseConversionFactors",
    ):
        equal &= xsLibraries.compareXSLibraryAttribute(lib1, lib2, attrName)

    # File-level metadata.
    equal &= lib1.pmatrxMetadata.compare(lib2.pmatrxMetadata, lib1, lib2)

    # Nuclide-level data, for nuclides present in both libraries.
    for nucName in set(lib1.nuclideLabels + lib2.nuclideLabels):
        nuc1 = lib1.get(nucName, None)
        nuc2 = lib2.get(nucName, None)
        if nuc1 is None or nuc2 is None:
            continue
        equal &= compareNuclideXS(nuc1, nuc2)
    return equal
def compareNuclideXS(nuc1, nuc2):
    """Compare the PMATRX data of two nuclides; return a falsy value when any attribute differs."""
    # Start from the metadata comparison, then AND in each attribute check.
    equal = nuc1.pmatrxMetadata.compare(nuc2.pmatrxMetadata, nuc1.container, nuc2.container)
    attrNames = (
        "neutronHeating",
        "neutronDamage",
        "gammaHeating",
        "isotropicProduction",
        "linearAnisotropicProduction",
        "nOrderProductionMatrix",
    )
    for attrName in attrNames:
        val1 = getattr(nuc1, attrName)
        val2 = getattr(nuc2, attrName)
        if properties.numpyHackForEqual(val1, val2):
            continue
        # Report every mismatching attribute rather than stopping at the first.
        runLog.important(
            "{} and {} have different `{}` attributes:\n{}\n{}".format(nuc1, nuc2, attrName, val1, val2)
        )
        equal &= False
    return equal
def addDummyNuclidesToLibrary(lib, dummyNuclides):
    """
    This method adds DUMMY nuclides to the current PMATRX library.

    Parameters
    ----------
    lib : obj
        PMATRX library object

    dummyNuclides: list
        List of DUMMY nuclide objects that will be copied and added to the PMATRX file

    Notes
    -----
    Since MC2-3 does not write DUMMY nuclide information for PMATRX files, this is necessary to provide a
    consistent set of nuclide-level data across all the nuclides in a
    :py:class:`~armi.nuclearDataIO.xsLibraries.XSLibrary`.

    Returns
    -------
    bool
        True when at least one dummy nuclide was added to ``lib``.
    """
    if not dummyNuclides:
        runLog.important("No dummy nuclide data provided to be added to {}".format(lib))
        return False
    if len(lib.xsIDs) > 1:
        runLog.warning(
            "Cannot add dummy nuclide data to PMATRX library {} containing data for more than 1 XS ID.".format(lib)
        )
        return False

    addedKeys = []
    for dummy in dummyNuclides:
        dummyKey = dummy.nucLabel + lib.xsIDs[0]
        if dummyKey in lib:
            # Already present; nothing to add for this label.
            continue
        runLog.debug("Adding {} nuclide data to {}".format(dummyKey, lib))
        newDummy = xsNuclides.XSNuclide(lib, dummyKey)
        # Dummy nuclides carry no real PMATRX data, so zero out all flags/sizes.
        for metaKey, metaValue in (
            ("hasNeutronHeatingAndDamage", False),
            ("maxScatteringOrder", 0),
            ("hasGammaHeating", False),
            ("numberNeutronXS", 0),
            ("collapsingRegionNumber", 0),
        ):
            newDummy.pmatrxMetadata[metaKey] = metaValue
        lib[dummyKey] = newDummy
        addedKeys.append(dummyKey)
    return any(addedKeys)
def readBinary(fileName):
    """Read a binary PMATRX file into an :py:class:`~armi.nuclearDataIO.xsLibraries.IsotxsLibrary` object."""
    return _read(fileName, fileMode="rb")
def readAscii(fileName):
    """Read an ASCII PMATRX file into an :py:class:`~armi.nuclearDataIO.xsLibraries.IsotxsLibrary` object."""
    return _read(fileName, fileMode="r")
def _read(fileName, fileMode):
lib = xsLibraries.IsotxsLibrary()
return _readWrite(
lib,
fileName,
fileMode,
lambda containerKey: xsNuclides.XSNuclide(lib, containerKey),
)
def writeBinary(lib, fileName):
    """Write the PMATRX data from an :py:class:`~armi.nuclearDataIO.xsLibraries.IsotxsLibrary`
    object to a binary file.
    """
    return _write(lib, fileName, fileMode="wb")
def writeAscii(lib, fileName):
    """Write the PMATRX data from an :py:class:`~armi.nuclearDataIO.xsLibraries.IsotxsLibrary`
    object to an ASCII file.
    """
    return _write(lib, fileName, fileMode="w")
def _write(lib, fileName, fileMode):
return _readWrite(lib, fileName, fileMode, lambda containerKey: lib[containerKey])
def _readWrite(lib, fileName, fileMode, getNuclideFunc):
with PmatrxIO(fileName, lib, fileMode, getNuclideFunc) as rw:
rw.readWrite()
return lib
class PmatrxIO(cccc.Stream):
    """CCCC stream that reads/writes an entire PMATRX file to/from an IsotxsLibrary."""

    def __init__(self, fileName, xsLib, fileMode, getNuclideFunc):
        cccc.Stream.__init__(self, fileName, fileMode)
        # Library being filled (read) or dumped (write), and its PMATRX metadata.
        self._lib = xsLib
        self._metadata = xsLib.pmatrxMetadata
        self._metadata.fileNames.append(fileName)
        # Callable mapping a nuclide label to an XSNuclide: creates new nuclides
        # when reading, looks up existing ones when writing.
        self._getNuclide = getNuclideFunc
        self._dummyNuclideKeysAddedToLibrary = []

    def _rwMessage(self):
        # Direction is derived from the file mode ("r..." = reading, else writing).
        runLog.debug("{} PMATRX data {}".format("Reading" if "r" in self._fileMode else "Writing", self))

    def readWrite(self):
        """Read and write PMATRX files.

        .. impl:: Tool to read and write PMATRX files.
            :id: I_ARMI_NUCDATA_PMATRX
            :implements: R_ARMI_NUCDATA_PMATRX

            Reading and writing PMATRX files is performed using the general
            nuclear data I/O functionalities described in
            :need:`I_ARMI_NUCDATA`. Reading/writing a PMATRX file is performed
            through the following steps:

            #. Read/write global information including:

                * Number of gamma energy groups
                * Number of neutron energy groups
                * Maximum scattering order
                * Maximum number of compositions
                * Maximum number of materials
                * Maximum number of regions

            #. Read/write energy group structure for neutrons and gammas

            #. Read/write dose conversion factors

            #. Read/write gamma production matrices for each nuclide, as well as
               other reaction constants related to neutron-gamma production.
        """
        self._rwMessage()
        # Unlock immutable library properties so they can be assigned during I/O;
        # they are re-locked in the finally block no matter what happens.
        properties.unlockImmutableProperties(self._lib)
        try:
            numNucs = self._rwFileID()
            self._rwGroupStructure()
            self._rwDoseConversionFactor()
            self._rwIsotopes(numNucs)
        except Exception:
            # Log the full traceback, then surface a single OSError to the caller.
            runLog.error(traceback.format_exc())
            raise OSError("Failed to read/write {}".format(self))
        finally:
            properties.lockImmutableProperties(self._lib)

    def _rwFileID(self):
        """Read/write the file ID record and return the number of nuclides in the file."""
        with self.createRecord() as record:
            for name in [
                "numberCollapsingSpatialRegions",
                "numGammaGroups",
                "numNeutronGroups",
            ]:
                self._metadata[name] = record.rwInt(self._metadata[name])
            self._metadata["hasInPlateData"] = record.rwBool(self._metadata["hasInPlateData"])
            # Nuclide count comes from the library when writing, from the file when reading.
            numNucs = record.rwInt(len(self._lib))
            self._metadata["hasDoseConversionFactor"] = record.rwBool(self._metadata["hasDoseConversionFactor"])
            for name in [
                "maxScatteringOrder",
                "maxNumberOfCompositions",
                "maxMaterials",
                "maxNumberOfRegions",
                "maxNumberOfCollapsingRegions",
                "_dummy1",
                "_dummy2",
            ]:
                self._metadata[name] = record.rwInt(self._metadata[name])
        return numNucs

    def _rwGroupStructure(self):
        """Read/write neutron and gamma group structures (upper bounds plus minimum energies)."""
        with self.createRecord() as record:
            self._lib.neutronEnergyUpperBounds = record.rwMatrix(
                self._lib.neutronEnergyUpperBounds, self._metadata["numNeutronGroups"]
            )
            self._metadata["minimumNeutronEnergy"] = record.rwFloat(self._metadata["minimumNeutronEnergy"])
            # The lower bound energy is included in this list. We'll drop it to maintain consistency with other
            # libs by holding only the upper bounds.
            self._lib.gammaEnergyUpperBounds = record.rwMatrix(
                self._lib.gammaEnergyUpperBounds, self._metadata["numGammaGroups"]
            )
            self._metadata["minimumGammaEnergy"] = record.rwFloat(self._metadata["minimumGammaEnergy"])

    def _rwDoseConversionFactor(self):
        """Read/write dose conversion factors; the record exists only when flagged in the file ID."""
        if self._metadata["hasDoseConversionFactor"]:
            with self.createRecord() as record:
                self._lib.neutronDoseConversionFactors = record.rwList(
                    self._lib.neutronDoseConversionFactors,
                    "float",
                    self._metadata["numNeutronGroups"],
                )
                self._lib.gammaDoseConversionFactors = record.rwList(
                    self._lib.gammaDoseConversionFactors,
                    "float",
                    self._metadata["numGammaGroups"],
                )

    def _rwIsotopes(self, numNucs):
        """Read/write the nuclide label directory, then each nuclide's data records."""
        with self.createRecord() as record:
            nuclideLabels = record.rwList(self._lib.nuclideLabels, "string", numNucs, 8)
            # NOTE(review): a constant list of 1000s is read/written here; its meaning is
            # not evident from this module -- confirm against the PMATRX specification.
            record.rwList([1000] * numNucs, "int", numNucs)
        numNeutronGroups = self._metadata["numNeutronGroups"]
        numGammaGroups = self._metadata["numGammaGroups"]
        for nucLabel in nuclideLabels:
            nuclide = self._getNuclide(nucLabel)
            nuclide.updateBaseNuclide()
            nuclideReader = _PmatrxNuclideIO(nuclide, self, numNeutronGroups, numGammaGroups)
            nuclideReader.rwNuclide()
            if "r" in self._fileMode:
                # only add nuclides to the library when reading
                self._lib[nucLabel] = nuclide

    def _rwCompositions(self):
        """Composition ("in-plate") data is not supported by this reader/writer."""
        if self._metadata["hasInPlateData"]:
            raise NotImplementedError()
class _PmatrxNuclideIO:
def __init__(self, nuclide, pmatrixIO, numNeutronGroups, numGammaGroups):
self._nuclide = nuclide
self._metadata = nuclide.pmatrxMetadata
self._pmatrixIO = pmatrixIO
self._numNeutronGroups = numNeutronGroups
self._numGammaGroups = numGammaGroups
def rwNuclide(self):
self._rwNuclideHeading()
self._rwNeutronHeatingAndDamage()
self._rwReactionXS()
self._rwGammaHeating()
self._rwCellAveragedProductionMatrix()
def _rwNuclideHeading(self):
with self._pmatrixIO.createRecord() as record:
self._metadata["hasNeutronHeatingAndDamage"] = record.rwBool(self._metadata["hasNeutronHeatingAndDamage"])
self._metadata["maxScatteringOrder"] = record.rwInt(self._metadata["maxScatteringOrder"])
self._metadata["hasGammaHeating"] = record.rwBool(self._metadata["hasGammaHeating"])
self._metadata["numberNeutronXS"] = record.rwInt(self._metadata["numberNeutronXS"])
self._metadata["collapsingRegionNumber"] = record.rwInt(self._metadata["collapsingRegionNumber"])
def _rwNeutronHeatingAndDamage(self):
if not self._metadata["hasNeutronHeatingAndDamage"]:
return
with self._pmatrixIO.createRecord() as record:
self._nuclide.neutronHeating = record.rwMatrix(self._nuclide.neutronHeating, self._numNeutronGroups)
self._nuclide.neutronDamage = record.rwMatrix(self._nuclide.neutronDamage, self._numNeutronGroups)
def _rwReactionXS(self):
numActivationXS = self._metadata["numberNeutronXS"]
pmatrixParams = self._metadata
activationXS = self._metadata["activationXS"] = self._metadata["activationXS"] or [None] * numActivationXS
activationMT = self._metadata["activationMT"] = self._metadata["activationMT"] or [None] * numActivationXS
activationMTU = self._metadata["activationMTU"] = self._metadata["activationMTU"] or [None] * numActivationXS
for xsNum in range(numActivationXS):
with self._pmatrixIO.createRecord() as record:
pmatrixParams["activationXS"][xsNum] = record.rwList(activationXS[xsNum], self._numNeutronGroups)
pmatrixParams["activationMT"][xsNum] = record.rwInt(activationMT[xsNum])
pmatrixParams["activationMTU"][xsNum] = record.rwInt(activationMTU[xsNum])
def _rwGammaHeating(self):
if not self._metadata["hasGammaHeating"]:
return
with self._pmatrixIO.createRecord() as record:
self._nuclide.gammaHeating = record.rwMatrix(self._nuclide.gammaHeating, self._numGammaGroups)
def _rwCellAveragedProductionMatrix(self):
for lrd in range(1, 1 + self._metadata["maxScatteringOrder"]):
with self._pmatrixIO.createRecord() as record:
prodMatrix = self._getProductionMatrix(lrd)
prodMatrix = record.rwMatrix(prodMatrix, self._numNeutronGroups, self._numGammaGroups)
self._setProductionMatrix(lrd, prodMatrix)
def _getProductionMatrix(self, order):
if order == 1:
return self._nuclide.isotropicProduction
elif order == 2:
return self._nuclide.linearAnisotropicProduction
else:
return self._nuclide.nOrderProductionMatrix[order]
def _setProductionMatrix(self, order, matrix):
if order == 1:
self._nuclide.isotropicProduction = matrix
elif order == 2:
self._nuclide.linearAnisotropicProduction = matrix
else:
self._nuclide.nOrderProductionMatrix[order] = matrix
================================================
FILE: armi/nuclearDataIO/cccc/pwdint.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Read/write a CCCC PWDINT power density definition file.
PWDINT files power density at mesh intervals.
File format definition is from [CCCC-IV]_.
Examples
--------
>>> pwr = pwdint.readBinary("PWDINT")
>>> pwdint.writeBinary(pwr, "PWDINT2")
"""
import numpy as np
from armi.nuclearDataIO import cccc
# Canonical PWDINT file name.
PWDINT = "PWDINT"

# See CCCC-IV documentation for definitions
FILE_SPEC_1D_KEYS = (
    "TIME",
    "POWER",
    "VOL",
    "NINTI",  # number of mesh intervals in the first (i) dimension
    "NINTJ",  # number of mesh intervals in the second (j) dimension
    "NINTK",  # number of mesh intervals in the third (k) dimension
    "NCY",
    "NBLOK",  # blocking factor for the 2D (power density) records
)
class PwdintData(cccc.DataContainer):
    """
    Data representation that can be read from or written to a PWDINT file.

    Holds a mapping from the (i, j, k) GEODST mesh to power density in
    Watts/cm^3 in the ``powerDensity`` array.
    """

    def __init__(self):
        super().__init__()
        # Power density on the (i, j, k) GEODST mesh; sized when a file is read.
        self.powerDensity = np.array([])
class PwdintStream(cccc.StreamWithDataContainer):
    """
    Stream for reading to/writing from with PWDINT data.

    Parameters
    ----------
    power : PwdintData
        Data structure
    fileName: str
        path to pwdint file
    fileMode: str
        string indicating if ``fileName`` is being read or written, and
        in ascii or binary format
    """

    @staticmethod
    def _getDataContainer() -> PwdintData:
        # Fresh empty container; its arrays are sized during the read.
        return PwdintData()

    def readWrite(self):
        """
        Step through the structure of a PWDINT file and read/write it.

        Logic to control which records will be present is here, which
        comes directly off the File specification.
        """
        self._rwFileID()
        self._rw1DRecord()
        self._rw2DRecord()

    def _rwFileID(self):
        """Read/write the CCCC file identification record (HNAME, HUSE, version, mult)."""
        with self.createRecord() as record:
            self._metadata["hname"] = record.rwString(self._metadata["hname"], 8)
            for name in ["huse", "huse2"]:
                self._metadata[name] = record.rwString(self._metadata[name], 6)
            self._metadata["version"] = record.rwInt(self._metadata["version"])
            self._metadata["mult"] = record.rwInt(self._metadata["mult"])

    def _rw1DRecord(self):
        """Read/write File specifications on 1D record."""
        with self.createRecord() as record:
            self._metadata.update(record.rwImplicitlyTypedMap(FILE_SPEC_1D_KEYS, self._metadata))

    def _rw2DRecord(self):
        """Read/write power density by mesh point."""
        imax = self._metadata["NINTI"]
        jmax = self._metadata["NINTJ"]
        kmax = self._metadata["NINTK"]
        nblck = self._metadata["NBLOK"]
        if self._data.powerDensity.size == 0:
            # initialize all-zeros here before reading now that we
            # have the matrix dimension metadata available.
            self._data.powerDensity = np.zeros(
                (imax, jmax, kmax),
                dtype=np.float32,
            )
        for ki in range(kmax):
            for bi in range(nblck):
                # Each axial plane is split into nblck j-bands; jL/jU are this
                # band's inclusive j bounds, one CCCC record per band.
                jL, jU = cccc.getBlockBandwidth(bi + 1, jmax, nblck)
                with self.createRecord() as record:
                    self._data.powerDensity[:, jL : jU + 1, ki] = record.rwMatrix(
                        self._data.powerDensity[:, jL : jU + 1, ki],
                        jU - jL + 1,
                        imax,
                    )
# Module-level convenience aliases so callers can write, e.g.,
# ``pwdint.readBinary("PWDINT")`` instead of going through the stream class.
readBinary = PwdintStream.readBinary
readAscii = PwdintStream.readAscii
writeBinary = PwdintStream.writeBinary
writeAscii = PwdintStream.writeAscii
================================================
FILE: armi/nuclearDataIO/cccc/rtflux.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Read and write the Regular Total flux from a RTFLUX CCCC interface file.
RTFLUX is a CCCC standard data file for storing multigroup total flux on a mesh of any
geometry type. It is defined in [CCCC-IV]_.
ATFLUX is in the same format but holds adjoint flux rather than regular flux.
Examples
--------
>>> flux = rtflux.RtfluxStream.readBinary("RTFLUX")
>>> rtflux.RtfluxStream.writeBinary(flux, "RTFLUX2")
>>> adjointFlux = rtflux.AtfluxStream.readBinary("ATFLUX")
See Also
--------
NHFLUX
Reads/write nodal hex flux moments
RZFLUX
Reads/writes total fluxes from zones
"""
import numpy as np
from armi.nuclearDataIO import cccc
RTFLUX = "RTFLUX"  # regular (forward) flux file name
ATFLUX = "ATFLUX"  # adjoint flux file name (same format, reversed groups)
# See CCCC-IV documentation for definitions
# Keys of the 1D file-specification record, in the order they appear on disk.
FILE_SPEC_1D_KEYS = (
    "NDIM",
    "NGROUP",
    "NINTI",
    "NINTJ",
    "NINTK",
    "ITER",
    "EFFK",
    "POWER",
    "NBLOK",
)
class RtfluxData(cccc.DataContainer):
    """
    Multigroup flux as a function of i, j, k, and g indices.

    The metadata also contains the power and k-eff. This is the data
    structure that is read from or written to a RTFLUX file.
    """

    def __init__(self):
        super().__init__()
        # Maps i,j,k,g indices to total real or adjoint flux in n/cm^2-s.
        self.groupFluxes: np.ndarray = np.array([])
class RtfluxStream(cccc.StreamWithDataContainer):
    """
    Stream for reading/writing a RTFLUX or ATFLUX file.

    Parameters
    ----------
    flux : RtfluxData
        Data structure
    fileName: str
        path to RTFLUX file
    fileMode: str
        string indicating if ``fileName`` is being read or written, and
        in ascii or binary format
    """

    @staticmethod
    def _getDataContainer() -> RtfluxData:
        # Factory used by the base stream to build the container it fills.
        return RtfluxData()

    def readWrite(self):
        """Step through the structure of the file and read/write it."""
        self._rwFileID()
        self._rw1DRecord()
        # NDIM selects the on-disk layout of the flux data itself.
        if self._metadata["NDIM"] == 1:
            self._rw2DRecord()
        elif self._metadata["NDIM"] >= 2:
            self._rw3DRecord()
        else:
            raise ValueError(f"Invalid NDIM value {self._metadata['NDIM']} in {self}.")

    def _rwFileID(self):
        """
        Read/write file id record.

        Notes
        -----
        The username, version, etc are embedded in this string but it's
        usually blank.
        """
        with self.createRecord() as record:
            self._metadata["label"] = record.rwString(self._metadata["label"], 28)

    def _rw1DRecord(self):
        """Read/write File specifications on 1D record."""
        with self.createRecord() as record:
            self._metadata.update(record.rwImplicitlyTypedMap(FILE_SPEC_1D_KEYS, self._metadata))

    def _rw2DRecord(self):
        """Read/write 1-dimensional regular total flux."""
        raise NotImplementedError("1-D RTFLUX files are not yet implemented.")

    def _rw3DRecord(self):
        """
        Read/write multi-dimensional regular total flux.

        The records contain blocks of values in the i-j planes.
        """
        ng = self._metadata["NGROUP"]
        imax = self._metadata["NINTI"]
        jmax = self._metadata["NINTJ"]
        kmax = self._metadata["NINTK"]
        nblck = self._metadata["NBLOK"]
        if self._data.groupFluxes.size == 0:
            # Reading: allocate the flux array now that dimensions are known.
            self._data.groupFluxes = np.zeros((imax, jmax, kmax, ng))
        for gi in range(ng):
            # gEff accounts for the reversed group ordering of adjoint
            # (ATFLUX) files; see the subclass override below.
            gEff = self.getEnergyGroupIndex(gi)
            for k in range(kmax):
                # data in i-j plane may be blocked
                for bi in range(nblck):
                    # compute blocking parameters
                    jLow, jUp = cccc.getBlockBandwidth(bi + 1, jmax, nblck)
                    numZonesInBlock = jUp - jLow + 1
                    with self.createRecord() as record:
                        # pass in shape in fortran (read) order
                        self._data.groupFluxes[:, jLow : jUp + 1, k, gEff] = record.rwDoubleMatrix(
                            self._data.groupFluxes[:, jLow : jUp + 1, k, gEff],
                            numZonesInBlock,
                            imax,
                        )

    def getEnergyGroupIndex(self, g):
        r"""
        Real fluxes stored in RTFLUX have "normal" (or "forward") energy groups.
        Also see the subclass method ATFLUX.getEnergyGroupIndex().

        0 based, so if NG=33 and you want the third group, this returns 2.
        """
        return g
class AtfluxStream(RtfluxStream):
    r"""
    Stream for an ATFLUX file.

    Identical in format to the RTFLUX file, except that it contains the
    adjoint flux and has reversed energy group ordering.
    """

    def getEnergyGroupIndex(self, g):
        r"""
        Adjoint fluxes stored in ATFLUX have "reversed" (or "backward") energy groups.

        0 based, so if NG=33 and you want the third group (g=2), this returns 30.
        """
        return self._metadata["NGROUP"] - 1 - g
def getFDFluxReader(adjointFlag):
    r"""
    Return the appropriate DIF3D finite-difference flux binary file reader class.

    Parameters
    ----------
    adjointFlag : bool
        If truthy, return the adjoint (ATFLUX) stream; otherwise the
        real-flux (RTFLUX) stream.
    """
    return AtfluxStream if adjointFlag else RtfluxStream
================================================
FILE: armi/nuclearDataIO/cccc/rzflux.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module for reading/writing RZFLUX CCCC interface files.
RZFLUX contains Regular Zone Flux, or multigroup flux by neutron energy group
in each zone. It also can hold some convergence and neutron balance information.
The format is defined in [CCCC-IV]_.
Examples
--------
>>> flux = rzflux.readBinary("RZFLUX")
>>> flux.groupFluxes[2, 0] *= 1.1
>>> rzflux.writeBinary(flux, "RZFLUX2")
>>> rzflux.writeAscii(flux, "RZFLUX2.ascii")
"""
from enum import Enum
import numpy as np
from armi.nuclearDataIO import cccc
# Default file name for this CCCC interface file type.
RZFLUX = "RZFLUX"
# See CCCC-IV documentation for definitions
# Keys of the 1D file-specification record, in the order they appear on disk.
FILE_SPEC_1D_KEYS = (
    "TIME",
    "POWER",
    "VOL",
    "EFFK",
    "EIVS",
    "DKDS",
    "TNL",
    "TNA",
    "TNSL",
    "TNBL",
    "TNBAL",
    "TNCRA",
    "X1",
    "X2",
    "X3",
    "NBLOK",
    "ITPS",
    "NZONE",
    "NGROUP",
    "NCY",
)
class Convergence(Enum):
    """Convergence behavior flags for ITPS from RZFLUX file."""

    # Integer values correspond to the ITPS entry of the 1D record.
    NO_ITERATIONS = 0
    CONVERGED = 1
    CONVERGING = 2
    DIVERGING = 3
class RzfluxData(cccc.DataContainer):
    """
    Container for data read from or written to a RZFLUX file.

    Notes
    -----
    Plays the same role for RZFLUX files that an IsotxsLibrary plays for
    ISOTXS files.
    """

    def __init__(self):
        super().__init__()
        # 2D group-by-zone flux data; allocated lazily once dimensions are known.
        self.groupFluxes = None
class RzfluxStream(cccc.StreamWithDataContainer):
    """
    Stream for reading/writing RZFLUX data.

    Parameters
    ----------
    flux : RzfluxData
        Data structure
    fileName: str
        path to RZFLUX file
    fileMode: str
        string indicating if ``fileName`` is being read or written, and
        in ascii or binary format
    """

    @staticmethod
    def _getDataContainer() -> RzfluxData:
        # Factory used by the base stream to build the container it fills.
        return RzfluxData()

    def readWrite(self):
        """Process the file's records in their fixed on-disk order."""
        self._rwFileID()
        self._rw1DRecord()
        self._rw2DRecord()

    def _rwFileID(self):
        """
        Read/write the file identification record.

        Notes
        -----
        The username, version, etc are embedded in this string but it's
        usually blank. The number 28 was actually obtained from
        a hex editor and may be code specific.
        """
        with self.createRecord() as record:
            self._metadata["label"] = record.rwString(self._metadata["label"], 28)

    def _rw1DRecord(self):
        """Read/write the 1D file-specification record."""
        with self.createRecord() as record:
            specs = record.rwImplicitlyTypedMap(FILE_SPEC_1D_KEYS, self._metadata)
            self._metadata.update(specs)

    def _rw2DRecord(self):
        """
        Read/write the multigroup fluxes (n/cm^2-s) into a NxG matrix.

        Notes
        -----
        Zones are blocked into multiple records, so each block of zones gets
        its own record here. ``rwMatrix`` reverses the indices into FORTRAN
        data order, so be very careful with the indices.
        """
        numZones = self._metadata["NZONE"]
        numGroups = self._metadata["NGROUP"]
        numBlocks = self._metadata["NBLOK"]
        if self._data.groupFluxes is None:
            # Reading: the dimension metadata is now available, so the
            # all-zeros matrix can finally be allocated.
            self._data.groupFluxes = np.zeros((numGroups, numZones), dtype=np.float32)
        for blockIndex in range(numBlocks):
            jLow, jUp = cccc.getBlockBandwidth(blockIndex + 1, numZones, numBlocks)
            with self.createRecord() as record:
                # pass in shape in fortran (read) order
                self._data.groupFluxes[:, jLow : jUp + 1] = record.rwMatrix(
                    self._data.groupFluxes[:, jLow : jUp + 1],
                    jUp - jLow + 1,
                    numGroups,
                )
# Module-level convenience aliases so callers can write, e.g.,
# ``rzflux.readBinary("RZFLUX")`` instead of going through the stream class.
readBinary = RzfluxStream.readBinary
readAscii = RzfluxStream.readAscii
writeBinary = RzfluxStream.writeBinary
writeAscii = RzfluxStream.writeAscii
================================================
FILE: armi/nuclearDataIO/cccc/tests/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
================================================
FILE: armi/nuclearDataIO/cccc/tests/test_cccc.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test CCCC."""
import io
import unittest
from armi.nuclearDataIO import cccc
class CcccIOStreamTests(unittest.TestCase):
    def test_initWithFileMode(self):
        """All four valid file modes construct a Stream; anything else raises."""
        for mode in ("rb", "wb", "r", "w"):
            self.assertIsInstance(cccc.Stream("some-file", mode), cccc.Stream)
        with self.assertRaises(KeyError):
            cccc.Stream("some-file", "bacon")
class CcccBinaryRecordTests(unittest.TestCase):
    """
    Round-trip the binary record writer/reader through an in-memory stream.

    Subclasses override ``writerClass``/``readerClass``/``streamCls`` to reuse
    these tests for other record formats.
    """

    @classmethod
    def setUpClass(cls):
        cls.writerClass = cccc.BinaryRecordWriter
        cls.readerClass = cccc.BinaryRecordReader

    def setUp(self):
        self.streamCls = io.BytesIO

    def test_writeAndReadSimpleIntegerRecord(self):
        expected = 42
        sink = self.streamCls()
        with self.writerClass(sink) as writer:
            writer.rwInt(expected)
        with self.readerClass(self.streamCls(sink.getvalue())) as reader:
            self.assertEqual(writer.numBytes, reader.numBytes)
            self.assertEqual(expected, reader.rwInt(None))
        self.assertEqual(4, writer.numBytes)

    def test_writeAndReadSimpleFloatRecord(self):
        sink = self.streamCls()
        expected = -33.322222
        with self.writerClass(sink) as writer:
            writer.rwFloat(expected)
        with self.readerClass(self.streamCls(sink.getvalue())) as reader:
            self.assertEqual(writer.numBytes, reader.numBytes)
            self.assertAlmostEqual(expected, reader.rwFloat(None), 5)
        self.assertEqual(4, writer.numBytes)

    def test_writeAndReadSimpleStringRecord(self):
        sink = self.streamCls()
        expected = "Howdy, partner!"
        nChars = 8 * 8
        with self.writerClass(sink) as writer:
            writer.rwString(expected, nChars)
        with self.readerClass(self.streamCls(sink.getvalue())) as reader:
            self.assertEqual(writer.numBytes, reader.numBytes)
            self.assertEqual(expected, reader.rwString(None, nChars))
        self.assertEqual(nChars, writer.numBytes)

    def test_readPartialRecord(self):
        """Not reading an entire record raises an exception."""
        # Write two integers into one record, then read back only one of them.
        sink = self.streamCls()
        expected = 99
        with self.writerClass(sink) as writer:
            writer.rwInt(expected)
            writer.rwInt(expected)
        self.assertEqual(8, writer.numBytes)
        with self.assertRaises(BufferError):
            with self.readerClass(self.streamCls(sink.getvalue())) as reader:
                self.assertEqual(expected, reader.rwInt(None))

    def test_readingBeyondRecordRaisesException(self):
        # Write a single integer into a record, then attempt to read two.
        sink = self.streamCls()
        expected = 77
        with self.writerClass(sink) as writer:
            writer.rwInt(expected)
        self.assertEqual(4, writer.numBytes)
        with self.assertRaises(BufferError):
            with self.readerClass(self.streamCls(sink.getvalue())) as reader:
                self.assertEqual(expected, reader.rwInt(None))
                self.assertEqual(4, reader.rwInt(None))
class CcccAsciiRecordTests(CcccBinaryRecordTests):
    """Repeat every CcccBinaryRecordTests case with the ASCII readers/writers."""

    @classmethod
    def setUpClass(cls):
        cls.readerClass = cccc.AsciiRecordReader
        cls.writerClass = cccc.AsciiRecordWriter

    def setUp(self):
        # ASCII records round-trip through text streams rather than byte streams.
        self.streamCls = io.StringIO
================================================
FILE: armi/nuclearDataIO/cccc/tests/test_compxs.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the COMPXS reader/writer with a simple problem."""
import os
import unittest
import numpy as np
from scipy.sparse import csc_matrix
from armi import nuclearDataIO
from armi.nuclearDataIO.cccc import compxs
from armi.nuclearDataIO.xsLibraries import CompxsLibrary
from armi.tests import COMPXS_PATH
from armi.utils.directoryChangers import TemporaryDirectoryChanger
class TestCompxs(unittest.TestCase):
    """Test the compxs reader/writer."""

    @property
    def binaryWritePath(self):
        # Per-test output file name for the binary round-trip test.
        # NOTE(review): os.path.join with a single argument is a no-op here —
        # this is just the bare file name; confirm whether a directory was
        # intended.
        return os.path.join(self._testMethodName + "compxs-b")

    @property
    def asciiWritePath(self):
        # Per-test output file name for the ascii round-trip test.
        return os.path.join(self._testMethodName + "compxs-a.txt")

    @classmethod
    def setUpClass(cls):
        # Load the shared fixture library once for all tests in this class.
        try:
            cls.lib = compxs.readAscii(COMPXS_PATH)
        except Exception as ee:
            raise Exception("Failed to load COMPXS ascii.\n{}".format(ee))
        # Region index 1 is the fissile region probed by the XS tests below.
        cls.fissileRegion = cls.lib.regions[1]
        cls.numGroups = cls.lib.compxsMetadata["numGroups"]

    def test_libraryData(self):
        """Test library data including energy group information and number of compositions."""
        self.assertEqual(11, self.numGroups)
        self.assertEqual(14190675.0, max(self.lib.neutronEnergyUpperBounds))
        self.assertAlmostEqual(0.41745778918, min(self.lib.neutronEnergyUpperBounds))

    def test_regionPrimaryXS(self):
        """Test the primary cross sections for the second region - fissile."""
        # Expected macroscopic XS by reaction type; 11 values each (one per group).
        expectedMacros = {
            "absorption": [
                0.00810444,
                0.0049346,
                0.00329084,
                0.00500318,
                0.00919719,
                0.01548523,
                0.02816499,
                0.04592259,
                0.09402685,
                0.12743879,
                0.20865865,
            ],
            "fission": [
                0.00720288,
                0.00398085,
                0.00181345,
                0.00236554,
                0.00341723,
                0.00564286,
                0.0110835,
                0.0211668,
                0.04609869,
                0.09673319,
                0.16192732,
            ],
            "total": [
                0.18858715,
                0.18624092,
                0.22960965,
                0.27634201,
                0.33255093,
                0.61437815,
                0.42582573,
                0.48091191,
                0.4931102,
                0.49976887,
                0.58214497,
            ],
            "removal": [
                0.07268185,
                0.03577923,
                0.01127517,
                0.01003666,
                0.01254067,
                0.02686466,
                0.02881869,
                0.04606618,
                0.09605395,
                0.13462841,
                0.20865865,
            ],
            "transport": [
                0.10812569,
                0.13096095,
                0.18227532,
                0.24610402,
                0.29647433,
                0.55842311,
                0.40818328,
                0.45512788,
                0.45669781,
                0.49153138,
                0.55067248,
            ],
            "nuSigF": [
                0.02247946,
                0.01047702,
                0.00449566,
                0.00576889,
                0.00829842,
                0.01373361,
                0.02697533,
                0.05151573,
                0.11224934,
                0.23570964,
                0.39456832,
            ],
            # chi is stored as a column (one single-element row per group).
            "chi": [
                [1.38001099e-01],
                [6.28044390e-01],
                [2.04412257e-01],
                [2.63437497e-02],
                [2.85959793e-03],
                [3.03098935e-04],
                [3.19825784e-05],
                [3.42715844e-06],
                [3.00034836e-07],
                [3.87667231e-08],
                [2.66151779e-13],
            ],
        }
        for xsName, expectedXS in expectedMacros.items():
            actualXS = self.fissileRegion.macros[xsName]
            self.assertTrue(np.allclose(actualXS, expectedXS))

    def test_totalScatterMatrix(self):
        """
        Test the total scattering matrix by comparing the sparse components.

        Sparse matrices can be constructed from three vectors: data, indices, and indptr.
        For column matrix, the row indices for column ``j`` are stored in
        ``indices[indptr[j]:indptr[j + 1]]`` and the corresponding data is stored in
        ``data[indptr[j]:indptr[j + 1]]``.

        See Also
        --------
        scipy.sparse.csc_matrix
        """
        expectedSparseData = np.array(
            [
                1.15905297e-01,
                1.50461698e-01,
                4.19181830e-02,
                2.18334481e-01,
                2.66726391e-02,
                2.06841438e-02,
                2.66305350e-01,
                7.93398724e-03,
                3.74972053e-03,
                2.82068371e-03,
                3.20010257e-01,
                4.98916288e-03,
                4.64327778e-05,
                3.62943322e-04,
                2.33116653e-04,
                5.87513494e-01,
                3.33728477e-03,
                4.05355062e-05,
                3.40557886e-06,
                5.05978110e-05,
                2.44368007e-05,
                3.97007043e-01,
                1.13794357e-02,
                5.81324838e-06,
                3.57958695e-06,
                4.21100811e-07,
                6.02755319e-06,
                3.70765519e-06,
                4.34845744e-01,
                6.53692627e-04,
                3.65838392e-07,
                1.91840932e-07,
                6.47891881e-08,
                4.70903065e-07,
                7.53010883e-07,
                3.97056267e-01,
                1.43584939e-04,
                1.69959524e-08,
                7.63482393e-09,
                1.07996799e-08,
                7.79766262e-08,
                1.42976480e-07,
                3.65140459e-01,
                2.02709238e-03,
                1.62021799e-09,
                1.25812112e-09,
                3.39504415e-09,
                2.13443401e-06,
                7.75326455e-06,
                3.73486301e-01,
                7.18962870e-03,
                4.72605255e-15,
                5.11975260e-13,
                1.25417930e-08,
                4.57563838e-08,
            ]
        )
        expectedSparseIndices = [
            0,
            1,
            0,
            2,
            1,
            0,
            3,
            2,
            1,
            0,
            4,
            3,
            2,
            1,
            0,
            5,
            4,
            3,
            2,
            1,
            0,
            6,
            5,
            4,
            3,
            2,
            1,
            0,
            7,
            6,
            4,
            3,
            2,
            1,
            0,
            8,
            7,
            4,
            3,
            2,
            1,
            0,
            9,
            8,
            4,
            3,
            2,
            1,
            0,
            10,
            9,
            4,
            2,
            1,
            0,
        ]
        expectedSparseIndptr = [0, 1, 3, 6, 10, 15, 21, 28, 35, 42, 49, 55]
        actualTotalScatter = self.fissileRegion.macros.totalScatter.toarray()
        expectedTotalScatter = csc_matrix(
            (expectedSparseData, expectedSparseIndices, expectedSparseIndptr),
            actualTotalScatter.shape,
        ).toarray()
        self.assertTrue(np.allclose(actualTotalScatter, expectedTotalScatter))

    def test_binaryRW(self):
        """Test to make sure the binary read/writer reads/writes the exact same library."""
        with TemporaryDirectoryChanger():
            compxs.writeBinary(self.lib, self.binaryWritePath)
            self.assertTrue(compxs.compare(self.lib, compxs.readBinary(self.binaryWritePath)))

    def test_asciiRW(self):
        """Test to make sure the ascii reader/writer reads/writes the exact same library."""
        with TemporaryDirectoryChanger():
            compxs.writeAscii(self.lib, self.asciiWritePath)
            self.assertTrue(compxs.compare(self.lib, compxs.readAscii(self.asciiWritePath)))

    def test_mergeCompxsLibraries(self):
        """Test to verify the compxs merging returns a library with new regions."""
        someLib = CompxsLibrary()
        someLib.merge(self.lib)
        self.assertEqual(len(self.lib.regions), len(someLib.regions))
        self.assertTrue(self.lib.compxsMetadata.compare(someLib.compxsMetadata, self.lib, someLib))

    def test_getCOMPXSFileName(self):
        # The cycle number, when given, is appended to the base file name.
        self.assertEqual(nuclearDataIO.getExpectedCOMPXSFileName(cycle=0), "COMPXS-c0")
        self.assertEqual(nuclearDataIO.getExpectedCOMPXSFileName(cycle=1), "COMPXS-c1")
        self.assertEqual(nuclearDataIO.getExpectedCOMPXSFileName(cycle=23), "COMPXS-c23")
        self.assertEqual(nuclearDataIO.getExpectedCOMPXSFileName(), "COMPXS")
================================================
FILE: armi/nuclearDataIO/cccc/tests/test_dif3d.py
================================================
# Copyright 2023 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test reading/writing of DIF3D binary input."""
import os
import unittest
from armi.nuclearDataIO.cccc import dif3d
from armi.utils.directoryChangers import TemporaryDirectoryChanger
THIS_DIR = os.path.dirname(__file__)
# DIF3D input deck used to generate the binary fixture below.
SIMPLE_HEXZ_INP = os.path.join(THIS_DIR, "../../tests", "simple_hexz.inp")
# Binary DIF3D file generated from that input with dif3d.exe v11.0r3284.
SIMPLE_HEXZ_DIF3D = os.path.join(THIS_DIR, "fixtures", "simple_hexz.dif3d")
class TestDif3dSimpleHexz(unittest.TestCase):
    """Verify each record of a known DIF3D binary fixture, and its round-trip."""

    @classmethod
    def setUpClass(cls):
        """
        Load DIF3D data from binary file. This binary file was generated by running
        dif3d.exe v11.0r3284 on the SIMPLE_HEXZ_INP file above (and renaming the DIF3D
        binary file to simple_hexz.dif3d).
        """
        cls.df = dif3d.Dif3dStream.readBinary(SIMPLE_HEXZ_DIF3D)

    def test__rwFileID(self):
        """Verify the file identification info.

        .. test:: Test reading DIF3D files.
            :id: T_ARMI_NUCDATA_DIF3D0
            :tests: R_ARMI_NUCDATA_DIF3D
        """
        self.assertEqual(self.df.metadata["HNAME"], "DIF3D")
        self.assertEqual(self.df.metadata["HUSE1"], "")
        self.assertEqual(self.df.metadata["HUSE2"], "")
        self.assertEqual(self.df.metadata["VERSION"], 1)

    def test__rwFile1DRecord(self):
        """Verify the rest of the metadata.

        .. test:: Test reading DIF3D files.
            :id: T_ARMI_NUCDATA_DIF3D1
            :tests: R_ARMI_NUCDATA_DIF3D
        """
        # The title is stored as fixed-width A6 fields; unused fields are blank.
        TITLE_A6 = ["3D Hex", "-Z to", "genera", "te NHF", "LUX fi", "le"]
        EXPECTED_TITLE = TITLE_A6 + [""] * 5
        for i in range(dif3d.TITLE_RANGE):
            self.assertEqual(self.df.metadata[f"TITLE{i}"], EXPECTED_TITLE[i])
        self.assertEqual(self.df.metadata["MAXSIZ"], 10000)
        self.assertEqual(self.df.metadata["MAXBLK"], 1800000)
        self.assertEqual(self.df.metadata["IPRINT"], 0)

    def test__rw2DRecord(self):
        """Verify the control parameters."""
        # Expected integer control parameters, in FILE_SPEC_2D_PARAMS order.
        EXPECTED_2D = [
            0, 0, 0, 10000, 30, 0, 1000000000, 5, 0, 0, 50, 0, 1, 1, 0, 0,
            0, 110, 10, 100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 10, 40, 32, 0, 0,
            2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        ]
        for i, param in enumerate(dif3d.FILE_SPEC_2D_PARAMS):
            self.assertEqual(self.df.twoD[param], EXPECTED_2D[i])

    def test__rw3DRecord(self):
        """Verify the convergence criteria and other floating point data."""
        # First ten values are meaningful; the remaining twenty are zero padding.
        EXPECTED_3D = [
            1e-7,
            1e-5,
            1e-5,
            3.823807613470224e-01,
            1e-3,
            4e-2,
            1e0,
            0e0,
            0e0,
            9.999999747378752e-05,
        ] + [0.0 for i in range(1, 21)]
        for i, param in enumerate(dif3d.FILE_SPEC_3D_PARAMS):
            self.assertEqual(self.df.threeD[param], EXPECTED_3D[i])

    def test__rw4DRecord(self):
        """Verify the optimum overrelaxation factors."""
        # This fixture does not populate the 4D record.
        self.assertEqual(self.df.fourD, None)

    def test__rw5DRecord(self):
        """Verify the axial coarse-mesh rebalance boundaries."""
        # This fixture does not populate the 5D record.
        self.assertEqual(self.df.fiveD, None)

    def test_writeBinary(self):
        """Verify binary equivalence of written DIF3D file.

        .. test:: Test writing DIF3D files.
            :id: T_ARMI_NUCDATA_DIF3D2
            :tests: R_ARMI_NUCDATA_DIF3D
        """
        with TemporaryDirectoryChanger():
            dif3d.Dif3dStream.writeBinary(self.df, "DIF3D2")
            with open(SIMPLE_HEXZ_DIF3D, "rb") as f1, open("DIF3D2", "rb") as f2:
                expectedData = f1.read()
                actualData = f2.read()
            # BUG FIX: zip() stops at the shorter iterable, so a truncated (or
            # extended) output file previously passed the byte-wise comparison.
            # Assert equal lengths first so any size mismatch fails the test.
            self.assertEqual(len(expectedData), len(actualData))
            for expected, actual in zip(expectedData, actualData):
                self.assertEqual(expected, actual)
class TestDif3dEmptyRecords(unittest.TestCase):
    def test_empty4and5Records(self):
        """Since the inputs results in these being None, get test coverage another way."""
        data = dif3d.Dif3dStream.readBinary(SIMPLE_HEXZ_DIF3D)
        # Force the control parameters that enable records 4 and 5, then
        # populate those records with minimal content.
        data.twoD["NUMORP"] = 1
        data.twoD["NCMRZS"] = 1
        data.fourD = {"OMEGA1": 1.0}
        data.fiveD = {"ZCMRC1": 1.0, "NZINTS1": 10}
        with TemporaryDirectoryChanger():
            # Round-trip through disk and verify the records survive.
            dif3d.Dif3dStream.writeBinary(data, "DIF3D2")
            roundTripped = dif3d.Dif3dStream.readBinary("DIF3D2")
            self.assertEqual(roundTripped.fourD, data.fourD)
            self.assertEqual(roundTripped.fiveD, data.fiveD)
================================================
FILE: armi/nuclearDataIO/cccc/tests/test_fixsrc.py
================================================
# Copyright 2024 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the reading and writing of the DIF3D FIXSRC file format."""
import os
import unittest
import numpy as np
from armi.nuclearDataIO.cccc import fixsrc
from armi.utils.directoryChangers import TemporaryDirectoryChanger
# ruff: noqa: E501
# Whitespace-separated fixed-source values as they might appear in an ASCII
# FIXSRC file; parsed below into a (3, 3, 2, 4) float32 array (72 values).
FIXSRC_ASCII = """0 0 0 0 0 0 0.4008E+10 0.4210E+10 0.4822E+10 0.5154E+10 0.4926E+10 0.4621E+10
0.4246E+10 0.3757E+10 0.3311E+10 0.3479E+10 0.357E+10 0.324E+10 0.2942E+10 0.2903E+10 0.2925E+10 0.2763E+10 0.2414E+10 0.2036E+10
0.1656E+10 0.1477E+10 0.1455E+10 0.1434E+10 0.1297E+10 0.1153E+10 0.101E+10 0.8841E+9 0.7923E+9 0.7266E+9 0.6575E+9 0.589E+9
0.5027E+9 0.4146E+9 0.3474E+9 0.3015E+9 0.2403E+9 0.2356E+9 0.1634E+9 0.1521E+9 0.1258E+9 0.9032E+8 0.6156E+8 0.3983E+8
0.3134E+8 0.303E+8 0.2983E+8 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0"""
FIXSRC_ARRAY = np.array(FIXSRC_ASCII.split(), dtype=np.float32).reshape((3, 3, 2, 4))
class TestFixsrc(unittest.TestCase):
    def test_writeReadBinaryLoop(self):
        """Writing the fixture array produces a non-empty binary file on disk."""
        with TemporaryDirectoryChanger() as tempDir:
            name = "fixsrc_writeBinary.bin"
            fullPath = os.path.join(tempDir.destination, name)
            fixsrc.writeBinary(fullPath, FIXSRC_ARRAY)
            self.assertIn(name, os.listdir(tempDir.destination))
            self.assertGreater(os.path.getsize(fullPath), 0)
================================================
FILE: armi/nuclearDataIO/cccc/tests/test_gamiso.py
================================================
# Copyright 2022 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test GAMISO reading and writing."""
import os
import unittest
from copy import deepcopy
from armi.nuclearDataIO import xsLibraries
from armi.nuclearDataIO.cccc import gamiso, isotxs
from armi.nuclearDataIO.xsNuclides import XSNuclide
from armi.utils.directoryChangers import TemporaryDirectoryChanger
THIS_DIR = os.path.dirname(__file__)
# Shared nuclear-data fixtures live two levels up, under tests/fixtures.
FIXTURE_DIR = os.path.join(THIS_DIR, "..", "..", "tests", "fixtures")
# Binary GAMISO fixture for the "AA" cross-section ID.
GAMISO_AA = os.path.join(FIXTURE_DIR, "AA.gamiso")
class TestGamiso(unittest.TestCase):
    def setUp(self):
        # A fresh, empty library for each test.
        self.xsLib = xsLibraries.IsotxsLibrary()

    def test_compare(self):
        """Compare the input binary GAMISO file.

        .. test:: Test reading GAMISO files.
            :id: T_ARMI_NUCDATA_GAMISO0
            :tests: R_ARMI_NUCDATA_GAMISO
        """
        libAA = gamiso.readBinary(GAMISO_AA)
        # Merge a copy so the original stays intact for the comparison.
        self.xsLib.merge(deepcopy(libAA))
        self.assertTrue(gamiso.compare(self.xsLib, libAA))

    def test_writeBinary(self):
        """Write a binary GAMISO file.

        .. test:: Test writing GAMISO files.
            :id: T_ARMI_NUCDATA_GAMISO1
            :tests: R_ARMI_NUCDATA_GAMISO
        """
        with TemporaryDirectoryChanger():
            original = gamiso.readBinary(GAMISO_AA)
            written = gamiso.writeBinary(original, "gamiso.out")
            self.assertTrue(gamiso.compare(original, written))

    def test_addDummyNuclidesToLibrary(self):
        dummies = [XSNuclide(None, "U238AA")]
        nucsBefore = self.xsLib.getNuclides("")
        self.assertEqual(len(self.xsLib.xsIDs), 0)
        self.assertTrue(gamiso.addDummyNuclidesToLibrary(self.xsLib, dummies))
        # The dummy nuclide's "AA" suffix registers the "38"... xsID and adds
        # exactly one nuclide.
        self.assertEqual(len(self.xsLib.xsIDs), 1)
        self.assertEqual(list(self.xsLib.xsIDs)[0], "38")
        nucsAfter = self.xsLib.getNuclides("")
        self.assertGreater(len(nucsAfter), len(nucsBefore))
        added = set(nucsAfter).difference(set(nucsBefore))
        self.assertEqual(len(added), 1)
        self.assertEqual(list(added)[0].xsId, "38")

    def test_addDummyNuclidesToLibraryNumGroups(self):
        isoLib = isotxs.readBinary(os.path.join(FIXTURE_DIR, "ISOAA"))
        gamLib = gamiso.readBinary(GAMISO_AA)
        # Pretend the gamma library has more groups than the fixture really has.
        gamLib.gamisoMetadata["numGroups"] = 50
        dummyNuc = XSNuclide(isoLib, "DMP1AA")
        dummyNuc.isotxsMetadata = isoLib.getNuclides("AA")[0].isotxsMetadata
        gamiso.addDummyNuclidesToLibrary(gamLib, [dummyNuc])
        self.assertEqual(gamLib["DMP1AA"].nucLabel, "DMP1")
        self.assertEqual(gamLib["DMP1AA"].gamisoMetadata["jband"][(49, 3)], 1)
================================================
FILE: armi/nuclearDataIO/cccc/tests/test_geodst.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test GEODST reading and writing."""
import os
import unittest
from numpy.testing import assert_equal
from armi.nuclearDataIO.cccc import geodst
from armi.utils.directoryChangers import TemporaryDirectoryChanger
THIS_DIR = os.path.dirname(__file__)
# Binary GEODST fixture created with DIF3D 11 for a small 1/3-core hex reactor.
SIMPLE_GEODST = os.path.join(THIS_DIR, "fixtures", "simple_hexz.geodst")
class TestGeodst(unittest.TestCase):
    """
    Tests the GEODST class.

    This reads from a GEODST file that was created using DIF3D 11 on a small
    test hex reactor in 1/3 geometry.
    """

    def test_readGeodst(self):
        """Ensure we can read a GEODST file.

        .. test:: Test reading GEODST files.
            :id: T_ARMI_NUCDATA_GEODST0
            :tests: R_ARMI_NUCDATA_GEODST
        """
        geom = geodst.readBinary(SIMPLE_GEODST)
        self.assertEqual(geom.metadata["IGOM"], 18)
        self.assertAlmostEqual(geom.xmesh[1], 16.79, places=5)  # hex pitch
        self.assertAlmostEqual(geom.zmesh[-1], 448.0, places=5)  # top of reactor in cm
        self.assertEqual(geom.coarseMeshRegions.shape, (10, 10, len(geom.zmesh) - 1))
        self.assertEqual(geom.coarseMeshRegions.min(), 0)
        self.assertEqual(geom.coarseMeshRegions.max(), geom.metadata["NREG"])

    def test_writeGeodst(self):
        """Ensure that we can write a modified GEODST.

        .. test:: Test writing GEODST files.
            :id: T_ARMI_NUCDATA_GEODST1
            :tests: R_ARMI_NUCDATA_GEODST
        """
        with TemporaryDirectoryChanger():
            original = geodst.readBinary(SIMPLE_GEODST)
            # Double the top axial boundary, round-trip through disk, and
            # verify the change (and the untouched intervals) survive.
            original.zmesh[-1] *= 2
            geodst.writeBinary(original, "GEODST2")
            reloaded = geodst.readBinary("GEODST2")
            self.assertAlmostEqual(reloaded.zmesh[-1], 448.0 * 2, places=5)
            assert_equal(original.kintervals, reloaded.kintervals)
================================================
FILE: armi/nuclearDataIO/cccc/tests/test_isotxs.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the workings of the library wrappers."""
import unittest
from armi import nuclearDataIO
from armi.nucDirectory.nuclideBases import NuclideBases
from armi.nuclearDataIO import xsLibraries
from armi.nuclearDataIO.cccc import isotxs
from armi.tests import ISOAA_PATH
from armi.utils.directoryChangers import TemporaryDirectoryChanger
class TestIsotxs(unittest.TestCase):
    """Tests the ISOTXS class."""

    @classmethod
    def setUpClass(cls):
        # load a library that is in the ARMI tree. This should
        # be a small library with LFPs, Actinides, structure, and coolant
        cls.lib = isotxs.readBinary(ISOAA_PATH)

    def test_writeBinary(self):
        """Test reading in an ISOTXS file, and then writing it back out again.

        Now, the library here can't guarantee the output will be the same as the
        input. But we can guarantee the written file is still valid, by reading
        it again.

        .. test:: Write ISOTXS binary files.
            :id: T_ARMI_NUCDATA_ISOTXS0
            :tests: R_ARMI_NUCDATA_ISOTXS
        """
        with TemporaryDirectoryChanger():
            origLib = isotxs.readBinary(ISOAA_PATH)
            fname = self._testMethodName + "temp-aa.isotxs"
            isotxs.writeBinary(origLib, fname)
            lib = isotxs.readBinary(fname)
            # validate the written file is still valid
            nucs = lib.nuclides
            self.assertTrue(nucs)
            self.assertIn("AA", lib.xsIDs)
            nuc = lib["U235AA"]
            self.assertIsNotNone(nuc)
            with self.assertRaises(KeyError):
                # an unknown nuclide/xsID pair must raise
                lib.getNuclide("nonexistent", "zz")

    def test_isotxsGeneralData(self):
        """Sanity-check the basic contents of the pre-loaded library."""
        nucs = self.lib.nuclides
        self.assertTrue(nucs)
        self.assertIn("AA", self.lib.xsIDs)
        nuc = self.lib["U235AA"]
        self.assertIsNotNone(nuc)
        with self.assertRaises(KeyError):
            self.lib.getNuclide("nonexistent", "zz")

    def test_isotxsDetailedData(self):
        """Check nuclide count, group structure, and file-wide chi of the library."""
        self.assertEqual(50, len(self.lib.nuclides))
        groups = self.lib.neutronEnergyUpperBounds
        self.assertEqual(33, len(groups))
        self.assertEqual(14072911.0, max(groups))
        self.assertEqual(0.4139941930770874, min(groups))
        # file-wide chi
        self.assertEqual(33, len(self.lib.isotxsMetadata["chi"]))
        self.assertEqual(1.0000016745038094, sum(self.lib.isotxsMetadata["chi"]))

    def test_getScatteringWeights(self):
        """Compare one full scatter-weight column against hard-coded reference values."""
        self.assertEqual(1650, len(self.lib.getScatterWeights()))
        # reference scatter-weight column for ("U235AA", 1)
        refVector = [
            0.0,
            0.9924760291647134,
            0.007523970835286507,
            0.0,
            0.0,
            0.0,
            0.0,
            0.0,
            0.0,
            0.0,
            0.0,
            0.0,
            0.0,
            0.0,
            0.0,
            0.0,
            0.0,
            0.0,
            0.0,
            0.0,
            0.0,
            0.0,
            0.0,
            0.0,
            0.0,
            0.0,
            0.0,
            0.0,
            0.0,
            0.0,
            0.0,
            0.0,
            0.0,
        ]
        # the stored column is sparse; densify and flatten before comparing
        for v1, v2 in zip(refVector, self.lib.getScatterWeights()["U235AA", 1].todense().T.tolist()[0]):
            self.assertAlmostEqual(v1, v2)

    def test_getNuclide(self):
        """Looked-up library nuclides should be backed by the expected nuclide bases."""
        nuclideBases = NuclideBases()
        self.assertEqual(nuclideBases.byName["U235"], self.lib.getNuclide("U235", "AA")._base)
        self.assertEqual(nuclideBases.byName["PU239"], self.lib.getNuclide("PU239", "AA")._base)

    def test_n2nIsReactionBased(self):
        """
        ARMI assumes ISOTXS n2n reactions are all reaction-based. Test this.

        The alternative is production based.
        Previous studies show that MC**2-2 is reaction based.
        """
        nuc = self.lib.getNuclide("U235", "AA")
        fromMatrix = nuc.micros.n2nScatter.sum(axis=0).getA1()  # convert to ndarray
        for base, matrix in zip(fromMatrix, nuc.micros.n2n):
            self.assertAlmostEqual(base, matrix)

    def test_getScatterWeights(self):
        """A scatter-weight column should sum to 1.0 (it is a normalized distribution)."""
        scatWeights = self.lib.getScatterWeights()
        vals = scatWeights["U235AA", 4]
        self.assertAlmostEqual(sum(vals), 1.0)

    def test_getISOTXSFileName(self):
        """Expected ISOTXS file names for cycle/node/xsID/suffix combinations."""
        self.assertEqual(nuclearDataIO.getExpectedISOTXSFileName(cycle=0), "ISOTXS-c0")
        self.assertEqual(nuclearDataIO.getExpectedISOTXSFileName(cycle=1), "ISOTXS-c1")
        self.assertEqual(nuclearDataIO.getExpectedISOTXSFileName(cycle=0, node=1), "ISOTXS-c0n1")
        self.assertEqual(nuclearDataIO.getExpectedISOTXSFileName(cycle=23), "ISOTXS-c23")
        self.assertEqual(nuclearDataIO.getExpectedISOTXSFileName(xsID="AA"), "ISOAA")
        self.assertEqual(
            nuclearDataIO.getExpectedISOTXSFileName(xsID="AA", suffix="test"),
            "ISOAA-test",
        )
        self.assertEqual(nuclearDataIO.getExpectedISOTXSFileName(), "ISOTXS")
        with self.assertRaises(ValueError):
            # Error when over specified
            nuclearDataIO.getExpectedISOTXSFileName(cycle=10, xsID="AA")

    def test_getGAMISOFileName(self):
        """Expected GAMISO file names for cycle/node/xsID/suffix combinations."""
        self.assertEqual(nuclearDataIO.getExpectedGAMISOFileName(cycle=0), "cycle0.gamiso")
        self.assertEqual(nuclearDataIO.getExpectedGAMISOFileName(cycle=1), "cycle1.gamiso")
        self.assertEqual(
            nuclearDataIO.getExpectedGAMISOFileName(cycle=1, node=3),
            "cycle1node3.gamiso",
        )
        self.assertEqual(nuclearDataIO.getExpectedGAMISOFileName(cycle=23), "cycle23.gamiso")
        self.assertEqual(nuclearDataIO.getExpectedGAMISOFileName(xsID="AA"), "AA.gamiso")
        self.assertEqual(
            nuclearDataIO.getExpectedGAMISOFileName(xsID="AA", suffix="test"),
            "AA-test.gamiso",
        )
        self.assertEqual(nuclearDataIO.getExpectedGAMISOFileName(), "GAMISO")
        with self.assertRaises(ValueError):
            # Error when over specified
            nuclearDataIO.getExpectedGAMISOFileName(cycle=10, xsID="AA")
class Isotxs_merge_Tests(unittest.TestCase):
    def test_mergeMccV2FilesRemovesTheFileWideChi(self):
        """Test merging ISOTXS files.

        .. test:: Read ISOTXS files.
            :id: T_ARMI_NUCDATA_ISOTXS1
            :tests: R_ARMI_NUCDATA_ISOTXS
        """
        sourceLib = isotxs.readBinary(ISOAA_PATH)
        self.assertAlmostEqual(1.0, sum(sourceLib.isotxsMetadata["chi"]), 5)
        self.assertAlmostEqual(1, sourceLib.isotxsMetadata["fileWideChiFlag"])
        targetLib = xsLibraries.IsotxsLibrary()
        # Merging into an empty library is essentially a copy, so the
        # file-wide chi survives this first merge.
        targetLib.merge(sourceLib)
        self.assertAlmostEqual(1.0, sum(targetLib.isotxsMetadata["chi"]), 5)
        self.assertEqual(1, targetLib.isotxsMetadata["fileWideChiFlag"])
        # Drop every nuclide so a second merge of the same library is legal.
        for label in targetLib.nuclideLabels:
            del targetLib[label]
        # Merging two populated libraries removes the file-wide chi.
        targetLib.merge(isotxs.readBinary(ISOAA_PATH))
        self.assertEqual(None, targetLib.isotxsMetadata["chi"])
================================================
FILE: armi/nuclearDataIO/cccc/tests/test_labels.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the reading and writing of the DIF3D/VARIANT LABELS interface file."""
import os
import unittest
from armi.nuclearDataIO.cccc import labels
from armi.utils.directoryChangers import TemporaryDirectoryChanger
THIS_DIR = os.path.dirname(__file__)
LABELS_FILE_BIN = os.path.join(THIS_DIR, "fixtures", "labels.binary")
LABELS_FILE_ASCII = os.path.join(THIS_DIR, "fixtures", "labels.ascii")
class TestLabels(unittest.TestCase):
    """Tests for labels."""

    def test_readLabelsBinary(self):
        """Read the binary LABELS fixture and check its header metadata."""
        labelsData = labels.readBinary(LABELS_FILE_BIN)
        # expected header values for the fixture, in assertion order
        expectedMetadata = {
            "hname": "LABELS",
            "numTrianglesPerHex": 6,
            "numZones": 5800,
            "numRegions": 2900,
            "numHexagonalRings": 13,
        }
        for key, expectedValue in expectedMetadata.items():
            self.assertEqual(labelsData.metadata[key], expectedValue)
        self.assertEqual(len(labelsData.regionLabels), expectedMetadata["numRegions"])

    def test_writeLabelsAscii(self):
        """Round-trip: binary fixture -> ascii file, compared line-by-line to the ascii fixture."""
        with TemporaryDirectoryChanger():
            binaryData = labels.readBinary(LABELS_FILE_BIN)
            asciiFileName = self._testMethodName + "labels.ascii"
            labels.writeAscii(binaryData, asciiFileName)
            with open(asciiFileName, "r") as f:
                actualData = f.read().splitlines()
            with open(LABELS_FILE_ASCII) as f:
                expectedData = f.read().splitlines()
            for expected, actual in zip(expectedData, actualData):
                self.assertEqual(expected, actual)
================================================
FILE: armi/nuclearDataIO/cccc/tests/test_nhflux.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test reading/writing of NHFLUX dataset."""
import os
import unittest
import numpy as np
from armi.nuclearDataIO.cccc import nhflux
from armi.utils.directoryChangers import TemporaryDirectoryChanger
THIS_DIR = os.path.dirname(__file__)
SIMPLE_HEXZ_INP = os.path.join(THIS_DIR, "../../tests", "simple_hexz.inp")
SIMPLE_HEXZ_NHFLUX = os.path.join(THIS_DIR, "fixtures", "simple_hexz.nhflux")
SIMPLE_HEXZ_NHFLUX_VARIANT = os.path.join(THIS_DIR, "fixtures", "simple_hexz.nhflux.variant")
class TestNhflux(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        """Load NHFLUX data from binary file."""
        cls.nhf = nhflux.NhfluxStream.readBinary(SIMPLE_HEXZ_NHFLUX)

    def test_fc(self):
        """Verify the file control info."""
        self.assertEqual(self.nhf.metadata["ndim"], 3)
        self.assertEqual(self.nhf.metadata["ngroup"], 4)
        self.assertEqual(self.nhf.metadata["ninti"], 5)
        self.assertEqual(self.nhf.metadata["nintj"], 5)
        self.assertEqual(self.nhf.metadata["nintk"], 6)
        self.assertEqual(self.nhf.metadata["nSurf"], 6)
        self.assertEqual(self.nhf.metadata["nMom"], 5)
        self.assertEqual(self.nhf.metadata["nintxy"], 19)
        self.assertEqual(self.nhf.metadata["npcxy"], 144)
        self.assertEqual(self.nhf.metadata["iaprx"], 4)
        self.assertEqual(self.nhf.metadata["iaprxz"], 3)
        # VARIANT-only control keys must not appear in a Nodal NHFLUX file
        variantControlInfo = nhflux.FILE_SPEC_1D_KEYS_VARIANT11
        for info in variantControlInfo:
            self.assertTrue(info not in self.nhf.metadata)

    def test_fluxMoments(self):
        """
        Verify that the flux moments are properly read.

        The 5 flux moments values are manually verified for two nodes. The indices
        are converted to zero based from the original by subtracting one.
        """
        # node 1 (ring=1, position=1), axial=3, group=2
        i = 0  # first one in node map (ring=1, position=1)
        # 13 = 2*5 + 2 + 1 => (i=2, j=2)
        self.assertEqual(self.nhf.geodstCoordMap[i], 13)
        iz, ig = 2, 1  # zero based
        self.assertTrue(
            np.allclose(
                self.nhf.fluxMoments[i, iz, :, ig],
                [1.424926e08, -2.018375e-01, 2.018375e-01, -2.018374e-01, 1.758205e06],
            )
        )
        # node 8 (ring=3, position=2), axial=6, group=1
        i = 7  # ring=3, position=2
        self.assertEqual(self.nhf.geodstCoordMap[i], 20)  # 20 = 3*5 + 4 + 1 => (i=4, j=3)
        iz, ig = 5, 0  # zero based
        self.assertTrue(
            np.allclose(
                self.nhf.fluxMoments[i, iz, :, ig],
                [7.277324e06, -1.453915e06, -1.453915e06, 2.362100e-02, -8.626439e05],
            )
        )

    def test_xyPartialCurrents(self):
        """
        Verify that the XY-directed partial currents can be read.

        The surface partial currents can be used to reconstruct the surface
        flux and corner flux values. This test shows that the outgoing current
        in one hex is identical to the incoming current in the adjacent hex.
        """
        # node 2 (ring=3, position=1), axial=4, group=2, surface=4, outgoing
        iNode, iSurf, iz, ig = 1, 3, 3, 1  # zero based
        self.assertEqual(self.nhf.geodstCoordMap[iNode], 15)
        self.assertAlmostEqual(self.nhf.partialCurrentsHex[iNode, iz, iSurf, ig] / 1.5570424e07, 1.0)
        # node 14 (ring=2, position=1), axial=4, group=2, surface=1, incoming
        iNode, iSurf = 13, 0
        # decode the incoming-pointer into (node, surface) of the neighbor
        ipcpnt = self.nhf.incomingPointersToAllAssemblies[iSurf, iNode]
        iNode1, iSurf1 = divmod(ipcpnt - 1, self.nhf.metadata["nSurf"])
        self.assertEqual(iNode1, 1)  # node 2
        self.assertEqual(iSurf1, 3)  # surface 4

    def test_zPartialCurrents(self):
        """
        Verify that the Z-directed partial currents can be read.

        The Z-directed partial currents are manually checked for one node
        surface.
        """
        # node 15 (ring=2, position=3), axial=3, group=3, j=1 (z-plus)
        iNode, iz, ig, j = 14, 2, 2, 0
        self.assertAlmostEqual(self.nhf.partialCurrentsZ[iNode, iz, j, ig] / 1.6928521e06, 1.0)

    def test_write(self):
        """Verify binary equivalence of written binary file."""
        with TemporaryDirectoryChanger():
            nhflux.NhfluxStream.writeBinary(self.nhf, "NHFLUX2")
            with open(SIMPLE_HEXZ_NHFLUX, "rb") as f1, open("NHFLUX2", "rb") as f2:
                expectedData = f1.read()
                actualData = f2.read()
            # Compare the complete byte strings. The previous zip()-based loop
            # stopped at the shorter file, so a truncated output file would
            # have passed the test silently.
            self.assertEqual(expectedData, actualData)
class TestNhfluxVariant(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        """Load NHFLUX data from binary file. This file was produced using VARIANT v11.0."""
        cls.nhf = nhflux.NhfluxStreamVariant.readBinary(SIMPLE_HEXZ_NHFLUX_VARIANT)

    def test_fc(self):
        """Verify the file control info."""
        # (removed a leftover debug print of the full metadata dict)
        # These entries exist for both Nodal and VARIANT, but have different values
        # for the same model
        self.assertEqual(self.nhf.metadata["nMom"], 35)
        self.assertEqual(self.nhf.metadata["nscoef"], 3)
        # These entries are only for VARIANT
        self.assertEqual(self.nhf.metadata["npcbdy"], 30)
        self.assertEqual(self.nhf.metadata["npcsym"], 0)
        self.assertEqual(self.nhf.metadata["npcsec"], 0)
        self.assertEqual(self.nhf.metadata["iwnhfl"], 0)
        self.assertEqual(self.nhf.metadata["nMoms"], 0)

    def test_fluxMoments(self):
        """Spot-check flux moments for one node/axial/group against reference values."""
        # node 1 (ring=1, position=1), axial=3, group=2
        i = 0
        self.assertEqual(self.nhf.geodstCoordMap[i], 13)
        iz, ig = 2, 1
        fluxMoments = self.nhf.fluxMoments[i, iz, :, ig]
        # 23 of the 35 moments are exactly zero for this node
        numZeroFluxMoments = fluxMoments[fluxMoments == 0.0].shape[0]
        self.assertTrue(numZeroFluxMoments == 23)
        actualNonzeroFluxMoments = fluxMoments[fluxMoments != 0.0]
        expectedNonzeroFluxMoments = [
            1.42816534e08,
            -5.97642574e06,
            -1.54354423e06,
            -2.15736929e06,
            -1.53415481e06,
            5.54278533e04,
            7.74699855e04,
            2.38133712e04,
            6.69907176e03,
            5.49027950e03,
            9.01170812e03,
            1.05852790e04,
        ]
        self.assertTrue(np.allclose(actualNonzeroFluxMoments, expectedNonzeroFluxMoments))

    def test_write(self):
        """Verify binary equivalence of written binary file."""
        with TemporaryDirectoryChanger():
            nhflux.NhfluxStreamVariant.writeBinary(self.nhf, "NHFLUX2")
            with open(SIMPLE_HEXZ_NHFLUX_VARIANT, "rb") as f1, open("NHFLUX2", "rb") as f2:
                expectedData = f1.read()
                actualData = f2.read()
            # Compare the complete byte strings. The previous zip()-based loop
            # stopped at the shorter file, so a truncated output file would
            # have passed the test silently.
            self.assertEqual(expectedData, actualData)
================================================
FILE: armi/nuclearDataIO/cccc/tests/test_pmatrx.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the workings of the library wrappers."""
import filecmp
import unittest
from armi import nuclearDataIO
from armi.nuclearDataIO.cccc import pmatrx
from armi.nuclearDataIO.tests import test_xsLibraries
from armi.utils import properties
from armi.utils.directoryChangers import TemporaryDirectoryChanger
class TestPmatrxNuclides(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        # Load small libraries shipped in the ARMI tree; these contain LFPs,
        # actinides, structure, and coolant.
        cls.libAA = pmatrx.readBinary(test_xsLibraries.PMATRX_AA)
        cls.libAB = pmatrx.readBinary(test_xsLibraries.PMATRX_AB)

    def _nuclideGeneralHelper(self, nuclide):
        # Shared assertions for a U235 nuclide from either library.
        for metadataKey in ("activationXS", "activationMT", "activationMTU"):
            self.assertEqual(0, len(nuclide.pmatrxMetadata[metadataKey]))
        self.assertEqual(33, len(nuclide.neutronHeating))
        self.assertEqual(33, len(nuclide.neutronDamage))
        self.assertEqual(21, len(nuclide.gammaHeating))
        # if there are more scattering orders, should add tests for them as well...
        self.assertEqual(1, nuclide.pmatrxMetadata["maxScatteringOrder"])
        self.assertEqual((21, 33), nuclide.isotropicProduction.shape)

    def test_pmatrxNuclideDataAA(self):
        self._nuclideGeneralHelper(self.libAA["U235AA"])

    def test_pmatrxNuclideDataAB(self):
        self._nuclideGeneralHelper(self.libAB["U235AB"])

    def test_nuclideDataIsDifferent(self):
        productionAA = self.libAA["U235AA"].isotropicProduction
        productionAB = self.libAB["U235AB"].isotropicProduction
        self.assertFalse((productionAA == productionAB).all())

    def test_getPMATRXFileName(self):
        getName = nuclearDataIO.getExpectedPMATRXFileName
        self.assertEqual(getName(cycle=0), "cycle0.pmatrx")
        self.assertEqual(getName(cycle=1), "cycle1.pmatrx")
        self.assertEqual(getName(cycle=23), "cycle23.pmatrx")
        self.assertEqual(getName(xsID="AA"), "AA.pmatrx")
        self.assertEqual(getName(xsID="AA", suffix="test"), "AA-test.pmatrx")
        self.assertEqual(getName(), "PMATRX")
        with self.assertRaises(ValueError):
            # supplying both cycle and xsID over-specifies the name
            getName(cycle=10, xsID="AA")
class TestPmatrx(unittest.TestCase):
    """Tests the Pmatrx gamma production matrix."""

    @classmethod
    def setUpClass(cls):
        # load a library that is in the ARMI tree. This should
        # be a small library with LFPs, Actinides, structure, and coolant
        cls.lib = pmatrx.readBinary(test_xsLibraries.PMATRX_AA)

    def setUp(self):
        # run each test in its own temporary working directory
        self.td = TemporaryDirectoryChanger()
        self.td.__enter__()

    def tearDown(self):
        self.td.__exit__(None, None, None)

    def test_pmatrxGammaEnergies(self):
        """The 21 gamma group upper bounds should match the reference values."""
        energies = [
            20000000.0,
            10000000.0,
            8000000.0,
            7000000.0,
            6000000.0,
            5000000.0,
            4000000.0,
            3000000.0,
            2500000.0,
            2000000.0,
            1500000.0,
            1000000.0,
            700000.0,
            450000.0,
            300000.0,
            150000.0,
            100000.0,
            74999.8984375,
            45000.0,
            30000.0,
            20000.0,
        ]
        self.assertTrue((energies == self.lib.gammaEnergyUpperBounds).all())

    def test_pmatrxNeutronEnergies(self):
        """The 33 neutron group upper bounds should match the reference values."""
        energies = [
            14190675.0,
            10000000.0,
            6065306.5,
            3678794.75,
            2231302.0,
            1353353.125,
            820850.0,
            497870.625,
            301973.75,
            183156.34375,
            111089.875,
            67379.390625,
            40867.66796875,
            24787.498046875,
            15034.3779296875,
            9118.810546875,
            5530.8388671875,
            3354.624267578125,
            2034.6827392578125,
            1234.097412109375,
            748.5178833007812,
            453.9991149902344,
            275.36444091796875,
            167.01695251464844,
            101.30089569091797,
            61.44210433959961,
            37.26651382446289,
            22.6032772064209,
            13.709582328796387,
            8.31528091430664,
            3.9278604984283447,
            0.5315780639648438,
            0.41745778918266296,
        ]
        self.assertTrue((energies == self.lib.neutronEnergyUpperBounds).all())

    def test_pmatrxNuclideNames(self):
        """Nuclide labels (name plus "AA" xsID suffix) should match the reference order."""
        names = [
            "U235AA",
            "U238AA",
            "PU39AA",
            "FE54AA",
            "FE56AA",
            "FE57AA",
            "FE58AA",
            "NA23AA",
            "ZR90AA",
            "ZR91AA",
            "ZR92AA",
            "ZR93AA",
            "ZR94AA",
            "ZR95AA",
            "ZR96AA",
            "XE28AA",
            "XE29AA",
            "XE30AA",
            "XE31AA",
            "XE32AA",
            "XE33AA",
            "XE34AA",
            "XE35AA",
            "XE36AA",
            "FP40AA",
        ]
        self.assertEqual(names, self.lib.nuclideLabels)

    def test_pmatrxDoesntHaveDoseConversionFactors(self):
        """Accessing dose conversion factors that were never set must raise."""
        with self.assertRaises(properties.ImmutablePropertyError):
            _bacon = self.lib.neutronDoseConversionFactors
        with self.assertRaises(properties.ImmutablePropertyError):
            _turkey = self.lib.gammaDoseConversionFactors
class TestProdMatrix(TestPmatrx):
    """
    Tests related to reading a PMATRX that was written by ARMI.

    Note that this runs all the tests from TestPmatrx.
    """

    def test_writtenIsIdenticalToOriginal(self):
        """Make sure our writer produces something identical to the original.

        .. test:: Test reading and writing PMATRIX files.
            :id: T_ARMI_NUCDATA_PMATRX
            :tests: R_ARMI_NUCDATA_PMATRX
        """
        referenceLib = pmatrx.readBinary(test_xsLibraries.PMATRX_AA)
        outName = self._testMethodName + "temp-aa.pmatrx"
        pmatrx.writeBinary(referenceLib, outName)
        # re-read to confirm the written file parses at all...
        _reread = pmatrx.readBinary(outName)
        # ...then confirm it is byte-for-byte identical to the original
        self.assertTrue(filecmp.cmp(test_xsLibraries.PMATRX_AA, outName))
class TestProdMatrixFromAscii(TestPmatrx):
    """
    Tests that show you can read and write pmatrx files from ascii libraries.

    Notes
    -----
    This runs all the tests from TestPmatrx.
    """

    @classmethod
    def setUpClass(cls):
        cls.origLib = pmatrx.readBinary(test_xsLibraries.PMATRX_AA)

    def setUp(self):
        self.td = TemporaryDirectoryChanger()
        self.td.__enter__()
        # Round-trip binary -> ascii -> library object; the inherited tests
        # then run against the ascii-sourced library in self.lib.
        self.fname = self._testMethodName + "temp-aa.pmatrx.ascii"
        binaryLib = pmatrx.readBinary(test_xsLibraries.PMATRX_AA)
        pmatrx.writeAscii(binaryLib, self.fname)
        self.lib = pmatrx.readAscii(self.fname)

    def tearDown(self):
        self.td.__exit__(None, None, None)
================================================
FILE: armi/nuclearDataIO/cccc/tests/test_pwdint.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test PWDINT reading and writing."""
import os
import unittest
from armi.nuclearDataIO.cccc import pwdint
from armi.utils.directoryChangers import TemporaryDirectoryChanger
THIS_DIR = os.path.dirname(__file__)
SIMPLE_PWDINT = os.path.join(THIS_DIR, "fixtures", "simple_cartesian.pwdint")
class TestGeodst(unittest.TestCase):
    r"""
    Tests the PWDINT class.

    This reads from a PWDINT file that was created using DIF3D 11.

    NOTE(review): the class and method names say "Geodst" — apparently copied
    from the GEODST tests — and the docstring previously claimed a hex reactor
    in 1/3 geometry, but the fixture is ``simple_cartesian.pwdint``, so this is
    presumably a Cartesian test case; confirm against the fixture provenance.
    """

    def test_readGeodst(self):
        """Ensure we can read a PWDINT file."""
        pwr = pwdint.readBinary(SIMPLE_PWDINT)
        # every power density in this fixture should be strictly positive
        self.assertGreater(pwr.powerDensity.min(), 0.0)

    def test_writeGeodst(self):
        """Ensure that we can write a modified PWDINT."""
        with TemporaryDirectoryChanger():
            pwr = pwdint.readBinary(SIMPLE_PWDINT)
            pwdint.writeBinary(pwr, "PWDINT2")
            pwr2 = pwdint.readBinary("PWDINT2")
            # the round trip must preserve every power density value exactly
            self.assertTrue((pwr2.powerDensity == pwr.powerDensity).all())
================================================
FILE: armi/nuclearDataIO/cccc/tests/test_rtflux.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test rtflux reading and writing."""
import os
import unittest
from armi.nuclearDataIO.cccc import rtflux
from armi.utils.directoryChangers import TemporaryDirectoryChanger
THIS_DIR = os.path.dirname(__file__)
# This rtflux was made by DIF3D 11 in a Cartesian test case.
SIMPLE_RTFLUX = os.path.join(THIS_DIR, "fixtures", "simple_cartesian.rtflux")
class Testrtflux(unittest.TestCase):
    """Tests the rtflux class."""

    def test_readrtflux(self):
        """Ensure we can read a rtflux file."""
        stream = rtflux.RtfluxStream.readBinary(SIMPLE_RTFLUX)
        expectedShape = tuple(stream.metadata[key] for key in ("NINTI", "NINTJ", "NINTK", "NGROUP"))
        self.assertEqual(stream.groupFluxes.shape, expectedShape)

    def test_writertflux(self):
        """Ensure that we can write a modified rtflux file."""
        with TemporaryDirectoryChanger():
            original = rtflux.RtfluxStream.readBinary(SIMPLE_RTFLUX)
            # perturb off-diagonal entries to catch row/column ordering bugs
            original.groupFluxes[2, 1, 3, 5] *= 1.1
            original.groupFluxes[1, 2, 4, 6] *= 1.2
            rtflux.RtfluxStream.writeBinary(original, "rtflux2")
            reread = rtflux.RtfluxStream.readBinary("rtflux2")
            self.assertAlmostEqual(reread.groupFluxes[2, 1, 3, 5], original.groupFluxes[2, 1, 3, 5])

    def test_rwAscii(self):
        """Ensure that we can read/write in ascii format."""
        with TemporaryDirectoryChanger():
            fromBinary = rtflux.RtfluxStream.readBinary(SIMPLE_RTFLUX)
            rtflux.RtfluxStream.writeAscii(fromBinary, "rtflux.ascii")
            fromAscii = rtflux.RtfluxStream.readAscii("rtflux.ascii")
            self.assertTrue((fromAscii.groupFluxes == fromBinary.groupFluxes).all())

    def test_adjoint(self):
        """Ensure adjoint reads energy groups differently."""
        real = rtflux.RtfluxStream.readBinary(SIMPLE_RTFLUX)
        adjoint = rtflux.AtfluxStream.readBinary(SIMPLE_RTFLUX)
        self.assertFalse((real.groupFluxes == adjoint.groupFluxes).all())
        # the adjoint reader stores the group axis in reversed order
        g = 3
        gReversed = real.metadata["NGROUP"] - g - 1
        self.assertTrue((real.groupFluxes[:, :, :, g] == adjoint.groupFluxes[:, :, :, gReversed]).all())
================================================
FILE: armi/nuclearDataIO/cccc/tests/test_rzflux.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test rzflux reading and writing."""
import os
import unittest
from armi.nuclearDataIO.cccc import rzflux
from armi.utils.directoryChangers import TemporaryDirectoryChanger
THIS_DIR = os.path.dirname(__file__)
# This RZFLUX was made by DIF3D 11 in a Cartesian test case.
SIMPLE_RZFLUX = os.path.join(THIS_DIR, "fixtures", "simple_cartesian.rzflux")
class TestRzflux(unittest.TestCase):
    """Tests the rzflux class."""

    def test_readRzflux(self):
        """Ensure we can read a RZFLUX file."""
        zoneFlux = rzflux.readBinary(SIMPLE_RZFLUX)
        expectedShape = (zoneFlux.metadata["NGROUP"], zoneFlux.metadata["NZONE"])
        self.assertEqual(zoneFlux.groupFluxes.shape, expectedShape)

    def test_writeRzflux(self):
        """Ensure that we can write a modified RZFLUX file."""
        with TemporaryDirectoryChanger():
            zoneFlux = rzflux.readBinary(SIMPLE_RZFLUX)
            # an unmodified round trip must be bytewise identical
            rzflux.writeBinary(zoneFlux, "RZFLUX2")
            self.assertTrue(binaryFilesEqual(SIMPLE_RZFLUX, "RZFLUX2"))
            # perturb off-diagonal entries to catch row/column ordering bugs
            zoneFlux.groupFluxes[2, 10] *= 1.1
            zoneFlux.groupFluxes[12, 1] *= 1.2
            rzflux.writeBinary(zoneFlux, "RZFLUX3")
            reread = rzflux.readBinary("RZFLUX3")
            self.assertAlmostEqual(reread.groupFluxes[12, 1], zoneFlux.groupFluxes[12, 1])

    def test_rwAscii(self):
        """Ensure that we can read/write in ascii format."""
        with TemporaryDirectoryChanger():
            fromBinary = rzflux.readBinary(SIMPLE_RZFLUX)
            rzflux.writeAscii(fromBinary, "RZFLUX.ascii")
            fromAscii = rzflux.readAscii("RZFLUX.ascii")
            self.assertTrue((fromAscii.groupFluxes == fromBinary.groupFluxes).all())
def binaryFilesEqual(fn1, fn2):
    """True if two files are bytewise identical.

    Parameters
    ----------
    fn1 : str
        Path to the first file.
    fn2 : str
        Path to the second file.

    Returns
    -------
    bool
        True if both files have identical contents (including length).

    Notes
    -----
    The previous implementation iterated the file objects with ``zip``, which
    compared *lines* (arbitrary chunks split on newline bytes) rather than
    bytes, and — because ``zip`` stops at the shorter iterable — reported two
    files as equal when one was a truncated prefix of the other.
    """
    chunkSize = 65536  # read in fixed-size chunks to bound memory use
    with open(fn1, "rb") as f1, open(fn2, "rb") as f2:
        while True:
            chunk1 = f1.read(chunkSize)
            chunk2 = f2.read(chunkSize)
            if chunk1 != chunk2:
                # differing content, or one file ended before the other
                return False
            if not chunk1:
                # both files exhausted simultaneously: identical
                return True
================================================
FILE: armi/nuclearDataIO/nuclearFileMetadata.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Assists in reconstruction/rewriting nuclear data files.
One might
refer to the information stored in these files as the scaffolding or blueprints.
Some of it can/could be derived based on data within the overall file; however, not all of it could be
and it is always necessary to retain this type of data while reading the file.
"""
from armi import runLog
from armi.utils import properties
# Metadata keys for the COMPXS power conversion factors.
COMPXS_POWER_CONVERSION_FACTORS = ["fissionWattSeconds", "captureWattSeconds"]

# Metadata keys for REGIONXS power conversion and directional diffusion data:
# one Multiplier/Additive pair per direction d1..d3.
# NOTE(review): the second entry previously read "d1Multiplier" (duplicated),
# leaving no "d2Multiplier"; the d1/d2/d3 Multiplier/Additive pairing implies
# "d2Multiplier" was intended — confirm against the REGIONXS file format spec.
REGIONXS_POWER_CONVERT_DIRECTIONAL_DIFF = [
    "powerConvMult",
    "d1Multiplier",
    "d1Additive",
    "d2Multiplier",
    "d2Additive",
    "d3Multiplier",
    "d3Additive",
]
class _Metadata:
"""Simple dictionary wrapper, that returns :code:`None` if the key does not exist.
Notes
-----
Cannot use a dictionary directly because it is difficult to subclass and broadcast them with MPI.
"""
def __init__(self):
self._data = {}
def __getitem__(self, key):
return self._data.get(key, None)
def __setitem__(self, key, value):
self._data[key] = value
def __iter__(self):
return iter(self._data)
def items(self):
"""Returns items similar to the dict implementation."""
return self._data.items()
def __len__(self):
return len(self._data)
def keys(self):
"""Returns keys similar to the dict implementation."""
return self._data.keys()
def values(self):
return self._data.values()
def update(self, other):
"""Updates the underlying dictionary, similar to the dict implementation."""
self._data.update(other._data)
def merge(self, other, selfContainer, otherContainer, fileType, exceptionClass):
"""
Merge the contents of two metadata instances.
Parameters
----------
other: Similar Metadata class as self
Metadata to be compared against
selfContainer: class
otherContainer: class
Objects that hold the two metadata instances
fileType: str
File type that created this metadata. Examples: ``'ISOTXS', 'GAMISO', 'COMPXS'```
exceptionClass: Exception
Type of exception to raise in the event of dissimilar metadata values
Returns
-------
mergedData: Metadata
Returns a metadata instance of similar type as ``self`` and ``other``
containing the correctly merged data of the two
"""
mergedData = self.__class__()
if not (any(self.keys()) and any(other.keys())):
mergedData.update(self)
mergedData.update(other)
return mergedData
self._mergeLibrarySpecificData(other, selfContainer, otherContainer, mergedData)
skippedKeys = self._getSkippedKeys(other, selfContainer, otherContainer, mergedData)
for key in set(list(self.keys()) + list(other.keys())) - skippedKeys:
selfVal = self[key]
otherVal = other[key]
mergedVal = None
if not properties.numpyHackForEqual(selfVal, otherVal):
exceptionMsg = (
"{libType} {key} metadata differs between {lib1} and {lib2}; Cannot Merge\n"
"{key} has values of {val1} and {val2}"
)
raise exceptionClass(
exceptionMsg.format(
libType=fileType,
lib1=selfContainer,
lib2=otherContainer,
key=key,
val1=selfVal,
val2=otherVal,
)
)
else:
mergedVal = selfVal
mergedData[key] = mergedVal
return mergedData
def _getSkippedKeys(self, other, selfContainer, otherContainer, mergedData):
return set()
def _mergeLibrarySpecificData(self, other, selfContainer, otherContainer, mergedData):
    """Hook for subclasses: merge data that needs special handling. Base does nothing."""
    pass
def compare(self, other, selfContainer, otherContainer, tolerance=0.0):
    """
    Compare the metadata for two libraries.

    Parameters
    ----------
    other: Similar Metadata class as self
        Metadata to be compared against
    selfContainer: class
    otherContainer: class
        Objects that hold the two metadata instances
    tolerance: float
        Acceptable difference between two metadata values

    Returns
    -------
    equal: bool
        If the metadata are equal or not.
    """
    allEqual = True
    # Union of keys from both sides; missing keys resolve through __getitem__.
    for key in set(list(self.keys()) + list(other.keys())):
        mine = self[key]
        theirs = other[key]
        if properties.areEqual(mine, theirs, tolerance):
            continue
        runLog.important(
            "{} and {} {} have different {}:\n{}\n{}".format(
                selfContainer,
                otherContainer,
                self.__class__.__name__,
                key,
                mine,
                theirs,
            )
        )
        allEqual = False
    return allEqual
class FileMetadata(_Metadata):
    """
    Metadata description for a file.

    Attributes
    ----------
    fileNames : list
        Names of the files this metadata was read from.
    """

    def __init__(self):
        super().__init__()
        self.fileNames = []

    def update(self, other):
        """Update this metadata with metadata from another file, accumulating file names."""
        super().update(other)
        self.fileNames.extend(other.fileNames)

    def _mergeLibrarySpecificData(self, other, selfContainer, otherContainer, mergedData):
        """Merged metadata tracks the file names from both sources."""
        mergedData.fileNames = [*self.fileNames, *other.fileNames]
class NuclideXSMetadata(FileMetadata):
    """Metadata for library files containing nuclide cross sections, e.g. ``ISOTXS``."""

    def _getSkippedKeys(self, other, selfContainer, otherContainer, mergedData):
        """
        Exclude file-wide chi data from the generic merge.

        If either library carries a file-wide chi, it cannot be merged directly; it is
        dropped, and fissile nuclides are flagged to use their nuclide-specific chi.
        """
        skippedKeys = {"chi", "libraryLabel"}
        if self["chi"] is not None or other["chi"] is not None:
            runLog.warning(
                "File-wide chi is removed merging libraries {lib1} and {lib2}.\n"
                "This should not impact the calculation, as the file-wide chi is used as"
                " the nuclide-specific chi.\n The nuclides in {lib2} may be modified as well.".format(
                    lib1=selfContainer, lib2=otherContainer
                )
            )
            mergedData["fileWideChiFlag"] = 0
            skippedKeys.add("fileWideChiFlag")
            mergedData["chi"] = None
            # Fissile nuclides must now carry their own chi.
            # (Was a redundant identity list comprehension over this same concatenation.)
            for nuc in selfContainer.nuclides + otherContainer.nuclides:
                if nuc.isotxsMetadata["fisFlag"] > 0:
                    nuc.isotxsMetadata["chiFlag"] = 1
        return skippedKeys

    def _mergeLibrarySpecificData(self, other, selfContainer, otherContainer, mergedData):
        """Merge file names (via base class) and keep the first available library label."""
        FileMetadata._mergeLibrarySpecificData(self, other, selfContainer, otherContainer, mergedData)
        mergedData["libraryLabel"] = self["libraryLabel"] or other["libraryLabel"]
class RegionXSMetadata(FileMetadata):
    """Metadata for library files containing region cross sections, e.g. ``COMPXS``."""

    def _mergeLibrarySpecificData(self, other, selfContainer, otherContainer, mergedData):
        """Merge region-wise data; all of these quantities combine additively."""
        FileMetadata._mergeLibrarySpecificData(self, other, selfContainer, otherContainer, mergedData)
        additiveKeys = COMPXS_POWER_CONVERSION_FACTORS + [
            "compFamiliesWithPrecursors",
            "numFissComps",
        ]
        for key in additiveKeys:
            mergedData[key] = self[key] + other[key]

    def _getSkippedKeys(self, other, selfContainer, otherContainer, mergedData):
        """Keys handled by ``_mergeLibrarySpecificData`` or invalidated by merging."""
        return {"numComps", "compFamiliesWithPrecursors", "numFissComps"}.union(COMPXS_POWER_CONVERSION_FACTORS)
class NuclideMetadata(_Metadata):
    """Simple dictionary-like store of metadata describing how to read/write a nuclide to/from a file."""
================================================
FILE: armi/nuclearDataIO/tests/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
================================================
FILE: armi/nuclearDataIO/tests/library-file-generation/combine-AA-AB.inp
================================================
$control
c_isotxs_conversion = bin2asc
/
$material
t_composition(:,1) = U235_7 "U235AA" 1.00000E-03 873.000 ! Fuel
U238_7 "U238AA" 1.00000E-03 873.000 ! Fuel
PU2397 "PU39AA" 1.00000E-04 873.000 ! Fuel
FE54_7 "FE54AA" 1.00000E-03 743.000 ! Structure
FE56_7 "FE56AA" 1.00000E-02 743.000 ! Structure
FE57_7 "FE57AA" 1.00000E-04 743.000 ! Structure
FE58_7 "FE58AA" 1.00000E-05 743.000 ! Structure
NA23_7 "NA23AA" 1.00000E-03 738.000 ! Coolant
ZR90_7 "ZR90AA" 1.00000E-03 873.000 ! Composite fission product
ZR91_7 "ZR91AA" 1.00000E-04 873.000 ! Composite fission product
ZR92_7 "ZR92AA" 1.00000E-04 873.000 ! Composite fission product
ZR93_7 "ZR93AA" 1.00000E-15 873.000 ! Fission product
ZR94_7 "ZR94AA" 1.00000E-04 873.000 ! Composite fission product
ZR95_7 "ZR95AA" 1.00000E-15 873.000 ! Fission product
ZR96_7 "ZR96AA" 1.00000E-05 873.000 ! Composite fission product
XE1287 "XE28AA" 1.00000E-15 873.000 ! Fission product
XE1297 "XE29AA" 1.00000E-15 873.000 ! Fission product
XE1307 "XE30AA" 1.00000E-15 873.000 ! Fission product
XE1317 "XE31AA" 1.00000E-15 873.000 ! Fission product
XE1327 "XE32AA" 1.00000E-15 873.000 ! Fission product
XE1337 "XE33AA" 1.00000E-15 873.000 ! Fission product
XE1347 "XE34AA" 1.00000E-15 873.000 ! Fission product
XE1357 "XE35AA" 1.00000E-15 873.000 ! Fission product
XE1367 "XE36AA" 1.00000E-15 873.000 ! Fission product
FP40AA FP40AA 1.0 873.0
U235_7 "U235AB" 1.10000E-03 873.000 ! Fuel
U238_7 "U238AB" 1.10000E-03 873.000 ! Fuel
PU2397 "PU39AB" 1.10000E-04 873.000 ! Fuel
FE54_7 "FE54AB" 1.10000E-03 743.000 ! Structure
FE56_7 "FE56AB" 1.10000E-02 743.000 ! Structure
FE57_7 "FE57AB" 1.10000E-04 743.000 ! Structure
FE58_7 "FE58AB" 1.10000E-05 743.000 ! Structure
NA23_7 "NA23AB" 1.10000E-03 738.000 ! Coolant
ZR90_7 "ZR90AB" 1.10000E-03 873.000 ! Composite fission product
ZR91_7 "ZR91AB" 1.10000E-04 873.000 ! Composite fission product
ZR92_7 "ZR92AB" 1.10000E-04 873.000 ! Composite fission product
ZR93_7 "ZR93AB" 1.10000E-15 873.000 ! Fission product
ZR94_7 "ZR94AB" 1.10000E-04 873.000 ! Composite fission product
ZR95_7 "ZR95AB" 1.10000E-15 873.000 ! Fission product
ZR96_7 "ZR96AB" 1.10000E-05 873.000 ! Composite fission product
XE1287 "XE28AB" 1.10000E-15 873.000 ! Fission product
XE1297 "XE29AB" 1.10000E-15 873.000 ! Fission product
XE1307 "XE30AB" 1.10000E-15 873.000 ! Fission product
XE1317 "XE31AB" 1.10000E-15 873.000 ! Fission product
XE1327 "XE32AB" 1.10000E-15 873.000 ! Fission product
XE1337 "XE33AB" 1.10000E-15 873.000 ! Fission product
XE1347 "XE34AB" 1.10000E-15 873.000 ! Fission product
XE1357 "XE35AB" 1.10000E-15 873.000 ! Fission product
XE1367 "XE36AB" 1.10000E-15 873.000 ! Fission product
FP40AB FP40AB 1.0 873.0
/
$output
c_isotxs_file = "../mc2v3-AA.isotxs" "../mc2v3-AB.isotxs"
/
================================================
FILE: armi/nuclearDataIO/tests/library-file-generation/combine-and-lump-AA-AB.inp
================================================
$control
c_isotxs_conversion = bin2asc
/
$material
t_composition(:,1) = U235_7 "U235AA" 1.00000E-03 873.000 ! Fuel
U238_7 "U238AA" 1.00000E-03 873.000 ! Fuel
PU2397 "PU39AA" 1.00000E-04 873.000 ! Fuel
FE54_7 "FE54AA" 1.00000E-03 743.000 ! Structure
FE56_7 "FE56AA" 1.00000E-02 743.000 ! Structure
FE57_7 "FE57AA" 1.00000E-04 743.000 ! Structure
FE58_7 "FE58AA" 1.00000E-05 743.000 ! Structure
NA23_7 "NA23AA" 1.00000E-03 738.000 ! Coolant
ZR90_7 "ZR90AA" 1.00000E-03 873.000 ! Composite fission product
ZR91_7 "ZR91AA" 1.00000E-04 873.000 ! Composite fission product
ZR92_7 "ZR92AA" 1.00000E-04 873.000 ! Composite fission product
ZR94_7 "ZR94AA" 1.00000E-04 873.000 ! Composite fission product
ZR96_7 "ZR96AA" 1.00000E-05 873.000 ! Composite fission product
FP40AA FP40AA 11.0 873.0
U235_7 "U235AB" 1.10000E-03 873.000 ! Fuel
U238_7 "U238AB" 1.10000E-03 873.000 ! Fuel
PU2397 "PU39AB" 1.10000E-04 873.000 ! Fuel
FE54_7 "FE54AB" 1.10000E-03 743.000 ! Structure
FE56_7 "FE56AB" 1.10000E-02 743.000 ! Structure
FE57_7 "FE57AB" 1.10000E-04 743.000 ! Structure
FE58_7 "FE58AB" 1.10000E-05 743.000 ! Structure
NA23_7 "NA23AB" 1.10000E-03 738.000 ! Coolant
ZR90_7 "ZR90AB" 1.10000E-03 873.000 ! Composite fission product
ZR91_7 "ZR91AB" 1.10000E-04 873.000 ! Composite fission product
ZR92_7 "ZR92AB" 1.10000E-04 873.000 ! Composite fission product
ZR94_7 "ZR94AB" 1.10000E-04 873.000 ! Composite fission product
ZR96_7 "ZR96AB" 1.10000E-05 873.000 ! Composite fission product
FP40AB FP40AB 1.0 873.0
/
$output
c_isotxs_file = "../mc2v3-AA.isotxs" "../mc2v3-AB.isotxs"
c_lump_name( 1) = FP35AA
t_lump_isotope(:, 1) = ZR90_7 1.00000E-03
ZR91_7 1.00000E-02
ZR92_7 1.00000E-02
ZR94_7 1.00000E-02
ZR96_7 1.00000E-02
XE1287 1.00000E-05
XE1297 1.00000E-07
XE1307 1.00000E-04
XE1317 1.00000E-02
XE1327 1.00000E-02
XE1347 1.00000E-02
XE1367 1.00000E-02
ZR93_7 1.00000E-02
ZR95_7 1.00000E-03
XE1357 1.00000E-05
XE1337 1.00000E-04
c_lump_name( 2) = FP38AA
t_lump_isotope(:, 2) = ZR90_7 1.00000E-03
ZR91_7 1.00000E-02
ZR92_7 1.00000E-02
ZR94_7 1.00000E-02
ZR96_7 1.00000E-02
XE1287 1.00000E-05
XE1297 1.00000E-07
XE1307 1.00000E-05
XE1317 1.00000E-02
XE1327 1.00000E-02
XE1347 1.00000E-02
XE1367 1.00000E-02
ZR93_7 1.00000E-02
ZR95_7 1.00000E-03
XE1357 1.00000E-05
XE1337 1.00000E-04
c_lump_name( 3) = FP39AA
t_lump_isotope(:, 3) = ZR90_7 1.00000E-04
ZR91_7 1.00000E-02
ZR92_7 1.00000E-02
ZR94_7 1.00000E-02
ZR96_7 1.00000E-02
XE1287 1.00000E-04
XE1297 1.00000E-07
XE1307 1.00000E-04
XE1317 1.00000E-02
XE1327 1.00000E-02
XE1347 1.00000E-02
XE1367 1.00000E-02
ZR93_7 1.00000E-02
ZR95_7 1.00000E-03
XE1357 1.00000E-05
XE1337 1.00000E-04
c_lump_name( 4) = FP40AA
t_lump_isotope(:, 4) = ZR90_7 1.00000E-04
ZR91_7 1.00000E-02
ZR92_7 1.00000E-02
ZR94_7 1.00000E-02
ZR96_7 1.00000E-02
XE1287 1.00000E-05
XE1297 1.00000E-07
XE1307 1.00000E-04
XE1317 1.00000E-02
XE1327 1.00000E-02
XE1347 1.00000E-02
XE1367 1.00000E-02
ZR93_7 1.00000E-02
ZR95_7 1.00000E-03
XE1357 1.00000E-05
XE1337 1.00000E-04
c_lump_name( 5) = FP41AA
t_lump_isotope(:, 5) = ZR90_7 1.00000E-04
ZR91_7 1.00000E-02
ZR92_7 1.00000E-02
ZR94_7 1.00000E-02
ZR96_7 1.00000E-02
XE1287 1.00000E-05
XE1297 1.00000E-07
XE1307 1.00000E-04
XE1317 1.00000E-02
XE1327 1.00000E-02
XE1347 1.00000E-02
XE1367 1.00000E-02
ZR93_7 1.00000E-02
ZR95_7 1.00000E-03
XE1357 1.00000E-05
XE1337 1.00000E-04
/
================================================
FILE: armi/nuclearDataIO/tests/library-file-generation/mc2v3-AA.inp
================================================
$control
c_group_structure = ANL33
i_number_region = 1
l_external_inelasticpn = F
c_geometry_type = mixture
l_buckling_search = T
r_eps_buckling = 0.00001
l_gamma = T
/
$library
c_mcclibdir ="\\path\to\mc2\3.2.2\libraries\endfb-vii.0\lib.mcc.e70"
c_gammalibdir = "\\path\to\mc2\3.2.2\libraries\endfb-vii.0\lib.gamma.e70"
/
$material
t_composition(:,1) = U235_7 "U235AA" 1.00000E-03 873.000 ! Fuel
U238_7 "U238AA" 1.00000E-03 873.000 ! Fuel
PU2397 "PU39AA" 1.00000E-04 873.000 ! Fuel
FE54_7 "FE54AA" 1.00000E-03 743.000 ! Structure
FE56_7 "FE56AA" 1.00000E-02 743.000 ! Structure
FE57_7 "FE57AA" 1.00000E-04 743.000 ! Structure
FE58_7 "FE58AA" 1.00000E-05 743.000 ! Structure
NA23_7 "NA23AA" 1.00000E-03 738.000 ! Coolant
ZR90_7 "ZR90AA" 1.00000E-03 873.000 ! Composite fission product
ZR91_7 "ZR91AA" 1.00000E-04 873.000 ! Composite fission product
ZR92_7 "ZR92AA" 1.00000E-04 873.000 ! Composite fission product
ZR93_7 "ZR93AA" 1.00000E-15 873.000 ! Fission product
ZR94_7 "ZR94AA" 1.00000E-04 873.000 ! Composite fission product
ZR95_7 "ZR95AA" 1.00000E-15 873.000 ! Fission product
ZR96_7 "ZR96AA" 1.00000E-05 873.000 ! Composite fission product
XE1287 "XE28AA" 1.00000E-15 873.000 ! Fission product
XE1297 "XE29AA" 1.00000E-15 873.000 ! Fission product
XE1307 "XE30AA" 1.00000E-15 873.000 ! Fission product
XE1317 "XE31AA" 1.00000E-15 873.000 ! Fission product
XE1327 "XE32AA" 1.00000E-15 873.000 ! Fission product
XE1337 "XE33AA" 1.00000E-15 873.000 ! Fission product
XE1347 "XE34AA" 1.00000E-15 873.000 ! Fission product
XE1357 "XE35AA" 1.00000E-15 873.000 ! Fission product
XE1367 "XE36AA" 1.00000E-15 873.000 ! Fission product
/
$output
l_edit_flux = T
c_check_memory = "long"
c_lump_name( 1) = FP40AA
t_lump_isotope(:, 1) = ZR90_7 0.090
ZR91_7 0.091
ZR92_7 0.092
ZR93_7 0.093
ZR94_7 0.094
ZR95_7 0.095
ZR96_7 0.096
XE1287 0.128
XE1297 0.129
XE1307 0.130
XE1317 0.131
XE1327 0.132
XE1337 0.133
XE1347 0.134
XE1357 0.135
XE1367 0.136
/
================================================
FILE: armi/nuclearDataIO/tests/library-file-generation/mc2v3-AB.inp
================================================
$control
c_group_structure = ANL33
i_number_region = 1
l_external_inelasticpn = F
c_geometry_type = mixture
l_buckling_search = T
r_eps_buckling = 0.00001
l_gamma = T
/
$library
c_mcclibdir ="\\path\to\mc2\3.2.2\libraries\endfb-vii.0\lib.mcc.e70"
c_gammalibdir = "\\path\to\mc2\3.2.2\libraries\endfb-vii.0\lib.gamma.e70"
/
$material
t_composition(:,1) = U235_7 "U235AB" 1.10000E-03 873.000 ! Fuel
U238_7 "U238AB" 1.10000E-03 873.000 ! Fuel
PU2397 "PU39AB" 1.10000E-04 873.000 ! Fuel
FE54_7 "FE54AB" 1.10000E-03 743.000 ! Structure
FE56_7 "FE56AB" 1.10000E-02 743.000 ! Structure
FE57_7 "FE57AB" 1.10000E-04 743.000 ! Structure
FE58_7 "FE58AB" 1.10000E-05 743.000 ! Structure
NA23_7 "NA23AB" 1.10000E-03 738.000 ! Coolant
ZR90_7 "ZR90AB" 1.10000E-03 873.000 ! Composite fission product
ZR91_7 "ZR91AB" 1.10000E-04 873.000 ! Composite fission product
ZR92_7 "ZR92AB" 1.10000E-04 873.000 ! Composite fission product
ZR93_7 "ZR93AB" 1.10000E-15 873.000 ! Fission product
ZR94_7 "ZR94AB" 1.10000E-04 873.000 ! Composite fission product
ZR95_7 "ZR95AB" 1.10000E-15 873.000 ! Fission product
ZR96_7 "ZR96AB" 1.10000E-05 873.000 ! Composite fission product
XE1287 "XE28AB" 1.10000E-15 873.000 ! Fission product
XE1297 "XE29AB" 1.10000E-15 873.000 ! Fission product
XE1307 "XE30AB" 1.10000E-15 873.000 ! Fission product
XE1317 "XE31AB" 1.10000E-15 873.000 ! Fission product
XE1327 "XE32AB" 1.10000E-15 873.000 ! Fission product
XE1337 "XE33AB" 1.10000E-15 873.000 ! Fission product
XE1347 "XE34AB" 1.10000E-15 873.000 ! Fission product
XE1357 "XE35AB" 1.10000E-15 873.000 ! Fission product
XE1367 "XE36AB" 1.10000E-15 873.000 ! Fission product
/
$output
c_check_memory = "long"
c_lump_name( 1) = FP40AB
t_lump_isotope(:, 1) = ZR90_7 0.090
ZR91_7 0.091
ZR92_7 0.092
ZR93_7 0.093
ZR94_7 0.094
ZR95_7 0.095
ZR96_7 0.096
XE1287 0.128
XE1297 0.129
XE1307 0.130
XE1317 0.131
XE1327 0.132
XE1337 0.133
XE1347 0.134
XE1357 0.135
XE1367 0.136
/
================================================
FILE: armi/nuclearDataIO/tests/simple_hexz.inp
================================================
BLOCK=STP021,3
UNFORM=A.DIF3D
01 3D Hex-Z to generate NHFLUX file
02 10000 1800000
03 0 0
04 1 0 0 00 110 10 100 1
05 1.0E-7 1.0E-5 1.0E-5
06 1.0 0.001 0.04 1.0
UNFORM=A.NIP3
01 3D Hex-Z core
02 0 1
03 120 $ full core in plane
04 4 4 4 4 4 4
09 Z 1 12.0
09 Z 4 60.0
09 Z 1 72.0
14 M1 I1 1.0
14 M4 I4 1.0
15 M1 IC
15 M4 AB
29 12.0
30 AB 1 0 0 0.0 72.0
30 IC 1 0 0 12.0 60.0
30 AB 2 0 0 0.0 72.0
30 IC 2 0 0 12.0 60.0
30 AB 3 0 0 0.0 72.0
30 IC 3 0 0 12.0 60.0
NOSORT=A.ISO
0V ISOTXS *GFK 3D BNCH * 1
1D 4 6 0 3 0 1 1 1
2D *NA COOLED FBR BENCHMARK FOUR GROUP CROSS SECTIONS *
* * I1 I2 I3 I4 I5 I6
0.768 0.232 0.0 0.0
1.72336E+09 4.02463E+08 7.97003E+07 3.15946E+07 1.05 E+07 8.00 E+05
10000. 1000. 0.0
0 3 6 9 12 15
4D I1 GFK 1
100. 0.0 0.0 0.0 0.0 0.0
0 0 1 0 0 0 0 0 1 1 0 200
1 1 2 3 4 1 1 1 1
5D .11587 .21220 .46137 .34571 .11587
.21220 .46137 .34571 .69059 E-03 1.83076E-03 .92948 E-02
.17305 E-01 .39123 E-02 .18286 E-02 .36334 E-02 .92415 E-02 3.03607
2.91217 2.88187 2.87951
7D 0.0 0.0 .023597 0.0 .16153 E-02
.40791 E-05 0.0 .46838 E-02 .42309 E-07 .44493 E-07
4D I2 GFK 1
100. 0.0 0.0 0.0 0.0 0.0
0 0 1 0 0 0 0 0 1 1 0 200
1 1 2 3 4 1 1 1 1
5D .11588 .21213 .46770 .35349 .11588
.21213 .46770 .35349 .66221 E-031.83956 E-03 1.00354E-02
.20476 E-01 .48531 E-02 .26377 E-02 .51332 E-02 .13238 E-01 3.07906
2.91493 2.88495 2.88254
7D 0.0 0.0 .023262 0.0 .15718 E-02
.46451 E-05 0.0 .43414 E-02 .40724 E-07 .49968 E-07
4D I3 GFK 1
100. 0.0 0.0 0.0 0.0 0.0
0 0 1 0 0 0 0 0 1 1 0 200
1 1 2 3 4 1 1 1 1
5D .14584 .28443 .52703 .40732 .14584
.28443 .52703 .40732 1.11527E-03 3.06346E-03 1.00212E-02
.129995E-01 .27688 E-02 .44347 E-04 .12274 E-03 .34952 E-03 2.796410
2.44098 2.42317 2.42295
7D 0.0 0.0 .032071 0.0 .27776 E-02
.38880 E-05 0.0 .58971 E-02 .90018 E-07 .45039 E-07
4D I4 GFK 1
100. 0.0 0.0 0.0 0.0 0.0
0 0 1 0 0 0 0 0 1 1 0 200
1 1 2 3 4 1 1 1 1
5D .12270 .23133 .46274 .33749 .12270
.23133 .46274 .33749 8.2278 E-04 2.17087E-03 7.64083E-03
.97185 E-02 .19453 E-02 .31065 E-04 .87566 E-04 .23769 E-03 2.79026
2.441880 2.42309 2.42299
7D 0.0 0.0 .026322 0.0 .22889 E-02
.28907 E-05 0.0 .53536 E-02 .62133 E-07 .33248 E-07
4D I5 GFK 1
100. 0.0 0.0 0.0 0.0 0.0
0 0 0 0 0 0 0 0 1 1 0 200
1 1 2 3 4 1 1 1 1
5D .13317 .25355 .58044 .54168 .13317
.25355 .58044 .54168 .186696E-02 .126433E-01 .634405E-01
.16868
7D 0.0 0.0 .022946 0.0 .37687 E-02
.10320 E-05 0.0 .86815 E-02 .70361 E-11 .10489 E-07
4D I6 GFK 1
100. 0.0 0.0 0.0 0.0 0.0
0 0 0 0 0 0 0 0 1 1 0 200
1 1 2 3 4 1 1 1 1
5D .072206 .11487 .32642 .19272 .072206
.11487 .32642 .19272 .216305E-03 .16880 E-03 .11468 E-02
.78660 E-03
7D 0.0 0.0 .012942 0.0 .12871 E-02
.68780 E-06 0.0 .34533 E-02 .43633 E-11 .69903 E-08
================================================
FILE: armi/nuclearDataIO/tests/test_xsCollections.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module that tests methods within xsCollections."""
import os
import unittest
from armi import settings
from armi.nuclearDataIO import isotxs, xsCollections
from armi.reactor.blocks import HexBlock
from armi.tests import ISOAA_PATH
from armi.utils.directoryChangers import TemporaryDirectoryChanger
from armi.utils.plotting import plotNucXs
class TestXsCollections(unittest.TestCase):
    """Tests of macroscopic XS creation and micro XS manipulation, using the ISOAA fixture library."""

    @classmethod
    def setUpClass(cls):
        # Read the binary ISOTXS fixture once for the whole class.
        # NOTE(review): some tests mutate this shared library in place
        # (see test_totalScatteringMatrixWithMissingData) — confirm test isolation.
        cls.microLib = isotxs.readBinary(ISOAA_PATH)

    def setUp(self):
        # Fresh macro XS creator and a mock block with two nuclides per test.
        self.mc = xsCollections.MacroscopicCrossSectionCreator(minimumNuclideDensity=1e-13)
        self.block = MockBlock()
        self.block.setNumberDensity("U235", 0.02)
        self.block.setNumberDensity("FE", 0.01)

    def test_genTotScatteringMatrix(self):
        """Generates the total scattering matrix by summing elastic, inelastic, and n2n scattering matrices."""
        nuc = self.microLib.nuclides[0]
        totalScatter = nuc.micros.getTotalScatterMatrix()
        # n2n is weighted by 2.0 in the expected value.
        self.assertAlmostEqual(
            totalScatter[0, 0],
            (nuc.micros.elasticScatter[0, 0] + nuc.micros.inelasticScatter[0, 0] + 2.0 * nuc.micros.n2nScatter[0, 0]),
        )

    def test_totalScatteringMatrixWithMissingData(self):
        """
        Generates the total scattering matrix by summing elastic and n2n scattering matrices.

        Notes
        -----
        This tests that the total scattering matrix can be produced when the inelastic scattering matrix is not defined.
        """
        nuc = self.microLib.nuclides[0]
        nuc.micros.inelasticScatter = None
        totalScatter = nuc.micros.getTotalScatterMatrix()
        self.assertAlmostEqual(
            totalScatter[0, 0],
            (nuc.micros.elasticScatter[0, 0] + 2.0 * nuc.micros.n2nScatter[0, 0]),
        )

    def test_plotNucXs(self):
        """Testing this plotting method here because we need a XS library to run the test."""
        fName = "test_plotNucXs.png"
        with TemporaryDirectoryChanger():
            plotNucXs(self.microLib, "U235AA", "fission", fName=fName)
            self.assertTrue(os.path.exists(fName))

    def test_createMacrosFromMicros(self):
        """Test calculating macroscopic cross sections from microscopic cross sections.

        .. test:: Compute macroscopic cross sections from microscopic cross sections and number densities.
            :id: T_ARMI_NUCDATA_MACRO
            :tests: R_ARMI_NUCDATA_MACRO
        """
        self.assertEqual(self.mc.minimumNuclideDensity, 1e-13)
        self.mc.createMacrosFromMicros(self.microLib, self.block)
        # Recompute the expected macros by hand from micro XS * number density.
        totalMacroFissionXs = 0.0
        totalMacroAbsXs = 0.0
        for nuc, density in self.mc.densities.items():
            nuclideXS = self.mc.microLibrary.getNuclide(nuc, "AA")
            for microXs in nuclideXS.micros.fission:
                totalMacroFissionXs += microXs * density
            for microXsName in xsCollections.ABSORPTION_XS:
                for microXs in getattr(nuclideXS.micros, microXsName):
                    totalMacroAbsXs += microXs * density
        self.assertAlmostEqual(sum(self.mc.macros.fission), totalMacroFissionXs)
        self.assertAlmostEqual(sum(self.mc.macros.absorption), totalMacroAbsXs)

    def test_collapseCrossSection(self):
        """
        Tests cross section collapsing.

        Notes
        -----
        The expected 1 group cross section was generated by running the collapse cross section method. This tests
        that this method has not been modified to produce a different result (regression check).
        """
        expected1gXs = 2.35725262208
        micros = self.microLib["U235AA"].micros
        flux = list(reversed(range(33)))
        self.assertAlmostEqual(micros.collapseCrossSection(micros.nGamma, flux), expected1gXs)
class MockReactor:
    """Minimal reactor stand-in exposing only ``blueprints`` and ``spatialGrid``."""

    def __init__(self):
        self.spatialGrid = None
        self.blueprints = MockBlueprints()
class MockBlueprints:
    # This is only needed for ``allNuclidesInProblem``; attributes were acting funky,
    # so every attribute lookup on this object simply returns the nuclide-name list.
    def __getattribute__(self, *args, **kwargs):
        return ["U235", "U235", "FE", "NA23"]
class MockBlock(HexBlock):
    """Hex block stand-in whose number densities are held in a plain, directly settable dict."""

    def __init__(self, name=None, cs=None):
        # density must exist before HexBlock.__init__ runs.
        self.density = {}
        HexBlock.__init__(self, name or "MockBlock", cs or settings.Settings())
        self.r = MockReactor()

    @property
    def r(self):
        return self._r

    @r.setter
    def r(self, r):
        self._r = r

    def getVolume(self, *args, **kwargs):
        """Return a unit volume so densities double as totals."""
        return 1.0

    def getNuclideNumberDensities(self, nucNames):
        """Return number densities in atoms/barn-cm for the requested nuclide names."""
        densities = self.density
        return [densities.get(name, 0.0) for name in nucNames]

    def _getNdensHelper(self):
        """Return a copy of the nuclide-to-density mapping."""
        return dict(self.density)

    def setNumberDensity(self, key, val, *args, **kwargs):
        """Set the number density of one nuclide."""
        self.density[key] = val

    def getNuclides(self):
        """Return the nuclide names present in this armi block."""
        return self.density.keys()
================================================
FILE: armi/nuclearDataIO/tests/test_xsLibraries.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for xsLibraries.IsotxsLibrary."""
import copy
import filecmp
import os
import pickle
import traceback
import unittest
from time import sleep
import numpy as np
from armi.nucDirectory.nuclideBases import NuclideBases
from armi.nuclearDataIO import xsLibraries
from armi.nuclearDataIO.cccc import gamiso, isotxs, pmatrx
from armi.tests import mockRunLogs
from armi.utils import properties
from armi.utils.directoryChangers import TemporaryDirectoryChanger
# test input pathing
THIS_DIR = os.path.dirname(__file__)
RUN_DIR = os.path.join(THIS_DIR, "library-file-generation")
FIXTURE_DIR = os.path.join(THIS_DIR, "fixtures")

# specific test files: AA/AB fixture libraries plus their merged and lumped combinations
GAMISO_AA = os.path.join(FIXTURE_DIR, "AA.gamiso")
GAMISO_AA_AB = os.path.join(FIXTURE_DIR, "combined-AA-AB.gamiso")
GAMISO_AB = os.path.join(FIXTURE_DIR, "AB.gamiso")
GAMISO_LUMPED = os.path.join(FIXTURE_DIR, "combined-and-lumped-AA-AB.gamiso")
ISOTXS_AA = os.path.join(FIXTURE_DIR, "ISOAA")
ISOTXS_AA_AB = os.path.join(FIXTURE_DIR, "combined-AA-AB.isotxs")
ISOTXS_AB = os.path.join(FIXTURE_DIR, "ISOAB")
ISOTXS_LUMPED = os.path.join(FIXTURE_DIR, "combined-and-lumped-AA-AB.isotxs")
PMATRX_AA = os.path.join(FIXTURE_DIR, "AA.pmatrx")
PMATRX_AA_AB = os.path.join(FIXTURE_DIR, "combined-AA-AB.pmatrx")
PMATRX_AB = os.path.join(FIXTURE_DIR, "AB.pmatrx")
PMATRX_LUMPED = os.path.join(FIXTURE_DIR, "combined-and-lumped-AA-AB.pmatrx")
UFG_FLUX_EDIT = os.path.join(FIXTURE_DIR, "mc2v3-AA.flux_ufg")

# CCCC fixtures are less fancy than these merging ones; they live with the cccc package tests.
FIXTURE_DIR_CCCC = os.path.join(os.path.dirname(isotxs.__file__), "tests", "fixtures")
DLAYXS_MCC3 = os.path.join(FIXTURE_DIR_CCCC, "mc2v3.dlayxs")
class TempFileMixin:
    """Test tooling mixin: run each test inside a temporary directory and supply a unique nucdata path."""

    def setUp(self):
        self.td = TemporaryDirectoryChanger()
        self.td.__enter__()

    def tearDown(self):
        self.td.__exit__(None, None, None)

    @property
    def testFileName(self):
        # Unique per test class + method so parallel tests never collide.
        fileName = "{}-{}.nucdata".format(type(self).__name__, self._testMethodName)
        return os.path.join(self.td.destination, fileName)
class TestXSLibrary(TempFileMixin, unittest.TestCase):
    """Pickling, comparison, and merge tests against the AA ISOTXS/GAMISO/PMATRX fixture libraries."""

    @classmethod
    def setUpClass(cls):
        # Read each fixture library once, then merge deep copies into one combined library.
        cls.isotxsAA = isotxs.readBinary(ISOTXS_AA)
        cls.gamisoAA = gamiso.readBinary(GAMISO_AA)
        cls.pmatrxAA = pmatrx.readBinary(PMATRX_AA)
        cls.xsLib = xsLibraries.IsotxsLibrary()
        cls.xsLibGenerationErrorStack = None
        try:
            cls.xsLib.merge(copy.deepcopy(cls.isotxsAA))
            cls.xsLib.merge(copy.deepcopy(cls.gamisoAA))
            cls.xsLib.merge(copy.deepcopy(cls.pmatrxAA))
        except Exception:
            # Defer the failure: tests needing the merged library re-raise with this trace.
            cls.xsLibGenerationErrorStack = traceback.format_exc()

    def test_canPickleAndUnpickleISOTXS(self):
        """An ISOTXS library survives a pickle round trip unchanged."""
        pikAA = pickle.loads(pickle.dumps(self.isotxsAA))
        self.assertTrue(xsLibraries.compare(pikAA, self.isotxsAA))

    def test_canPickleAndUnpickleGAMISO(self):
        """A GAMISO library survives a pickle round trip unchanged."""
        pikAA = pickle.loads(pickle.dumps(self.gamisoAA))
        self.assertTrue(xsLibraries.compare(pikAA, self.gamisoAA))

    def test_canPickleAndUnpicklePMATRX(self):
        """A PMATRX library survives a pickle round trip unchanged."""
        pikAA = pickle.loads(pickle.dumps(self.pmatrxAA))
        self.assertTrue(xsLibraries.compare(pikAA, self.pmatrxAA))

    def test_compareWorks(self):
        """compare() is reflexive and detects a removed nuclide."""
        self.assertTrue(xsLibraries.compare(self.isotxsAA, self.isotxsAA))
        self.assertTrue(xsLibraries.compare(self.pmatrxAA, self.pmatrxAA))
        aa = isotxs.readBinary(ISOTXS_AA)
        del aa[aa.nuclideLabels[0]]
        self.assertFalse(xsLibraries.compare(aa, self.isotxsAA))

    def test_compareComponentsOfXSLibrary(self):
        """Compare different components of a XS library."""
        self.assertTrue(xsLibraries.compare(self.isotxsAA, self.isotxsAA))
        self.assertTrue(xsLibraries.compare(self.pmatrxAA, self.pmatrxAA))
        aa = isotxs.readBinary(ISOTXS_AA)
        del aa[aa.nuclideLabels[0]]
        self.assertFalse(xsLibraries.compare(aa, self.isotxsAA))

    def test_mergeFailsWithNonIsotxsFiles(self):
        """Merging a directory rejects files that are not valid ISOTXS, with a useful log message."""
        # An ISO-prefixed file with non-ISOTXS contents raises, and the file is named in the log.
        dummyFileName = "ISOSOMEFILE"
        with open(dummyFileName, "w") as someFile:
            someFile.write("hi")
        try:
            with mockRunLogs.BufferLog() as log:
                lib = xsLibraries.IsotxsLibrary()
                with self.assertRaises(OSError):
                    xsLibraries.mergeXSLibrariesInWorkingDirectory(lib, "ISOTXS", "")
                self.assertIn(dummyFileName, log.getStdout())
        finally:
            os.remove(dummyFileName)
        # An ISO-prefixed name that breaks the file-name regex is skipped with a warning.
        with TemporaryDirectoryChanger():
            dummyFileName = "ISO[]"
            with open(dummyFileName, "w") as file:
                file.write(
                    "This is a file that starts with the letters 'ISO' but will break the regular expression search."
                )
            try:
                with mockRunLogs.BufferLog() as log:
                    lib = xsLibraries.IsotxsLibrary()
                    xsLibraries.mergeXSLibrariesInWorkingDirectory(lib)
                    self.assertIn(f"{dummyFileName} in the merging of ISOXX files", log.getStdout())
            finally:
                pass

    def _xsLibraryAttributeHelper(
        self,
        lib,
        neutronEnergyLength,
        neutronVelLength,
        gammaEnergyLength,
        neutronDoseLength,
        gammaDoseLength,
    ):
        """Assert each library attribute has the given length; a length of 0 means the attribute must be unset."""
        for attrName, listLength in [
            ("neutronEnergyUpperBounds", neutronEnergyLength),
            ("neutronVelocity", neutronVelLength),
            ("gammaEnergyUpperBounds", gammaEnergyLength),
            ("neutronDoseConversionFactors", neutronDoseLength),
            ("gammaDoseConversionFactors", gammaDoseLength),
        ]:
            if listLength > 0:
                self.assertEqual(listLength, len(getattr(lib, attrName)))
            else:
                # Unset immutable properties raise on access.
                with self.assertRaises(properties.ImmutablePropertyError):
                    print(f"Getting the value {attrName}")
                    print(getattr(lib, attrName))

    def test_isotxsLibraryAttributes(self):
        """ISOTXS carries neutron group data only."""
        self._xsLibraryAttributeHelper(
            self.isotxsAA,
            neutronEnergyLength=33,
            neutronVelLength=33,
            gammaEnergyLength=0,
            neutronDoseLength=0,
            gammaDoseLength=0,
        )

    def test_gamisoLibraryAttributes(self):
        """GAMISO carries gamma group data only."""
        self._xsLibraryAttributeHelper(
            self.gamisoAA,
            neutronEnergyLength=0,
            neutronVelLength=0,
            gammaEnergyLength=21,
            neutronDoseLength=0,
            gammaDoseLength=0,
        )

    def test_pmatrxLibraryAttributes(self):
        """PMATRX carries both neutron and gamma energy bounds."""
        self._xsLibraryAttributeHelper(
            self.pmatrxAA,
            neutronEnergyLength=33,
            neutronVelLength=0,
            gammaEnergyLength=21,
            neutronDoseLength=0,
            gammaDoseLength=0,
        )

    def test_mergeXSLibrariesWithDifferentDataWorks(self):
        """The merged library matches each component on its own data but differs from each overall."""
        if self.xsLibGenerationErrorStack is not None:
            print(self.xsLibGenerationErrorStack)
            raise Exception("see stdout for stack trace")
        # check to make sure the labels overlap, or are actually the same
        labels = set(self.xsLib.nuclideLabels)
        self.assertEqual(labels, set(self.isotxsAA.nuclideLabels))
        self.assertEqual(labels, set(self.gamisoAA.nuclideLabels))
        self.assertEqual(labels, set(self.pmatrxAA.nuclideLabels))
        # the whole thing is different from the sum of its components
        self.assertFalse(xsLibraries.compare(self.xsLib, self.isotxsAA))
        self.assertFalse(xsLibraries.compare(self.xsLib, self.gamisoAA))
        self.assertFalse(xsLibraries.compare(self.xsLib, self.pmatrxAA))
        # individual components are the same
        self.assertTrue(isotxs.compare(self.xsLib, self.isotxsAA))
        self.assertTrue(gamiso.compare(self.xsLib, self.gamisoAA))
        self.assertTrue(pmatrx.compare(self.xsLib, self.pmatrxAA))

    def test_canWriteIsotxsFromCombinedXSLibrary(self):
        self._canWritefromCombined(isotxs, ISOTXS_AA)

    def test_canWriteGamisoFromCombinedXSLibrary(self):
        self._canWritefromCombined(gamiso, GAMISO_AA)

    def test_canWritePmatrxFromCombinedXSLibrary(self):
        self._canWritefromCombined(pmatrx, PMATRX_AA)

    def _canWritefromCombined(self, writer, refFile):
        """Write the combined library with ``writer`` and require byte-equality with the reference file."""
        if self.xsLibGenerationErrorStack is not None:
            print(self.xsLibGenerationErrorStack)
            raise Exception("See stdout for stack trace")
        # check to make sure the labels overlap, or are actually the same
        writer.writeBinary(self.xsLib, self.testFileName)
        self.assertTrue(filecmp.cmp(refFile, self.testFileName))
class TestGetISOTXSFilesWorkDir(unittest.TestCase):
def test_getISOTXSFilesWithoutLibrarySuffix(self):
shouldBeThere = ["ISOAA", "ISOBA", os.path.join("file-path", "ISOCA")]
shouldNotBeThere = [
"ISOBA-n2",
"ISOTXS",
"ISOTXS-c2",
"dummyISOTXS",
"ISOTXS.BCD",
"ISOAA.BCD",
]
filesInDirectory = shouldBeThere + shouldNotBeThere
toMerge = xsLibraries.getISOTXSLibrariesToMerge("", filesInDirectory)
self.assert_contains_only(toMerge, shouldBeThere, shouldNotBeThere)
def test_getISOTXSFilesWithLibrarySuffix(self):
shouldBeThere = [
"ISOAA-n23",
"ISOAAF-n23",
"ISOBA-n23",
"ISODA",
os.path.join("file-path", "ISOCA-n23"),
]
shouldNotBeThere = [
"ISOAA",
"ISOAA-n24",
"ISOBA-ISO",
"ISOBA-n2",
"ISOBA",
"ISOTXS",
"ISOTXS-c2",
"dummyISOTXS",
"ISOTXS.BCD",
"ISOAA.BCD",
"ISOCA-doppler",
"ISOSA-void",
os.path.join("file-path", "ISOCA"),
]
filesInDirectory = shouldBeThere + shouldNotBeThere
toMerge = xsLibraries.getISOTXSLibrariesToMerge("-n23", filesInDirectory)
self.assert_contains_only(toMerge, shouldBeThere, shouldNotBeThere)
def assert_contains_only(self, container, shouldBeThere, shouldNotBeThere):
"""
Utility method for saying what things contain.
This could just check the contents and length, but the error produced from shouldNotBeThere is much nicer.
"""
container = set(container)
self.assertEqual(container, set(shouldBeThere))
self.assertEqual(set(), container & set(shouldNotBeThere))
class AbstractTestXSlibraryMerging(TempFileMixin):
    """
    A shared class that defines tests that should be true for all IsotxsLibrary merging.

    Notes
    -----
    This is a base class; it is not run directly. Concrete subclasses supply the
    format-specific read/write functions and fixture paths via the ``get*`` hooks.
    """

    def _readFileAttempts(self, path):
        """Run the file read a few times, because sometimes GitHub CI is flaky with these tests."""
        maxAttempts = 5
        for attempt in range(maxAttempts):
            try:
                return self.getReadFunc()(path)
            except OSError:
                if attempt >= (maxAttempts - 1):
                    # retries exhausted; re-raise the active exception with its original traceback
                    raise
                sleep(1)

    def setUp(self):
        TempFileMixin.setUp(self)
        # Load a library in the ARMI tree. This should be a small library with LFPs, Actinides, structure, and coolant.
        self.libAA = self._readFileAttempts(self.getLibAAPath())
        self.libAB = self._readFileAttempts(self.getLibABPath())
        self.libCombined = self._readFileAttempts(self.getLibAA_ABPath())
        self.libLumped = self._readFileAttempts(self.getLibLumpedPath())
        self.nuclideBases = NuclideBases()

    def getErrorType(self):
        """Return the exception type a bad read is expected to raise; subclasses must implement."""
        raise NotImplementedError()

    def getReadFunc(self):
        """Return the binary read function for this library format; subclasses must implement."""
        raise NotImplementedError()

    def getWriteFunc(self):
        """Return the binary write function for this library format; subclasses must implement."""
        raise NotImplementedError()

    def getLibAAPath(self):
        """Return the path of the AA-region fixture library; subclasses must implement."""
        raise NotImplementedError()

    def getLibABPath(self):
        """Return the path of the AB-region fixture library; subclasses must implement."""
        raise NotImplementedError()

    def getLibAA_ABPath(self):
        """Return the path of the combined AA+AB fixture library; subclasses must implement."""
        raise NotImplementedError()

    def getLibLumpedPath(self):
        """Return the path of the lumped-fission-product fixture library; subclasses must implement."""
        raise NotImplementedError()

    def test_mergeXSLibSameNucNames(self):
        """Cannot merge XS libraries with the same nuclide names."""
        # overlapping nuclide labels must fail regardless of merge direction
        with self.assertRaises(AttributeError):
            self.libAA.merge(self.libCombined)
        with self.assertRaises(AttributeError):
            self.libAA.merge(self.libAA)
        with self.assertRaises(AttributeError):
            self.libCombined.merge(self.libAA)

    def test_mergeXSLibxDiffGroupStructure(self):
        """Cannot merge XS libraries with different group structure."""
        dummyXsLib = xsLibraries.IsotxsLibrary()
        dummyXsLib.neutronEnergyUpperBounds = np.array([1, 2, 3])
        dummyXsLib.gammaEnergyUpperBounds = np.array([1, 2, 3])
        with self.assertRaises(properties.ImmutablePropertyError):
            dummyXsLib.merge(self.libCombined)

    def test_mergeEmptyXSLibWithClones(self):
        """Merge empty XS libraries with clones of others."""
        emptyXSLib = xsLibraries.IsotxsLibrary()
        emptyXSLib.merge(self.libAA)
        self.libAA = None
        self.getWriteFunc()(emptyXSLib, self.testFileName)
        # brief pause so the written file is fully flushed before comparison (CI flakiness)
        sleep(1)
        self.assertTrue(os.path.exists(self.testFileName))
        self.assertGreater(os.path.getsize(self.testFileName), 0)
        self.assertTrue(filecmp.cmp(self.getLibAAPath(), self.testFileName))

    def test_mergeTwoXSLibFiles(self):
        """Merging the AA and AB libraries reproduces the reference combined file byte-for-byte."""
        emptyXSLib = xsLibraries.IsotxsLibrary()
        emptyXSLib.merge(self.libAA)
        self.libAA = None
        emptyXSLib.merge(self.libAB)
        self.libAB = None
        self.assertEqual(set(self.libCombined.nuclideLabels), set(emptyXSLib.nuclideLabels))
        self.assertTrue(xsLibraries.compare(emptyXSLib, self.libCombined))
        self.getWriteFunc()(emptyXSLib, self.testFileName)
        sleep(1)
        self.assertTrue(os.path.exists(self.testFileName))
        self.assertGreater(os.path.getsize(self.testFileName), 0)
        self.assertTrue(filecmp.cmp(self.getLibAA_ABPath(), self.testFileName))
class TestPmatrxMerge(AbstractTestXSlibraryMerging, unittest.TestCase):
    """PMATRX-specific library merging tests."""

    def getErrorType(self):
        # Fixed: this is a getter; it previously did ``raise OSError``, which would
        # raise on every call instead of returning the expected-error type.
        return OSError

    def getReadFunc(self):
        return pmatrx.readBinary

    def getWriteFunc(self):
        return pmatrx.writeBinary

    def getLibAAPath(self):
        return PMATRX_AA

    def getLibABPath(self):
        return PMATRX_AB

    def getLibAA_ABPath(self):
        return PMATRX_AA_AB

    def getLibLumpedPath(self):
        return PMATRX_LUMPED

    def test_cannotMergeXSLibsWithDiffGammaGroups(self):
        """Test that we cannot merge XS Libs with different Gamma Group Structures."""
        dummyXsLib = xsLibraries.IsotxsLibrary()
        dummyXsLib.gammaEnergyUpperBounds = np.array([1, 2, 3])
        with self.assertRaises(properties.ImmutablePropertyError):
            dummyXsLib.merge(self.libCombined)
class TestIsotxsMerge(AbstractTestXSlibraryMerging, unittest.TestCase):
    """ISOTXS-specific library merging tests."""

    def getErrorType(self):
        # Fixed: this is a getter; it previously did ``raise OSError``, which would
        # raise on every call instead of returning the expected-error type.
        return OSError

    def getReadFunc(self):
        return isotxs.readBinary

    def getWriteFunc(self):
        return isotxs.writeBinary

    def getLibAAPath(self):
        return ISOTXS_AA

    def getLibABPath(self):
        return ISOTXS_AB

    def getLibAA_ABPath(self):
        return ISOTXS_AA_AB

    def getLibLumpedPath(self):
        return ISOTXS_LUMPED

    def test_canRemoveIsotopes(self):
        """Deleting the individual Zr/Xe isotopes reproduces the lumped-fission-product reference file."""
        emptyXSLib = xsLibraries.IsotxsLibrary()
        emptyXSLib.merge(self.libAA)
        self.libAA = None
        emptyXSLib.merge(self.libAB)
        self.libAB = None
        for nucId in [
            "ZR93_7",
            "ZR95_7",
            "XE1287",
            "XE1297",
            "XE1307",
            "XE1317",
            "XE1327",
            "XE1337",
            "XE1347",
            "XE1357",
            "XE1367",
        ]:
            nucLabel = self.nuclideBases.byMcc3Id[nucId].label
            # remove the nuclide from both XS-type regions
            del emptyXSLib[nucLabel + "AA"]
            del emptyXSLib[nucLabel + "AB"]
        self.assertEqual(set(self.libLumped.nuclideLabels), set(emptyXSLib.nuclideLabels))
        self.getWriteFunc()(emptyXSLib, self.testFileName)
        self.assertTrue(filecmp.cmp(self.getLibLumpedPath(), self.testFileName))
class TestGamisoMerge(AbstractTestXSlibraryMerging, unittest.TestCase):
    """GAMISO-specific library merging tests."""

    def getErrorType(self):
        # Fixed: this is a getter; it previously did ``raise OSError``, which would
        # raise on every call instead of returning the expected-error type.
        return OSError

    def getReadFunc(self):
        return gamiso.readBinary

    def getWriteFunc(self):
        return gamiso.writeBinary

    def getLibAAPath(self):
        return GAMISO_AA

    def getLibABPath(self):
        return GAMISO_AB

    def getLibAA_ABPath(self):
        return GAMISO_AA_AB

    def getLibLumpedPath(self):
        return GAMISO_LUMPED

    def test_canRemoveIsotopes(self):
        """Deleting the individual Zr/Xe isotopes reproduces the lumped-fission-product reference file."""
        emptyXSLib = xsLibraries.IsotxsLibrary()
        emptyXSLib.merge(self.libAA)
        self.libAA = None
        emptyXSLib.merge(self.libAB)
        self.libAB = None
        for nucId in [
            "ZR93_7",
            "ZR95_7",
            "XE1287",
            "XE1297",
            "XE1307",
            "XE1317",
            "XE1327",
            "XE1337",
            "XE1347",
            "XE1357",
            "XE1367",
        ]:
            nucLabel = self.nuclideBases.byMcc3Id[nucId].label
            # remove the nuclide from both XS-type regions
            del emptyXSLib[nucLabel + "AA"]
            del emptyXSLib[nucLabel + "AB"]
        self.assertEqual(set(self.libLumped.nuclideLabels), set(emptyXSLib.nuclideLabels))
        self.getWriteFunc()(emptyXSLib, self.testFileName)
        self.assertTrue(filecmp.cmp(self.getLibLumpedPath(), self.testFileName))
class TestCombinedMerge(unittest.TestCase):
    """Merge all fixture ISOTXS/GAMISO/PMATRX libraries found in a directory."""

    def setUp(self):
        # Load small fixture libraries (with LFPs, Actinides, structure, and coolant)
        # from the ARMI tree, grouped here by file format rather than by region.
        self.isotxsAA = isotxs.readBinary(ISOTXS_AA)
        self.isotxsAB = isotxs.readBinary(ISOTXS_AB)
        self.gamisoAA = gamiso.readBinary(GAMISO_AA)
        self.gamisoAB = gamiso.readBinary(GAMISO_AB)
        self.pmatrxAA = pmatrx.readBinary(PMATRX_AA)
        self.pmatrxAB = pmatrx.readBinary(PMATRX_AB)
        self.libCombined = isotxs.readBinary(ISOTXS_AA_AB)

    def test_mergeAllXSLibFiles(self):
        """Merging every library in the fixture directory yields the combined nuclide set."""
        mergedLib = xsLibraries.IsotxsLibrary()
        xsLibraries.mergeXSLibrariesInWorkingDirectory(
            mergedLib,
            xsLibrarySuffix="",
            mergeGammaLibs=True,
            alternateDirectory=FIXTURE_DIR,
        )
        self.assertEqual(set(mergedLib.nuclideLabels), set(self.libCombined.nuclideLabels))
================================================
FILE: armi/nuclearDataIO/tests/test_xsNuclides.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for xs nuclides."""
import unittest
from armi.nucDirectory import nuclideBases
from armi.nuclearDataIO import isotxs, xsLibraries, xsNuclides
from armi.tests import ISOAA_PATH, mockRunLogs
class NuclideTests(unittest.TestCase):
    """Tests for XSNuclide creation, label handling, and reading XS data from ISOTXS."""

    @classmethod
    def setUpClass(cls):
        # read the fixture ISOTXS library once for all tests
        cls.lib = isotxs.readBinary(ISOAA_PATH)

    def test_badNameFailure(self):
        """Creating nuclide from label fails on bad name."""
        nuc = xsNuclides.XSNuclide(None, "BACONAA")
        nuc.isotxsMetadata["nuclideId"] = "BACN87"
        with self.assertRaises(OSError):
            nuc.updateBaseNuclide()

    def test_creatingNucNoSideEffects(self):
        """Creating nuclide does not mess with underlying nuclide dictionary."""
        nuc = nuclideBases.byName["U238"]
        self.assertFalse(hasattr(nuc, "xsId"))
        nrAA = xsNuclides.XSNuclide(None, "U238AA")
        nrAA.isotxsMetadata["nuclideId"] = nuc.name
        nrAA.updateBaseNuclide()
        self.assertEqual("AA", nrAA.xsId)
        # the shared base nuclide must not have picked up the xsId attribute
        self.assertFalse(hasattr(nuc, "xsId"))

    # NOTE(review): method name looks like a typo for "test_modifyingNucAttrUpdatesIsotxs";
    # not renamed here to keep the test interface unchanged.
    def test_odifyingNucAttrUpdatesIsotxs(self):
        """Modifying nuclide attribute updates the ISOTXS nuclide data."""
        lib = xsLibraries.IsotxsLibrary()
        nuc = nuclideBases.byName["FE"]
        nrAA = xsNuclides.XSNuclide(lib, "FEAA")
        lib["FEAA"] = nrAA
        nrAA.isotxsMetadata["nuclideId"] = nuc.name
        nrAA.updateBaseNuclide()
        self.assertEqual(len(nuc.trans), len(nrAA.trans))
        # appending to the base nuclide is visible through the XS nuclide (shared list)
        nuc.trans.append("whatever")
        self.assertEqual(len(nuc.trans), len(nrAA.trans))
        self.assertEqual("whatever", nuc.trans[-1])
        self.assertEqual("whatever", nrAA.trans[-1])
        # We have modified the underlying nuclide; need to reset.
        nuc.trans.pop()

    # NOTE(review): method name looks like a typo for "test_newLabelsNoWarnings"
    # (per the docstring); not renamed to keep the test interface unchanged.
    def test_moLabelsNoWarnings(self):
        """New nuclide labels do not cause warnings."""
        with mockRunLogs.BufferLog() as logCapture:
            self.assertEqual("", logCapture.getStdout())
            fe = nuclideBases.byName["FE"]
            feNuc = xsNuclides.XSNuclide(None, "FEAA")
            feNuc.isotxsMetadata["nuclideId"] = fe.name
            feNuc.updateBaseNuclide()
            self.assertEqual(fe, feNuc._base)
            # no log output means no warnings were emitted
            self.assertEqual("", logCapture.getStdout())

    def test_nuclide_oldLabelsCauseWarnings(self):
        """Old-style nuclide labels emit a warning, but only once per label."""
        with mockRunLogs.BufferLog() as logCapture:
            self.assertEqual("", logCapture.getStdout())
            pu = nuclideBases.byName["PU239"]
            puNuc = xsNuclides.XSNuclide(None, "PLUTAA")
            puNuc.isotxsMetadata["nuclideId"] = pu.name
            puNuc.updateBaseNuclide()
            self.assertEqual(pu, puNuc._base)
            length = len(logCapture.getStdout())
            self.assertGreater(length, 15)
            # now get it with a legitimate same label, length should not change
            puNuc = xsNuclides.XSNuclide(None, "PLUTAB")
            puNuc.isotxsMetadata["nuclideId"] = pu.name
            puNuc.updateBaseNuclide()
            self.assertEqual(pu, puNuc._base)
            self.assertEqual(length, len(logCapture.getStdout()))

    def test_nuclideBaseMethodsNoFail(self):
        """Nuclide base method should not fail."""
        for nuc in self.lib.nuclides:
            self.assertIsInstance(nuc.getDatabaseName(), str)
            self.assertIsInstance(nuc.getMcc3Id(), str)

    def test_nuclideIsoaaDetails(self):
        """Spot-check summed XS values of a few nuclides against the fixture library."""
        nuc = self.lib["U235AA"]
        self.assertEqual(935.9793848991394, sum(nuc.micros.fission))
        self.assertEqual(1.0000000956962505, sum(nuc.micros.chi))
        nuc = self.lib["B10AA"]
        self.assertEqual(0.7499475518734471, sum(nuc.micros.nGamma))
        nuc = self.lib["B11AA"]
        self.assertEqual(0.0008645406924188137, sum(nuc.micros.n2n))
        self.assertEqual(0.008091875669521187, sum(nuc.micros.nGamma))

    def test_2dDataCoords(self):
        """Manually compare some 2d XS data to ensure the correct coordinates."""
        # indices are (group, moment): first/second/last groups at moments 0 and 1
        u235 = self.lib["U235AA"]
        self.assertAlmostEqual(5.76494979858, u235.micros.total[0, 0])
        self.assertAlmostEqual(6.5928812027, u235.micros.total[1, 0])
        self.assertAlmostEqual(113.00479126, u235.micros.total[31, 0])
        self.assertAlmostEqual(606.100097656, u235.micros.total[32, 0])
        self.assertAlmostEqual(5.7647356987, u235.micros.total[0, 1])
        self.assertAlmostEqual(6.58178663254, u235.micros.total[1, 1])
        self.assertAlmostEqual(112.154449463, u235.micros.total[31, 1])
        self.assertAlmostEqual(606.100097656, u235.micros.total[32, 1])
        pu239 = self.lib["PU39AA"]
        self.assertAlmostEqual(5.83128976821, pu239.micros.total[0, 0])
        self.assertAlmostEqual(6.64091205597, pu239.micros.total[1, 0])
        self.assertAlmostEqual(394.632354736, pu239.micros.total[31, 0])
        self.assertAlmostEqual(973.399902343, pu239.micros.total[32, 0])
        self.assertAlmostEqual(5.83086299896, pu239.micros.total[0, 1])
        self.assertAlmostEqual(6.63103675842, pu239.micros.total[1, 1])
        self.assertAlmostEqual(383.891998291, pu239.micros.total[31, 1])
        self.assertAlmostEqual(973.399902343, pu239.micros.total[32, 1])

    def test_scatterXSdataCoords(self):
        """Manually compare scatter XS data to ensure the correct coordinates."""
        u235 = self.lib["U235AA"]
        elasticScatter = u235.micros.elasticScatter
        n2nScatter = u235.micros.n2nScatter
        inelasticScatter = u235.micros.inelasticScatter
        self.assertAlmostEqual(0.0304658822715, elasticScatter[(2, 1)])
        self.assertAlmostEqual(0.0331721678376, inelasticScatter[(2, 0)])
        self.assertAlmostEqual(0.0310171917081, inelasticScatter[(2, 1)])
        self.assertAlmostEqual(0.0893433615565, inelasticScatter[(2, 2)])
        self.assertAlmostEqual(8.41606015456e-05, inelasticScatter[(16, 2)])
        self.assertAlmostEqual(3.23279074621e-08, inelasticScatter[(17, 2)])
        self.assertAlmostEqual(1.96078691062e-08, inelasticScatter[(18, 2)])
        self.assertAlmostEqual(1.18927703241e-08, inelasticScatter[(19, 2)])
        self.assertAlmostEqual(7.21333170972e-09, inelasticScatter[(20, 2)])
        self.assertAlmostEqual(3.66581343059e-09, inelasticScatter[(21, 2)])
        self.assertAlmostEqual(3.81337583732e-09, inelasticScatter[(22, 2)])
        self.assertAlmostEqual(1.35068589646e-09, inelasticScatter[(23, 2)])
        self.assertAlmostEqual(3.96180976914e-10, inelasticScatter[(24, 2)])
        self.assertAlmostEqual(4.85626551381e-05, n2nScatter[(1, 0)])
        self.assertAlmostEqual(4.61509245042e-07, n2nScatter[(1, 1)])
        self.assertAlmostEqual(9.67319720075e-05, n2nScatter[(2, 1)])
        self.assertAlmostEqual(3.39554608217e-05, n2nScatter[(16, 1)])
        self.assertAlmostEqual(1.12633460958e-05, n2nScatter[(17, 1)])
        self.assertAlmostEqual(6.964501722e-07, n2nScatter[(18, 1)])
        pu239 = self.lib["PU39AA"]
        elasticScatter = pu239.micros.elasticScatter
        inelasticScatter = pu239.micros.inelasticScatter
        n2nScatter = pu239.micros.n2nScatter
        self.assertAlmostEqual(1.7445316189e-05, n2nScatter[(1, 0)])
        self.assertAlmostEqual(4.12698773289e-06, n2nScatter[(17, 1)])
        self.assertAlmostEqual(6.80282767007e-07, n2nScatter[(1, 1)])
        self.assertAlmostEqual(1.56137302838e-05, n2nScatter[(16, 1)])
        self.assertAlmostEqual(9.7953477507e-07, n2nScatter[(18, 1)])
        self.assertAlmostEqual(0.000104939324956, n2nScatter[(2, 1)])
        self.assertAlmostEqual(0.0206335708499, elasticScatter[(2, 1)])
        self.assertAlmostEqual(0.000585122266784, inelasticScatter[(2, 0)])
        self.assertAlmostEqual(0.0352461636066, inelasticScatter[(2, 1)])
        self.assertAlmostEqual(0.457990020514, inelasticScatter[(2, 2)])
        self.assertAlmostEqual(1.16550609164e-07, n2nScatter[(19, 1)])
        self.assertAlmostEqual(5.22556074429e-05, inelasticScatter[(16, 2)])
        # the code below is very useful for generating the above test information
        """
        for key, xs in pu239Scatter.items():
        mk = max(key[1:])
        if len(key) == 5 and 1 in key and 2 in key and (mk <= 2 or mk > 15):
        print ('self.assertAlmostEqual({}, pu239.micros[{}])'
        .format(xs, key))
        """

    def test_getMicroXS(self):
        """Check whether getMicroXS method returns the correct cross sections for the input nuclide."""
        u235Nuc = self.lib["U235AA"]
        for i in range(self.lib.numGroups):
            refFissionXS = u235Nuc.micros.fission[i]
            curFissionXS = u235Nuc.getMicroXS("fission", i)
            self.assertAlmostEqual(refFissionXS, curFissionXS)
        # error raised if you attempt a bad group index
        with self.assertRaises(IndexError):
            u235Nuc.getMicroXS("fission", -999)
        # zero returned if you try to grab a non-existent interaction
        self.assertEqual(u235Nuc.getMicroXS("fake", 1), 0)

    def test_getXS(self):
        """getXS returns the same vector as direct micros attribute access."""
        u235Nuc = self.lib["U235AA"]
        refFission = u235Nuc.micros.fission
        curFission = u235Nuc.getXS("fission")
        self.assertAlmostEqual(len(refFission), len(curFission))
        self.assertAlmostEqual(refFission[0], curFission[0])
        self.assertAlmostEqual(refFission[1], curFission[1])
================================================
FILE: armi/nuclearDataIO/xsCollections.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Cross section collections contain cross sections for a single nuclide or region.
Specifically, they are used as attributes of :py:class:`~armi.nuclearDataIO.xsNuclides.XSNuclide`,
which then are combined as a :py:class:`~armi.nuclearDataIO.xsLibraries.XSLibrary`.
These may represent microscopic or macroscopic neutron or photon cross sections. When they are
macroscopic, they generally represent a whole region with many nuclides, though this is not
required.
See Also
--------
armi.nuclearDataIO.xsCollections.XSCollection : object that gets created.
Examples
--------
# creating a MicroscopicXSCollection by loading one from ISOTXS.
microLib = armi.nuclearDataIO.ISOTXS('ISOTXS')
micros = microLib.nuclides['U235AA'].micros
# creating macroscopic XS:
mc = MacroscopicCrossSectionCreator()
macroCollection = mc.createMacrosFromMicros(microLib, block)
blocksWithMacros = mc.createMacrosOnBlocklist(microLib, blocks)
"""
import numpy as np
from scipy import sparse
from armi import runLog
from armi.utils import properties, units
# Basic cross-section types that are represented by a 1-D vector in the multigroup approximation
# No one is particularly proud of these names...we can claim
# they have some origin in the ISOTXS file format card 04 definition
# fmt: off
NGAMMA = "nGamma"  # radiative capture
NALPHA = "nalph"  # (n, alpha)
NP = "np"  # (n, proton)
ND = "nd"  # (n, deuteron)
NT = "nt"  # (n, triton)
FISSION_XS = "fission"  # (n, fission)
N2N_XS = "n2n"  # (n,2n)
NUSIGF = "nuSigF"  # nu * sigma_f (fission neutron production)
NU = "neutronsPerFission"
# fmt: on
CAPTURE_XS = [NGAMMA, NALPHA, NP, ND, NT]
# Cross section types that are represented by 2-D matrices in the multigroup approximation
BASIC_SCAT_MATRIX = ["elasticScatter", "inelasticScatter", "n2nScatter"]
OTHER_SCAT_MATRIX = ["totalScatter", "elasticScatter1stOrder"]
HIGHORDER_SCATTER = "higherOrderScatter"
# Subset of vector xs used to evaluate absorption cross-section
ABSORPTION_XS = CAPTURE_XS + [FISSION_XS, N2N_XS]
# Subset of vector xs evaluated by _convertBasicXS
BASIC_XS = ABSORPTION_XS + [NUSIGF]
# Subset vector xs that are derived from basic cross sections
DERIVED_XS = ["absorption", "removal"]
# Total and transport are treated differently since they are 2D (can have multiple moments)
TOTAL_XS = ["total", "transport"]
# Subset of all basic cross sections that include removal and scattering
ALL_XS = BASIC_XS + BASIC_SCAT_MATRIX + OTHER_SCAT_MATRIX + DERIVED_XS + TOTAL_XS
# All xs collection data
ALL_COLLECTION_DATA = ALL_XS + [
    "chi",
    NU,
    "strpd",
    HIGHORDER_SCATTER,
    "diffusionConstants",
]
# energy-release metadata keys (presumably per-capture/per-fission energies — confirm against library readers)
E_CAPTURE = "ecapt"
E_FISSION = "efiss"
class XSCollection:
    """A cross section collection."""

    _zeroes = {}
    """
    A dict of numpy arrays set to the size of XSLibrary.numGroups.

    This is used to initialize cross sections which may not exist for the specific nuclide.
    Consequently, there should never be a situation where a cross section does not exist.
    In addition, they are all pointers to the same array, so we're not generating too much
    unnecessary data.

    Notes
    -----
    This is a dict so that it can store multiple 0_g "matrices", i.e. vectors. Realistically,
    during any given run there will only be a set of groups, e.g. 33.
    """

    @classmethod
    def getDefaultXs(cls, numGroups):
        """Return a shared, cached all-zero XS vector with ``numGroups`` entries."""
        default = cls._zeroes.get(numGroups, None)
        if default is None:
            default = np.zeros(numGroups)
            cls._zeroes[numGroups] = default
        return default

    def __init__(self, parent):
        """
        Construct a NuclideCollection.

        Parameters
        ----------
        parent : object
            The parent container, which may be a region, a nuclide, a block, etc.
        """
        self.numGroups = None
        # total/transport may carry multiple moments (2-D)
        self.transport = None
        self.total = None
        # 1-D vector reaction cross sections
        self.nGamma = None
        self.fission = None
        self.neutronsPerFission = None
        self.chi = None
        self.nalph = None
        self.np = None
        self.n2n = None
        self.nd = None
        self.nt = None
        self.strpd = None
        # scattering matrices
        self.elasticScatter = None
        self.inelasticScatter = None
        self.n2nScatter = None
        self.elasticScatter1stOrder = None
        self.totalScatter = None
        # derived data
        self.absorption = None
        self.diffusionConstants = None
        self.removal = None
        self.nuSigF = None
        self.higherOrderScatter = {}
        # human-readable provenance string used in compare/merge messages
        self.source = "{}".format(parent)

    def __getitem__(self, key):
        """
        Access cross sections by key string (e.g. micros['fission'] = micros.fission.

        Notes
        -----
        These containers were originally
        dicts, but upgraded to objects with numpy values as specialization
        was needed. This access method could/should be phased out.
        """
        return self.__dict__[key]

    def __setitem__(self, key, value):
        self.__dict__[key] = value

    def get(self, key, default):
        """Return ``self[key]``, or ``default`` if the key cannot be looked up."""
        try:
            return self[key]
        except (IndexError, KeyError, TypeError):
            return default

    def getAbsorptionXS(self):
        """Return total absorption XS, which is the sum of capture + fission + others."""
        absXS = [
            self.nGamma,
            self.fission,
            self.nalph,
            self.np,
            self.nd,
            self.nt,
            self.n2n,
        ]
        return absXS

    def getTotalScatterMatrix(self):
        """
        Sum up scatter matrices to produce total scatter matrix.

        Multiply reaction-based n2n scatter matrix by 2.0 to convert to production-based.

        .. warning:: Not all lattice codes store (n,2n) matrices consistently. Some are
                     production-based and some are absorption-based. If you use an
                     absorption-based one, your scatter matrix will be off, generally
                     leading to about a percent error in your neutron balance.

        Notes
        -----
        The total scattering matrix is produced by summing the elastic, inelastic, and n2n scattering matrices. If a
        specific scattering matrix does not exist for a composition (nuclide or region) then it is skipped and a
        warning is displayed stating that the scattering reaction is not available and is not included in the total
        scattering matrix.

        Example: When producing macroscopic cross sections in MC2-3 the code internally merges the elastic and
        inelastic scattering matrices into a single elastic scattering matrix.
        """
        # Only double the n2n matrix when it exists. Previously ``self.n2nScatter * 2.0``
        # was evaluated unconditionally, which raised a TypeError for a missing (None)
        # matrix before the skip-and-warn logic below could run.
        doubledN2n = None if self.n2nScatter is None else self.n2nScatter * 2.0
        scatters = []
        totalScatterComponents = {
            "elastic": self.elasticScatter,
            "inelastic": self.inelasticScatter,
            "n2n": doubledN2n,
        }
        for sType, sMatrix in totalScatterComponents.items():
            if sMatrix is not None:
                scatters.append(sMatrix)
            else:
                runLog.warning(
                    "{} scattering matrix in {} is not defined. Generating total scattering matrix"
                    " without this data".format(sType.title(), self),
                    single=True,
                )
        return sum(scatters)

    def clear(self):
        """Zero out all the cross sections; this is useful for creating dummy cross sections."""
        for xsAttr in ALL_XS:
            value = getattr(self, xsAttr)
            # it should either be a list, a numpy array, or a sparse matrix
            if isinstance(value, list):
                value = [0.0] * len(value)
            elif isinstance(value, np.ndarray):
                value = np.zeros(value.shape)
            elif value is None:
                # unset cross sections stay unset
                pass
            elif value.nnz >= 0:
                # anything else with an ``nnz`` attribute is assumed to be scipy.sparse
                value = sparse.csr_matrix(value.shape)
            setattr(self, xsAttr, value)
        # need to do the same thing for the higherOrderScatter
        for kk, currentMatrix in self.higherOrderScatter.items():
            self.higherOrderScatter[kk] = sparse.csr_matrix(currentMatrix.shape)

    @staticmethod
    def collapseCrossSection(crossSection, weights):
        r"""
        Collapse a cross section into 1-group.

        This is extremely useful for many analyses such as doing a shielding efficacy survey
        or computing one-group reaction rates.

        .. math::
            \bar{\sigma} = \frac{\sum_g{\sigma_g \phi_g}}{\sum_g{\phi_g}}

        Parameters
        ----------
        crossSection : list
            Multigroup cross section values
        weights : list
            energy group weights to apply (usually the multigroup flux)

        Returns
        -------
        oneGroupXS : float
            The one group cross section in the same units as the input cross section.
        """
        mult = np.array(crossSection) * np.array(weights)
        return sum(mult) / sum(weights)

    def compare(self, other, flux, relativeTolerance=0, verbose=False, nucName=""):
        """Compare the cross sections between two XSCollections objects."""
        nuclideIDMsg = f"Nuclide {nucName} " if nucName else ""
        equal = True
        for xsName in ALL_COLLECTION_DATA:
            myXsData = self.__dict__[xsName]
            theirXsData = other.__dict__[xsName]
            if xsName == HIGHORDER_SCATTER:
                # NOTE(review): higherOrderScatter is a dict, so zip() iterates its keys;
                # this compares key sequences, not the matrices themselves — confirm intent.
                for actualList, expectedList in zip(myXsData, theirXsData):
                    if actualList != expectedList:
                        equal = False
                        runLog.important(
                            "  {}{} {:<30} cross section is different.".format(
                                nuclideIDMsg,
                                self.source,
                                xsName,
                            )
                        )
            elif sparse.issparse(myXsData) and sparse.issparse(theirXsData):
                if not np.allclose(
                    myXsData.todense(),
                    theirXsData.todense(),
                    rtol=relativeTolerance,
                    atol=0.0,
                ):
                    verboseData = "" if not verbose else "\n{},\n\n{}".format(myXsData, theirXsData)
                    runLog.important(
                        "  {}{} {:<30} cross section is different.{}".format(
                            nuclideIDMsg, self.source, xsName, verboseData
                        )
                    )
                    equal = False
            elif isinstance(myXsData, dict) and myXsData != theirXsData:
                # there are no dicts currently so code is untested
                raise NotImplementedError("there are no dicts")
            elif not properties.areEqual(myXsData, theirXsData, relativeTolerance):
                verboseData = "" if not verbose else "\n{},\n\n{}".format(myXsData, theirXsData)
                runLog.important(
                    "  {}{} {:<30} cross section is different.{}".format(nuclideIDMsg, self.source, xsName, verboseData)
                )
                equal = False
        return equal

    def merge(self, other):
        """
        Merge the cross sections of two collections.

        Notes
        -----
        1. This can only merge if one hasn't been assigned at all, because it doesn't try to figure out how to
           account for overlapping cross sections.
        2. Update the current library (self) with values from the other library if all attributes in the library except
           ones in `attributesToIgnore` are None.
        3. Libraries are already merged if all attributes in the other library are None (This is nothing to merge!).
        """
        attributesToIgnore = ["source", HIGHORDER_SCATTER]
        if all(v is None for k, v in self.__dict__.items() if k not in attributesToIgnore):
            self.__dict__.update(other.__dict__)  # See note 2
        elif all(v is None for k, v in other.__dict__.items() if k not in attributesToIgnore):
            pass  # See note 3
        else:
            overlappingAttrs = set(k for k, v in self.__dict__.items() if v is not None and k != "source")
            overlappingAttrs &= set(k for k, v in other.__dict__.items() if v is not None and k != "source")
            raise AttributeError(
                "Cannot merge {} and {}.\n Cross sections overlap in attributes: {}.".format(
                    self.source, other.source, ", ".join(overlappingAttrs)
                )
            )
class MacroscopicCrossSectionCreator:
    """
    Create macroscopic cross sections from micros and number density.

    Object encapsulating all high-level methods related to the creation of macroscopic cross
    sections.
    """

    def __init__(self, buildScatterMatrix=True, minimumNuclideDensity=0.0):
        # nucName -> number density (atoms/bn-cm), filtered by minimumNuclideDensity
        self.densities = None
        # the XSCollection of macroscopic cross sections being built
        self.macros = None
        self.micros = None
        # nuclides at or below this density are excluded from the macro build
        self.minimumNuclideDensity = minimumNuclideDensity
        self.buildScatterMatrix = buildScatterMatrix
        self.block = None

    def createMacrosOnBlocklist(self, microLibrary, blockList, nucNames=None, libType="micros"):
        """Create macroscopic cross sections for a list of blocks."""
        for block in blockList:
            block.macros = self.createMacrosFromMicros(microLibrary, block, nucNames, libType=libType)
        return blockList

    def createMacrosFromMicros(self, microLibrary, block, nucNames=None, libType="micros"):
        """
        Creates a macroscopic cross section set based on a microscopic XS library using a block object.

        Micro libraries have lots of nuclides, but macros only have 1.

        Parameters
        ----------
        microLibrary : xsCollection.XSCollection
            Input micros

        block : Block
            Object whose number densities should be used to generate macros

        nucNames : list, optional
            List of nuclides to include in the macros. Defaults to all in block.

        libType : str, optional
            The block attribute containing the desired microscopic XS for this block:
            either "micros" for neutron XS or "gammaXS" for gamma XS.

        Returns
        -------
        macros : xsCollection.XSCollection
            A new XSCollection full of macroscopic cross sections
        """
        runLog.debug("Building macroscopic cross sections for {0}".format(block))
        if nucNames is None:
            nucNames = block.getNuclides()
        self.microLibrary = microLibrary
        self.block = block
        self.xsSuffix = block.getMicroSuffix()
        self.macros = XSCollection(parent=block)
        # keep only nuclides above the density cutoff
        self.densities = dict(
            filter(
                lambda x: x[1] > self.minimumNuclideDensity,
                zip(nucNames, block.getNuclideNumberDensities(nucNames)),
            )
        )
        self.ng = getattr(self.microLibrary, "numGroups" + _getLibTypeSuffix(libType))

        # NOTE: the steps below are order-dependent: absorption needs the basic XS,
        # and removal needs both absorption and the total scatter matrix.
        self._initializeMacros()
        self._convertBasicXS(libType=libType)
        self._computeAbsorptionXS()
        self._convertScatterMatrices(libType=libType)
        self._computeDiffusionConstants()
        self._buildTotalScatterMatrix()
        self._computeRemovalXS()
        self.macros.chi = computeBlockAverageChi(b=self.block, isotxsLib=self.microLibrary)

        return self.macros

    def _initializeMacros(self):
        """Zero-initialize all vector XS and scatter matrices on the macro collection."""
        m = self.macros
        for xsName in BASIC_XS + DERIVED_XS:
            setattr(m, xsName, np.zeros(self.ng))

        for matrixName in BASIC_SCAT_MATRIX:
            # lil_matrices are good for indexing but bad for certain math operations.
            # use csr for faster math
            setattr(m, matrixName, sparse.csr_matrix((self.ng, self.ng)))

    def _convertBasicXS(self, libType="micros"):
        """
        Converts basic XS such as fission, nGamma, etc.

        Parameters
        ----------
        libType : str, optional
            The block attribute containing the desired microscopic XS for this block:
            either "micros" for neutron XS or "gammaXS" for gamma XS.
        """
        reactions = BASIC_XS + TOTAL_XS
        # nuSigF is handled separately because it needs the nu multiplier
        if NUSIGF in reactions:
            reactions.remove(NUSIGF)
            self.macros[NUSIGF] = computeMacroscopicGroupConstants(
                FISSION_XS,
                self.densities,
                self.microLibrary,
                self.xsSuffix,
                libType=libType,
                multConstant=NU,
            )

        for reaction in reactions:
            self.macros[reaction] = computeMacroscopicGroupConstants(
                reaction,
                self.densities,
                self.microLibrary,
                self.xsSuffix,
                libType=libType,
            )

    def _convertScatterMatrices(self, libType="micros"):
        """
        Build macroscopic scatter matrices.

        Parameters
        ----------
        libType : str, optional
            The block attribute containing the desired microscopic XS for this block:
            either "micros" for neutron XS or "gammaXS" for gamma XS.
        """
        if not self.buildScatterMatrix:
            return

        for nuclide in self.microLibrary.getNuclides(self.xsSuffix):
            microCollection = getattr(nuclide, libType)
            nDens = self.densities.get(nuclide.name, 0.0)
            # nuclides may be missing individual matrices; skip those contributions
            if microCollection.elasticScatter is not None:
                self.macros.elasticScatter += microCollection.elasticScatter * nDens
            if microCollection.inelasticScatter is not None:
                self.macros.inelasticScatter += microCollection.inelasticScatter * nDens
            if microCollection.n2nScatter is not None:
                self.macros.n2nScatter += microCollection.n2nScatter * nDens

    def _computeAbsorptionXS(self):
        """
        Absorption = sum of all absorption reactions.

        Must be called after :py:meth:`_convertBasicXS`.
        """
        for absXS in self.macros.getAbsorptionXS():
            self.macros.absorption += absXS

    def _computeDiffusionConstants(self):
        # NOTE(review): divides elementwise by 3*transport; a zero transport group
        # would produce inf/divide warnings — assumed nonzero here.
        self.macros.diffusionConstants = 1.0 / (3.0 * self.macros.transport)

    def _buildTotalScatterMatrix(self):
        self.macros.totalScatter = self.macros.getTotalScatterMatrix()

    def _computeRemovalXS(self):
        """
        Compute removal cross section (things that remove a neutron from this phase space).

        This includes all absorptions and outscattering.

        Outscattering is represented by columns of the total scatter matrix.
        Self-scattering (e.g. when g' == g) is not be included. This can be
        handled by summing the columns and then subtracting the diagonal.

        within-group n2n is accounted for by simply not including n2n in the removal xs.
        """
        self.macros.removal = self.macros.absorption - self.macros.n2n
        columnSum = self.macros.totalScatter.sum(axis=0).getA1()  # convert to ndarray
        diags = self.macros.totalScatter.diagonal()
        self.macros.removal += columnSum - diags
# ruff: noqa: E501
def computeBlockAverageChi(b, isotxsLib):
    r"""
    Return the block average total chi vector based on isotope chi vectors.

    This is defined by eq 3.4b in DIF3D manual [DIF3D]_, which corresponds to 1 in A.HMG4C card.

    .. math::
        \chi_g = \frac{\sum_{n} \chi_{g,n} N_n V \sum_{g'}(\nu_{g'}*\sigma_{f,g'})}{\sum_n N_n V \sum_{g'}(\nu_{g'}*\sigma_{f,g'} )}

    To evaluate efficiently, assume that if :math:`\chi_{g,n}=0`, there will be no contributions

    Volume is not used b/c it is already homogenized in the block.

    Parameters
    ----------
    b : object
        Block object

    isotxsLib : object
        ISOTXS library object

    Notes
    -----
    This methodology is based on option 1 in the HMG4C utility (named total
    fission source weighting).
    """
    groupCount = isotxsLib.numGroups
    chiAccumulator = np.zeros(groupCount)
    totalWeight = 0.0
    densities = b.getNumberDensities()
    for nucObj in isotxsLib.getNuclides(b.getMicroSuffix()):
        micros = nucObj.micros
        nDens = densities.get(nucObj.name, 0.0)
        # weight each nuclide's chi by its total fission neutron production
        nuFissionTotal = sum(micros.neutronsPerFission * micros.fission)
        chiAccumulator += micros.chi * nDens * nuFissionTotal
        totalWeight += nDens * nuFissionTotal
    if totalWeight == 0.0:
        # no fissionable material present; return a zero spectrum
        return np.zeros(groupCount)
    return chiAccumulator / totalWeight
def _getLibTypeSuffix(libType):
if libType == "micros":
libTypeSuffix = ""
elif libType == "gammaXS":
libTypeSuffix = "Gamma"
else:
libTypeSuffix = None
runLog.warning(
'ARMI currently supports only micro XS libraries of types "micros" (neutron) and "gammaXS" (gamma).'
)
return libTypeSuffix
def computeNeutronEnergyDepositionConstants(numberDensities, lib, microSuffix):
    """
    Compute the macroscopic neutron energy deposition group constants.

    These group constants can be multiplied by the flux to obtain energy deposition rates.

    Parameters
    ----------
    numberDensities : dict
        nucName keys, number density values (atoms/bn-cm) of all nuclides in the composite for which
        the macroscopic group constants are computed. See composite `getNuclideNumberDensities` method.
    lib : library object
        Microscopic cross section library.
    microSuffix : str
        Microscopic library suffix (e.g. 'AB') for this composite.
        See composite `getMicroSuffix` method.

    Returns
    -------
    energyDepositionConsts : np.ndarray
        Neutron energy deposition group constants. (J/cm)

    Notes
    -----
    PMATRX documentation says units will be eV/s when multiplied by flux but it's eV/s/cm^3.
    (eV/s/cm^3 = eV-bn * 1/cm^2/s * 1/bn-cm.)
    Converted here to obtain J/cm (eV-bn * 1/bn-cm * J / eV)
    """
    heatingConstants = computeMacroscopicGroupConstants("neutronHeating", numberDensities, lib, microSuffix)
    # PMATRX heating data is in eV-bn; scale by J/eV to yield J/cm.
    return heatingConstants * units.JOULES_PER_eV
def computeGammaEnergyDepositionConstants(numberDensities, lib, microSuffix):
    """
    Compute the macroscopic gamma energy deposition group constants.

    These group constants can be multiplied by the flux to obtain energy deposition rates.

    Parameters
    ----------
    numberDensities : dict
        nucName keys, number density values (atoms/bn-cm) of all nuclides in the composite for which
        the macroscopic group constants are computed. See composite `getNuclideNumberDensities` method.
    lib : library object
        Microscopic cross section library.
    microSuffix : str
        Microscopic library suffix (e.g. 'AB') for this composite.
        See composite `getMicroSuffix` method.

    Returns
    -------
    energyDepositionConsts : np.ndarray
        gamma energy deposition group constants. (J/cm)

    Notes
    -----
    PMATRX documentation says units will be eV/s when multiplied by flux but it's eV/s/cm^3.
    (eV/s/cm^3 = eV-bn * 1/cm^2/s * 1/bn-cm.)
    Convert here to obtain J/cm (eV-bn * 1/bn-cm * J / eV)
    """
    heatingConstants = computeMacroscopicGroupConstants("gammaHeating", numberDensities, lib, microSuffix)
    # PMATRX heating data is in eV-bn; scale by J/eV to yield J/cm.
    return heatingConstants * units.JOULES_PER_eV
def computeFissionEnergyGenerationConstants(numberDensities, lib, microSuffix):
    r"""
    Get the fission energy generation group constant of a block.

    .. math::

        E_{generation_fission} = \kappa_f \Sigma_f

    Power comes from fission and capture reactions.

    Parameters
    ----------
    numberDensities : dict
        nucName keys, number density values (atoms/bn-cm) of all nuclides in the composite for which
        the macroscopic group constants are computed. See composite `getNuclideNumberDensities` method.
    lib : library object
        Microscopic cross section library.
    microSuffix : str
        Microscopic library suffix (e.g. 'AB') for this composite.
        See composite `getMicroSuffix` method.

    Returns
    -------
    fissionEnergyFactor: np.ndarray
        Fission energy generation group constants (in Joules/cm)
    """
    # kappa-weighted fission XS: multiply each nuclide's fission XS by its energy per fission.
    return computeMacroscopicGroupConstants(
        FISSION_XS,
        numberDensities,
        lib,
        microSuffix,
        libType="micros",
        multConstant=E_FISSION,
    )
def computeCaptureEnergyGenerationConstants(numberDensities, lib, microSuffix):
r"""
Get the energy generation group constant of a block.
.. math::
E_{generation capture} = \kappa_c \Sigma_c
Typically, one only cares about the flux* this XS (to find total power),
but the XS itself is required in some sensitivity studies.
Power comes from fission and capture reactions.
Parameters
----------
numberDensities : dict
nucName keys, number density values (atoms/bn-cm) of all nuclides in the composite for which
the macroscopic group constants are computed. See composite `getNumberDensities` method.
lib : library object
Microscopic cross section library.
microSuffix : str
Microscopic library suffix (e.g. 'AB') for this composite.
See composite `getMicroSuffix` method.
Returns
-------
captureEnergyFactor: np.ndarray
Capture energy generation group constants (in Joules/cm)
"""
captureEnergyFactor = None
for xs in CAPTURE_XS:
if captureEnergyFactor is None:
captureEnergyFactor = np.zeros(
np.shape(computeMacroscopicGroupConstants(xs, numberDensities, lib, microSuffix, libType="micros"))
)
captureEnergyFactor += computeMacroscopicGroupConstants(
xs,
numberDensities,
lib,
microSuffix,
libType="micros",
multConstant=E_CAPTURE,
)
return captureEnergyFactor
def computeMacroscopicGroupConstants(
    constantName,
    numberDensities,
    lib,
    microSuffix,
    libType=None,
    multConstant=None,
    multLib=None,
):
    r"""
    Compute any macroscopic group constants given number densities and a microscopic library.

    .. impl:: Compute macroscopic cross sections from microscopic cross sections and number densities.
        :id: I_ARMI_NUCDATA_MACRO
        :implements: R_ARMI_NUCDATA_MACRO

        This function computes the macroscopic cross sections of a specified
        reaction type from inputted microscopic cross sections and number
        densities. The ``constantName`` parameter specifies what type of
        reaction is requested. The ``numberDensities`` parameter is a dictionary
        mapping the nuclide to its number density. The ``lib`` parameter is a library
        object like :py:class:`~armi.nuclearDataIO.xsLibraries.IsotxsLibrary` or
        :py:class:`~armi.nuclearDataIO.xsLibraries.CompxsLibrary` that holds the
        microscopic cross-section data. The ``microSuffix`` parameter specifies
        from which part of the library the microscopic cross sections are
        gathered; this is typically gathered from a composite's
        ``getMicroSuffix`` method. ``libType`` is an optional
        parameter specifying whether the reaction is for neutrons or gammas.
        This function also has the optional parameters ``multConstant`` and
        ``multLib``, which allows another constant from the library, such as
        neutrons per fission (nu) or energy per fission (kappa), to be
        multiplied to the primary one. The macroscopic cross sections are then
        computed as:

        .. math::

            \Sigma_{g} = \sum_{n} N_n \sigma_{n,g}\nu_n \quad g=1,...,G

        where :math:`n` is the isotope index, :math:`g` is the energy group
        index, :math:`\sigma` is the microscopic cross section, and :math:`\nu`
        is the scalar multiplier. If the library (``lib``) with suffix
        ``microSuffix`` is missing a cross section for the ``constantName``
        reaction for one or more of the nuclides in ``numberDensities`` an error
        is raised; but if ``multConstant`` is missing that cross section, then
        those nuclides are printed as a warning.

    Parameters
    ----------
    constantName : str
        Name of the reaction for which to obtain the group constants. This name should match a
        cross section name or an attribute in the collection.
    numberDensities : dict
        nucName keys, number density values (atoms/bn-cm) of all nuclides in the composite for which
        the macroscopic group constants are computed. See composite `getNuclideNumberDensities` method.
    lib : library object
        Microscopic cross section library.
    microSuffix : str
        Microscopic library suffix (e.g. 'AB') for this composite.
        See composite `getMicroSuffix` method.
    libType : str, optional
        The block attribute containing the desired microscopic XS for this block:
        either "micros" for neutron XS or "gammaXS" for gamma XS.
    multConstant : str, optional
        Name of constant by which the group constants will be multiplied. This name should match a
        cross section name or an attribute in the collection.
    multLib : library object, optional
        Microscopic cross section nuclide library to obtain the multiplier from.
        If None, same library as base cross section is used.

    Returns
    -------
    macroGroupConstant : np.ndarray
        Macroscopic group constants for the requested reaction.
        None if every nuclide had a zero/absent number density.

    Raises
    ------
    ValueError
        If any nuclide with a nonzero number density is missing from ``lib``.
    """
    skippedNuclides = []
    skippedMultNuclides = []
    macroGroupConstants = None
    # sort the numberDensities because a summation is being performed that may result in slight
    # differences based on the order.
    for nuclideName, numberDensity in sorted(numberDensities.items()):
        if not numberDensity:
            # zero (or None) density contributes nothing; skip the library lookup entirely
            continue
        try:
            libNuclide = lib.getNuclide(nuclideName, microSuffix)
            multLibNuclide = libNuclide
        except KeyError:
            skippedNuclides.append(nuclideName)  # Nuclide does not exist in the library
            continue
        if multLib:
            try:
                multLibNuclide = multLib.getNuclide(nuclideName, microSuffix)
            except KeyError:
                skippedMultNuclides.append(nuclideName)  # Nuclide does not exist in the library
                continue
        microGroupConstants = _getMicroGroupConstants(libNuclide, constantName, nuclideName, libType)
        multiplierVal = _getXsMultiplier(multLibNuclide, multConstant, libType)
        if macroGroupConstants is None:
            # size the accumulator from the first nuclide's group structure
            macroGroupConstants = np.zeros(microGroupConstants.shape)
        # An all-zero vector of the wrong length is treated as "no data" and re-shaped
        # to match the accumulator (presumably differing neutron/gamma group counts —
        # TODO confirm against the library readers).
        if microGroupConstants.shape != macroGroupConstants.shape and not microGroupConstants.any():
            microGroupConstants = np.zeros(macroGroupConstants.shape)
        macroGroupConstants += np.asarray(numberDensity) * microGroupConstants * multiplierVal
    if skippedNuclides:
        msg = "The following nuclides are not in microscopic library {}: {}".format(lib, skippedNuclides)
        runLog.error(msg, single=True)
        raise ValueError(msg)
    if skippedMultNuclides:
        runLog.debug(
            "The following nuclides are not in multiplier library {}: {}".format(multLib, skippedMultNuclides),
            single=True,
        )
    return macroGroupConstants
def _getXsMultiplier(libNuclide, multiplier, libType):
if multiplier:
try:
microCollection = getattr(libNuclide, libType)
multiplierVal = getattr(microCollection, multiplier)
except Exception:
multiplierVal = libNuclide.isotxsMetadata[multiplier]
else:
multiplierVal = 1.0
return np.asarray(multiplierVal)
def _getMicroGroupConstants(libNuclide, constantName, nuclideName, libType):
if libType:
microCollection = getattr(libNuclide, libType)
else:
microCollection = libNuclide
microGroupConstants = np.asarray(getattr(microCollection, constantName))
if not microGroupConstants.any():
runLog.debug(
"Nuclide {} does not have {} microscopic group constants.".format(nuclideName, constantName),
single=True,
)
return microGroupConstants
================================================
FILE: armi/nuclearDataIO/xsLibraries.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Cross section library objects.
Cross section libraries, currently, contain neutron and/or gamma
cross sections, but are not necessarily intended to be only neutron and gamma data.
"""
import glob
import os
import re
from armi import runLog
from armi.nucDirectory import nuclideBases
from armi.nuclearDataIO.nuclearFileMetadata import NuclideXSMetadata, RegionXSMetadata
from armi.utils import properties
# Filename prefix shared by ISOTXS-family binary library files (e.g. "ISOAA");
# used below to glob candidate files in the working directory.
_ISOTXS_EXT = "ISO"
def compare(lib1, lib2):
    """Compare two XSLibraries, and return True if equal, or False if not."""
    from armi.nuclearDataIO.cccc import gamiso, isotxs, pmatrx

    # Evaluate every check (no short-circuit) so all differences get reported.
    results = [
        _checkLabels(lib1, lib2),
        _checkLabels(lib2, lib1),
        isotxs.compare(lib1, lib2),
        gamiso.compare(lib1, lib2),
        pmatrx.compare(lib1, lib2),
    ]
    return all(results)
def _checkLabels(llib1, llib2):
mismatch = set(llib1.nuclideLabels) - set(llib2.nuclideLabels)
if any(mismatch):
runLog.important("{} has nuclides that are not in {}: {}".format(llib1, llib2, mismatch))
return False
return True
def compareXSLibraryAttribute(lib1, lib2, attributeName, tolerance=0.0):
    """Compare one attribute of two libraries; True when equal within ``tolerance``."""
    # the backing field for the immutable properties is underscore-prefixed
    privateName = "_" + attributeName
    first = getattr(lib1, privateName, None)
    second = getattr(lib2, privateName, None)
    if properties.areEqual(first, second, tolerance):
        return True
    runLog.important(
        "{} and {} have different `{}` attributes:\n{}\n{}".format(lib1, lib2, attributeName, first, second)
    )
    return False
def compareLibraryNeutronEnergies(lib1, lib2, tolerance=0.0):
    """
    Compare the neutron velocities and energy upper bounds for two libraries.

    Returns True only when both attributes agree within ``tolerance``.
    """
    equals = True
    equals &= compareXSLibraryAttribute(lib1, lib2, "neutronEnergyUpperBounds", tolerance)
    # Fixed attribute name: the property on _XSLibrary is ``neutronVelocity``
    # (backing field ``_neutronVelocity``); the previous "neutronVelocities"
    # resolved to a nonexistent field, so the check always compared None to None
    # and could never fail.
    equals &= compareXSLibraryAttribute(lib1, lib2, "neutronVelocity", tolerance)
    return equals
def getSuffixFromNuclideLabel(nucLabel):
    """
    Return the xs suffix for the nuclide label.

    Parameters
    ----------
    nucLabel: str
        A string representing the nuclide and xs suffix, e.g. "U235AA"

    Returns
    -------
    suffix: str
        The two-character XS suffix at the end of the label.
    """
    suffix = nucLabel[-2:]
    return suffix
def getISOTXSLibrariesToMerge(xsLibrarySuffix, xsLibFileNames):
    """
    Find ISOTXS libraries out of a list that should be merged based on the provided ``xsLibrarySuffix``.

    Parameters
    ----------
    xsLibrarySuffix : str
        XS library suffix is used to determine which ISOTXS files should be merged together. This can be an
        empty string or be something like `-doppler`.
    xsLibFileNames : list
        A list of library file paths like ISOAA, ISOBA, ISOCA, etc. Can be a standalone file name or a full path.

    Notes
    -----
    Files that exist: ISOAA-n1, ISOAB-n1, ISOAA-n2, ISOAB-n2, ISOAA, ISOAB, ISODA, ISOBA.
    xsLibrarySuffix: '-n2'
    Results: ISODA, ISOBA, ISOAA-n2, ISOAB-n2
    """
    # Drop the already-merged ISOTXS file and any BCD/ascii variants.
    candidates = [
        name
        for name in xsLibFileNames
        if "ISOTXS" not in name and ".ascii" not in name and "BCD" not in name
    ]
    if xsLibrarySuffix != "":
        # Prefer files carrying the requested suffix; keep un-suffixed files only
        # when no suffixed variant with the same base name exists.
        withSuffix = [name for name in candidates if re.match(f".*ISO[A-Za-z]{{2}}F?{xsLibrarySuffix}$", name)]
        suffixBases = {os.path.basename(name).split("-")[0] for name in withSuffix}
        plain = [
            name
            for name in candidates
            if "-" not in os.path.basename(name) and os.path.basename(name) not in suffixBases
        ]
        return plain + withSuffix
    return [name for name in candidates if "-" not in os.path.basename(name)]
def mergeXSLibrariesInWorkingDirectory(
    lib,
    xsLibrarySuffix="",
    mergeGammaLibs=False,
    alternateDirectory=None,
):
    """
    Merge neutron (ISOTXS) and gamma (GAMISO/PMATRX) library data into the provided library.

    Notes
    -----
    Convention is for fuel XS id to come first alphabetically (A, B, C, etc.) and then be
    followed by non-fuel. This should allow `referenceDummyNuclide` to be defined before
    it is needed by a non-fuel cross section, but if the convention is not followed then
    this could cause an issue.

    Parameters
    ----------
    lib : obj
        ISOTXS library object
    xsLibrarySuffix : str, optional
        XS library suffix used to determine which ISOTXS files are merged together,
        typically something like `-doppler`. If empty string, will merge everything
        without suffix (indicated by a `-`).
    mergeGammaLibs : bool, optional
        If True, the GAMISO and PMATRX files that correspond to the ISOTXS library will be merged. Note: if these
        files do not exist this will fail.
    alternateDirectory : str, optional
        An alternate directory in which to search for files other than the working directory. The main purpose
        of this is for testing, but it could also be useful to users.

    Returns
    -------
    dict
        Neutron velocities keyed by XS ID, one entry per ISOTXS file read.
    """
    # local imports avoid a circular dependency at module import time
    from armi import nuclearDataIO
    from armi.nuclearDataIO.cccc import gamiso, isotxs, pmatrx

    baseDir = alternateDirectory or os.getcwd()
    globPath = os.path.join(baseDir, _ISOTXS_EXT + "*")
    xsLibFiles = getISOTXSLibrariesToMerge(xsLibrarySuffix, [iso for iso in glob.glob(globPath)])
    librariesToMerge = []
    neutronVelocities = {}  # Dictionary of neutron velocities from each ISOTXS file
    referenceDummyNuclides = None
    for xsLibFilePath in sorted(xsLibFiles):
        try:
            # get XS ID from the cross section library name
            xsID = re.search("ISO([A-Z0-9a-z]{2})", xsLibFilePath).group(1)
        except AttributeError:
            # if glob has matched something that is not actually an ISOXX file,
            # the .group() call will fail
            runLog.debug(f"Ignoring file {xsLibFilePath} in the merging of ISOXX files")
            continue
        xsFileTypes = "ISOTXS" if not mergeGammaLibs else "ISOTXS, GAMISO, and PMATRX"
        runLog.info("Retrieving {} data for XS ID {}{}".format(xsFileTypes, xsID, xsLibrarySuffix))
        if xsLibFilePath in lib.isotxsMetadata.fileNames:
            runLog.extra("Skipping merge of {} because data already exists in the library".format(xsLibFilePath))
            continue
        neutronLibrary = isotxs.readBinary(xsLibFilePath)
        neutronVelocities[xsID] = neutronLibrary.neutronVelocity
        dummyNuclidesInNeutron = [
            nuc for nuc in neutronLibrary.nuclides if isinstance(nuc._base, nuclideBases.DummyNuclideBase)
        ]
        if not dummyNuclidesInNeutron:
            # This library lacks DUMMY data; inject the reference dummy nuclides,
            # then round-trip the library through disk so the merged data is read
            # back in the same form as a natively written file.
            runLog.info(f"Adding dummy nuclides to library {xsID}")
            addedDummyData = isotxs.addDummyNuclidesToLibrary(
                neutronLibrary, referenceDummyNuclides
            )  # Add DUMMY nuclide data not produced by MC2-3
            isotxsLibraryPath = os.path.join(
                baseDir,
                nuclearDataIO.getExpectedISOTXSFileName(suffix=xsLibrarySuffix, xsID=xsID),
            )
            isotxsDummyPath = isotxsLibraryPath
            isotxs.writeBinary(neutronLibrary, isotxsDummyPath)
            neutronLibraryDummyData = isotxs.readBinary(isotxsDummyPath)
            librariesToMerge.append(neutronLibraryDummyData)
            dummyNuclidesInNeutron = referenceDummyNuclides
        else:
            librariesToMerge.append(neutronLibrary)
            if not referenceDummyNuclides:
                # first library with dummy data defines the reference set for later libraries
                referenceDummyNuclides = dummyNuclidesInNeutron
        if mergeGammaLibs:
            gamisoLibraryPath = os.path.join(
                baseDir,
                nuclearDataIO.getExpectedGAMISOFileName(suffix=xsLibrarySuffix, xsID=xsID),
            )
            pmatrxLibraryPath = os.path.join(
                baseDir,
                nuclearDataIO.getExpectedPMATRXFileName(suffix=xsLibrarySuffix, xsID=xsID),
            )
            # Check if the gamiso and pmatrx data paths exist with the xs library suffix so that
            # these are merged in. If they don't both exist then that is OK and we can just
            # revert back to expecting the files just based on the XS ID.
            if not (os.path.exists(gamisoLibraryPath) and os.path.exists(pmatrxLibraryPath)):
                runLog.warning(
                    "One of GAMISO or PMATRX data exist for "
                    f"XS ID {xsID} with suffix {xsLibrarySuffix}. "
                    "Attempting to find GAMISO/PMATRX data with "
                    f"only XS ID {xsID} instead."
                )
                gamisoLibraryPath = os.path.join(baseDir, nuclearDataIO.getExpectedGAMISOFileName(xsID=xsID))
                pmatrxLibraryPath = os.path.join(baseDir, nuclearDataIO.getExpectedPMATRXFileName(xsID=xsID))
            # GAMISO data
            gammaLibrary = gamiso.readBinary(gamisoLibraryPath)
            addedDummyData = gamiso.addDummyNuclidesToLibrary(
                gammaLibrary, dummyNuclidesInNeutron
            )  # Add DUMMY nuclide data not produced by MC2-3
            if addedDummyData:
                # round-trip through disk, same as the neutron case above
                gamisoDummyPath = gamisoLibraryPath
                gamiso.writeBinary(gammaLibrary, gamisoDummyPath)
                gammaLibraryDummyData = gamiso.readBinary(gamisoDummyPath)
                librariesToMerge.append(gammaLibraryDummyData)
            else:
                librariesToMerge.append(gammaLibrary)
            # PMATRX data
            pmatrxLibrary = pmatrx.readBinary(pmatrxLibraryPath)
            addedDummyData = pmatrx.addDummyNuclidesToLibrary(
                pmatrxLibrary, dummyNuclidesInNeutron
            )  # Add DUMMY nuclide data not produced by MC2-3
            if addedDummyData:
                pmatrxDummyPath = pmatrxLibraryPath
                pmatrx.writeBinary(pmatrxLibrary, pmatrxDummyPath)
                pmatrxLibraryDummyData = pmatrx.readBinary(pmatrxDummyPath)
                librariesToMerge.append(pmatrxLibraryDummyData)
            else:
                librariesToMerge.append(pmatrxLibrary)
    for library in librariesToMerge:
        lib.merge(library)
    return neutronVelocities
class _XSLibrary:
    """Parent class for Isotxs and Compxs library objects."""

    neutronEnergyUpperBounds = properties.createImmutableProperty(
        "neutronEnergyUpperBounds", "an ISOTXS", "Get or set the neutron energy groups."
    )
    neutronVelocity = properties.createImmutableProperty(
        "neutronVelocity", "an ISOTXS", "Get or set the mean neutron velocity in cm/s."
    )

    def __init__(self):
        # each element is a label string such as U235AA
        self._orderedNuclideLabels = []

    def __contains__(self, key):
        return key in self._orderedNuclideLabels

    def __len__(self):
        return len(self._orderedNuclideLabels)

    def __setitem__(self, key, value):
        # duplicate keys are a programming error; entries are never overwritten
        if key in self._orderedNuclideLabels:
            raise AttributeError(f"{self} already contains {key}")
        value.container = self
        self._orderedNuclideLabels.append(key)

    def __getitem__(self, key):
        raise NotImplementedError

    def __delitem__(self, key):
        self._orderedNuclideLabels.remove(key)

    def merge(self, other):
        raise NotImplementedError

    def _mergeNeutronEnergies(self, other):
        self.neutronEnergyUpperBounds = other.neutronEnergyUpperBounds
        # neutron velocity changes, but just use the first one.
        if not hasattr(self, "_neutronVelocity"):
            self.neutronVelocity = other.neutronVelocity

    def items(self):
        """Yield (label, entry) pairs in insertion order."""
        for label in self._orderedNuclideLabels:
            yield (label, self[label])
class IsotxsLibrary(_XSLibrary):
    """
    IsotxsLibrary objects are a collection of cross sections (XS) for both neutron and gamma reactions.

    IsotxsLibrary objects must be initialized with data through one of the read methods within this package

    See Also
    --------
    :py:func:`armi.nuclearDataIO.cccc.isotxs.readBinary`
    :py:func:`armi.nuclearDataIO.cccc.gamiso.readBinary`
    :py:func:`armi.nuclearDataIO.cccc.pmatrx.readBinary`
    :py:class:`CompxsLibrary`

    Examples
    --------
    >>> lib = xsLibraries.IsotxsLibrary()
    >>> # this doesn't have any information yet, we can read ISOTXS information
    >>> libIsotxs = isotxs.readBinary("ISOAA")
    >>> # any number of XSLibraries can be merged
    >>> lib.merge(libIsotxs) # now the `lib` contains the ISOAA information.
    """

    def __init__(self):
        _XSLibrary.__init__(self)
        self.pmatrxMetadata = NuclideXSMetadata()
        self.isotxsMetadata = NuclideXSMetadata()
        self.gamisoMetadata = NuclideXSMetadata()
        # keys are nuclide labels such as U235AA
        # vals are XSNuclide objects
        self._nuclides = {}
        # cache for getScatterWeights, keyed by scatter matrix name
        self._scatterWeights = {}

    gammaEnergyUpperBounds = properties.createImmutableProperty(
        "gammaEnergyUpperBounds",
        "a PMATRX or GAMISO",
        "Get or set the gamma energy groups.",
    )
    neutronDoseConversionFactors = properties.createImmutableProperty(
        "neutronDoseConversionFactors",
        "a PMATRX",
        "Get or set the neutron dose conversion factors.",
    )
    gammaDoseConversionFactors = properties.createImmutableProperty(
        "gammaDoseConversionFactors",
        "a PMATRX",
        # typo fix: "does" -> "dose"
        "Get or set the gamma dose conversion factors.",
    )

    @property
    def numGroups(self):
        """Get the number of neutron energy groups."""
        # This unlocks the immutable property so that it can be
        # read prior to not being set to check the number of groups
        # that are defined. If the property is not unlocked before
        # accessing when it has not yet been defined then an exception
        # is thrown.
        properties.unlockImmutableProperties(self)
        if self.neutronEnergyUpperBounds is not None:
            energyBounds = self.neutronEnergyUpperBounds
        else:
            energyBounds = []
        # Make sure to re-lock the properties after we are done.
        properties.lockImmutableProperties(self)
        return len(energyBounds)

    @property
    def numGroupsGamma(self):
        """Get the number of gamma energy groups."""
        # See numGroups for why the immutable property is unlocked here.
        properties.unlockImmutableProperties(self)
        if self.gammaEnergyUpperBounds is not None:
            energyBounds = self.gammaEnergyUpperBounds
        else:
            energyBounds = []
        # Make sure to re-lock the properties after we are done.
        properties.lockImmutableProperties(self)
        return len(energyBounds)

    @property
    def xsIDs(self):
        """
        Get the XS ID's present in this library.

        Assumes the suffixes are the last 2 letters in the nucNames
        """
        return list(set(getSuffixFromNuclideLabel(name) for name in self.nuclideLabels))

    def __repr__(self):
        isotxs = bool(self.isotxsMetadata.keys())
        pmatrx = bool(self.pmatrxMetadata.keys())
        gamiso = bool(self.gamisoMetadata.keys())
        groups = ""
        if self.numGroups:
            groups += f"Neutron groups: {self.numGroups}, "
        if self.numGroupsGamma:
            groups += f"Gamma groups: {self.numGroupsGamma},"
        # BUGFIX: previously the flags above were computed and then discarded by
        # returning an empty f-string; include them in a meaningful summary.
        return (
            f"<IsotxsLibrary ISOTXS: {isotxs}, PMATRX: {pmatrx}, GAMISO: {gamiso}, "
            f"{groups} nuclides: {len(self)}>"
        )

    def __setitem__(self, key, value):
        _XSLibrary.__setitem__(self, key, value)
        self._nuclides[key] = value

    def __getitem__(self, key):
        return self._nuclides[key]

    def get(self, nuclideLabel, default):
        return self._nuclides.get(nuclideLabel, default)

    def getNuclide(self, nucName, suffix):
        """
        Get a nuclide object from the XS library.

        Parameters
        ----------
        nucName : str
            ARMI nuclide name, e.g. 'U235', 'PU239'
        suffix : str
            Restrict to a specific nuclide lib suffix e.g. 'AA'

        Returns
        -------
        nuclide : Nuclide object
            A nuclide from the library or None

        Raises
        ------
        KeyError
            If the label built from ``nucName`` + ``suffix`` is not in the library.
        """
        libLabel = nuclideBases.byName[nucName].label + suffix
        try:
            return self[libLabel]
        except KeyError:
            runLog.error("Error in {}.\nSee stderr.".format(self))
            raise

    def __delitem__(self, key):
        _XSLibrary.__delitem__(self, key)
        del self._nuclides[key]

    @property
    def nuclideLabels(self):
        """Get the nuclide Names."""
        # need to create a new list so the _orderedNuclideLabels does not get modified.
        return list(self._orderedNuclideLabels)

    @property
    def nuclides(self):
        return [self[name] for name in self._orderedNuclideLabels]

    def getNuclides(self, suffix):
        """Returns a list of the nuclide objects in the library."""
        nucs = []
        # nucName is U235IA, etc.. nuc.name is U235, etc
        for nucLabel, nuc in self.items():
            # `in` used below for support of >26 xs groups
            if not suffix or suffix in getSuffixFromNuclideLabel(nucLabel):
                # accept things with the suffix if one is given
                if nuc not in nucs:
                    nucs.append(nuc)
        return nucs

    def merge(self, other):
        """Merge two XSLibraries."""
        runLog.debug("Merging XS library {} into XS library {}".format(other, self))
        self._mergeProperties(other)
        # merging meta data may raise an exception before knowing anything about the contained nuclides
        # if it raises an exception, nothing has been modified in two objects
        isotxsMeta, pmatrxMeta, gamisoMeta = self._mergeMetadata(other)
        self._mergeNuclides(other)
        # only vampire the __dict__ if successful
        other.__dict__ = {}
        # only reassign metadata if successful
        self.isotxsMetadata = isotxsMeta
        self.pmatrxMetadata = pmatrxMeta
        self.gamisoMetadata = gamisoMeta

    def _mergeProperties(self, other):
        # unlock so the immutable properties can be read off ``other`` before it is gutted
        properties.unlockImmutableProperties(other)
        try:
            self.neutronDoseConversionFactors = other.neutronDoseConversionFactors
            self._mergeNeutronEnergies(other)
            self.gammaEnergyUpperBounds = other.gammaEnergyUpperBounds
            self.gammaDoseConversionFactors = other.gammaDoseConversionFactors
        finally:
            properties.lockImmutableProperties(other)

    def _mergeMetadata(self, other):
        isotxsMeta = self.isotxsMetadata.merge(other.isotxsMetadata, self, other, "ISOTXS", OSError)
        pmatrxMeta = self.pmatrxMetadata.merge(other.pmatrxMetadata, self, other, "PMATRX", OSError)
        gamisoMeta = self.gamisoMetadata.merge(other.gamisoMetadata, self, other, "GAMISO", OSError)
        return isotxsMeta, pmatrxMeta, gamisoMeta

    def _mergeNuclides(self, other):
        # these must be different
        for nuclideKey, nuclide in other.items():
            if nuclideKey in self:
                self[nuclideKey].merge(nuclide)
            else:
                self[nuclideKey] = nuclide

    def resetScatterWeights(self):
        self._scatterWeights = {}

    def getScatterWeights(self, scatterMatrixKey="elasticScatter"):
        """
        Build or retrieve pre-built scatter weight data.

        This acts like a cache for _buildScatterWeights

        See Also
        --------
        _buildScatterWeights
        """
        if not self._scatterWeights.get(scatterMatrixKey):
            self._scatterWeights[scatterMatrixKey] = self._buildScatterWeights(scatterMatrixKey)
        return self._scatterWeights[scatterMatrixKey]

    def _buildScatterWeights(self, scatterMatrixKey):
        r"""
        Build a scatter-weight lookup table for the scatter matrix.

        Scatter "weights" are needed for sensitivity studies when derivatives wrt the
        scatter XS are required. They are defined like:

        .. math::

            w_{g^{\prime} \leftarrow g} = \frac{\sigma_{s,g^{\prime} \leftarrow g}}
            {\sum_{g^{\prime\prime}=1}^G \sigma_{s, g^{\prime\prime} \leftarrow g}}

        Returns
        -------
        scatterWeights : dict
            (xsID, fromGroup) : weight column (sparse Gx1)
        """
        runLog.info("Building {0} weights on cross section library".format(scatterMatrixKey))
        scatterWeights = {}
        for nucName, nuc in self.items():
            nucScatterWeights = nuc.buildNormalizedScatterColumns(scatterMatrixKey)
            for fromG, scatterColumn in nucScatterWeights.items():
                scatterWeights[nucName, fromG] = scatterColumn
        return scatterWeights

    def purgeFissionProducts(self, r):
        """
        Purge the fission products based on the active nuclides within the reactor.

        Parameters
        ----------
        r : py:class:`armi.reactors.reactor.Reactor`
            a reactor, or None

        .. warning:: Sometimes worker nodes do not have a reactor, fission products will not be purged.
        """
        # NOTE(review): despite the docstring allowing None, ``r.blueprints`` below
        # would raise on a None reactor — callers appear expected to guard. Confirm.
        runLog.info("Purging detailed fission products from {}".format(self))
        modeledNucs = r.blueprints.allNuclidesInProblem
        for key, nuc in list(self.items()):
            if nuc.name not in modeledNucs:
                del self[key]
class CompxsLibrary(_XSLibrary):
    """
    Library object used in reading/writing COMPXS files.

    Contains macroscopic cross sections for homogenized regions.

    See Also
    --------
    :py:class:`IsotxsLibrary`
    :py:func:`armi.nuclearDataIO.cccc.compxs.readBinary`

    Examples
    --------
    >>> lib = compxs.readBinary("COMPXS")
    >>> lib.regions
    """

    def __init__(self):
        _XSLibrary.__init__(self)
        self._regions = {}
        self.compxsMetadata = RegionXSMetadata()

    def __setitem__(self, key, value):
        _XSLibrary.__setitem__(self, key, value)
        self._regions[key] = value

    def __getitem__(self, key):
        return self._regions[key]

    def __delitem__(self, key):
        _XSLibrary.__delitem__(self, key)
        del self._regions[key]

    @property
    def regions(self):
        """Region objects in insertion order."""
        return [self[label] for label in self._orderedNuclideLabels]

    @property
    def regionLabels(self):
        """Region keys in insertion order."""
        return list(self._orderedNuclideLabels)

    def merge(self, other):
        """Merge two ``COMPXS`` libraries."""
        self._mergeProperties(other)
        self.compxsMetadata = self.compxsMetadata.merge(other.compxsMetadata, self, other, "COMPXS", OSError)
        self._appendRegions(other)

    def _mergeProperties(self, other):
        properties.unlockImmutableProperties(other)
        try:
            self._mergeNeutronEnergies(other)
        finally:
            properties.lockImmutableProperties(other)

    def _appendRegions(self, other):
        # renumber incoming regions so they follow the regions already present
        offset = len(self.regions)
        for incoming in other.regions:
            self[incoming.regionNumber + offset] = incoming
        self.compxsMetadata["numComps"] = len(self.regions)
================================================
FILE: armi/nuclearDataIO/xsNuclides.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
This module contains cross section nuclides, which are a wrapper around the
:py:class:`~armi.nucDirectory.nuclideBases.INuclide` objects. The cross section nuclide objects contain cross section
information from a specific calculation (e.g. neutron, or gamma cross sections).
:py:class:`XSNuclide` objects also contain meta data from the original file, so that another file can be reconstructed.
Warning
-------
:py:class:`XSNuclide` objects should only be created by reading data into
:py:class:`~armi.nuclearDataIO.xsLibrary.XSLibrary` objects, and then retrieving them through their label index (i.e.
"PU39AA").
"""
from armi.nucDirectory import nuclideBases
from armi.nuclearDataIO import nuclearFileMetadata, xsCollections, xsLibraries
from armi.utils.customExceptions import warn_when_root
from armi.utils.plotting import plotScatterMatrix # noqa: F401
@warn_when_root
def NuclideLabelDoesNotMatchNuclideLabel(nuclide, label, xsID):
    # Warning-message factory (emitted once, from the root process, via @warn_when_root)
    # for when a library's nuclide label disagrees with the nucDirectory label.
    return "The label {} (xsID:{}) for nuclide {}, does not match the nucDirectory label.".format(label, xsID, nuclide)
class XSNuclide(nuclideBases.NuclideWrapper):
    """
    A nuclide object for a specific library.

    XSNuclide objects can contain GAMISO, ISOTXS, and PMATRX data all on a single instance.
    """

    def __init__(self, xsCollection, xsCollectionKey):
        nuclideBases.NuclideWrapper.__init__(self, xsCollection, xsCollectionKey)
        # cross section ID suffix (e.g. "AA") parsed off the library label
        self.xsId = xsLibraries.getSuffixFromNuclideLabel(xsCollectionKey)
        self.source = 0.0
        # 2D record... nucNames
        # 4D record: per-library-type metadata holders
        self.isotxsMetadata = nuclearFileMetadata.NuclideMetadata()
        self.gamisoMetadata = nuclearFileMetadata.NuclideMetadata()
        self.pmatrxMetadata = nuclearFileMetadata.NuclideMetadata()
        # 5D and 7D records: neutron (micros) and gamma cross section collections
        self.micros = xsCollections.XSCollection(parent=self)
        self.gammaXS = xsCollections.XSCollection(parent=self)
        # PMATRX-derived scalar data; None until read or merged in from a file
        self.neutronHeating = None
        self.neutronDamage = None
        self.gammaHeating = None
        self.isotropicProduction = None
        self.linearAnisotropicProduction = None
        self.nOrderProductionMatrix = {}

    def updateBaseNuclide(self):
        """
        Update which nuclide base this :py:class:`XSNuclide` points to.

        Notes
        -----
        During instantiation, not everything is available, only the user-supplied nuclide label,
        i.e. :py:class:`~armi.nucDirectory.nuclideBases.NuclideWrapper.containerKey`.
        During the read operation, the ISOTXS metadata (``nuclideId``) becomes available and is
        used here to resolve the actual nuclide base.
        """
        if self._base is not None:
            # base nuclide already resolved; nothing to do
            return
        # most nuclides have the correct NuclideBase ID
        nuclideId = self.isotxsMetadata["nuclideId"]
        nuclideBase = nuclideBases.byName.get(nuclideId, None)
        if nuclideBase is None or isinstance(nuclideBase, nuclideBases.DummyNuclideBase):
            # FP, DUMMY, DUMP
            nuclideBase = nuclideBases.byLabel.get(self.nucLabel, None)
            if nuclideBase is None:
                raise OSError("Could not determine NuclideBase for label {}".format(self.nucLabel))
        if self.nucLabel != nuclideBase.label:
            # warn (on the root process) and update the global label registry to match this library
            NuclideLabelDoesNotMatchNuclideLabel(nuclideBase, self.nucLabel, self.xsId)
            nuclideBases.changeLabel(nuclideBase, self.nucLabel)
        self._base = nuclideBase

    def getMicroXS(self, interaction, group):
        """Returns the microscopic xs as the ISOTXS value if it exists or a 0 since it doesn't."""
        if interaction in self.micros.__dict__:
            try:
                return self.micros[interaction][group]
            except IndexError:
                # group index out of range for this nuclide's data
                raise IndexError(
                    "Group {0} not found in interaction {1} of nuclide {2}".format(group, interaction, self.name)
                )
        else:
            # interaction not present for this nuclide; treat as a zero cross section
            return 0

    def getXS(self, interaction):
        """Get the cross section of a particular interaction.

        See Also
        --------
        armi.nucDirectory.homogRegion.getXS
        """
        return self.micros[interaction]

    def buildNormalizedScatterColumns(self, scatterMatrixKey):
        """
        Build normalized columns of a scatter matrix.

        The vectors represent all scattering out of each group.
        The rows of the scatter matrix represent in-scatter and the columns
        represent out-scatter. So this sums up the columns.

        Returns
        -------
        scatterWeights : dict
            keys are fromG indices, values are sparse matrix columns (size: Gx1)
            containing normalized columns of the scatter matrix.
        """
        scatter = self.micros[scatterMatrixKey]
        scatterWeights = {}
        if scatter is None:
            # no scatter matrix of this type on the library; return empty mapping
            return scatterWeights
        for fromG in range(self.container.numGroups):
            outScatter = scatter[:, fromG]  # fromG column of scatter matrix.
            total = outScatter.sum()
            if total != 0.0:
                normalizedOutScatter = outScatter / total
            else:
                # avoid division by zero for groups with no out-scatter; keep the zero column
                normalizedOutScatter = outScatter
            scatterWeights[fromG] = normalizedOutScatter
        return scatterWeights

    @property
    def trans(self):
        """Get the transmutations for this nuclide.

        Notes
        -----
        This is a property wrapper around the base nuclide's :code:`trans` attribute.
        """
        return self._base.trans

    @property
    def decays(self):
        """Get the decays for this nuclide.

        Notes
        -----
        This is a property wrapper around the base nuclide's :code:`decays` attribute.
        """
        return self._base.decays

    def merge(self, other):
        """
        Merge the attributes of two XSNuclides.

        Parameters
        ----------
        other : armi.nuclearDataIO.xsNuclides.XSNuclide
            The other nuclide to merge information.

        Notes
        -----
        The merge is really more like "cannibalize" in that the object performing the merge takes on the attributes of
        the :code:`other`. It isn't necessary to create new objects for the newly merged attributes, because the 99%
        usage is only used during runtime, where the second XSNuclide, and its container (e.g. ISOTXS, GAMISO, etc.)
        are discarded after the merge.
        """
        self.isotxsMetadata = self.isotxsMetadata.merge(other.isotxsMetadata, self, other, "ISOTXS", AttributeError)
        self.gamisoMetadata = self.gamisoMetadata.merge(other.gamisoMetadata, self, other, "GAMISO", AttributeError)
        self.pmatrxMetadata = self.pmatrxMetadata.merge(other.pmatrxMetadata, self, other, "PMATRX", AttributeError)
        self.micros.merge(other.micros)
        self.gammaXS.merge(other.gammaXS)
        # scalar attributes may be assigned on at most one of the two instances; _mergeAttributes enforces this
        self.neutronHeating = _mergeAttributes(self, other, "neutronHeating")
        self.neutronDamage = _mergeAttributes(self, other, "neutronDamage")
        self.gammaHeating = _mergeAttributes(self, other, "gammaHeating")
        self.isotropicProduction = _mergeAttributes(self, other, "isotropicProduction")
        self.linearAnisotropicProduction = _mergeAttributes(self, other, "linearAnisotropicProduction")
        # this is lazy, but should work, because the n-order wouldn't be set without the others being set first.
        self.nOrderProductionMatrix = self.nOrderProductionMatrix or other.nOrderProductionMatrix
def _mergeAttributes(this, other, attrName):
"""Function for merging XSNuclide attributes.
Notes
-----
This function checks to see that the attribute has only been assigned for a single instance, and then uses uses
the one that has been assigned.
Returns
-------
The proper value for the attribute.
"""
attr1 = getattr(this, attrName)
attr2 = getattr(other, attrName)
if attr1 is not None and attr2 is not None:
raise AttributeError(
"Cannot merge {} and {}, the attribute `{}` has been assigned on bothinstances.".format(
this, other, attrName
)
)
return attr1 if attr1 is not None else attr2
================================================
FILE: armi/operators/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Operators build and hold the ARMI reactor model and perform operations on it.
Different operators may perform different calculation loops upon the reactor model.
Operators can be thought of as schedulers for the interactions between the various
ARMI physics packages and the reactor object(s).
Operators are generally created by a :py:mod:`armi.cases` object and are chosen by
the ``runType`` setting. Custom operators may be introduced via the :py:mod:`armi.plugins` system.
The ARMI framework comes with two general-purpose Operators, which can be used for
very real analysis given a proper set of plugins. The :py:class:`~armi.operators.operator.Operator`
is the Standard operator, which loops over cycles and timenodes. The
:py:class:`~armi.operators.snapshots.OperatorSnapshots`
is the Snapshots operator, which loops over specific points in time from a previous Standard run
and performs additional analysis (e.g. for detailed follow-on analysis/transients).
See Also
--------
armi.cases : Builds operators
armi.reactor : The reactor model that the operator operates upon
armi.interfaces : Code that operators schedule to perform the real analysis or
math on the reactor model
"""
# ruff: noqa: I001
from armi import context, getPluginManagerOrFail
from armi.operators.runTypes import RunTypes
from armi.operators.operator import Operator
from armi.operators.operatorMPI import OperatorMPI
from armi.operators.snapshots import OperatorSnapshots
def factory(cs):
    """Instantiate the appropriate Operator subclass for the given case settings."""
    operatorClass = getOperatorClassFromSettings(cs)
    return operatorClass(cs)
def getOperatorClassFromSettings(cs):
    """Resolve the Operator class implied by user settings (possibly supplied by a plugin).

    Parameters
    ----------
    cs : Settings

    Returns
    -------
    Operator : Operator

    Raises
    ------
    ValueError
        If the Operator class cannot be determined from the settings.
    """
    runType = cs["runType"]

    # built-in operators first
    if runType == RunTypes.STANDARD:
        # serial runs use the plain Operator; parallel runs need the MPI-aware subclass
        return Operator if context.MPI_SIZE == 1 else OperatorMPI
    if runType == RunTypes.SNAPSHOTS:
        return OperatorSnapshots

    # otherwise, ask the plugins; exactly one of them must claim this runType
    plugInOperator = None
    for potentialOperator in getPluginManagerOrFail().hook.getOperatorClassFromRunType(runType=runType):
        if plugInOperator is not None:
            raise ValueError(
                "More than one Operator class was "
                f"recognized for runType `{runType}`: "
                f"{plugInOperator} and {potentialOperator}. "
                "This is not allowed. Please adjust plugin config."
            )
        plugInOperator = potentialOperator
    if plugInOperator is not None:
        return plugInOperator

    raise ValueError(
        f"No valid operator was found for runType: `{runType}`. Please adjust settings or plugin configuration."
    )
================================================
FILE: armi/operators/operator.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The standard ARMI operator.
This builds and maintains the interface stack and loops through it for a certain number of cycles with a certain number
of timenodes per cycle.
This is analogous to a real reactor operating over some period of time, often from initial startup, through the various
cycles, and out to the end of plant life.
"""
import collections
import os
import re
import time
from typing import Optional, Tuple
from armi import context, interfaces, runLog
from armi.bookkeeping import db, memoryProfiler
from armi.bookkeeping.report import reportingUtils
from armi.operators.runTypes import RunTypes
from armi.physics.fuelCycle.settings import CONF_SHUFFLE_LOGIC
from armi.physics.neutronics.globalFlux.globalFluxInterface import (
GlobalFluxInterfaceUsingExecuters,
)
from armi.settings import settingsValidation
from armi.settings.fwSettings.globalSettings import (
CONF_CYCLES_SKIP_TIGHT_COUPLING_INTERACTION,
CONF_DEFERRED_INTERFACE_NAMES,
CONF_DEFERRED_INTERFACES_CYCLE,
CONF_TIGHT_COUPLING,
CONF_TIGHT_COUPLING_MAX_ITERS,
)
from armi.utils import (
codeTiming,
getAvailabilityFactors,
getBurnSteps,
getCycleLengths,
getCycleNames,
getMaxBurnSteps,
getPowerFractions,
getPreviousTimeNode,
getStepLengths,
pathTools,
units,
)
class Operator:
"""
Orchestrate an ARMI run, building all the pieces, looping through the interfaces, and manipulating the reactor.
This Operator loops over a user-input number of cycles, each with a user-input number of subcycles (called time
nodes). It calls a series of interaction hooks on each of the :py:class:`~armi.interfaces.Interface` in the
Interface Stack.
.. figure:: /.static/armi_general_flowchart.png
:align: center
**Figure 1.** The computational flow of the interface hooks in a Standard Operator
.. note:: The :doc:`/developer/guide` has some additional narrative on this topic.
.. impl:: An operator will have a reactor object to communicate between plugins.
:id: I_ARMI_OPERATOR_COMM
:implements: R_ARMI_OPERATOR_COMM
A major design feature of ARMI is that the Operator orchestrates the simulation, and as part of that, the
Operator has access to the Reactor data model. In code, this just means the reactor object is a mandatory
attribute of an instance of the Operator. But conceptually, this means that while the Operator drives the
simulation of the reactor, all code has access to the same copy of the reactor data model. This is a crucial
idea that allows disparate external nuclear models to interact; they interact with the ARMI reactor data model.
.. impl:: An operator is built from user settings.
:id: I_ARMI_OPERATOR_SETTINGS
:implements: R_ARMI_OPERATOR_SETTINGS
A major design feature of ARMI is that a run is built from user settings. In code, this means that a case
``Settings`` object is passed into this class to initialize an Operator. Conceptually, this means that the
Operator that controls a reactor simulation is defined by user settings. Because developers can create their own
settings, the user can control an ARMI simulation with arbitrary granularity in this way. In practice, settings
common control things like: how many cycles a reactor is being modeled for, how many timesteps are to be modeled
per time node, the verbosity of the logging of the run, and which modeling steps will be run.
.. impl:: The operator shall advance the reactor through time.
:id: I_ARMI_DB_TIME2
:implements: R_ARMI_DB_TIME
A major design feature of any scientific model is time evolution of the physical system. The operator is in
charge of driving the reactor through time. It sets various parameters that define the temporal position of the
reactor: cycle, node, timeNode, and time. This information is then stored in the output database.
Attributes
----------
cs : Settings
Global settings that define the run.
cycleNames : list of str
The name of each cycle. Cycles without a name are `None`.
stepLengths : list of list of float
A two-tiered list, where primary indices correspond to cycle and
secondary indices correspond to the length of each intra-cycle step (in days).
cycleLengths : list of float
The duration of each individual cycle in a run (in days). This is the entire cycle, from startup to startup and
includes outage time.
burnSteps : list of int
The number of sub-cycles in each cycle.
availabilityFactors : list of float
The fraction of time in a cycle that the plant is producing power. Note that capacity factor is always less than
or equal to this, depending on the power fraction achieved during each cycle. Note that this is not a two-tiered
list like stepLengths or powerFractions, because each cycle can have only one availabilityFactor.
powerFractions : list of list of float
A two-tiered list, where primary indices correspond to cycles and secondary indices correspond to the fraction
of full rated capacity that the plant achieves during that step of the cycle. Zero power fraction can indicate
decay-only cycles.
interfaces : list
The Interface objects that will operate upon the reactor
"""
inspector = settingsValidation.Inspector
    def __init__(self, cs):
        """
        Constructor for operator.

        Parameters
        ----------
        cs : Settings
            Global settings that define the run.

        Raises
        ------
        OSError
            If unable to create the FAST_PATH directory.
        """
        self.r = None  # the Reactor model; attached later via initializeInterfaces()
        self.cs = cs
        runLog.LOG.startLog(self.cs.caseTitle)
        self.timer = codeTiming.MasterTimer.getMasterTimer()
        self.interfaces = []
        self.restartData = []
        self.loadedRestartData = []
        # lazily-resolved per-cycle history data; each is computed from settings on
        # first access of the corresponding property (e.g. self.cycleNames)
        self._cycleNames = None
        self._stepLengths = None
        self._cycleLengths = None
        self._burnSteps = None
        self._maxBurnSteps = None
        self._powerFractions = None
        self._availabilityFactors = None
        self._convergenceSummary = None
        # Create the welcome headers for the case (case, input, machine, and some basic reactor information)
        reportingUtils.writeWelcomeHeaders(self, cs)
        self._initFastPath()
@property
def burnSteps(self):
if not self._burnSteps:
self._burnSteps = getBurnSteps(self.cs)
if self._burnSteps == [] and self.cs["nCycles"] == 1:
# it is possible for there to be one cycle with zero burn up, in which case burnSteps is an empty list
pass
else:
self._checkReactorCycleAttrs({"burnSteps": self._burnSteps})
return self._burnSteps
@property
def maxBurnSteps(self):
if not self._maxBurnSteps:
self._maxBurnSteps = getMaxBurnSteps(self.cs)
return self._maxBurnSteps
@property
def stepLengths(self):
"""
Calculate step lengths.
.. impl:: Calculate step lengths from cycles and burn steps.
:id: I_ARMI_FW_HISTORY
:implements: R_ARMI_FW_HISTORY
In all computational modeling of physical systems, it is necessary to break time into discrete chunks. In
reactor modeling, it is common to first break the time a reactor is simulated for into the practical cycles
the reactor runs. And then those cycles are broken down into smaller chunks called burn steps. The final
step lengths this method returns is a two-tiered list, where primary indices correspond to the cycle and
secondary indices correspond to the length of each intra-cycle step (in days).
"""
if not self._stepLengths:
self._stepLengths = getStepLengths(self.cs)
if self._stepLengths == [] and self.cs["nCycles"] == 1:
# it is possible for there to be one cycle with zero burn up, in which case stepLengths is an empty list
pass
else:
self._checkReactorCycleAttrs({"Step lengths": self._stepLengths})
self._consistentPowerFractionsAndStepLengths()
return self._stepLengths
@property
def cycleLengths(self):
if not self._cycleLengths:
self._cycleLengths = getCycleLengths(self.cs)
self._checkReactorCycleAttrs({"cycleLengths": self._cycleLengths})
return self._cycleLengths
@property
def powerFractions(self):
if not self._powerFractions:
self._powerFractions = getPowerFractions(self.cs)
self._checkReactorCycleAttrs({"powerFractions": self._powerFractions})
self._consistentPowerFractionsAndStepLengths()
return self._powerFractions
@property
def availabilityFactors(self):
if not self._availabilityFactors:
self._availabilityFactors = getAvailabilityFactors(self.cs)
self._checkReactorCycleAttrs({"availabilityFactors": self._availabilityFactors})
return self._availabilityFactors
@property
def cycleNames(self):
if not self._cycleNames:
self._cycleNames = getCycleNames(self.cs)
self._checkReactorCycleAttrs({"Cycle names": self._cycleNames})
return self._cycleNames
    @staticmethod
    def _initFastPath():
        """
        Create the FAST_PATH directory for fast local operations.

        Notes
        -----
        The FAST_PATH was once created at import-time in order to support modules that use FAST_PATH without operators
        (e.g. Database). However, we decided to leave FAST_PATH as the CWD in INTERACTIVE mode, so this should not be a
        problem anymore, and we can safely move FAST_PATH creation back into the Operator.

        If the operator is being used interactively (e.g. at a prompt) we will still use a temporary local fast path (in
        case the user is working on a slow network path).

        Raises
        ------
        OSError
            If the FAST_PATH directory truly could not be created.
        """
        context.activateLocalFastPath()
        try:
            os.makedirs(context.getFastPath())
        except OSError:
            # If FAST_PATH exists already that generally should be an error because different processes will be stepping
            # on each other. The exception to this rule is in cases that instantiate multiple operators in one process
            # (e.g. unit tests that loadTestReactor). Since the FAST_PATH is set at import, these will use the same path
            # multiple times. We pass here for that reason.
            if not os.path.exists(context.getFastPath()):
                # if it actually doesn't exist, that's an actual error. Raise
                raise
def _checkReactorCycleAttrs(self, attrsDict):
"""Check that the list has nCycles number of elements."""
for name, param in attrsDict.items():
if len(param) != self.cs["nCycles"]:
raise ValueError(
"The `{}` setting did not have a length consistent with the number of cycles.\n"
"Expected {} value(s), but only had {} defined.\n"
"Current input: {}".format(name, self.cs["nCycles"], len(param), param)
)
def _consistentPowerFractionsAndStepLengths(self):
"""Check that the internally-resolved _powerFractions and _stepLengths have consistent shapes, if they exist."""
if self._powerFractions and self._stepLengths:
for cycleIdx in range(len(self._powerFractions)):
if len(self._powerFractions[cycleIdx]) != len(self._stepLengths[cycleIdx]):
raise ValueError(
"The number of entries in lists for subcycle power fractions and sub-steps are inconsistent in "
f"cycle {cycleIdx}"
)
@property
def atEOL(self):
"""
Return whether we are approaching EOL.
For the standard operator, this will return true when the current cycle is the last cycle
(cs["nCycles"] - 1). Other operators may need to impose different logic.
"""
return self.r.p.cycle == self.cs["nCycles"] - 1
    def initializeInterfaces(self, r):
        """
        Attach the reactor to the operator and initialize all interfaces.

        This does not occur in `__init__` so that the ARMI operator can be initialized before a reactor is created,
        which is useful for summarizing the case information quickly.

        Parameters
        ----------
        r : Reactor
            The Reactor object to attach to this Operator.
        """
        # bidirectional link: operator knows the reactor and vice versa
        self.r = r
        r.o = self
        with self.timer.getTimer("Interface Creation"):
            self.createInterfaces()
            self._processInterfaceDependencies()
            if context.MPI_RANK == 0:
                # only the primary rank logs the stack summary and triggers the Init hooks
                runLog.header("=========== Interface Stack Summary ===========")
                runLog.info(reportingUtils.getInterfaceStackSummary(self))
                self.interactAllInit()
            else:
                # worker ranks attach interfaces without triggering Init interactions
                self._attachInterfaces()
        self._loadRestartData()
def __repr__(self):
return "<{} {} {}>".format(self.__class__.__name__, self.cs["runType"], self.cs)
    def __enter__(self):
        """Context manager entry; enables interface-level error handling hooks (see ``__exit__``)."""
        return self
def __exit__(self, exception_type, exception_value, stacktrace):
if any([exception_type, exception_value, stacktrace]):
runLog.error(r"{}\n{}\{}".format(exception_type, exception_value, stacktrace))
self.interactAllError()
    def operate(self):
        """
        Run the operation loop.

        See Also
        --------
        mainOperator : run the operator loop on the primary MPI node (for parallel runs)
        workerOperate : run the operator loop for the worker MPI nodes
        """
        self._mainOperate()
    def _mainOperate(self):
        """Main loop for a standard ARMI run. Steps through time interacting with the interfaces."""
        dbi = self.getInterface("database")
        if dbi is not None and dbi.enabled():
            # ensure the output database exists before anything tries to write to it
            dbi.initDB()
        if self.cs["loadStyle"] != "fromInput" and self.cs["runType"] != RunTypes.SNAPSHOTS:
            # restart run: recover prior state from the database before looping
            self.interactAllRestart(dbi)
        self.interactAllBOL()
        startingCycle = self.r.p.cycle  # may be starting at t != 0 in restarts
        for cycle in range(startingCycle, self.cs["nCycles"]):
            keepGoing = self._cycleLoop(cycle, startingCycle)
            if not keepGoing:
                # a BOC hook requested a halt; skip remaining cycles but still run EOL
                break
        self.interactAllEOL()
    def interactAllRestart(self, dbi: Optional[db.DatabaseInterface]):
        """Prepare for a restart simulation.

        Some steps are necessary to be taken after interfaces are constructed but before we
        start the real simulation. Crucially, we need to load the previous time point from the
        database. The previous time node is chosen because that is the last point where we are
        certain we have valid data and can safely recover.

        If restarting at BOC, trigger the EOC actions from the previous cycle. This is necessary to
        perform any fuel management operations that would have happened at the end of the previous cycle.

        Parameters
        ----------
        dbi : db.DatabaseInterface, optional
            The database interface responsible for reloading state; must not be None.

        Raises
        ------
        ValueError
            If no database interface is available to restart from.
        """
        startCycle = self.cs["startCycle"]
        startNode = self.cs["startNode"]
        prevTimeNode = getPreviousTimeNode(startCycle, startNode, self.cs)
        if dbi is not None:
            dbi.prepRestartRun()
        else:
            raise ValueError("No database interface means nothing is responsible for restarting from DB")
        # the database interface handled its own restart prep above, so exclude it here
        activeInterfaces = self.getActiveInterfaces("Restart", excludedInterfaceNames=("database",))
        self._interactAll("Restart", activeInterfaces, (startCycle, startNode), prevTimeNode)
        if startNode == 0:
            runLog.important("Calling `o.interactAllEOC` due to loading the last time node of the previous cycle.")
            self.interactAllEOC(prevTimeNode[0])
        # advance time since we loaded the previous time step
        self.r.p.cycle = startCycle
        self.r.p.timeNode = startNode
    def _cycleLoop(self, cycle, startingCycle):
        """Run the portion of the main loop that happens each cycle.

        Returns
        -------
        bool
            False if a BOC interaction requested a halt (stop looping), True otherwise.
        """
        self.r.p.cycleLength = self.cycleLengths[cycle]
        self.r.p.availabilityFactor = self.availabilityFactors[cycle]
        self.r.p.cycle = cycle
        self.r.core.p.coupledIteration = 0
        if cycle == startingCycle:
            # restarts may begin mid-cycle; resume from the loaded time node
            startingNode = self.r.p.timeNode
        else:
            startingNode = 0
        self.r.p.timeNode = startingNode
        halt = self.interactAllBOC(self.r.p.cycle)
        if halt:
            return False
        # read total core power from settings (power or powerDensity)
        basicPower = self.cs["power"] or (self.cs["powerDensity"] * self.r.core.getHMMass())
        for timeNode in range(startingNode, int(self.burnSteps[cycle])):
            self.r.core.p.power = self.powerFractions[cycle][timeNode] * basicPower
            self.r.p.capacityFactor = self.r.p.availabilityFactor * self.powerFractions[cycle][timeNode]
            self.r.p.stepLength = self.stepLengths[cycle][timeNode]
            self._timeNodeLoop(cycle, timeNode)
        else:  # do one last node at the end using the same power as the previous node
            # NOTE: the loop above has no `break`, so this for/else clause always runs.
            timeNode = self.burnSteps[cycle]
            if self.burnSteps[cycle] == 0:
                # this is a zero-burnup case
                powFrac = 1
            else:
                powFrac = self.powerFractions[cycle][timeNode - 1]
            self.r.core.p.power = powFrac * basicPower
            self._timeNodeLoop(cycle, timeNode)
        self.interactAllEOC(self.r.p.cycle)
        return True
def _timeNodeLoop(self, cycle, timeNode):
"""Run the portion of the main loop that happens each subcycle."""
self.r.p.timeNode = timeNode
if timeNode == 0:
dt = 0
else:
dt = self.r.o.stepLengths[cycle][timeNode - 1] / units.DAYS_PER_YEAR
self.r.p.time = self.r.p.time + dt
self.interactAllEveryNode(cycle, timeNode)
self._performTightCoupling(cycle, timeNode)
    def _performTightCoupling(self, cycle: int, timeNode: int, writeDB: bool = True):
        """If requested, perform tight coupling and write out database.

        Notes
        -----
        writeDB is False for OperatorSnapshots as the DB gets written at EOL.
        """
        if not self.couplingIsActive():
            # no coupling was requested
            return
        skipCycles = tuple(int(val) for val in self.cs[CONF_CYCLES_SKIP_TIGHT_COUPLING_INTERACTION])
        if cycle in skipCycles:
            runLog.warning(
                f"interactAllCoupled disabled this cycle ({self.r.p.cycle}) due to "
                "`cyclesSkipTightCouplingInteraction` setting."
            )
        else:
            # fresh convergence history for this time node
            self._convergenceSummary = collections.defaultdict(list)
            for coupledIteration in range(self.cs[CONF_TIGHT_COUPLING_MAX_ITERS]):
                # coupledIteration param is 1-indexed for reporting purposes
                self.r.core.p.coupledIteration = coupledIteration + 1
                converged = self.interactAllCoupled(coupledIteration)
                if converged:
                    runLog.important(f"Tight coupling iterations for c{cycle:02d}n{timeNode:02d} have converged!")
                    break
            if not converged:
                runLog.warning(
                    f"Tight coupling iterations for c{cycle:02d}n{timeNode:02d} have not converged!"
                    f" The maximum number of iterations, {self.cs[CONF_TIGHT_COUPLING_MAX_ITERS]}, was reached."
                )
        if writeDB:
            # database has not yet been written, so we need to write it.
            dbi = self.getInterface("database")
            dbi.writeDBEveryNode()
    def _interactAll(self, interactionName, activeInterfaces, *args):
        """
        Loop over the supplied activeInterfaces and perform the supplied interaction on each.

        Notes
        -----
        This is the base method for the other ``interactAll`` methods.

        Returns
        -------
        halt
            Truthy if any interface's hook returned a truthy value (a halt request).
        """
        interactMethodName = "interact{}".format(interactionName)
        # per-interface memory profiling only in debug verbosity with debugMem enabled
        printMemUsage = self.cs["verbosity"] == "debug" and self.cs["debugMem"]
        halt = False
        cycleNodeTag = self._expandCycleAndTimeNodeArgs(interactionName)
        runLog.header("=========== Triggering {} Event ===========".format(interactionName + cycleNodeTag))
        for statePointIndex, interface in enumerate(activeInterfaces, start=1):
            self.printInterfaceSummary(interface, interactionName, statePointIndex)
            # maybe make this a context manager
            if printMemUsage:
                memBefore = memoryProfiler.PrintSystemMemoryUsageAction()
                memBefore.broadcast()
                memBefore.invoke(self, self.r, self.cs)
            interactionMessage = f"{interface.name}.{interactionName}"
            with self.timer.getTimer(interactionMessage):
                interactMethod = getattr(interface, interactMethodName)
                # any truthy return requests a halt; keep calling the rest of the stack regardless
                halt = halt or interactMethod(*args)
            if printMemUsage:
                memAfter = memoryProfiler.PrintSystemMemoryUsageAction()
                memAfter.broadcast()
                memAfter.invoke(self, self.r, self.cs)
                memAfter -= memBefore
                memAfter.printUsage("after {:25s} {:15s} interaction".format(interface.name, interactionName))
            # Allow inherited classes to clean up things after an interaction
            self._finalizeInteract()
        runLog.header("=========== Completed {} Event ===========\n".format(interactionName + cycleNodeTag))
        return halt
def _finalizeInteract(self):
"""Member called after each interface has completed its interaction.
Useful for cleaning up data.
"""
pass
def printInterfaceSummary(self, interface, interactionName, statePointIndex):
"""
Log which interaction point is about to be executed.
This looks better as multiple lines but it's a lot easier to grep as one line. We leverage newlines instead of
long banners to save disk space.
"""
nodeInfo = self._expandCycleAndTimeNodeArgs(interactionName)
line = "=========== {:02d} - {:30s} {:15s} ===========".format(
statePointIndex, interface.name, interactionName + nodeInfo
)
runLog.header(line)
def _expandCycleAndTimeNodeArgs(self, interactionName):
"""Return text annotating information for current run event.
Notes
-----
- Init, BOL, EOL: empty
- Everynode: cycle, time node
- BOC, EOC: cycle number
- Coupled: cycle, time node, iteration number
"""
if interactionName == "Coupled":
cycleNodeInfo = (
f" - timestep: cycle {self.r.p.cycle}, node {self.r.p.timeNode}, "
f"year {'{0:.2f}'.format(self.r.p.time)} - iteration "
f"{self.r.core.p.coupledIteration}"
)
elif interactionName in ("BOC", "EOC"):
cycleNodeInfo = f" - timestep: cycle {self.r.p.cycle}"
# - timestep: cycle 2
elif interactionName in ("Init", "BOL", "EOL"):
cycleNodeInfo = ""
else:
cycleNodeInfo = (
f" - timestep: cycle {self.r.p.cycle}, node {self.r.p.timeNode}, year {'{0:.2f}'.format(self.r.p.time)}"
)
return cycleNodeInfo
def interactAllInit(self):
"""Call interactInit on all interfaces in the stack after they are initialized."""
self._interactAll("Init", self.getInterfaces())
def interactAllBOL(self, excludedInterfaceNames=()):
"""
Call interactBOL for all interfaces in the interface stack at beginning-of-life.
All enabled or bolForce interfaces will be called excluding interfaces with excludedInterfaceNames.
"""
activeInterfaces = self.getActiveInterfaces("BOL", excludedInterfaceNames)
self._interactAll("BOL", activeInterfaces)
def interactAllBOC(self, cycle):
"""Interact at beginning of cycle of all enabled interfaces."""
activeInterfaces = self.getActiveInterfaces("BOC", cycle=cycle)
return self._interactAll("BOC", activeInterfaces, cycle)
def interactAllEveryNode(self, cycle, tn, excludedInterfaceNames=()):
"""
Call the interactEveryNode hook for all enabled interfaces.
All enabled interfaces will be called excluding interfaces with excludedInterfaceNames.
Parameters
----------
cycle : int
The cycle that is currently being run. Starts at 0
tn : int
The time node that is currently being run (0 for BOC, etc.)
excludedInterfaceNames : list, optional
Names of interface names that will not be interacted with.
"""
activeInterfaces = self.getActiveInterfaces("EveryNode", excludedInterfaceNames)
self._interactAll("EveryNode", activeInterfaces, cycle, tn)
def interactAllEOC(self, cycle, excludedInterfaceNames=()):
"""Interact end of cycle for all enabled interfaces."""
self.r.p.time += self.r.p.cycleLength * (1 - self.r.p.availabilityFactor) / units.DAYS_PER_YEAR
activeInterfaces = self.getActiveInterfaces("EOC", excludedInterfaceNames)
self._interactAll("EOC", activeInterfaces, cycle)
def interactAllEOL(self, excludedInterfaceNames=()):
"""
Run interactEOL for all enabled interfaces.
Notes
-----
If the interfaces are flagged to be reversed at EOL, they are separated from the main stack and appended at the
end in reverse order. This allows, for example, an interface that must run first to also run last.
"""
activeInterfaces = self.getActiveInterfaces("EOL", excludedInterfaceNames)
self._interactAll("EOL", activeInterfaces)
def interactAllCoupled(self, coupledIteration):
    """
    Run all interfaces that are involved in tight physics coupling.

    .. impl:: Physics coupling is driven from Operator.
        :id: I_ARMI_OPERATOR_PHYSICS1
        :implements: R_ARMI_OPERATOR_PHYSICS

        This method runs all the interfaces that are defined as part of the tight physics coupling of
        the reactor, then returns whether the coupling has converged.

        Tight coupling implies the operator has split iterations between two or more physics solvers
        at the same solution point in simulated time (e.g. flux, then temperature, then flux again
        with updated densities, dimensions, and Doppler). This is distinct from loose coupling, which
        simply reuses the previous timestep's temperatures, and from full coupling where all fields
        are solved simultaneously. ARMI supports tight and loose coupling.
    """
    stack = self.getActiveInterfaces("Coupled")
    # Snapshot each coupled parameter before this iteration so convergence can be judged against it.
    for iface in stack:
        if iface.coupler is not None:
            iface.coupler.storePreviousIterationValue(iface.getTightCouplingValue())
    self._interactAll("Coupled", stack, coupledIteration)
    return self._checkTightCouplingConvergence(stack)
def _checkTightCouplingConvergence(self, activeInterfaces: list):
    """Check if interfaces are converged.

    Parameters
    ----------
    activeInterfaces : list
        the list of active interfaces on the operator

    Notes
    -----
    This is split off from self.interactAllCoupled to accommodate testing.
    Returns True when every coupled interface reports convergence (vacuously
    True when no interface defines a coupler).
    """
    converged = []
    for iface in activeInterfaces:
        coupler = iface.coupler
        if coupler is None:
            continue
        # isConverged must run before reading coupler.eps below
        converged.append(coupler.isConverged(iface.getTightCouplingValue()))
        self._convergenceSummary[f"{iface.name}: {coupler.parameter}"].append(coupler.eps)
    reportingUtils.writeTightCouplingConvergenceSummary(self._convergenceSummary)
    return all(converged)
def interactAllError(self):
    """Interact when an error is raised by any other interface. Provides a wrap-up option on the way to a crash."""
    for iface in self.interfaces:
        runLog.extra(f"Error-interacting with {iface.name}")
        iface.interactError()
def createInterfaces(self):
    """
    Dynamically discover all available interfaces and call their factories, potentially adding them to the stack.

    An operator contains an ordered list of interfaces that communicate between the core ARMI
    structure and auxiliary computational modules and/or external codes. Each interface optionally
    defines interaction "hooks" (interactBOL, interactEveryNode, interactEOC, ...) that run at the
    corresponding interaction points. The majority of ARMI capabilities lie within interfaces, and
    this architecture provides much of the flexibility of ARMI.

    See Also
    --------
    addInterface : Adds a particular interface to the interface stack.
    armi.interfaces.STACK_ORDER : A system to determine the required order of interfaces.
    armi.interfaces.getActiveInterfaceInfo : Collects the interface classes from relevant packages.
    """
    runLog.header("=========== Creating Interfaces ===========")
    for klass, kwargs in interfaces.getActiveInterfaceInfo(self.cs):
        self.addInterface(klass(self.r, self.cs), **kwargs)
def addInterface(
    self,
    interface,
    index=None,
    reverseAtEOL=False,
    enabled=True,
    bolForce=False,
):
    """
    Attach an interface to this operator.

    Notes
    -----
    Order matters.

    Parameters
    ----------
    interface : Interface
        the interface to add
    index : int, optional. Will insert the interface at this index rather than appending it to the end of the list
    reverseAtEOL : bool, optional.
        The interactEOL hooks will run in reverse order if True. All interfaces with this flag will be run as a
        group after all other interfaces. This allows something to run first at BOL and last at EOL, etc.
    enabled : bool, optional
        If enabled, will run at all hooks. If not, won't run any (with possible exception at BOL, see bolForce).
        Whenever possible, Interfaces that are needed during runtime for some peripheral operation but not during
        the main loop should be instantiated by the part of the code that actually needs the interface.
    bolForce: bool, optional
        If true, will run at BOL hook even if disabled. This is often a sign that the interface in question should
        be ephemerally instantiated on demand rather than added to the interface stack at all.

    Raises
    ------
    RuntimeError
        If an interface of the same name or purpose is already attached to the Operator.
    """
    if self.getInterface(interface.name):
        raise RuntimeError(f"An interface with name {interface.name} is already attached.")

    existing = self.getInterface(purpose=interface.purpose)
    if existing:
        if issubclass(type(existing), type(interface)):
            # The one already on the stack is equally or more derived: keep it.
            runLog.info(
                f"Ignoring Interface {interface} because existing interface {existing} already more specific"
            )
            return
        elif issubclass(type(interface), type(existing)):
            # The newcomer is more derived: swap it in for the existing one.
            self.removeInterface(existing)
            runLog.info(
                f"Will Insert Interface {interface} because it is a subclass of {existing} interface and "
                " more derived"
            )
        else:
            raise RuntimeError(
                f"Cannot add {interface}; the {existing} already is designated "
                f"as the {interface.purpose} interface. Multiple interfaces of the same "
                "purpose is not supported."
            )

    runLog.debug(f"Adding {interface}")
    if index is None:
        self.interfaces.append(interface)
    else:
        self.interfaces.insert(index, interface)
    if reverseAtEOL:
        interface.reverseAtEOL = True
    if not enabled:
        interface.enabled(False)
    interface.bolForce(bolForce)
    interface.attachReactor(self, self.r)
def _processInterfaceDependencies(self):
"""
Check all interfaces' dependencies and adds missing ones.
Notes
-----
Order does not matter here because the interfaces added here are disabled and playing supporting role so it is
not intended to run on the interface stack. They will be called by other interfaces.
As mentioned in :py:meth:`addInterface`, it may be better to just instantiate utility code when its needed
rather than rely on this system.
"""
# Make multiple passes in case there's one added that depends on another.
for _dependencyPass in range(5):
numInterfaces = len(self.interfaces)
# manipulation friendly, so it's ok to add additional things to the stack
for i in self.getInterfaces():
for dependency in i.getDependencies(self.cs):
name = dependency.name
purpose = dependency.purpose
klass = dependency
if not self.getInterface(name, purpose=purpose):
runLog.extra(
"Attaching {} interface (disabled, BOL forced) due to dependency in {}".format(
klass.name, i.name
)
)
self.addInterface(klass(r=self.r, cs=self.cs), enabled=False, bolForce=True)
if len(self.interfaces) == numInterfaces:
break
else:
raise RuntimeError("Interface dependency resolution did not converge.")
def removeAllInterfaces(self):
    """Detach every interface from the reactor and empty the interface stack."""
    for iface in self.interfaces:
        iface.detachReactor()
    self.interfaces = []
def removeInterface(self, interface=None, interfaceName=None):
    """
    Remove a single interface from the interface stack.

    Parameters
    ----------
    interface : Interface, optional
        An actual interface object to remove.
    interfaceName : str, optional
        The name of the interface to remove; when given, it takes precedence over ``interface``.

    Returns
    -------
    success : boolean
        True if the interface was removed,
        False if it was not (because it wasn't there to be removed).
    """
    if interfaceName:
        interface = self.getInterface(interfaceName)

    if not (interface and interface in self.interfaces):
        runLog.warning("Cannot remove interface {0} because it is not in the interface stack.".format(interface))
        return False

    self.interfaces.remove(interface)
    interface.detachReactor()
    return True
def getInterface(self, name=None, purpose=None):
    """
    Return a specific interface from the stack by its name or more generic purpose, or None.

    Parameters
    ----------
    name : str, optional
        Interface name
    purpose : str
        Interface purpose (general, like 'globalFlux','th',etc.). This is useful when you need the ___ solver (e.g.
        globalFlux) but don't care which particular one is active (e.g. SERPENT vs. DIF3D)

    Raises
    ------
    RuntimeError
        If there are more than one interfaces of the given name or purpose.
    """
    found = None
    for iface in self.interfaces:
        hit = (name and iface.name == name) or (purpose and iface.purpose == purpose)
        if not hit:
            continue
        if found is not None:
            raise RuntimeError(
                "Cannot retrieve a single interface as there are multiple "
                "interfaces with name {} or purpose {} attached. ".format(name, purpose)
            )
        found = iface
    return found
def interfaceIsActive(self, name):
    """True if named interface exists and is enabled.

    Notes
    -----
    This logic is significantly simpler than getActiveInterfaces: only the enabled() flag is
    consulted, not the case settings.
    """
    iface = self.getInterface(name)
    return iface and iface.enabled()
def getInterfaces(self):
    """
    Get list of interfaces in interface stack.

    .. impl:: An operator will expose an ordered list of interfaces.
        :id: I_ARMI_OPERATOR_INTERFACES
        :implements: R_ARMI_OPERATOR_INTERFACES

        This method returns an ordered list of Interface instances. At any time node in the reactor
        simulation these interfaces are called in sequence to perform various types of calculations.
        Note that the Operator holds a list of Plugins, each of which may define multiple
        Interfaces, and the Interfaces define their own order separate from the Plugins' ordering.

    Notes
    -----
    Returns a copy so you can manipulate the list in an interface, like dependencies.
    """
    return list(self.interfaces)
def getActiveInterfaces(
    self,
    interactState: str,
    excludedInterfaceNames: Tuple[str] = (),
    cycle: int = 0,
):
    """Retrieve the interfaces which are active for a given interaction state.

    Parameters
    ----------
    interactState: str
        A string dictating which interaction state the interfaces should be pulled for.
    excludedInterfaceNames: Tuple[str]
        A tuple of strings dictating which interfaces should be manually skipped.
    cycle: int
        The given cycle. 0 by default.

    Returns
    -------
    activeInterfaces: List[Interfaces]
        The interfaces deemed active for the given interactState.

    Raises
    ------
    ValueError
        If ``interactState`` is not a recognized interaction point.
    """
    # Validate the inputs
    if excludedInterfaceNames is None:
        excludedInterfaceNames = ()
    if interactState not in ("BOL", "BOC", "EveryNode", "EOC", "EOL", "Coupled", "Restart"):
        raise ValueError(f"{interactState} is an unknown interaction state!")

    def isEnabled(iface):
        # At BOL, disabled interfaces flagged bolForce still run.
        if interactState == "BOL":
            return iface.enabled() or iface.bolForce()
        return iface.enabled()

    def passesNameCheck(iface):
        # Skip interfaces excluded manually or deferred by case settings.
        if interactState in ("EveryNode", "EOC", "EOL"):
            return iface.name not in excludedInterfaceNames
        if interactState == "BOC" and cycle < self.cs[CONF_DEFERRED_INTERFACES_CYCLE]:
            return iface.name not in self.cs[CONF_DEFERRED_INTERFACE_NAMES]
        if interactState == "BOL":
            return (
                iface.name not in self.cs[CONF_DEFERRED_INTERFACE_NAMES]
                and iface.name not in excludedInterfaceNames
            )
        return True

    activeInterfaces = [i for i in self.interfaces if isEnabled(i) and passesNameCheck(i)]

    # Special Case: At EOL, interfaces flagged reverseAtEOL run last, in reverse order.
    if interactState == "EOL":
        ordered = [i for i in activeInterfaces if not i.reverseAtEOL]
        ordered.extend(reversed([i for i in activeInterfaces if i.reverseAtEOL]))
        activeInterfaces = ordered
    return activeInterfaces
def reattach(self, r, cs=None):
    """Add links to globally-shared objects to this operator and all interfaces.

    Notes
    -----
    Could be a good opportunity for weakrefs.
    """
    self.r = r
    self.r.o = self
    if cs is not None:
        self.cs = cs
    for iface in self.interfaces:
        iface.r = r
        iface.o = self
        if cs is not None:
            iface.cs = cs
def detach(self):
    """
    Break links to globally-shared objects to this operator and all interfaces.

    May be required prior to copying these objects over the network.

    Notes
    -----
    Could be a good opportunity for weakrefs.
    """
    if self.r:
        self.r.o = None
        # orphan each child so no back-references survive
        for comp in self.r:
            comp.parent = None
    self.r = None
    for iface in self.interfaces:
        iface.o = None
        iface.r = None
        iface.cs = None
def _attachInterfaces(self):
"""
Links all the interfaces in the interface stack to the operator, reactor, and cs.
See Also
--------
createInterfaces : creates all interfaces
addInterface : adds a single interface to the stack
"""
for i in self.interfaces:
i.attachReactor(self, self.r)
def _loadRestartData(self):
    """
    Read a restart.dat file which contains all the fuel management factorLists and cycle lengths.

    Appends one ``(cycle, time, factorList)`` tuple to ``self.restartData`` per matching line;
    does nothing if the file does not exist.

    Notes
    -----
    This allows the ARMI to do the same shuffles that it did last time, assuming fuel management logic has not
    changed. Note, it would be better if the moves were just read from a table in the database.
    """
    restartName = self.cs.caseTitle + ".restart.dat"
    if not os.path.exists(restartName):
        # No restart file for this case; nothing to load.
        return
    else:
        runLog.info(f"Loading restart data from {restartName}")
        with open(restartName, "r") as restart:
            for line in restart:
                # Lines look like: cycle=N time=X.XXe+YY factorList=[...] (old) or {...} (new)
                match = re.search(
                    r"cycle=(\d+)\s+time=(\d+\.\d+[Ee+-]+\d+)\s+factorList=[\[\{](.+?)[\]\}]",
                    line,
                )
                if match:
                    # 'name': value pairs indicate the newer dict-style factorList.
                    newStyle = re.findall(r"'(\w+)':\s*(\d*\.?\d*)", line)
                    if newStyle:
                        # key-based factorList. load a dictionary.
                        factorList = {}
                        for key, val in newStyle:
                            factorList[key] = float(val)
                    else:
                        # list based factorList. Load a list. (old style, backward compat)
                        try:
                            factorList = [float(item) for item in match.group(3).split(",")]
                        except ValueError:
                            # non-numeric entries: fall back to raw strings
                            factorList = match.group(3).split(",")
                    runLog.debug("loaded restart data for cycle %d" % float(match.group(1)))
                    self.restartData.append((float(match.group(1)), float(match.group(2)), factorList))
        runLog.info("loaded restart data for {0} cycles".format(len(self.restartData)))
def loadState(self, cycle, timeNode, timeStepName="", fileName=None, updateMassFractions=None):
    """
    Convenience method reroute to the database interface state reload method.

    Raises
    ------
    RuntimeError
        If no database interface is attached to this operator.

    See Also
    --------
    armi.bookkeeping.db.loadOperator:
        A method for loading an operator given a database. loadOperator does not require an operator prior to
        loading the state of the reactor; it creates the reactor from the inputs stored on the database and then
        attaches it to a new operator. loadState requires an existing operator (so armi.init must have run with
        access to blueprints, settings, and geometry) and should be used to jump to a different time step in the
        middle of an ARMI calculation. From a fresh session, either method is sufficient if you have the inputs.
    """
    dbi = self.getInterface("database")
    if not dbi:
        raise RuntimeError("Cannot load from snapshot without a database interface")

    if updateMassFractions is not None:
        runLog.warning("deprecated: updateMassFractions is no longer a valid option for loadState")

    dbi.loadState(cycle, timeNode, timeStepName, fileName)
def snapshotRequest(self, cycle, node, iteration=None):
    """
    Process a snapshot request at this time.

    This copies various physics input and output files to a special folder so follow-on analysis can be executed
    upon later.

    Parameters
    ----------
    cycle : int
        Cycle of the snapshot point.
    node : int
        Time node of the snapshot point.
    iteration : int, optional
        Coupled-iteration number to include in copied file names, if any.

    Notes
    -----
    This was originally used to produce MC2/DIF3D inputs for external parties (who didn't have ARMI) to review.
    Since then, the concept of snapshots has evolved with respect to the
    :py:class:`~armi.operators.snapshots.OperatorSnapshots`.
    """
    # local import to avoid a circular/eager dependency on the neutronics settings module
    from armi.physics.neutronics.settings import CONF_LOADING_FILE

    runLog.info(f"Producing snapshot for cycle {cycle} node {node}")
    self.r.core.zones.summary()
    newFolder = f"snapShot{cycle}_{node}"
    if os.path.exists(newFolder):
        runLog.important(f"Deleting existing snapshot data in {newFolder}")
        pathTools.cleanPath(newFolder, forceClean=True)  # careful with cleanPath!
        # give it a minute.
        time.sleep(1)
        if os.path.exists(newFolder):
            runLog.warning(f"Deleting existing snapshot data in {newFolder} failed")
        # NOTE(review): if cleanPath fully removed the folder, it is not re-created here
        # before the copies below — confirm cleanPath leaves the directory in place.
    else:
        os.mkdir(newFolder)

    # Moving the cross section files is to a snapshot directory is a reasonable requirement, but these hard-coded
    # names are not desirable. This is legacy and should be updated to be more robust for users.
    for fileName in os.listdir("."):
        if "mcc" in fileName and re.search(r"[A-Z]AF?\d?.inp", fileName):
            base, ext = os.path.splitext(fileName)
            if iteration is not None:
                newFile = "{0}_{1:03d}_{2:d}_{4}{3}".format(base, cycle, node, ext, iteration)
            else:
                newFile = "{0}_{1:03d}_{2:d}{3}".format(base, cycle, node, ext)
            # add the cycle and timenode to the XS input file names so that a rx-coeff case that
            # runs in here won't overwrite them.
            pathTools.copyOrWarn(fileName, fileName, os.path.join(newFolder, newFile))
        if "rzmflx" in fileName:
            pathTools.copyOrWarn("rzmflx for snapshot", fileName, newFolder)

    # Prefer the most specific ISOTXS name that exists; fall back to the most general.
    fileNamePossibilities = [f"ISOTXS-c{cycle}n{node}", f"ISOTXS-c{cycle}"]
    if iteration is not None:
        fileNamePossibilities = [f"ISOTXS-c{cycle}n{node}i{iteration}"] + fileNamePossibilities
    for isoFName in fileNamePossibilities:
        if os.path.exists(isoFName):
            break
    # If none existed, isoFName is the last candidate and copyOrWarn will warn rather than raise.
    pathTools.copyOrWarn("ISOTXS for snapshot", isoFName, pathTools.armiAbsPath(newFolder, "ISOTXS"))

    globalFluxLabel = GlobalFluxInterfaceUsingExecuters.getLabel(self.cs.caseTitle, cycle, node, iteration)
    globalFluxInput = globalFluxLabel + ".inp"
    globalFluxOutput = globalFluxLabel + ".out"
    pathTools.copyOrWarn("DIF3D input for snapshot", globalFluxInput, newFolder)
    pathTools.copyOrWarn("DIF3D output for snapshot", globalFluxOutput, newFolder)
    pathTools.copyOrWarn("Shuffle logic for snapshot", self.cs[CONF_SHUFFLE_LOGIC], newFolder)
    pathTools.copyOrWarn("Loading definition for snapshot", self.cs[CONF_LOADING_FILE], newFolder)
@staticmethod
def setStateToDefault(cs):
    """Update the state of ARMI to fit the kind of run this operator manages."""
    newSettings = {"runType": RunTypes.STANDARD}
    return cs.modified(newSettings=newSettings)
def couplingIsActive(self):
    """Return the tight-coupling setting value; truthy when physics coupling is active."""
    return self.cs[CONF_TIGHT_COUPLING]
================================================
FILE: armi/operators/operatorMPI.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The MPI-aware variant of the standard ARMI operator.
.. impl:: There is an MPI-aware variant of the ARMI Operator.
:id: I_ARMI_OPERATOR_MPI
:implements: R_ARMI_OPERATOR_MPI
This sets up the main Operator on the primary MPI node and initializes
worker processes on all other MPI nodes. At certain points in the run,
particular interfaces might call into action all the workers. For
example, a depletion or subchannel T/H module may ask the MPI pool to
perform a few hundred independent physics calculations in parallel. In
many cases, this can speed up the overall execution of an analysis,
if a big enough computer or computing cluster is available.
See :py:class:`~armi.operators.operator.Operator` for the parent class.
Notes
-----
This is not *yet* smart enough to use shared memory when the MPI tasks are on the same machine. Everything goes through
MPI. This can be optimized as needed.
"""
import gc
import os
import re
import time
import traceback
from armi import context, getPluginManager, mpiActions, runLog
from armi.operators.operator import Operator
from armi.reactor import reactors
class OperatorMPI(Operator):
    """MPI-aware Operator.

    Rank 0 runs the normal Operator loop; all other ranks sit in ``workerOperate``
    waiting for commands broadcast from the primary.
    """

    def __init__(self, cs):
        try:
            Operator.__init__(self, cs)
        except:
            # Bare except is deliberate: any failure on the primary (including
            # SystemExit/KeyboardInterrupt) must shut down the workers before re-raising.
            # kill the workers too so everything dies.
            runLog.important("Primary node failed on init. Quitting.")
            if context.MPI_COMM:  # else it's a single cpu case.
                context.MPI_COMM.bcast("quit", root=0)
            raise

    def operate(self):
        """
        Operate method for all nodes.

        Calls _mainOperate or workerOperate depending on which MPI rank we are, and
        handles errors.
        """
        runLog.debug("OperatorMPI.operate")
        if context.MPI_RANK == 0:
            # this is the primary
            try:
                # run the regular old operate method
                Operator.operate(self)
                runLog.important(time.ctime())
            except Exception as ee:
                runLog.error("Error in Primary Node. Check STDERR for a traceback.\n{}".format(ee))
                raise
            finally:
                # If there are other processes, tell them to stop
                if context.MPI_SIZE > 1:
                    runLog.important("Stopping all MPI worker nodes and cleaning temps.")
                    # send the quit command to the workers.
                    context.MPI_COMM.bcast("quit", root=0)
                    runLog.debug("Waiting for all nodes to close down")
                    # wait until they're done cleaning up.
                    context.MPI_COMM.bcast("finished", root=0)
                    runLog.important("All worker nodes stopped.")
                # even though we waited, still need more time to close stdout.
                time.sleep(1)
                runLog.debug("Main operate finished")
                runLog.close()  # concatenate all logs.
        else:
            try:
                self.workerOperate()
            except:
                # Bare except is deliberate: report the failure loudly, then re-raise.
                # grab the final command
                runLog.warning("An error has occurred in one of the worker nodes. See STDERR for traceback.")
                # bcasting quit won't work if the main is sitting around waiting for a different bcast or gather.
                traceback.print_exc()
                runLog.debug("Worker failed")
                runLog.close()
                raise

    def workerOperate(self):
        """
        The main loop on any worker MPI nodes.

        Notes
        -----
        This method is what worker nodes are in while they wait for instructions from
        the primary node in a parallel run. The nodes will sit, waiting for a "worker
        command". When this comes (from a bcast from the primary), a set of if statements
        are evaluated, with specific behaviors defined for each command. If the operator
        doesn't understand the command, it loops through the interface stack to see if
        any of the interfaces understand it.

        Originally, "magic strings" were broadcast, which were handled either here or in
        one of the interfaces' ``workerOperate`` methods. Since then, the
        :py:mod:`~armi.mpiActions` system has been devised which just broadcasts
        ``MpiAction`` objects. Both methods are still supported.

        See Also
        --------
        armi.mpiActions : MpiAction information
        armi.interfaces.workerOperate : interface-level handling of worker commands.
        """
        while True:
            # sit around waiting for a command from the primary
            runLog.extra("Node {0} ready and waiting".format(context.MPI_RANK))
            cmd = context.MPI_COMM.bcast(None, root=0)
            runLog.extra("worker received command {0}".format(cmd))
            # got a command. go use it.
            if isinstance(cmd, mpiActions.MpiAction):
                cmd.invoke(self, self.r, self.cs)
            elif cmd == "quit":
                self.workerQuit()
                break  # If this break is removed, the program will remain in the while loop forever.
            elif cmd == "finished":
                # "finished" normally follows a "quit" we already handled; getting it here is abnormal.
                runLog.warning(
                    "Received unexpected FINISHED command. Usually a QUIT command precedes this. "
                    "Skipping cleanup of temporary files."
                )
                break
            elif cmd == "sync":
                # wait around for a sync
                runLog.debug("Worker syncing")
                note = context.MPI_COMM.bcast("wait", root=0)
                if note != "wait":
                    raise RuntimeError(f'did not get "wait". Got {note}')
            elif cmd == "reset":
                runLog.extra("Workers are being reset.")
            else:
                # We don't understand the command on our own. Check the interfaces this allows all interfaces to have
                # their own custom operation code.
                handled = False
                for i in self.interfaces:
                    handled = i.workerOperate(cmd)
                    if handled:
                        break
                if not handled:
                    if context.MPI_RANK == 0:
                        print("Interfaces" + str(self.interfaces))
                    runLog.error(
                        "No interface understood worker command {0}\n check stdout for err\n"
                        "available interfaces:\n {1}".format(
                            cmd,
                            "\n ".join(f"name:{i.name} typeName:{i.purpose} {i}" for i in self.interfaces),
                        )
                    )
                    raise RuntimeError(f"Failed to delegate worker command {cmd} to an interface.")

            pm = getPluginManager()
            resetFlags = pm.hook.mpiActionRequiresReset(cmd=cmd)
            # only reset if all the plugins agree to reset
            if all(resetFlags) or cmd == "reset":
                self._resetWorker()

            # might be an mpi action which has a reactor and everything, preventing garbage collection
            del cmd
            gc.collect()

    def _finalizeInteract(self):
        """Inherited member called after each interface has completed its interact.

        This will force all the workers to clear their reactor data so that it
        isn't carried around to the next interact.

        Notes
        -----
        This is only called on the root processor. Worker processors will know
        what to do with the "reset" broadcast.
        """
        if context.MPI_SIZE > 1:
            context.MPI_COMM.bcast("reset", root=0)
            runLog.extra("Workers have been reset.")

    def _resetWorker(self):
        """
        Clear out the reactor on the workers to start anew.

        Notes
        -----
        This was made to help minimize the amount of RAM that is used during some
        gigantic long-running cases. Resetting after building copies of reactors
        or transforming their geometry is one approach. We hope to implement
        more efficient solutions in the future.

        Warning
        -------
        This should build empty non-core systems too.
        """
        # Nothing to do if we never had anything
        if self.r is None:
            return
        cs = self.cs
        bp = self.r.blueprints
        spatialGrid = self.r.core.spatialGrid
        # break the grid's back-reference so the old core can be garbage collected
        spatialGrid.armiObject = None
        xsGroups = self.getInterface("xsGroups")
        if xsGroups:
            xsGroups.clearRepresentativeBlocks()
        self.detach()
        # rebuild a fresh, empty reactor/core re-using the old blueprints and grid
        self.r = reactors.Reactor(cs.caseTitle, bp)
        core = reactors.Core("Core")
        self.r.add(core)
        core.spatialGrid = spatialGrid
        core.spatialGrid.armiObject = core
        self.reattach(self.r, cs)

    @staticmethod
    def workerQuit():
        """Shut this worker down: close logs, then signal the primary that we are done."""
        runLog.debug("Worker ending")
        runLog.close()  # no more messages.
        # wait until all workers are closed so we can delete them.
        context.MPI_COMM.bcast("finished", root=0)

    def collapseAllStderrs(self):
        """Takes all the individual stderr files from each processor and arranges them nicely into one file."""
        # NOTE(review): file handles here are closed manually; `with` blocks would be safer
        # if an exception occurs mid-copy — confirm before changing.
        stderrFiles = []
        for fName in os.listdir("."):
            # per-rank stderr files are named like <case>_NNNN.stderr
            match = re.search(r"_(\d\d\d\d)\.stderr", fName)
            if match:
                stderrFiles.append((match.group(1), fName))
        # sort by rank so the combined file is ordered
        stderrFiles.sort()
        stderr = open("{0}w.stderr".format(self.cs.caseTitle), "w")
        for cpu, fName in stderrFiles:
            f = open(fName)
            stderr.write("Processor {0}\n".format(cpu))
            stderr.write(f.read())
            stderr.write("\n")
            f.close()
        stderr.close()
================================================
FILE: armi/operators/runTypes.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Constants defining the different supported run types.
These were moved here to better structure the dependencies within this
package. Dependencies should be organized in a tree-like structure, with
``__init__.py`` living at the top. These will likely need to be extended by plugins in
the near future.
"""
class RunTypes:
    """All available values of the ``runType`` setting that determine which Operator to use."""

    # Default mode; OperatorSnapshots.setStateToDefault resets cases back to this value.
    STANDARD = "Standard"
    # Snapshot/restart mode (see armi.operators.snapshots.OperatorSnapshots).
    SNAPSHOTS = "Snapshots"
    # Equilibrium run mode.
    EQUILIBRIUM = "Equilibrium"
================================================
FILE: armi/operators/snapshots.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Snapshot Operator."""
from armi import runLog
from armi.operators import operatorMPI
class OperatorSnapshots(operatorMPI.OperatorMPI):
    """
    This operator just loops over the requested snapshots and computes at them.

    These may add CR worth curves, rx coefficients, transient runs etc at these snapshots.
    This operator can be run as a restart, adding new physics to a previous run.
    """

    def __init__(self, cs):
        super().__init__(cs)
        # disable fuel management and optimization
        # disable depletion because we don't want to change number densities for tn's >0 (or any)
        self.disabledInterfaces = ["depletion", "fuelHandler", "optimize"]

    def createInterfaces(self):
        """Build the standard interface stack, then disable the interfaces snapshots must not run."""
        operatorMPI.OperatorMPI.createInterfaces(self)
        for toDisable in self.disabledInterfaces:
            i = self.getInterface(name=toDisable, purpose=toDisable)
            if i:
                i.enabled(False)

    def _mainOperate(self):
        """
        General main loop for ARMI snapshot case.

        Instead of going through all cycles, this goes through just the snapshots.

        See Also
        --------
        Operator._mainOperate : The primary ARMI loop for non-restart cases.
        """
        runLog.important("---- Beginning Snapshot (restart) ARMI Operator Loop ------")
        # run things that happen before a calculation.
        # setups, etc.
        self.interactAllBOL()
        # figure out which snapshots to run in. Parse the CCCNNN settings
        snapshots = [(int(i[:3]), int(i[3:])) for i in self.cs["dumpSnapshot"]]
        # update the snapshot requests if the user chose to load from a specific cycle/node
        dbi = self.getInterface("database")
        # database is excluded since SS writes by itself
        excludeDB = ("database",)
        for ssCycle, ssNode in snapshots:
            runLog.important("Beginning snapshot ({0:02d}, {1:02d})".format(ssCycle, ssNode))
            dbi.loadState(ssCycle, ssNode)
            # need to update reactor power after the database load
            # this is normally handled in operator._cycleLoop
            self.r.core.p.power = self.cs["power"]
            self.r.core.p.powerDensity = self.cs["powerDensity"]
            halt = self.interactAllBOC(self.r.p.cycle)
            if halt:
                # an interface requested a stop; abandon the remaining snapshots
                break
            # database is excluded since it writes after coupled
            self.interactAllEveryNode(ssCycle, ssNode, excludedInterfaceNames=excludeDB)
            self._performTightCoupling(ssCycle, ssNode, writeDB=False)
            # tight coupling is done, now write to DB
            dbi.writeDBEveryNode()
            self.interactAllEOC(self.r.p.cycle)

        # run things that happen at EOL, like reports, plotters, etc.
        self.interactAllEOL(excludedInterfaceNames=excludeDB)
        dbi.closeDB()  # dump the database to file
        runLog.important("Done with ARMI snapshots case.")

    @staticmethod
    def setStateToDefault(cs):
        """Update the state of ARMI to fit the kind of run this operator manages."""
        # local import avoids an import cycle with the runTypes module
        from armi.operators.runTypes import RunTypes

        # intentionally resets runType back to STANDARD rather than SNAPSHOTS
        return cs.modified(newSettings={"runType": RunTypes.STANDARD})

    @property
    def atEOL(self):
        """
        Always False for snapshot runs.

        Notes
        -----
        This operator's atEOL method behaves very differently than other operators.
        The idea is that snapshots don't really have an EOL since they are independent of
        chronological order and may or may not contain the last time node from the load database.
        """
        return False
================================================
FILE: armi/operators/tests/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for built-in operators."""
================================================
FILE: armi/operators/tests/test_operatorSnapshots.py
================================================
# Copyright 2022 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for operator snapshots."""
import unittest
from pathlib import Path
from unittest.mock import Mock
from armi import settings
from armi.bookkeeping.db.databaseInterface import DatabaseInterface
from armi.operators import getOperatorClassFromSettings
from armi.operators.runTypes import RunTypes
from armi.operators.snapshots import OperatorSnapshots
from armi.settings.fwSettings.globalSettings import CONF_GROW_TO_FULL_CORE_AFTER_LOAD
from armi.testing import TESTING_ROOT, loadTestReactor
from armi.utils.directoryChangers import TemporaryDirectoryChanger
class TestOperatorSnapshots(unittest.TestCase):
    """Exercise the snapshot operator against a minimal test reactor."""

    @classmethod
    def setUpClass(cls):
        # Build a standard-run reactor once, then drive it with a snapshot operator.
        customSettings = {
            "axialExpansion": False,
            "db": True,
            "genReports": False,
            "summarizeAssemDesign": False,
            "runType": "Standard",
            "verbosity": "error",
            "branchVerbosity": "error",
            "nCycles": 1,
            "dumpSnapshot": ["000000", "008000", "016005"],
        }
        baseOperator, cls.r = loadTestReactor(
            customSettings=customSettings,
            inputFileName="smallestTestReactor/armiRunSmallest.yaml",
        )
        cls.o = OperatorSnapshots(baseOperator.cs)
        cls.o.r = cls.r

        # Disable every known interface up front, to keep these tests fast.
        cls.o.disabledInterfaces.extend(
            [
                "database",
                "fissionProducts",
                "fuelHandler",
                "history",
                "main",
                "memoryProfiler",
                "snapshot",
                "xsGroups",
            ]
        )

        # Stub a database interface so no real DB I/O happens during the tests.
        cls.dbi = DatabaseInterface(cls.r, baseOperator.cs)
        cls.dbi.loadState = lambda c, n: None
        cls.dbi.writeDBEveryNode = lambda: None
        cls.dbi.closeDB = lambda: None

        cls.o.createInterfaces()

    def test_atEOL(self):
        """The snapshot operator never reports being at end-of-life."""
        self.assertFalse(self.o.atEOL)

    def test_setStateToDefault(self):
        """Resetting state should flip the run type from SNAPSHOTS back to STANDARD."""
        snapshotCs = self.o.cs.modified(newSettings={"runType": RunTypes.SNAPSHOTS})
        self.assertEqual(snapshotCs["runType"], RunTypes.SNAPSHOTS)
        defaultCs = self.o.setStateToDefault(snapshotCs)
        self.assertEqual(defaultCs["runType"], RunTypes.STANDARD)

    def test_mainOperate(self):
        """Running the main snapshot loop should set the core power from settings."""
        # Mock out tooling that is not under test here.
        self.o.interactBOL = lambda: None
        # NOTE(review): the zero-arg super() inside this lambda binds the lambda's first
        # argument (a string) as the instance; it only works because the else branch is
        # never exercised in this test -- confirm before relying on that path.
        self.o.getInterface = lambda s: (self.dbi if s == "database" else super().getInterface(s))
        self.assertEqual(self.r.core.p.power, 0.0)
        self.o._mainOperate()
        self.assertEqual(self.r.core.p.power, 1000000.0)

    def test_createInterfacesDisabled(self):
        """Every interface created in setUpClass should come up disabled."""
        # If someone adds an interface, we don't want this test to break, so require >6.
        self.assertGreater(len(self.o.interfaces), 6)
        for interface in self.o.interfaces:
            self.assertFalse(interface.enabled())
class TestOperatorSnapshotsSettings(unittest.TestCase):
    """Check the settings-to-operator-class mapping for snapshot runs."""

    def test_getOperatorClassFromSettings(self):
        """A SNAPSHOTS run type should resolve to the OperatorSnapshots class."""
        snapshotCs = settings.Settings().modified(newSettings={"runType": RunTypes.SNAPSHOTS})
        operatorClass = getOperatorClassFromSettings(snapshotCs)
        self.assertEqual(operatorClass, OperatorSnapshots)
class TestSnapshotFullCoreExpan(unittest.TestCase):
    """Test that a snapshot operator can do full core analysis with a 1/3 core DB."""

    # H5 database written by setUpClass and reloaded by the snapshot run under test.
    DB_PATH = Path("test_operator_snapshot_full_core_expansion.h5")

    @classmethod
    def setUpClass(cls):
        # Run everything inside a temporary directory; cleaned up in tearDownClass.
        cls.td = TemporaryDirectoryChanger()
        cls.td.__enter__()
        # Load a 1/3-symmetric core and write a single statepoint to disk.
        o, cls.symmetricReactor = loadTestReactor(
            inputFilePath=TESTING_ROOT, inputFileName="reactors/thirdSmallHexReactor/thirdSmallHexReactor.yaml"
        )
        dbi: DatabaseInterface = next(filter(lambda i: isinstance(i, DatabaseInterface), o.interfaces))
        dbi.initDB(cls.DB_PATH)
        dbi.writeDBEveryNode()
        dbi.closeDB()
        # Snapshot settings pointed at the DB written above.
        cls.snapshotSettings: settings.Settings = o.cs.modified(
            newSettings={"runType": RunTypes.SNAPSHOTS, "reloadDBName": str(cls.DB_PATH)}
        )

    @classmethod
    def tearDownClass(cls):
        # Remove the DB file, then leave (and delete) the temporary directory.
        cls.DB_PATH.unlink()
        cls.td.__exit__(None, None, None)

    def test_fullCoreFromThirdCore(self):
        """A snapshot run with grow-to-full-core enabled should expand the 1/3 core."""
        self.assertFalse(self.symmetricReactor.core.isFullCore)
        cs = self.snapshotSettings.modified(
            newSettings={CONF_GROW_TO_FULL_CORE_AFTER_LOAD: True, "dumpSnapshot": ["0000"]}
        )
        o = getOperatorClassFromSettings(cs)(cs)
        self.assertIsInstance(o, OperatorSnapshots)
        o.r = self.symmetricReactor
        # Just want Database interface; not history tracker, not reporting, etc.
        o.addInterface(DatabaseInterface(o.r, o.cs))
        # Mock interactAllBOC so we don't interact at every node.
        # We just want to trigger the re-attachment of the loaded reactor.
        o.interactAllBOC = Mock(return_value=True)
        o.operate()
        self.assertTrue(o.r.core.isFullCore)
================================================
FILE: armi/operators/tests/test_operators.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for operators."""
import collections
import io
import os
import sys
import unittest
from contextlib import contextmanager
from unittest.mock import patch
from armi import settings
from armi.bookkeeping.db.databaseInterface import DatabaseInterface
from armi.interfaces import Interface, TightCoupler
from armi.operators.operator import Operator
from armi.physics.neutronics.globalFlux.globalFluxInterface import (
GlobalFluxInterfaceUsingExecuters,
)
from armi.reactor.reactors import Core, Reactor
from armi.reactor.tests import test_reactors
from armi.settings.caseSettings import Settings
from armi.settings.fwSettings.globalSettings import (
CONF_CYCLES_SKIP_TIGHT_COUPLING_INTERACTION,
CONF_DEFERRED_INTERFACE_NAMES,
CONF_DEFERRED_INTERFACES_CYCLE,
CONF_RUN_TYPE,
CONF_TIGHT_COUPLING,
CONF_TIGHT_COUPLING_SETTINGS,
)
from armi.tests import mockRunLogs
from armi.utils import directoryChangers
from armi.utils.directoryChangers import TemporaryDirectoryChanger
class InterfaceA(Interface):
    """Dummy Interface with purpose "A"; used to test purpose-collision handling."""

    purpose = "A"
    name = "First"
class InterfaceB(InterfaceA):
    """Dummy Interface that extends A, sharing its purpose but not its name."""

    purpose = "A"
    name = "Second"
class InterfaceC(Interface):
    """Dummy Interface with purpose "A" but unrelated to A/B; should collide on add."""

    purpose = "A"
    name = "Third"
class OperatorTests(unittest.TestCase):
    """Tests of core Operator behavior: interface ordering, activation, state, and snapshots."""

    def setUp(self):
        self.o, self.r = test_reactors.loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml")
        # Record which interfaces start out enabled, for comparison in tests below.
        self.activeInterfaces = [ii for ii in self.o.interfaces if ii.enabled()]

    def test_operatorData(self):
        """Test that the operator has input data, a reactor model.

        .. test:: The Operator includes input data and the reactor data model.
            :id: T_ARMI_OPERATOR_COMM
            :tests: R_ARMI_OPERATOR_COMM
        """
        self.assertEqual(self.o.r, self.r)
        self.assertEqual(type(self.o.cs), settings.Settings)

    @patch("armi.operators.Operator._interactAll")
    def test_orderedInterfaces(self, interactAll):
        """Test the default interfaces are in an ordered list, looped over at each time step.

        .. test:: An ordered list of interfaces are run at each time step.
            :id: T_ARMI_OPERATOR_INTERFACES
            :tests: R_ARMI_OPERATOR_INTERFACES

        .. test:: Interfaces are run at BOC, EOC, and at time points between.
            :id: T_ARMI_INTERFACE
            :tests: R_ARMI_INTERFACE

        .. test:: When users set the time discretization, it is enforced.
            :id: T_ARMI_FW_HISTORY2
            :tests: R_ARMI_FW_HISTORY
        """
        # an ordered list of interfaces
        self.assertGreater(len(self.o.interfaces), 0)
        for i in self.o.interfaces:
            self.assertTrue(isinstance(i, Interface))

        # make sure we only iterate one time step
        self.o.cs = self.o.cs.modified(newSettings={"nCycles": 2})
        self.r.p.cycle = 1

        # mock some stdout logging of what's happening when
        def sideEffect(node, activeInts, *args, **kwargs):
            print(node)
            print(activeInts)

        interactAll.side_effect = sideEffect

        # run the operator through one cycle, capturing its stdout
        origout = sys.stdout
        try:
            out = io.StringIO()
            sys.stdout = out
            self.o.operate()
        finally:
            # always restore stdout, even if operate() raises
            sys.stdout = origout

        # grab the log data
        log = out.getvalue()

        # verify we have some common interfaces listed
        self.assertIn("main", log)
        self.assertIn("fuelHandler", log)
        self.assertIn("fissionProducts", log)
        self.assertIn("history", log)
        self.assertIn("snapshot", log)

        # At the first time step, we get one ordered list of interfaces
        interfaces = log.split("BOL")[1].split("EOL")[0].split(",")
        self.assertGreater(len(interfaces), 0)
        for i in interfaces:
            self.assertIn("Interface", i)

        # verify the various time nodes are hit in order: each iteration trims the log
        # up to (and including) the current node, so later nodes must appear afterwards
        timeNodes = ["BOL", "BOC"] + ["EveryNode"] * 3 + ["EOC", "EOL"]
        for node in timeNodes:
            self.assertIn(node, log)
            log = node.join(log.split(node)[1:])

    def test_addInterfaceSubclassCollision(self):
        """Verify purpose-collision rules when adding interfaces that share a purpose."""
        cs = settings.Settings()
        interfaceA = InterfaceA(self.r, cs)
        interfaceB = InterfaceB(self.r, cs)
        self.o.addInterface(interfaceA)

        # 1) Adds B and gets rid of A
        self.o.addInterface(interfaceB)
        self.assertEqual(self.o.getInterface("Second"), interfaceB)
        self.assertEqual(self.o.getInterface("First"), None)

        # 2) Now we have B which is a subclass of A,
        #    we want to not add A (but also not have an error)
        self.o.addInterface(interfaceA)
        self.assertEqual(self.o.getInterface("Second"), interfaceB)
        self.assertEqual(self.o.getInterface("First"), None)

        # 3) Also if another class not a subclass has the same purpose,
        #    raise an error
        interfaceC = InterfaceC(self.r, cs)
        self.assertRaises(RuntimeError, self.o.addInterface, interfaceC)

        # 4) Check adding a different purpose Interface
        interfaceC.purpose = "C"
        self.o.addInterface(interfaceC)
        self.assertEqual(self.o.getInterface("Second"), interfaceB)
        self.assertEqual(self.o.getInterface("Third"), interfaceC)

    def test_interfaceIsActive(self):
        """Known interface names report active; unknown names do not."""
        self.o, _r = test_reactors.loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml")
        self.assertTrue(self.o.interfaceIsActive("main"))
        self.assertFalse(self.o.interfaceIsActive("Fake-o"))

    def test_getActiveInterfaces(self):
        """Ensure that the right interfaces are returned for a given interaction state."""
        self.o.cs[CONF_DEFERRED_INTERFACES_CYCLE] = 1
        self.o.cs[CONF_DEFERRED_INTERFACE_NAMES] = ["history"]

        # Test invalid inputs.
        with self.assertRaises(ValueError):
            self.o.getActiveInterfaces("notAnInterface")

        # Test BOL. NOTE: the trailing comma matters -- a bare ("xsGroups") is just a
        # string, and membership checks against a string match substrings.
        interfaces = self.o.getActiveInterfaces("BOL", excludedInterfaceNames=("xsGroups",))
        interfaceNames = [interface.name for interface in interfaces]
        self.assertNotIn("xsGroups", interfaceNames)
        self.assertNotIn("history", interfaceNames)

        # Test BOC
        interfaces = self.o.getActiveInterfaces("BOC", cycle=0)
        interfaceNames = [interface.name for interface in interfaces]
        self.assertNotIn("history", interfaceNames)

        # Test EveryNode and EOC
        interfaces = self.o.getActiveInterfaces("EveryNode", excludedInterfaceNames=("xsGroups",))
        interfaceNames = [interface.name for interface in interfaces]
        self.assertIn("history", interfaceNames)
        self.assertNotIn("xsGroups", interfaceNames)

        # Test Coupled
        interfaces = self.o.getActiveInterfaces("Coupled")
        for test, ref in zip(interfaces, self.activeInterfaces):
            self.assertEqual(test.name, ref.name)

        # Test EOL
        interfaces = self.o.getActiveInterfaces("EOL")
        self.assertEqual(interfaces[-1].name, "main")

        # Test excludedInterfaceNames
        excludedInterfaceNames = ["fissionProducts", "fuelHandler", "xsGroups"]
        interfaces = self.o.getActiveInterfaces("EOL", excludedInterfaceNames=excludedInterfaceNames)
        interfaceNames = [ii.name for ii in interfaces]
        self.assertIn("history", interfaceNames)
        self.assertIn("main", interfaceNames)
        self.assertIn("snapshot", interfaceNames)
        self.assertNotIn("fissionProducts", interfaceNames)
        self.assertNotIn("fuelHandler", interfaceNames)
        self.assertNotIn("xsGroups", interfaceNames)

    def test_loadStateError(self):
        """The ``loadTestReactor()`` test tool does not have any history in the DB to load from."""
        # a first, simple test that this method fails correctly
        with self.assertRaises(RuntimeError):
            self.o.loadState(0, 1)

    def test_setStateToDefault(self):
        """setStateToDefault should restore the runType setting to Standard."""
        # reset the runType for testing
        self.assertEqual(self.o.cs[CONF_RUN_TYPE], "Standard")
        self.o.cs = self.o.cs.modified(newSettings={"runType": "fake"})
        self.assertEqual(self.o.cs[CONF_RUN_TYPE], "fake")

        # validate the method works
        cs = self.o.setStateToDefault(self.o.cs)
        self.assertEqual(cs[CONF_RUN_TYPE], "Standard")

    @patch("shutil.copy")
    @patch("os.listdir")
    def test_snapshotRequest(self, fakeDirList, fakeCopy):
        """Snapshot requests should log and create the snapshot directory; file copies are mocked."""
        fakeDirList.return_value = ["mccAA.inp"]
        with TemporaryDirectoryChanger():
            with mockRunLogs.BufferLog() as mock:
                self.o.snapshotRequest(0, 1)
                self.assertIn("ISOTXS-c0", mock.getStdout())
                self.assertIn("DIF3D input for snapshot", mock.getStdout())
                self.assertIn("Shuffle logic for snapshot", mock.getStdout())
                self.assertIn("Loading definition for snapshot", mock.getStdout())
                self.assertTrue(os.path.exists("snapShot0_1"))

        # an explicit iteration number changes the snapshot directory suffix
        with TemporaryDirectoryChanger():
            with mockRunLogs.BufferLog() as mock:
                self.o.snapshotRequest(0, 2, iteration=1)
                self.assertIn("ISOTXS-c0", mock.getStdout())
                self.assertIn("DIF3D input for snapshot", mock.getStdout())
                self.assertIn("Shuffle logic for snapshot", mock.getStdout())
                self.assertIn("Loading definition for snapshot", mock.getStdout())
                self.assertTrue(os.path.exists("snapShot0_2"))
class TestCreateOperator(unittest.TestCase):
    """Verify construction of an Operator directly from case settings."""

    def test_createOperator(self):
        """Test that an operator can be created from settings.

        .. test:: Create an operator from settings.
            :id: T_ARMI_OPERATOR_SETTINGS
            :tests: R_ARMI_OPERATOR_SETTINGS
        """
        caseSettings = settings.Settings()
        operator = Operator(caseSettings)

        # high-level items
        self.assertTrue(isinstance(operator, Operator))
        self.assertTrue(isinstance(operator.cs, settings.Settings))

        # validate some more nitty-gritty operator details come from settings;
        # normalize the setting to a list before comparing
        rawBurnSteps = caseSettings["burnSteps"]
        expectedBurnSteps = rawBurnSteps if type(rawBurnSteps) is list else [rawBurnSteps]
        self.assertEqual(operator.burnSteps, expectedBurnSteps)
        self.assertEqual(operator.maxBurnSteps, max(expectedBurnSteps))

        expectedPowerFracs = caseSettings["powerFractions"]
        if not expectedPowerFracs:
            # no explicit fractions: one cycle at full power for each burn step
            expectedPowerFracs = [[1] * caseSettings["burnSteps"]]
        self.assertEqual(operator.powerFractions, expectedPowerFracs)
class TestTightCoupling(unittest.TestCase):
    """Tests of the operator's tight-coupling iteration machinery."""

    def setUp(self):
        # Bare operator with tight coupling on and an empty reactor, so no real
        # physics runs during these tests.
        self.cs = settings.Settings()
        self.cs[CONF_TIGHT_COUPLING] = True
        self.o = Operator(self.cs)
        self.o.r = Reactor("empty", None)
        self.o.r.core = Core("empty")

    def test_getStepLengths(self):
        """Test the step lengths are correctly calculated, based on settings.

        .. test:: Users can control time discretization of the simulation through settings.
            :id: T_ARMI_FW_HISTORY0
            :tests: R_ARMI_FW_HISTORY
        """
        self.assertEqual(self.cs["nCycles"], 1)
        self.assertAlmostEqual(self.cs["cycleLength"], 365.242199)
        self.assertEqual(self.cs["burnSteps"], 4)
        # one cycle with four burn steps
        self.assertEqual(len(self.o.stepLengths), 1)
        self.assertEqual(len(self.o.stepLengths[0]), 4)

    def test_couplingIsActive(self):
        """Ensure that ``cs[CONF_TIGHT_COUPLING]`` controls ``couplingIsActive``."""
        self.assertTrue(self.o.couplingIsActive())
        self.o.cs[CONF_TIGHT_COUPLING] = False
        self.assertFalse(self.o.couplingIsActive())

    def test_performTightCoupling_Inactive(self):
        """Ensures no action by ``_performTightCoupling`` if ``cs[CONF_TIGHT_COUPLING] = false``."""
        self.o.cs[CONF_TIGHT_COUPLING] = False
        self.o._performTightCoupling(0, 0, writeDB=False)
        # no iteration ran, so the counter stays at zero
        self.assertEqual(self.o.r.core.p.coupledIteration, 0)

    def test_performTightCoupling_skip(self):
        """Ensure that cycles within ``cs[CONF_CYCLES_SKIP_TIGHT_COUPLING_INTERACTION]`` are skipped."""
        self.o.cs[CONF_CYCLES_SKIP_TIGHT_COUPLING_INTERACTION] = [1]
        with mockRunLogs.BufferLog() as mock:
            self.o._performTightCoupling(1, 0, writeDB=False)
            self.assertIn("interactAllCoupled disabled this cycle", mock.getStdout())
            self.assertEqual(self.o.r.core.p.coupledIteration, 0)

    def test_performTightCoupling_notConverged(self):
        """Ensure the appropriate ``runLog.warning`` is emitted when tight coupling hits max iterations.

        .. test:: The tight coupling logic can fail if there is no convergence.
            :id: T_ARMI_OPERATOR_PHYSICS0
            :tests: R_ARMI_OPERATOR_PHYSICS
        """

        class NoConverge(TightCoupler):
            # a coupler that never converges, to force the max-iteration warning path
            def isConverged(self, _val: TightCoupler._SUPPORTED_TYPES) -> bool:
                return False

        class InterfaceNoConverge(Interface):
            name = "NoConverge"

            def __init__(self, r, cs):
                super().__init__(r, cs)
                # maxIters=1 so the warning fires after a single pass
                self.coupler = NoConverge(param="dummy", tolerance=None, maxIters=1)

            def getTightCouplingValue(self):
                return 0.0

        self.o.addInterface(InterfaceNoConverge(None, self.o.cs))
        with mockRunLogs.BufferLog() as mock:
            self.o._performTightCoupling(0, 0, writeDB=False)
            self.assertIn("have not converged! The maximum number of iterations", mock.getStdout())

    def test_performTightCoupling_WriteDB(self):
        """Ensure a tight coupling iteration occurs and that a DB WILL be written if requested."""
        hasCouplingInteraction = 1
        with directoryChangers.TemporaryDirectoryChanger():
            with mockRunLogs.BufferLog() as mock:
                self.dbWriteForCoupling(writeDB=True)
                self.assertIn("Writing to database for statepoint:", mock.getStdout())
                self.assertEqual(self.o.r.core.p.coupledIteration, hasCouplingInteraction)

    def test_performTightCoupling_NoWriteDB(self):
        """Ensure a tight coupling iteration occurs and that a DB WILL NOT be written if requested."""
        hasCouplingInteraction = 1
        with directoryChangers.TemporaryDirectoryChanger():
            with mockRunLogs.BufferLog() as mock:
                self.dbWriteForCoupling(writeDB=False)
                self.assertNotIn("Writing to database for statepoint:", mock.getStdout())
                self.assertEqual(self.o.r.core.p.coupledIteration, hasCouplingInteraction)

    def dbWriteForCoupling(self, writeDB: bool):
        """Run one tight-coupling step with only a database interface attached.

        Asserts the H5 file has contents if and only if ``writeDB`` is True.
        """
        self.o.removeAllInterfaces()
        dbi = DatabaseInterface(self.o.r, self.o.cs)
        dbi.initDB(fName=self._testMethodName + ".h5")
        self.o.addInterface(dbi)
        self.o._performTightCoupling(0, 0, writeDB=writeDB)
        h5Contents = list(dbi.database.getH5Group(dbi.r).items())
        if writeDB:
            self.assertTrue(h5Contents)
        else:
            self.assertFalse(h5Contents)
        dbi.database.close()

    def test_computeTightCouplingConvergence(self):
        """Ensure that tight coupling convergence can be computed and checked.

        Notes
        -----
        - Assertion #1: ensure that the convergence of Keff, eps, is greater than 1e-5 (the
          prescribed convergence criteria)
        - Assertion #2: ensure that eps is (prevIterKeff - currIterKeff)
        """
        prevIterKeff = 0.9
        currIterKeff = 1.0
        self.o.cs[CONF_TIGHT_COUPLING_SETTINGS] = {"globalFlux": {"parameter": "keff", "convergence": 1e-05}}
        globalFlux = GlobalFluxInterfaceUsingExecuters(self.o.r, self.o.cs)
        globalFlux.coupler.storePreviousIterationValue(prevIterKeff)
        self.o.addInterface(globalFlux)
        # set keff to some new value and compute tight coupling convergence
        self.o.r.core.p.keff = currIterKeff
        self.o._convergenceSummary = collections.defaultdict(list)
        self.assertFalse(self.o._checkTightCouplingConvergence([globalFlux]))
        self.assertAlmostEqual(
            globalFlux.coupler.eps,
            currIterKeff - prevIterKeff,
        )
class CyclesSettingsTests(unittest.TestCase):
    """Check that we can correctly access the various cycle settings from the operator."""

    # Detailed cycle history in YAML form: three cycles specified three different
    # ways (cumulative days, cycle length + burn steps, and step days with repeats).
    # NOTE(review): the YAML indentation here was reconstructed after extraction
    # mangled the whitespace -- verify against the repository copy.
    detailedCyclesSettings = """
metadata:
  version: uncontrolled
settings:
  power: 1000000000.0
  nCycles: 3
  cycles:
    - name: startup sequence
      cumulative days: [1, 2, 3]
      power fractions: [0.1, 0.2, 0.3]
      availability factor: 0.1
    - cycle length: 10
      burn steps: 5
      power fractions: [0.2, 0.2, 0.2, 0.2, 0]
      availability factor: 0.5
    - name: prepare for shutdown
      step days: [3, R4]
      power fractions: [0.3, R4]
  runType: Standard
"""

    def setUp(self):
        # Build an operator from the detailed cycles YAML above.
        self.standaloneDetailedCS = Settings()
        self.standaloneDetailedCS.loadFromString(self.detailedCyclesSettings)
        self.detailedOperator = Operator(self.standaloneDetailedCS)

    def test_getPowerFractions(self):
        """Test that the power fractions are calculated correctly.

        .. test:: Test the powerFractions are retrieved correctly for multiple cycles.
            :id: T_ARMI_SETTINGS_POWER1
            :tests: R_ARMI_SETTINGS_POWER
        """
        powerFractionsSolution = [
            [0.1, 0.2, 0.3],
            [0.2, 0.2, 0.2, 0.2, 0],
            [0.3, 0.3, 0.3, 0.3, 0.3],
        ]
        self.assertEqual(self.detailedOperator.powerFractions, powerFractionsSolution)

        # clearing the cached value should force a recompute with the same answer
        self.detailedOperator._powerFractions = None
        self.assertEqual(self.detailedOperator.powerFractions, powerFractionsSolution)

    def test_getCycleNames(self):
        """Named and unnamed cycles should round-trip through the operator."""
        cycleNamesSolution = ["startup sequence", None, "prepare for shutdown"]
        self.assertEqual(self.detailedOperator.cycleNames, cycleNamesSolution)

        self.detailedOperator._cycleNames = None
        self.assertEqual(self.detailedOperator.cycleNames, cycleNamesSolution)

    def test_getAvailabilityFactors(self):
        """Check that the "availability factor" is correctly set from the "cycles" setting.

        .. test:: Users can manually control time discretization of the simulation.
            :id: R_ARMI_FW_HISTORY3
            :tests: R_ARMI_FW_HISTORY
        """
        availabilityFactorsSolution = [0.1, 0.5, 1]
        self.assertEqual(self.detailedOperator.availabilityFactors, availabilityFactorsSolution)

        self.detailedOperator._availabilityFactors = None
        self.assertEqual(self.detailedOperator.availabilityFactors, availabilityFactorsSolution)

    def test_getStepLengths(self):
        """Test that the manually-set, detailed time steps are retrievable.

        .. test:: Users can manually control time discretization of the simulation.
            :id: T_ARMI_FW_HISTORY1
            :tests: R_ARMI_FW_HISTORY
        """
        stepLengthsSolution = [
            [1, 1, 1],
            [10 / 5 * 0.5, 10 / 5 * 0.5, 10 / 5 * 0.5, 10 / 5 * 0.5, 10 / 5 * 0.5],
            [3, 3, 3, 3, 3],
        ]
        # detailed step lengths can be set manually
        self.assertEqual(self.detailedOperator.stepLengths, stepLengthsSolution)

        self.detailedOperator._stepLength = None
        self.assertEqual(self.detailedOperator.stepLengths, stepLengthsSolution)

        # when doing detailed step information, we don't get step information from settings
        cs = self.detailedOperator.cs
        self.assertEqual(cs["nCycles"], 3)
        with self.assertRaises(ValueError):
            cs["cycleLength"]
        with self.assertRaises(ValueError):
            cs["burnSteps"]

    def test_getCycleLengths(self):
        """Check that the "cycle length" is correctly set from the "cycles" setting.

        .. test:: Users can manually control time discretization of the simulation.
            :id: R_ARMI_FW_HISTORY4
            :tests: R_ARMI_FW_HISTORY
        """
        cycleLengthsSolution = [30, 10, 15]
        self.assertEqual(self.detailedOperator.cycleLengths, cycleLengthsSolution)

        self.detailedOperator._cycleLengths = None
        self.assertEqual(self.detailedOperator.cycleLengths, cycleLengthsSolution)

    def test_getBurnSteps(self):
        """Check that the "burn steps" is correctly set from the "cycles" setting.

        .. test:: Users can manually control time discretization of the simulation.
            :id: R_ARMI_FW_HISTORY5
            :tests: R_ARMI_FW_HISTORY
        """
        burnStepsSolution = [3, 5, 5]
        self.assertEqual(self.detailedOperator.burnSteps, burnStepsSolution)

        self.detailedOperator._burnSteps = None
        self.assertEqual(self.detailedOperator.burnSteps, burnStepsSolution)

    def test_getMaxBurnSteps(self):
        """Check that the max of the "burn steps" is correctly set from the "cycles" setting.

        .. test:: Users can manually control time discretization of the simulation.
            :id: R_ARMI_FW_HISTORY6
            :tests: R_ARMI_FW_HISTORY
        """
        maxBurnStepsSolution = 5
        self.assertEqual(self.detailedOperator.maxBurnSteps, maxBurnStepsSolution)

        self.detailedOperator._maxBurnSteps = None
        self.assertEqual(self.detailedOperator.maxBurnSteps, maxBurnStepsSolution)
class TestInterfaceAndEventHeaders(unittest.TestCase):
    """Check the cycle/node text appended to interface interaction log headers."""

    @classmethod
    def setUpClass(cls):
        # Known cycle/node/time/iteration values to appear in the header strings.
        cls.o, cls.r = test_reactors.loadTestReactor(
            inputFileName="smallestTestReactor/armiRunSmallest.yaml",
            customSettings={CONF_TIGHT_COUPLING: True},
        )
        cls.r.p.cycle = 0
        cls.r.p.timeNode = 1
        cls.r.p.time = 11.01
        cls.r.core.p.coupledIteration = 7

    def test_expandCycleAndTimeNodeArgs_Empty(self):
        """When cycleNodeInfo should be an empty string."""
        for task in ["Init", "BOL", "EOL"]:
            self.assertEqual(self.o._expandCycleAndTimeNodeArgs(interactionName=task), "")

    def test_expandCycleAndTimeNodeArgs_Cycle(self):
        """When cycleNodeInfo should return only the cycle."""
        for task in ["BOC", "EOC"]:
            self.assertEqual(
                self.o._expandCycleAndTimeNodeArgs(interactionName=task),
                f" - timestep: cycle {self.r.p.cycle}",
            )

    def test_expandCycleAndTimeNodeArgs_EveryNode(self):
        """When cycleNodeInfo should return the cycle and node."""
        self.assertEqual(
            self.o._expandCycleAndTimeNodeArgs(interactionName="EveryNode"),
            f" - timestep: cycle {self.r.p.cycle}, node {self.r.p.timeNode}, year {'{0:.2f}'.format(self.r.p.time)}",
        )

    def test_expandCycleAndTimeNodeArgs_Coupled(self):
        """When cycleNodeInfo should return the cycle, node, and iteration number."""
        self.assertEqual(
            self.o._expandCycleAndTimeNodeArgs(interactionName="Coupled"),
            (
                f" - timestep: cycle {self.r.p.cycle}, node {self.r.p.timeNode}, year "
                f"{'{0:.2f}'.format(self.r.p.time)} - iteration {self.r.core.p.coupledIteration}"
            ),
        )
class OperatorRestartTests(unittest.TestCase):
    """Tests on the behavior of the interactAllRestart hook."""

    @classmethod
    def setUpClass(cls):
        # Restart mid-run from cycle 4, node 2, loading state from a database.
        cls.START_CYCLE = 4
        cls.START_NODE = 2
        cls.o, cls.r = test_reactors.loadTestReactor(
            inputFileName="smallestTestReactor/armiRunSmallest.yaml",
            customSettings={
                "loadStyle": "fromDB",
                "startCycle": cls.START_CYCLE,
                "startNode": cls.START_NODE,
                # Need more cycles than we're restarting
                "nCycles": cls.START_CYCLE + 3,
            },
        )

    def setUp(self):
        self.dbi: DatabaseInterface = self.o.getInterface("database")
        self.assertIsNotNone(self.dbi, msg="Database interface required for test.")

    def test_nominalRestart(self):
        """Make sure the database interface is uniquely called and the interactRestart is not called for DB.

        We need to make sure the database interface loads the reactor before every other interface goes first.
        But then, when all the interfaces get their chance to ``interactRestart``, the database interface
        does not, since it did its work already.
        """
        mainInterface: Interface = self.o.getInterface(name="main")
        self.assertIsNotNone(mainInterface)
        with (
            patch.object(self.dbi, "interactRestart") as dbInteractRestart,
            patch.object(self.dbi, "prepRestartRun") as dbPrepRestart,
            patch.object(mainInterface, "interactRestart") as mainIfcRestart,
        ):
            self.o.interactAllRestart(self.dbi)
        dbPrepRestart.assert_called_once()
        # Skip DatabaseInterface.interactRestart since we jumped ahead and "restarted" with prepRestartRun
        dbInteractRestart.assert_not_called()
        # Ensure we called other interfaces restarts at the previous node
        mainIfcRestart.assert_called_once_with(
            (self.START_CYCLE, self.START_NODE), (self.START_CYCLE, self.START_NODE - 1)
        )
        self.assertEqual(self.o.r.p.cycle, self.START_CYCLE)
        self.assertEqual(self.o.r.p.timeNode, self.START_NODE)

    @contextmanager
    def patchCS(self, **kwargs):
        """Patch the case settings, restoring at the end of the context block.

        Kwargs are key: value pairs for settings to be modified.

        Can't use ``patch.dict`` because case settings do not have the ``.copy``
        method that ``patch.dict`` expects.
        """
        cs = self.o.cs
        old = {k: cs[k] for k in kwargs}
        for k, v in kwargs.items():
            cs[k] = v
        yield
        # restore the original values even though the yield is unguarded;
        # NOTE(review): a try/finally around the yield would survive exceptions -- confirm intent
        for k, v in old.items():
            cs[k] = v

    def test_callPreviousEOC(self):
        """When restarting at the start of the cycle, make sure we call the previous interactEOC for all interfaces."""
        with (
            self.patchCS(startNode=0),
            patch.object(self.o, "interactAllEOC") as patchEOC,
            # Don't want to attempt to load a fictitious DB
            patch.object(self.dbi, "prepRestartRun"),
        ):
            self.o.interactAllRestart(self.dbi)
        patchEOC.assert_called_once_with(self.START_CYCLE - 1)

    def test_noDatabaseNoRestart(self):
        """Ensure there must be a database interface responsible for loading from database."""
        with self.assertRaisesRegex(ValueError, "No database interface"):
            self.o.interactAllRestart(None)
================================================
FILE: armi/physics/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Physics Packages are where the magic of physical simulation happens in an ARMI run.
.. tip:: The vast majority of physics capabilities are provided by :py:mod:`Plugins <armi.plugins>`.
Thus, this package contains some fairly generic physics-related code that belongs in a reactor
analysis framework.
Besides providing some generic physics-related capabilities, this package also provides a recommended
*physics namespace* for all ARMI plugins to follow. The physics namespace we've come up with is
as follows:
fuelCycle
Fuel management, fabrication, reprocessing, mass flow, etc.
neutronics
Radiation transport, nuclear depletion, nuclear cross sections, reactivity coefficients,
kinetics, etc.
safety
Systems analysis in accident scenarios, source term, dose conversion, etc.
fuelPerformance
Changes in fuel systems vs. burnup and time, including thermophysical modeling of
fuel, cladding, fuel salt, etc.
thermalHydraulics
Heat transfer, fluid flow, pressure drop, power cycles, you name it.
economics
Economic modeling and cost estimation.
.. important:: Yeah, we know that it is kind of a stretch to call economics a kind of physics.
We have found it very useful to use `Python namespace packages
<https://packaging.python.org/guides/packaging-namespace-packages/>`_
to mirror this exact namespace in physics plugins that are outside of the ARMI framework. Thus, there can
be two totally separate plugins::
IAEA/
physics/
neutronics/
superSourceTerm/
__init__.py
plugin.py
and::
IAEA/
physics/
economics/
magwoodsbrain/
__init__.py
plugin.py
And then the associated ARMI-based app could import both ``IAEA.physics.neutronics.superSourceTerm`` and
``IAEA.physics.economics.magwoodsbrain``. Having a consistency in namespace along these lines is
quite nice.
"""
================================================
FILE: armi/physics/constants.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Some constants."""
DPA_CROSS_SECTIONS = {}
"""Multigroup dpa cross sections.
Displacements per atom are correlated to material damage.
Notes
-----
This data structure can be updated by plugins with design-specific dpa data.
:meta hide-value:
"""
# The following are multigroup DPA XS for EBR II. They were generated using an ultra hard MCC spectrum
# that calculated buckling and had an initial keff of 2. Even so, Inc600/625/X750 33 group dpa XS values are less than
# 5% for all but 5 energy groups. The maximum deviation is 18% in INC625 between .192 and .331 MeV.
DPA_CROSS_SECTIONS["dpa_EBRII_HT9"] = [
2.34569e03,
1.92004e03,
1.58640e03,
1.25670e03,
8.24006e02,
5.20750e02,
3.96146e02,
3.28749e02,
2.06149e02,
1.42452e02,
1.15189e02,
6.60183e01,
8.23281e01,
1.31771e01,
1.94552e01,
3.33861e01,
1.27099e01,
6.20510e00,
3.58651e00,
3.74080e00,
4.52607e-01,
1.62650e-01,
1.24318e-01,
1.56210e-01,
1.89583e-01,
2.36694e-01,
2.97445e-01,
3.92136e-01,
5.07320e-01,
6.81782e-01,
1.07978e00,
2.43258e00,
4.35563e00,
]
DPA_CROSS_SECTIONS["dpa_EBRII_INC600"] = [
2.57204e03,
2.11682e03,
1.64031e03,
1.21591e03,
8.69816e02,
6.47128e02,
4.25248e02,
3.59778e02,
2.89208e02,
1.89443e02,
1.55667e02,
1.22460e02,
8.25721e01,
1.15026e02,
9.90510e01,
2.42252e01,
1.73504e01,
9.34915e00,
5.67409e00,
3.13557e00,
5.95081e-01,
1.95832e-01,
1.93791e-01,
2.52465e-01,
3.11159e-01,
3.71897e-01,
4.95951e-01,
6.50177e-01,
8.39344e-01,
1.12626e00,
1.78500e00,
4.02021e00,
7.19616e00,
]
DPA_CROSS_SECTIONS["dpa_EBRII_INC625"] = [
2.49791e03,
2.05899e03,
1.60441e03,
1.20292e03,
8.68237e02,
6.39219e02,
4.16975e02,
3.50177e02,
2.74491e02,
1.89846e02,
1.53178e02,
1.16379e02,
7.35708e01,
1.05281e02,
8.96142e01,
2.58537e01,
1.91218e01,
8.44318e00,
5.16493e00,
2.67000e00,
5.66731e-01,
2.20242e-01,
1.92435e-01,
3.31226e-01,
3.69475e-01,
5.24326e-01,
4.78120e-01,
6.22211e-01,
8.15999e-01,
1.07725e00,
1.70732e00,
3.84540e00,
6.88285e00,
]
DPA_CROSS_SECTIONS["dpa_EBRII_INCX750"] = [
2.59270e03,
2.13361e03,
1.65837e03,
1.23739e03,
8.86458e02,
6.51012e02,
4.27294e02,
3.58449e02,
2.88178e02,
1.88428e02,
1.56886e02,
1.27132e02,
8.89576e01,
1.31703e02,
1.04350e02,
2.55248e01,
1.77532e01,
9.43101e00,
5.60558e00,
3.06838e00,
5.85632e-01,
1.90347e-01,
1.89737e-01,
2.50070e-01,
3.08765e-01,
3.69079e-01,
4.92257e-01,
6.45369e-01,
8.33181e-01,
1.11802e00,
1.77196e00,
3.98945e00,
7.13947e00,
]
DPA_CROSS_SECTIONS["dpa_EBRII_PE16"] = [
2.47895e03,
2.03583e03,
1.61943e03,
1.23864e03,
8.58439e02,
5.95879e02,
4.10632e02,
3.42948e02,
2.49940e02,
1.69919e02,
1.39511e02,
1.00171e02,
8.21254e01,
7.94117e01,
6.73353e01,
2.84413e01,
1.61127e01,
7.13145e00,
4.59314e00,
3.12973e00,
5.17916e-01,
1.51560e-01,
1.56357e-01,
2.37675e-01,
2.81173e-01,
3.65433e-01,
4.12907e-01,
5.40601e-01,
7.03084e-01,
9.37963e-01,
1.48726e00,
3.34954e00,
5.99536e00,
]
================================================
FILE: armi/physics/executers.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Executors are useful for having a standard way to run physics calculations.
They may involve external codes (with inputs/execution/output) or in-memory
data pathways.
"""
import hashlib
import os
from armi import runLog
from armi.context import MPI_RANK, getFastPath
from armi.utils import directoryChangers, pathTools
class ExecutionOptions:
    """
    A data structure representing all options needed for a physics kernel.

    .. impl:: Options for executing external calculations.
        :id: I_ARMI_EX0
        :implements: R_ARMI_EX

        Implements a basic container to hold and report options to be used in
        the execution of an external code (see :need:`I_ARMI_EX1`).

        Options are stored as instance attributes and can be dumped as a string
        using :py:meth:`~armi.physics.executers.ExecutionOptions.describe`, which
        will include the name and value of all public attributes of the instance.

        Also facilitates the ability to execute parallel instances of a code by
        providing the ability to resolve a ``runDir`` that is aware of the
        executing MPI rank. This is done via
        :py:meth:`~armi.physics.executers.ExecutionOptions.setRunDirFromCaseTitle`,
        where the user passes in a ``caseTitle`` string, which is hashed and combined
        with the MPI rank to provide a unique directory name to be used by each parallel
        instance.

    Attributes
    ----------
    inputFile : str
        Name of main input file. Often passed to stdin of external code.
    outputFile : str
        Name of main output file. Often the stdout of external code.
    extraInputFiles : list of tuples
        (sourceName, destName) pairs of file names that will be brought from the
        working dir into the runDir. Allows renames while in transit.
    extraOutputFiles : list of tuples
        (sourceName, destName) pairs of file names that will be extracted from the
        runDir to the working dir
    executablePath : str
        Path to external executable to run (if external code is used)
    runDir : str
        Path on running system where the run will take place. This is often used
        to ensure external codes that use hard-drive disk space run on a local disk
        rather than a shared network drive
    workingDir : str
        Path on system where results will be placed after the run. This is often
        a shared network location. Auto-applied during execution by default.
    label : str
        A name for the run that may be used as a prefix for input/output files generated.
    interfaceName : str
        A name for the interface calling the Executer that may be used to organize the
        input/output files generated within sub-folders under the working directory.
    savePhysicsFiles : bool
        Dump the physics kernel I/O files from the execution to a dedicated directory that
        will not be overwritten so they will be available after the run.
    copyOutput : bool
        Copy the output from running the executable back to the working directory.
    applyResultsToReactor : bool
        Update the in-memory reactor model with results upon completion. Set to False
        when information from a run is needed for auxiliary purposes rather than progressing
        the reactor model.
    paramsToScaleSubset : list, optional
        Subset of parameters to be scaled for this run.
        NOTE(review): semantics depend on the consuming kernel — confirm with callers.
    """

    def __init__(self, label=None):
        self.inputFile = None
        self.outputFile = None
        self.extraInputFiles = []
        self.extraOutputFiles = []
        self.executablePath = None
        self.runDir = None
        self.workingDir = None
        self.label = label
        self.interfaceName = None
        self.applyResultsToReactor = True
        self.paramsToScaleSubset = None
        self.savePhysicsFiles = False
        self.copyOutput = True

    def __repr__(self):
        return f"<{self.__class__.__name__}: {self.label}>"

    def fromUserSettings(self, cs):
        """Set options from a particular Settings object."""
        raise NotImplementedError()

    def fromReactor(self, reactor):
        """Set options from a particular reactor object."""
        raise NotImplementedError()

    def resolveDerivedOptions(self):
        """Called by executers right before executing."""

    def setRunDirFromCaseTitle(self, caseTitle: str) -> None:
        """
        Set run directory derived from case title and label.

        This is optional (you can set runDir to whatever you want). If you
        use this, you will get a relatively consistent naming convention
        for your fast-path folders.
        """
        # This creates a hash of the case title plus the label
        # to shorten the running directory and to avoid path length
        # limitations on the OS.
        caseString = f"{caseTitle}-{str(self.label)}".encode("utf-8")
        caseTitleHash = str(hashlib.sha1(caseString).hexdigest())[:8]
        self.runDir = os.path.join(getFastPath(), f"{caseTitleHash}-{MPI_RANK}")

    def describe(self) -> str:
        """Make a string summary of all options (private ``_`` attributes are skipped)."""
        lines = ["Options summary:", "----------------"]
        for key, val in sorted(self.__dict__.items()):
            if not key.startswith("_"):
                lines.append(f" {key:40s}{str(val)[:80]:80s}")
        return "\n".join(lines)
class Executer:
    """
    Coordinates a single, short-lived calculation step and applies its results to a reactor.

    Notes
    -----
    Deliberately **not** a :py:class:`~mpiActions.MpiAction`: keeping Executers free of
    MPI machinery means they can be composed as multiple steps inside a parent
    (parallelizable) MpiAction, or reused in other flexible ways. This maximizes
    reusability.
    """

    def __init__(self, options, reactor):
        # Directory-changer class used when entering the run directory;
        # subclasses may swap in a different DirectoryChanger variant.
        self.dcType = directoryChangers.TemporaryDirectoryChanger
        self.r = reactor
        self.options = options

    def run(self):
        """
        Execute the calculation: read the current reactor state, compute,
        and write results back onto the reactor.

        Subclasses must override this.
        """
        raise NotImplementedError()
class DefaultExecuter(Executer):
    """
    An Executer that uses a common run sequence.

    This sequence has been found to be relatively common in many
    externally-executed physics codes. It is here for convenience
    but is not required. The sequence look like:

    * Choose modeling options (either from the global run settings input or dictated programmatically)
    * Apply geometry transformations to the ARMI Reactor as needed
    * Build run-specific working directory
    * Write input file(s)
    * Put specific input files and libs in run directory
    * Run the analysis (external execution, or not)
    * Process output while still in run directory
    * Check error conditions
    * Move desired output files back to main working directory
    * Clean up run directory
    * Un-apply geometry transformations as needed
    * Update ARMI data model as desired

    .. impl:: Default tool for executing external calculations.
        :id: I_ARMI_EX1
        :implements: R_ARMI_EX

        Facilitates the execution of external calculations by accepting ``options`` (an
        :py:class:`~armi.physics.executers.ExecutionOptions` object) and providing
        methods that build run directories and execute a code based on the values in
        ``options``.

        The :py:meth:`~armi.physics.executers.DefaultExecuter.run` method will first
        resolve any derived options in the ``options`` object and check if the specified
        ``executablePath`` option is valid, raising an error if not. If it is,
        preparation work for executing the code is performed, such as performing any geometry
        transformations specified in subclasses or building the directories needed
        to save input and output files. Once the temporary working directory is created,
        the executer moves into it and runs the external code, applying any results
        from the run as specified in subclasses.
        Finally, any geometry perturbations that were performed are undone.
    """

    def run(self):
        """
        Run the executer steps.

        .. warning::
            If a calculation requires anything different from what this method does,
            do not update this method with new complexity! Instead, simply make your own
            run sequence and/or class. This pattern is useful only in that it is fairly simple.
            By all means, do use ``DirectoryChanger`` and ``ExecuterOptions``
            and other utilities.

        Returns
        -------
        output
            Whatever the subclass's ``_readOutput`` produces.
        """
        self.options.resolveDerivedOptions()
        runLog.debug(self.options.describe())
        # Fail fast if an external executable was requested but isn't on disk.
        if self.options.executablePath and not os.path.exists(self.options.executablePath):
            raise IOError(f"Required executable `{self.options.executablePath}` not found for {self}")
        self._performGeometryTransformations()
        inputs, outputs = self._collectInputsAndOutputs()
        # e.g. "c2n1" — cycle/time-node tag used to segregate saved physics files.
        state = f"c{self.r.p.cycle}n{self.r.p.timeNode}"
        dirName = self.options.interfaceName or self.options.label
        if self.options.savePhysicsFiles:
            # Dedicated, non-overwritten per-state/per-interface folder for kernel I/O.
            outputDir = os.path.join(pathTools.armiAbsPath(os.getcwd()), state, dirName)
        else:
            outputDir = pathTools.armiAbsPath(os.getcwd())
        # must either write input to CWD for analysis and then copy to runDir
        # or not list it in inputs (for optimization)
        self.writeInput()
        with self.dcType(
            self.options.runDir,
            filesToMove=inputs,
            filesToRetrieve=outputs,
            outputPath=outputDir,
        ) as dc:
            self.options.workingDir = dc.initial
            self._updateRunDir(dc.destination)
            self._execute()
            output = self._readOutput()
            if self.options.applyResultsToReactor:
                # NOTE(review): results are applied via output.apply(); the
                # _applyOutputToDataModel hook below is not invoked here — confirm intended.
                output.apply(self.r)
        self._undoGeometryTransformations()
        self._updateAdditionalParameters()
        return output

    def _updateRunDir(self, directory):
        """
        If a ``TemporaryDirectoryChanger`` is used, the ``runDir`` needs to be updated.

        If a ForcedCreationDirectoryChanger is used instead, nothing needs to be done.

        Parameters
        ----------
        directory : str
            New path for runDir
        """
        # Temporary changers pick their own (randomized) destination, so the
        # options must be updated to point at where we actually ended up.
        if self.dcType == directoryChangers.TemporaryDirectoryChanger:
            self.options.runDir = directory

    def _collectInputsAndOutputs(self):
        """
        Get total lists of input and output files.

        If self.options.copyOutput is false, don't copy the main `outputFile` back from
        the working directory.

        In some ARMI runs, the executer can be run hundreds or thousands of times and
        generate many output files that aren't strictly necessary to keep around. One
        can save space by choosing not to copy the outputs back in these special cases.

        ``extraOutputFiles`` are typically controlled by the subclass, so the copyOutput
        option only affects the main ``outputFile``.
        """
        inputs = [self.options.inputFile] if self.options.inputFile else []
        inputs.extend(self.options.extraInputFiles)
        if self.options.outputFile and self.options.copyOutput:
            outputs = [self.options.outputFile]
        else:
            outputs = []
        outputs.extend(self.options.extraOutputFiles)
        return inputs, outputs

    def _execute(self) -> bool:
        """Log the execution parameters; subclasses do the actual external call."""
        runLog.extra(
            f"Executing {self.options.executablePath}\n"
            f"\tInput: {self.options.inputFile}\n"
            f"\tOutput: {self.options.outputFile}\n"
            f"\tWorking dir: {self.options.runDir}"
        )
        return True

    def writeInput(self):
        """Hook: write kernel input file(s) to the current directory. No-op by default."""
        pass

    def _readOutput(self):
        """Hook: parse kernel output and return a result object. Subclasses must implement."""
        raise NotImplementedError()

    def _applyOutputToDataModel(self, output):
        """Hook: push parsed output onto the reactor model. No-op by default."""
        pass

    def _performGeometryTransformations(self):
        """Hook: apply any geometry transformation before the run. No-op by default."""
        pass

    def _undoGeometryTransformations(self):
        """Hook: revert geometry transformations after the run. No-op by default."""
        pass

    def _updateAdditionalParameters(self):
        """Hook: update any extra reactor parameters after the run. No-op by default."""
        pass
================================================
FILE: armi/physics/fuelCycle/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The fuel cycle package analyzes the various elements of nuclear fuel cycles from mining to disposal.
Fuel cycle code can include things like:
* In- and ex-core fuel management
* Fuel chemistry
* Fuel processing
* Fuel fabrication
* Fuel mass flow scenarios
* And so on
There is one included fuel cycle plugin: The Fuel Handler.
The fuel handler plugin moves fuel around in a reactor.
"""
from armi import interfaces, operators, plugins
from armi.operators import RunTypes
from armi.physics.fuelCycle import fuelHandlers, settings
# Position of fuel-management interfaces in the interface stack ordering.
ORDER = interfaces.STACK_ORDER.FUEL_MANAGEMENT
class FuelHandlerPlugin(plugins.ArmiPlugin):
    """The built-in ARMI fuel management plugin."""

    @staticmethod
    @plugins.HOOKIMPL
    def exposeInterfaces(cs):
        """
        Implementation of the exposeInterfaces plugin hookspec.

        Notes
        -----
        The interface may import user input modules to customize the actual
        fuel management.
        """
        from armi.physics.neutronics.settings import CONF_NEUTRONICS_KERNEL

        # The handler is relevant when the user named one, provided a shuffle
        # sequence file, or asked for a direct-equilibrium standard run.
        eqDirectStandardRun = cs["eqDirect"] and cs["runType"].lower() == RunTypes.STANDARD.lower()
        needsFuelHandler = (
            bool(cs[settings.CONF_FUEL_HANDLER_NAME])
            or bool(cs[settings.CONF_SHUFFLE_SEQUENCE_FILE])
            or eqDirectStandardRun
        )
        if not needsFuelHandler:
            return []
        if "MCNP" in cs[CONF_NEUTRONICS_KERNEL]:
            # Skip the interface entirely for MCNP neutronics kernels.
            return []

        # Register the interface, but disable it for snapshot runs.
        enabled = cs["runType"] != operators.RunTypes.SNAPSHOTS
        return [interfaces.InterfaceInfo(ORDER, fuelHandlers.FuelHandlerInterface, {"enabled": enabled})]

    @staticmethod
    @plugins.HOOKIMPL
    def defineSettings():
        """Expose the fuel-cycle settings to the settings system."""
        return settings.getFuelCycleSettings()

    @staticmethod
    @plugins.HOOKIMPL
    def defineSettingsValidators(inspector):
        """Expose settings inspections for fuel cycle settings."""
        return settings.getFuelCycleSettingValidators(inspector)
================================================
FILE: armi/physics/fuelCycle/assemblyRotationAlgorithms.py
================================================
# Copyright 2022 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Algorithms used to rotate hex assemblies in a reactor core.
Notes
-----
These algorithms are defined in assemblyRotationAlgorithms.py, but they are used in:
``FuelHandler.outage()``.
.. warning:: Nothing should go in this file, but rotation algorithms.
"""
import math
from collections import defaultdict
from armi import runLog
from armi.physics.fuelCycle.hexAssemblyFuelMgmtUtils import (
getOptimalAssemblyOrientation,
)
from armi.physics.fuelCycle.settings import CONF_ASSEM_ROTATION_STATIONARY
from armi.physics.fuelCycle.utils import (
assemblyHasFuelPinBurnup,
assemblyHasFuelPinPowers,
)
from armi.reactor.assemblies import Assembly
def _rotationNumberToRadians(rot: int) -> float:
"""Convert a rotation number to radians, assuming a HexAssembly."""
return rot * math.pi / 3
def buReducingAssemblyRotation(fh):
    """
    Rotates all detail assemblies to put the highest bu pin in the lowest power orientation.

    Parameters
    ----------
    fh : FuelHandler object
        A fully initialized FuelHandler object.

    See Also
    --------
    simpleAssemblyRotation : an alternative rotation algorithm
    """
    runLog.info("Algorithmically rotating assemblies to minimize burnup")
    # Store how we should rotate each assembly but don't perform the rotation just yet.
    # Consider assembly A is shuffled to a new location and rotated.
    # Now, assembly B is shuffled to where assembly A used to be. We need to consider the
    # power profile of A prior to its rotation to understand the power profile B may see.
    rotations: dict[int, list[Assembly]] = defaultdict(list)
    for aPrev in fh.moved:
        # If the assembly was out of the core, it will not have pin powers.
        # No rotation information to be gained.
        if aPrev.lastLocationLabel in Assembly.NOT_IN_CORE:
            continue
        # aNow is the assembly that now occupies aPrev's former location.
        aNow = fh.r.core.getAssemblyWithStringLocation(aPrev.lastLocationLabel)
        # An assembly in the SFP could have burnup but if it's coming from the load
        # queue it's totally fresh. Skip a check over all pins in the model
        if aNow.lastLocationLabel == Assembly.LOAD_QUEUE:
            continue
        # no point in rotation if there's no pin detail
        if assemblyHasFuelPinPowers(aPrev) and assemblyHasFuelPinBurnup(aNow):
            rot = getOptimalAssemblyOrientation(aNow, aPrev)
            rotations[rot].append(aNow)
    if fh.cs[CONF_ASSEM_ROTATION_STATIONARY]:
        # Optionally also consider assemblies that did not move, provided they
        # have pin-level powers and burnup.
        for a in filter(
            lambda asm: asm not in fh.moved and assemblyHasFuelPinPowers(asm) and assemblyHasFuelPinBurnup(asm),
            fh.r.core,
        ):
            rot = getOptimalAssemblyOrientation(a, a)
            rotations[rot].append(a)
    nRotations = 0
    # item[0] is the rotation count; filtering on it skips the zero-rotation bucket.
    for rot, assems in filter(lambda item: item[0], rotations.items()):
        # Radians used for the actual rotation. But a neater degrees print out is nice for logs
        radians = _rotationNumberToRadians(rot)
        degrees = round(math.degrees(radians), 3)
        for a in assems:
            runLog.important(f"Rotating assembly {a} {degrees} CCW.")
            a.rotate(radians)
            nRotations += 1
    runLog.info(f"Rotated {nRotations} assemblies.")
def simpleAssemblyRotation(fh):
    """
    Rotate all pin-detail assemblies that were just shuffled by 60 degrees.

    Parameters
    ----------
    fh : FuelHandler object
        A fully initialized FuelHandler object.

    Notes
    -----
    Stationary (non-shuffled) assemblies are also rotated when the
    corresponding setting is active. Only pin-detail assemblies can be
    rotated, because homogenized assemblies are isotropic.

    Examples
    --------
    >>> simpleAssemblyRotation(fh)

    See Also
    --------
    FuelHandler.outage : calls this method based on a user setting
    """
    runLog.info("Rotating assemblies by 60 degrees")
    sixtyDegrees = math.radians(60)
    historyInterface = fh.o.getInterface("history")
    numRotated = 0
    for assem in historyInterface.getDetailAssemblies():
        # Rotate shuffled assemblies; stationary ones only if the setting asks for it.
        if assem not in fh.moved and not fh.cs[CONF_ASSEM_ROTATION_STATIONARY]:
            continue
        assem.rotate(sixtyDegrees)
        numRotated += 1
        ring, pos = assem.spatialLocator.getRingPos()
        runLog.extra("Rotating Assembly ({0},{1}) to Orientation {2}".format(ring, pos, 1))
    runLog.extra("Rotated {0} assemblies".format(numRotated))
================================================
FILE: armi/physics/fuelCycle/fuelHandlerFactory.py
================================================
# Copyright 2022 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""factory for the FuelHandler."""
import importlib
from pathlib import Path
from armi.physics.fuelCycle import fuelHandlers
from armi.physics.fuelCycle.settings import CONF_FUEL_HANDLER_NAME, CONF_SHUFFLE_LOGIC
from armi.utils import directoryChangers, pathTools
def fuelHandlerFactory(operator):
    """
    Return an instantiated FuelHandler object based on user settings.

    The FuelHandler is expected to be a short-lived object that only lives for
    the cycle upon which it acts. At the next cycle, this factory will be
    called again to instantiate a new FuelHandler.

    Parameters
    ----------
    operator : Operator
        Operator whose case settings (``operator.cs``) select the handler.

    Returns
    -------
    FuelHandler
        The default handler when no ``fuelHandlerName`` is set, otherwise the
        user's subclass loaded from the ``shuffleLogic`` file/module.

    Raises
    ------
    KeyError
        If the module named by ``shuffleLogic`` imports but lacks the class
        named by ``fuelHandlerName``.
    ValueError
        If the ``shuffleLogic`` module/file or the handler class cannot be
        found or imported.
    """
    cs = operator.cs
    fuelHandlerClassName = cs[CONF_FUEL_HANDLER_NAME]
    fuelHandlerModulePath = cs[CONF_SHUFFLE_LOGIC]
    if not fuelHandlerClassName:
        # give the default FuelHandler. This does not have an implemented outage, but
        # still offers moving capabilities. Useful when you just need to make explicit
        # moves but do not have a fully-defined fuel management input.
        return fuelHandlers.FuelHandler(operator)

    # User did request a custom fuel handler. We must go find and import it
    # from the input directory.
    with directoryChangers.DirectoryChanger(cs.inputDirectory, dumpOnException=False):
        try:
            modulePath = Path(fuelHandlerModulePath)
            if modulePath.exists() and modulePath.suffix == ".py":
                # A real .py file on disk: load it directly as a module.
                module = pathTools.importCustomPyModule(modulePath)
            else:
                # Otherwise treat the setting as a dotted importable module name.
                module = importlib.import_module(fuelHandlerModulePath)
            if not hasattr(module, fuelHandlerClassName):
                raise KeyError(
                    "The requested fuel handler object {0} is not "
                    "found in the fuel management input file {1} from CWD {2}. "
                    "Check input"
                    "".format(fuelHandlerClassName, fuelHandlerModulePath, cs.inputDirectory)
                )
            # instantiate the custom object
            fuelHandlerCls = getattr(module, fuelHandlerClassName)
            fuelHandler = fuelHandlerCls(operator)

            # also get getFactorList function from module level if it's there.
            # This is a legacy input option; getFactorList should now generally
            # be a method of the FuelHandler object.
            if hasattr(module, "getFactorList"):
                # staticmethod binds the provided getFactorList function to the
                # fuelHandler object without passing the implicit self argument.
                # The __get__ pulls the actual function out from the descriptor.
                fuelHandler.getFactorList = staticmethod(module.getFactorList).__get__(fuelHandlerCls)
        except (IOError, ImportError):
            # Both file-based and module-based import failures collapse into
            # one user-facing input error.
            raise ValueError(
                "Either the file specified in the `shuffleLogic` setting ({}) or the "
                "fuel handler class name specified in the `fuelHandlerName` setting ({}) "
                "cannot be found. CWD is: {}. Update input.".format(
                    fuelHandlerModulePath, fuelHandlerClassName, cs.inputDirectory
                )
            )
    return fuelHandler
================================================
FILE: armi/physics/fuelCycle/fuelHandlerInterface.py
================================================
# Copyright 2022 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A place for the FuelHandler's Interface."""
from armi import interfaces, runLog
from armi.physics.fuelCycle import fuelHandlerFactory, fuelHandlers
from armi.physics.fuelCycle.settings import (
CONF_PLOT_SHUFFLE_ARROWS,
CONF_RUN_LATTICE_BEFORE_SHUFFLING,
CONF_SHUFFLE_LOGIC,
CONF_SHUFFLE_SEQUENCE_FILE,
)
from armi.utils import plotting
class FuelHandlerInterface(interfaces.Interface):
    """
    Moves and/or processes fuel in a Standard Operator.

    Fuel management traditionally runs at the beginning of a cycle, before
    power or temperatures have been updated. This allows pre-run fuel management
    steps for highly customized fuel loadings. In typical runs, no fuel management
    occurs at the beginning of the first cycle and the as-input state is left as is.

    .. impl:: ARMI provides a shuffle logic interface.
        :id: I_ARMI_SHUFFLE
        :implements: R_ARMI_SHUFFLE

        This interface allows for a user to define custom shuffle logic that
        modifies to the core model. Being based on the :py:class:`~armi.interfaces.Interface`
        class, it has direct access to the current core model.

        User logic is able to be executed from within the
        :py:meth:`~armi.physics.fuelCycle.fuelHandlerInterface.FuelHandlerInterface.manageFuel` method,
        which will use the :py:meth:`~armi.physics.fuelCycle.fuelHandlerFactory.fuelHandlerFactory`
        to search for a Python file or importable module specified by the case setting ``shuffleLogic``.
        If it exists, the fuel handler with name specified by the user via the ``fuelHandlerName``
        case setting will be imported, and any actions in its ``outage`` method
        will be executed at the :py:meth:`~armi.physics.fuelCycle.fuelHandlerInterface.FuelHandlerInterface.interactBOC`
        hook.

        If no class with the name specified by the ``fuelHandlerName`` setting is found
        in the module or file specified by ``shuffleLogic``, an error is returned.

        See the user manual for how the custom shuffle logic module or file should be constructed.
    """

    name = "fuelHandler"

    def __init__(self, r, cs):
        interfaces.Interface.__init__(self, r, cs)
        # assembly name key, (x, y) values. used for making shuffle arrows.
        self.oldLocations = {}
        # ordered record of assemblies moved this cycle (order matters for moves)
        self.moved = []
        self.cycle = 0

    @staticmethod
    def specifyInputs(cs):
        """Map file-pointing settings to their file names for case-input tracking."""
        files = {
            cs.getSetting(settingName): [
                cs[settingName],
            ]
            for settingName in [CONF_SHUFFLE_LOGIC, "explicitRepeatShuffles", CONF_SHUFFLE_SEQUENCE_FILE]
            if cs[settingName]
        }
        return files

    def interactBOC(self, cycle=None):
        """
        Move and/or process fuel.

        Also, if requested, first have the lattice physics system update XS.
        """
        # if lattice physics is requested, compute it here instead of after fuel management.
        # This enables XS to exist for branch searching, etc.
        mc2 = self.o.getInterface(purpose="latticePhysics")
        xsgm = self.o.getInterface("xsGroups")
        if mc2 and self.cs[CONF_RUN_LATTICE_BEFORE_SHUFFLING]:
            runLog.extra(
                f'Running {mc2} lattice physics before fuel management due to the "{CONF_RUN_LATTICE_BEFORE_SHUFFLING}"'
                " setting being activated."
            )
            xsgm.interactBOC(cycle=cycle)
            mc2.interactBOC(cycle=cycle)

        if self.enabled() and (
            self.cs["loadStyle"] != "fromDB" or self.cs["startNode"] == 0 or (self.cs["startCycle"] != cycle)
        ):
            # in restart cases, only do this if restarting at BOC to avoid duplicating shuffles
            # the logic to accomplish this is a bit long because we don't pass the
            # timeNode into interactBOC hooks. Otherwise it would be much easier
            # to determine when to call this or not
            self.manageFuel(cycle)

    def interactEOC(self, cycle=None):
        """Report the spent fuel pool contents at end-of-cycle."""
        if self.r.excore.get("sfp") is not None:
            runLog.extra(f"There are {len(self.r.excore['sfp'])} assemblies in the Spent Fuel Pool")

    def interactEOL(self):
        """Make reports at EOL."""
        self.makeShuffleReport()

    def manageFuel(self, cycle):
        """Perform the fuel management for this cycle."""
        fh = fuelHandlerFactory.fuelHandlerFactory(self.o)
        fh.prepCore()
        fh.prepShuffleMap()
        # take note of where each assembly is located before the outage
        # for mapping after the outage
        self.r.core.locateAllAssemblies()
        shuffleFactors, _ = fh.getFactorList(cycle)
        fh.outage(shuffleFactors)  # move the assemblies around

        if self.cs[CONF_PLOT_SHUFFLE_ARROWS]:
            arrows = fh.makeShuffleArrows()
            plotting.plotFaceMap(
                self.r.core,
                "percentBu",
                labelFmt=None,
                fName="{}.shuffles_{}.png".format(self.cs.caseTitle, self.r.p.cycle),
                shuffleArrows=arrows,
            )

    def makeShuffleReport(self):
        """
        Create a data file listing all the shuffles that occurred in a case.

        This can be used to export shuffling to an external code or to
        perform explicit repeat shuffling in a restart.
        It creates a ``*SHUFFLES.txt`` file based on the Reactor.moves structure

        See Also
        --------
        readMoves : reads this file and parses it.
        """
        fname = self.cs.caseTitle + "-SHUFFLES.txt"
        # Context manager guarantees the file is closed even if a write raises.
        with open(fname, "w") as out:
            for cycle in range(self.cs["nCycles"]):
                # do cycle+1 because cycle 0 at t=0 isn't usually interesting
                # remember, we put cycle 0 in so we could do BOL branch searches.
                # This also syncs cycles up with external physics kernel cycles.
                out.write("Before cycle {0}:\n".format(cycle))
                movesThisCycle = self.r.core.moves.get(cycle)
                if movesThisCycle is not None:
                    for move in movesThisCycle:
                        enrichLine = " ".join(["{0:.8f}".format(enrich) for enrich in move.enrichList])
                        if move.fromLoc in fuelHandlers.FuelHandler.DISCHARGE_LOCS:
                            # this is a re-entering assembly. Give extra info so repeat shuffles can handle it
                            out.write(
                                "{0} moved to {1} with assembly type {2} ringPosCycle={4} with enrich list: {3}\n".format(
                                    move.fromLoc,
                                    move.toLoc,
                                    move.assemType,
                                    enrichLine,
                                    move.ringPosCycle,
                                )
                            )
                        else:
                            # skip extra info. regular expression in readMoves will handle it just fine.
                            out.write(
                                "{0} moved to {1} with assembly type {2} with enrich list: {3}\n".format(
                                    move.fromLoc, move.toLoc, move.assemType, enrichLine
                                )
                            )
                out.write("\n")

    def workerOperate(self, cmd):
        """Delegate mpi command to the fuel handler object."""
        fh = fuelHandlerFactory.fuelHandlerFactory(self.o)
        return fh.workerOperate(cmd)
================================================
FILE: armi/physics/fuelCycle/fuelHandlers.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module handles fuel management operations such as shuffling, rotation, and
fuel processing (in fluid systems).
The :py:class:`FuelHandlerInterface` instantiates a ``FuelHandler``, which is typically a user-defined
subclass the :py:class:`FuelHandler` object in custom shuffle-logic input files.
Users point to the code modules with their custom fuel handlers using the
``shuffleLogic`` and ``fuelHandlerName`` settings, as described in :ref:`fuel-management-input`.
These subclasses override ``chooseSwaps`` that determine
the particular shuffling of a case.
This module also handles repeat shuffles when doing a restart.
"""
# ruff: noqa: F401
import inspect
import math
import os
import re
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional, Tuple
import numpy as np
from ruamel.yaml import YAML
from ruamel.yaml.constructor import DuplicateKeyError
from armi import runLog
from armi.physics.fuelCycle import assemblyRotationAlgorithms as rotAlgos
from armi.physics.fuelCycle.fuelHandlerFactory import fuelHandlerFactory
from armi.physics.fuelCycle.fuelHandlerInterface import FuelHandlerInterface
from armi.physics.fuelCycle.settings import (
CONF_ASSEMBLY_ROTATION_ALG,
CONF_SHUFFLE_SEQUENCE_FILE,
)
from armi.reactor import grids
from armi.reactor.flags import Flags
from armi.reactor.parameters import ParamLocation
from armi.utils.customExceptions import InputError
@dataclass(eq=True)
class AssemblyMove:
    """A single shuffle move of one assembly.

    Parameters
    ----------
    fromLoc : str
        Original location label.
    toLoc : str
        Destination location label.
    enrichList : list[float]
        Axial U235 weight percent enrichment values for each block.
    assemType : str, optional
        Type of assembly that is moving.
    ringPosCycle : list[int], optional
        List of ints of length 3. For assembly retrieval from SFP.
        (ring, position, cycle) specifies the desired assembly resided at
        (ring, position) during specified cycle number.
    rotation : float, optional
        Degrees of manual rotation to apply after shuffling.
    """

    fromLoc: str
    toLoc: str
    enrichList: List[float] = field(default_factory=list)
    assemType: Optional[str] = None
    ringPosCycle: Optional[list[int]] = None
    rotation: Optional[float] = None

    def __post_init__(self):
        """Validate the optional (ring, position, cycle) retrieval specifier."""
        if self.ringPosCycle is None:
            return
        errorMsg = (
            "invalid (ring, position, cycle) specified for assembly retrieval from SFP\n"
            f"expected: list of ints, len=3\nreceived: {self.ringPosCycle}"
        )
        if not isinstance(self.ringPosCycle, list):
            raise TypeError(errorMsg)
        if len(self.ringPosCycle) != 3:
            raise ValueError(errorMsg)
        if any(not isinstance(entry, int) for entry in self.ringPosCycle):
            raise TypeError(errorMsg)
@dataclass
class ProcessMoveListResult:
    """Container for the results of :meth:`FuelHandler.processMoveList`."""

    # Chains of location labels for assemblies loaded/charged into the core.
    # NOTE(review): chain semantics come from processMoveList (not visible here) — confirm.
    loadChains: List[List[str]]
    # Chains of location labels for closed in-core move loops.
    loopChains: List[List[str]]
    # Per-chain axial enrichment lists (parallel to loadChains).
    enriches: List[List[float]]
    # Assembly type name for each load chain; None when unspecified.
    loadChargeTypes: List[Optional[str]]
    # Optional (ring, position, cycle) SFP-retrieval specifiers per chain.
    ringPosCycles: List[Optional[list[int]]]
    # Destination labels for discharged assemblies (e.g. SFP/Delete).
    dischargeDests: List[str]
    # (location label, degrees) pairs of manual rotations to apply.
    rotations: List[Tuple[str, float]]
    # Labels of moves that have already been carried out.
    alreadyDone: List[str]
class FuelHandler:
"""
A fuel handling machine can move fuel around the core and reactor.
It makes decisions on how to shuffle fuel based on user specifications.
It provides some supervisory data tracking, such as having the ability
to print out information about all moves that happened in a cycle (without
the user needing to explicitly track this information).
To use this, simply create an input Python file and point to it by path
with the ``fuelHandler`` setting. In that file, subclass this object.
"""
DISCHARGE_LOCS = frozenset({"SFP", "Delete"})
"""Special strings to indicate an assembly is no longer in the core."""
def __init__(self, operator):
# we need access to the operator to find the core, get settings, grab other interfaces, etc.
self.o = operator
self.moved = []
self.pendingRotations = []
@property
def cycle(self):
"""
Link to the current cycle number.
Notes
-----
This retains backwards compatibility with previous fuel handler inputs.
"""
return self.o.r.p.cycle
@property
def cs(self):
"""Link to the Case Settings object."""
return self.o.cs
@property
def r(self):
"""Link to the Reactor object."""
return self.o.r
def outage(self, factor=1.0):
    """
    Simulates a reactor reload outage. Moves and tracks fuel.

    This sets the moveList structure (via ``r.core.setMoveList``) and updates the
    per-assembly location-history parameters.

    Parameters
    ----------
    factor : float, optional
        Shuffle-control factor forwarded to ``chooseSwaps`` when a normal
        (non-repeat) shuffle is performed.

    Returns
    -------
    list
        The assemblies that moved during this outage.

    Raises
    ------
    ValueError
        If this FuelHandler instance already performed an outage.
    FileNotFoundError
        If the requested shuffle sequence file does not exist.
    RuntimeError
        If the requested repeat-shuffle file or rotation algorithm is missing.
    """
    if self.moved:
        raise ValueError("Cannot perform two outages with same FuelHandler instance.")

    # determine if a repeat shuffle is occurring or a new shuffle pattern
    if self.cs[CONF_SHUFFLE_SEQUENCE_FILE]:
        if not os.path.exists(self.cs[CONF_SHUFFLE_SEQUENCE_FILE]):
            raise FileNotFoundError(
                "Requested shuffle sequence file {0} does not exist. Cannot perform shuffling. ".format(
                    self.cs[CONF_SHUFFLE_SEQUENCE_FILE]
                )
            )
        runLog.important("Applying shuffle sequence from {}".format(self.cs[CONF_SHUFFLE_SEQUENCE_FILE]))
        # location hist params updated within performShuffle
        self.performShuffle(self.cs[CONF_SHUFFLE_SEQUENCE_FILE], yaml=True)
    elif self.cs["explicitRepeatShuffles"]:
        # repeated shuffle
        if not os.path.exists(self.cs["explicitRepeatShuffles"]):
            raise RuntimeError(
                "Requested repeat shuffle file {0} does not exist. Cannot perform shuffling. ".format(
                    self.cs["explicitRepeatShuffles"]
                )
            )
        runLog.important("Repeating a shuffling pattern from {}".format(self.cs["explicitRepeatShuffles"]))
        # location hist params updated within performShuffle
        self.performShuffle(self.cs["explicitRepeatShuffles"])
    else:
        # Normal shuffle from user-provided shuffle logic input
        self.chooseSwaps(factor)
        self.updateAllLocationHistParams(self.cycle)

    # do rotations if pin-level details are available (requires fluxRecon plugin)
    if self.cs["fluxRecon"] and self.cs[CONF_ASSEMBLY_ROTATION_ALG]:
        # Rotate assemblies ONLY IF at least some assemblies have pin detail.
        # The user can choose the algorithm method name directly in the settings
        if hasattr(rotAlgos, self.cs[CONF_ASSEMBLY_ROTATION_ALG]):
            rotationMethod = getattr(rotAlgos, self.cs[CONF_ASSEMBLY_ROTATION_ALG])
            rotationMethod(self)
        else:
            raise RuntimeError(
                "FuelHandler {0} does not have a rotation algorithm called {1}.\nChange your {2} setting".format(
                    rotAlgos,
                    self.cs[CONF_ASSEMBLY_ROTATION_ALG],
                    CONF_ASSEMBLY_ROTATION_ALG,
                )
            )

    # apply manual rotations requested by the shuffle YAML (queued by performShuffle)
    for loc, deg in self.pendingRotations:
        assem = self.r.core.getAssemblyWithStringLocation(loc)
        if assem is None:
            runLog.warning(f"No assembly found at {loc} for manual rotation")
            continue
        runLog.important(f"Rotating assembly {assem} in {loc} by {deg} degrees CCW from shuffle file")
        assem.rotate(math.radians(deg))
    self.pendingRotations = []

    # inform the reactor of how many moves occurred so it can put the number in the database.
    if self.moved:
        numMoved = len(self.moved) * self.r.core.powerMultiplier

        # tell the reactor which assemblies moved where.
        # also tell enrichments of each block in case there's some autoboosting going on.
        # This is also essential for repeating shuffles in later restart runs.
        for a in self.moved:
            try:
                # grab first (ring, pos) at cycle info which can be used to identify this
                # assembly if it goes to SFP. Stays None when no in-core history exists.
                ringPosCycle = None
                if a.p.ringPosHist:
                    for cycleNum, rp in enumerate(a.p.ringPosHist):
                        if isinstance(rp, tuple) and rp[0] not in a.NOT_IN_CORE:
                            ringPosCycle = [int(rp[0]), int(rp[1]), cycleNum]
                            break

                self.r.core.setMoveList(
                    self.cycle,
                    a.lastLocationLabel,
                    a.getLocation(),
                    [b.getUraniumMassEnrich() for b in a],
                    a.getType(),
                    ringPosCycle,
                )
            except Exception:
                # narrowed from a bare ``except:`` so KeyboardInterrupt and friends
                # are not intercepted; context is logged and the error re-raised.
                runLog.important("A fuel management error has occurred. ")
                runLog.important("Trying operation on assembly {}".format(a))
                runLog.important("The moved list is {}".format(self.moved))
                raise
    else:
        numMoved = 0

    self.o.r.core.p.numMoves = numMoved
    self.o.r.core.setBlockMassParams()

    runLog.important("Fuel handler performed {0} assembly shuffles.".format(numMoved))

    # now wipe out the self.moved version so it doesn't transmit the assemblies during distributeState
    moved = self.moved[:]
    self.moved = []
    return moved
def _preconditionLocationHistParam(self, a, cycle):
"""
Trim assembly location history param to be consistent with the specified
cycle or the reactor cycle parameter in preparation for the current ring and
position to be added.
list index corresponds to the cycle number n, which will be appended after the
parameter is preconditioned to length n (max index is n-1)
e.g. i=0 is the initial position, i=1 is the position at BOC1, etc.
Parameters
----------
a : armi.reactor.assembly.Assembly
cycle : int
cycle number at BOC to update assembly location history
"""
# Param length is shorter than expected (data from previous cycles is missing or shuffling was not performed
# on a previous cycle)
if len(a.p.ringPosHist) < cycle:
a.p.ringPosHist += [(a.NOT_CREATED_YET, a.NOT_CREATED_YET)] * (cycle - len(a.p.ringPosHist))
# Param length is longer than expected. perhaps a restart analysis of some sort. trim trailing data
if len(a.p.ringPosHist) > cycle:
a.p.ringPosHist = a.p.ringPosHist[:cycle]
return a
def _updateAssemLocationHistParam(self, a, cycle):
"""
Update assembly location history parameter with current assembly location for
specified cycle number.
Index of a.p.ringPosHist corresponds to the cycle number BOC assembly location
e.g. i=0 is the initial position, i=1 is the position at BOC1, etc.
"""
a = self._preconditionLocationHistParam(a, cycle)
# assem param should now be the correct len. append data at correct index.
if a.getLocation() in a.NOT_IN_CORE:
a.p.ringPosHist.append((a.getLocation(), a.getLocation()))
else:
ring, pos, _ = grids.locatorLabelToIndices(a.getLocation())
a.p.ringPosHist.append((ring, pos))
def updateAllLocationHistParams(self, cycle):
"""
Update location history param for all assemblies with current assembly locations
for specified cycle number
Index of a.p.ringPosHist corresponds to the cycle number BOC assembly location
e.g. i=0 is the initial position, i=1 is the position at BOC1, etc.
"""
for a in self.r.core:
self._updateAssemLocationHistParam(a, cycle)
for a in list(self.r.excore["sfp"]):
self._updateAssemLocationHistParam(a, cycle)
def chooseSwaps(self, shuffleFactors=None):
    """
    Moves the fuel around or otherwise processes it between cycles.

    Subclasses must override this with their shuffling logic; the base class
    only defines the hook.

    Parameters
    ----------
    shuffleFactors : dict, optional
        Shuffle-control factors — presumably the dict produced by
        ``getFactorList``; confirm against subclass implementations.

    Raises
    ------
    NotImplementedError
        Always, on the base class.
    """
    raise NotImplementedError
@staticmethod
def getFactorList(cycle, cs=None, fallBack=False):
"""
Return factors between 0 and 1 that control fuel management.
This is the default shuffle control function. Usually you would override this
with your own in a custom shuffleLogic.py file. For more details about how this
works, refer to :ref:`fuel-management-input`.
This will get bound to the default FuelHandler as a static method below. This is
done to allow a user to mix and match FuelHandler class implementations and
getFactorList implementations at run time.
Notes
-----
Ultimately, this approach will likely get replaced using the plugin framework, but
we aren't there yet.
"""
# prefer to keep these 0 through 1 since this is what the branch search can do.
defaultFactorList = {"eqShuffles": 1}
factorSearchFlags = []
return defaultFactorList, factorSearchFlags
def prepCore(self):
    """Aux function to run before XS generation (do moderation, etc).

    The default implementation is a no-op; subclasses may override it.
    """
    pass
@staticmethod
def _compareAssem(candidate, current):
"""Check whether the candidate assembly should replace the current ideal assembly.
Given a candidate tuple (diff1, a1) and current tuple (diff2, a2), decide whether the
candidate is better than the current ideal. This first compares the diff1 and diff2 values.
If diff1 is sufficiently less than diff2, a1 wins, returning True. Otherwise, False. If
diff1 and diff2 are sufficiently close, the assembly with the lesser assemNum wins. This
should result in a more stable comparison than on floating-point comparisons alone.
"""
if np.isclose(candidate[0], current[0], rtol=1e-8, atol=1e-8):
return candidate[1].p.assemNum < current[1].p.assemNum
else:
return candidate[0] < current[0]
@staticmethod
def _getParamMax(a, paramName, blockLevelMax=True):
"""Get assembly/block-level maximum parameter value in assembly."""
multiplier = a.getSymmetryFactor()
if multiplier != 1:
# handle special case: volume-integrated parameters where symmetry factor is not 1
if blockLevelMax:
paramCollection = a[0].p
else:
paramCollection = a.p
isVolumeIntegrated = paramCollection.paramDefs[paramName].location == ParamLocation.VOLUME_INTEGRATED
multiplier = a.getSymmetryFactor() if isVolumeIntegrated else 1.0
if blockLevelMax:
return a.getChildParamValues(paramName).max() * multiplier
else:
return a.p[paramName] * multiplier
def findAssembly(
    self,
    targetRing=None,
    width=(0, 0),
    param=None,
    compareTo=None,
    forceSide=None,
    exclusions=None,
    typeSpec=None,
    mandatoryLocations=None,
    zoneList=None,
    excludedLocations=None,
    minParam=None,
    minVal=None,
    maxParam=None,
    maxVal=None,
    findMany=False,
    coords=None,
    exactType=False,
    acceptFirstCandidateRing=False,
    blockLevelMax=False,
    findFromSfp=False,
    maxNumAssems=None,
    circularRingFlag=False,
):
    r"""
    Search reactor for assemblies with various criterion. Primarily for shuffling.

    Parameters
    ----------
    targetRing : int, optional
        The ring in which to search

    width : tuple of integers
        A (size, side) tuple where size is the number of rings on either side to also check.
        side=1: only look in higher, -1: only look lower, 0: both sides

    param : string, optional
        A block (if blockLevelMax) or assem level param name such as 'power' or 'percentBu'
        (requires compareTo).

    compareTo : float or Assembly instance
        an assembly to be compared to. Alternatively, a floating point number to compare to.
        Even more alternatively, an (assembly,mult) or (float,mult) tuple where mult is a
        multiplier. For example, if you wanted an assembly that had a bu close to half of
        assembly bob, you'd give param='percentBu', compareTo=(bob,0.5) If you want one with a
        bu close to 0.3, you'd do param='percentBu',compareTo=0.3. Yes, if you give a (float,
        multiplier) tuple the code will still work as expected.

    forceSide : bool, optional
        requires the found assembly to have either 1: higher, -1: lower, None: any param than
        compareTo

    exclusions : list, optional
        List of assemblies that will be excluded from the search

    minParam : float or list, optional
        a parameter to compare to minVal for setting lower bounds. If list, must correspond to
        parameters in minVal in order.

    maxParam : float or list, optional
        a parameter to compare to maxVal for setting upper bounds of acceptable assemblies.
        If list, must correspond to parameters in maxVal in order.

    minVal : float or list, optional
        a value or a (parameter, multiplier) tuple for setting lower bounds

        For instance, if minParam='timeToLimit' and minVal=10, only assemblies with timeToLimit
        higher than 10 will be returned. (Of course, there is also maxParam and maxVal)

    maxVal : float or list, optional
        a value or a (parameter, multiplier) tuple for setting upper bounds

    mandatoryLocations : list, optional
        A list of string-representations of locations in the core for limiting the search to
        several places. Any locations also included in `excludedLocations` will be excluded.

    excludedLocations : list, optional
        a list of string-representations of locations in the core that will be excluded from
        the search

    zoneList : list, optional
        name of a zone defined in settings.py that will be picked from. Under development

    findMany : bool, optional
        If True, will return a list of assemblies that match. Don't give a param.

    typeSpec : Flags or list of Flags, optional
        only assemblies with this type list will be returned. If none, only fuel will be found.

    coords : tuple, optional
        x,y tuple in cm. the fuel handler will try to find an assembly with a center closest to
        that point

    exactType : bool, optional
        require type to be exactly equal to what's in the type list. So
        Flags.IGNITER | Flags.FUEL is not Flags.INNER | Flags.IGNITER | Flags.FUEL

    acceptFirstCandidateRing : bool, optional
        takes the first assembly found in the earliest ring (without searching all rings for a
        maxBu, for example) So if the candidate rings are 1-10 and we're looking for igniter
        fuel with a maxBurnup, we don't get the max burnup in all those rings, but rather the
        igniter with the max burnup in the ring closest to 1. If there are no igniters until
        ring 4, you will get an igniter in ring 4.

    blockLevelMax : bool, optional
        If true, the param to search for will be built as the maximum block-level param of this
        name instead of the assembly param. This avoids the need to assign assembly level params
        sometimes.
        default: false.

    findFromSfp : bool, optional
        If true, will look in the spent-fuel pool instead of in the core.

    maxNumAssems : int, optional
        The maximum number of assemblies to return. Only relevant if findMany==True

    circularRingFlag : bool, optional
        Toggle using rings that are based on distance from the center of the reactor

    Notes
    -----
    The call signature on this method may have gotten slightly out of hand as valuable
    capabilities were added in fuel management studies. For additional expansion, it may be
    worth reconsidering the design of these query operations.

    Returns
    -------
    Assembly instance or assemList of assembly instances that match criteria, or None if none
    match

    Examples
    --------
    This returns the feed fuel assembly in ring 4 that has a burnup closest to 100%
    (the highest burnup assembly)::

        feed = self.findAssembly(
            targetRing=4, width=(0, 0), param="maxPercentBu", compareTo=100, typeSpec=Flags.FEED | Flags.FUEL
        )
    """
    # list for storing multiple results if findMany is true.
    assemList = []

    # process input arguments
    if targetRing is None:
        # no target ring given: look through the full core
        targetRing = 0
        width = (100, 0)

    if exclusions is None:
        exclusions = []

    if isinstance(minVal, list):
        # list given with multiple mins
        minVals = minVal
        minParams = minParam
    else:
        minVals = [minVal]
        minParams = [minParam]

    if isinstance(maxVal, list):
        maxVals = maxVal
        maxParams = maxParam
    else:
        # just one given. put it in a list so the below machinery can handle it.
        maxVals = [maxVal]
        maxParams = [maxParam]

    if typeSpec is None:
        # restrict motions to fuel only
        # not really necessary. take this default out if you want to move control rods, etc.
        typeSpec = Flags.FUEL

    # running best match as a (diff, assembly) tuple; the huge sentinel diff
    # guarantees the first real candidate wins the comparison.
    minDiff = (1e60, None)

    # compareTo can either be a tuple, a value, or an assembly.
    # if it's a tuple, it can either be an int/float and a multiplier, or an assembly and a multiplier.
    # if it's not a tuple, the multiplier will be assumed to be 1.0
    mult = 1.0  # if no mult brought in, just assume 1.0
    if isinstance(compareTo, tuple):
        # tuple (assem or int/float, multiplier) brought in. separate it
        compareTo, mult = compareTo

    if isinstance(compareTo, (float, int)):
        # floating point or int.
        compVal = compareTo * mult
    elif param:
        # assume compareTo is an assembly
        compVal = FuelHandler._getParamMax(compareTo, param, blockLevelMax) * mult

    if coords:
        # find the assembly closest to xt,yt if coords are given, without considering params.
        aTarg = None
        minD = 1e10
        xt, yt = coords  # assume (x,y) tuple
        for a in self.r.core:
            x, y, _ = a.spatialLocator.getLocalCoordinates()
            d = (y - yt) ** 2 + (x - xt) ** 2
            if d < minD:
                minD = d
                aTarg = a
        return aTarg

    if findFromSfp:
        # hack to enable SFP searching.
        candidateRings = ["SFP"]
    else:
        # set up candidateRings based on targetRing and width. The target ring comes first b/c it is preferred.
        candidateRings = [targetRing]
        if width[1] <= 0:
            # 0 or -1 implies that the inner rings can be added.
            for inner in range(width[0]):
                candidateRings.append(targetRing - inner - 1)  # +1 to get 1,2,3 instead of 0,1,2
        if width[1] >= 0:
            # if 1, add in the outer rings
            for outer in range(width[0]):
                candidateRings.append(targetRing + outer + 1)

    # get lists of assemblies in each candidate ring. Do it in this order in case we prefer ones in the first.
    # scan through all assemblies and find the one (or more) that best fits the criteria
    for ringI, assemsInRings in enumerate(
        self._getAssembliesInRings(candidateRings, typeSpec, exactType, exclusions, circularRingFlag)
    ):
        for a in assemsInRings:
            # "innocent until proven guilty": flipped to False once any bound check fails
            innocent = True

            # Check that this assembly's minParam is > the minimum for each minParam
            for minIndex, minVal in enumerate(minVals):
                minParam = minParams[minIndex]
                if minParam:
                    # a minimum was specified. Check to see if we're ok
                    if isinstance(minVal, tuple):
                        # tuple turned in. it's a multiplier and a param
                        realMinVal = FuelHandler._getParamMax(a, minVal[0], blockLevelMax) * minVal[1]
                    else:
                        realMinVal = minVal

                    if FuelHandler._getParamMax(a, minParam, blockLevelMax) < realMinVal:
                        # this assembly does not meet the minVal specifications. Skip it.
                        innocent = False
                        break  # for speed (not a big deal here)

            if not innocent:
                continue

            # Check upper bounds, to make sure this assembly doesn't have maxParams>maxVals
            for maxIndex, maxVal in enumerate(maxVals):
                maxParam = maxParams[maxIndex]
                if maxParam:
                    if isinstance(maxVal, tuple):
                        # tuple turned in. it's a multiplier and a param
                        realMaxVal = FuelHandler._getParamMax(a, maxVal[0], blockLevelMax) * maxVal[1]
                    else:
                        realMaxVal = maxVal

                    if FuelHandler._getParamMax(a, maxParam, blockLevelMax) > realMaxVal:
                        # this assembly has a maxParam that's higher than maxVal and therefore
                        # doesn't qualify. skip it.
                        innocent = False
                        break

            if not innocent:
                continue

            # Check to see if this assembly is in the list of candidate locations. if not, skip it.
            if mandatoryLocations:
                if a.getLocation() not in mandatoryLocations:
                    continue

            if excludedLocations:
                if a.getLocation() in excludedLocations:
                    # this assembly is in the excluded location list. skip it.
                    continue

            # only process if the Assembly is in a Zone
            if not self.isAssemblyInAZone(zoneList, a):
                continue

            # Now find the assembly with the param closest to the target val.
            if param:
                diff = abs(FuelHandler._getParamMax(a, param, blockLevelMax) - compVal)

                if (
                    forceSide == 1
                    and FuelHandler._getParamMax(a, param, blockLevelMax) > compVal
                    and FuelHandler._compareAssem((diff, a), minDiff)
                ):
                    # forceSide=1, so that means look in rings further out
                    minDiff = (diff, a)
                elif (
                    forceSide == -1
                    and FuelHandler._getParamMax(a, param, blockLevelMax) < compVal
                    and FuelHandler._compareAssem((diff, a), minDiff)
                ):
                    # forceSide=-1, so that means look in rings closer in from the targetRing
                    minDiff = (diff, a)
                elif FuelHandler._compareAssem((diff, a), minDiff):
                    # no preference of which side, just take the one with the closest param.
                    minDiff = (diff, a)
            else:
                # no param specified. Just return one closest to the target ring
                diff = None
                if a.spatialLocator.getRingPos()[0] == targetRing:
                    # short circuit the search
                    if findMany:
                        # continue (rather than fall through) so this assembly is not appended twice
                        assemList.append((diff, a))
                        continue
                    else:
                        return a
                elif abs(a.spatialLocator.getRingPos()[0] - targetRing) < minDiff[0]:
                    minDiff = (
                        abs(a.spatialLocator.getRingPos()[0] - targetRing),
                        a,
                    )

            if findMany:
                # returning many assemblies. If there's a param, we'd like it to be honored by
                # ordering this list from smallest diff to largest diff.
                assemList.append((diff, a))

        if ringI == 0 and acceptFirstCandidateRing and minDiff[1]:
            # an acceptable assembly was found in the targetRing (ringI==0)
            # and the user requested this to be returned. Therefore, return it without
            # scanning through the additional rings.
            return minDiff[1]

    if not minDiff[1]:
        # can't find assembly in targetRing with close param to compareTo.
        # NOTE(review): deliberate no-op kept as a breakpoint/debug hook.
        pass

    if findMany:
        assemList.sort()  # prefer items that have params that are the closest to the value.
        # extract the assemblies.
        assemsInRings = [a for diff, a in assemList]
        if maxNumAssems:
            return assemsInRings[:maxNumAssems]
        else:
            return assemsInRings
    else:
        return minDiff[1]
@staticmethod
def isAssemblyInAZone(zoneList, a):
"""Does the given assembly in one of these zones."""
if zoneList:
# ruff: noqa: SIM110
for zone in zoneList:
if a.getLocation() in zone:
# Success!
return True
return False
else:
# A little counter-intuitively, if there are no zones, we return True.
return True
def _getAssembliesInRings(
    self,
    ringList,
    typeSpec=Flags.FUEL,
    exactType=False,
    exclusions=None,
    circularRingFlag=False,
):
    """
    Find assemblies in particular rings.

    Parameters
    ----------
    ringList : list
        List of integer ring numbers to find assemblies in. Optionally, a string specifying a
        special location like the SFP (spent fuel pool)
    typeSpec : Flags or iterable of Flags, optional
        Flag types to restrict assemblies to
    exactType : bool, optional
        Match the type in typelist exactly
    exclusions : list of Assemblies, optional
        exclude these assemblies from the results
    circularRingFlag : bool
        A flag to toggle on using rings that are based on distance from the center of the reactor

    Returns
    -------
    assemblyList : list
        List of assemblies in each ring of the ringList. [[a1,a2,a3],[a4,a5,a6,a7],...]

    Notes
    -----
    Two fixes relative to the historical implementation:

    * In the circular-ring branch, each ring previously shared one accumulator list,
      so every entry of the result aliased a single growing list containing all
      earlier rings' assemblies. Each ring now gets its own list.
    * SFP assemblies are only gathered (and the missing-SFP warning only emitted)
      when "SFP" is actually in ``ringList``; previously a reactor without an SFP
      crashed here even when the SFP was not requested.
    """
    # gather SFP assemblies only if they can be provided; warn when they are
    # requested but no SFP is attached to the reactor
    if self.r.excore.get("sfp") is not None:
        sfpAssems = list(self.r.excore["sfp"])
    else:
        sfpAssems = []
        if "SFP" in ringList:
            runLog.warning(
                f"{self} can't pull from SFP; no SFP is attached to the reactor {self.r}."
                "To get assemblies from an SFP, you must add an SFP system to the blueprints"
                f"or otherwise instantiate a SpentFuelPool object as r.excore['sfp']"
            )

    assemblyList = [[] for _i in range(len(ringList))]  # empty lists for each ring

    if exclusions is None:
        exclusions = []
    exclusions = set(exclusions)

    if circularRingFlag:
        if ringList[0] == "SFP":
            # kind of a hack for now. Need the capability.
            assemblyList = sfpAssems
        else:
            for i, ringNumber in enumerate(ringList):
                ringAssems = self.r.core.getAssembliesInCircularRing(ringNumber, typeSpec, exactType, exclusions)
                # save only the assemblies not in the exclusions and with the proper type.
                # Build a fresh list per ring (the old shared accumulator leaked every
                # previous ring's assemblies into each entry).
                assemblyList[i] = [
                    a for a in ringAssems if a not in exclusions and a.hasFlags(typeSpec, exact=exactType)
                ]
    else:
        if ringList[0] == "SFP":
            # kind of a hack for now. Need the capability.
            assemList = sfpAssems
        else:
            assemList = self.r.core.getAssemblies()

        for a in assemList:
            if a in exclusions:
                continue
            if not a.hasFlags(typeSpec, exact=exactType):
                continue

            if a.getLocation() == "SFP":
                ring = "SFP"
            else:
                ring = a.spatialLocator.getRingPos()[0]

            if ring in ringList:
                # keep it in the right order
                assemblyList[ringList.index(ring)].append(a)

    return assemblyList
def swapAssemblies(self, a1, a2):
    """Moves a whole assembly from one place to another.

    .. impl:: User-specified blocks can be left in place during within-core swaps.
        :id: I_ARMI_SHUFFLE_STATIONARY0
        :implements: R_ARMI_SHUFFLE_STATIONARY

        Before assemblies are moved, the ``_transferStationaryBlocks`` class method is called to
        check if there are any block types specified by the user as stationary via the
        ``stationaryBlockFlags`` case setting. Using these flags, blocks are gathered from each
        assembly which should remain stationary and checked to make sure that both assemblies
        have the same number and same height of stationary blocks. If not, return an error.

        If all checks pass, the :py:meth:`~armi.reactor.assemblies.Assembly.remove` and
        :py:meth:`~armi.reactor.assemblies.Assembly.insert` methods are used to swap the
        stationary blocks between the two assemblies.

        Once this process is complete, the actual assembly movement can take place. Through this
        process, the stationary blocks remain in the same core location.

    Parameters
    ----------
    a1 : :py:class:`Assembly `
        The first assembly
    a2 : :py:class:`Assembly `
        The second assembly

    See Also
    --------
    dischargeSwap : swap assemblies where one is outside the core and the other is inside
    """
    if a1 is None or a2 is None:
        runLog.warning("Cannot swap None assemblies. Check your findAssembly results. Skipping swap")
        return

    runLog.extra("Swapping {} with {}.".format(a1, a2))
    # add assemblies into the moved location
    # (kept unique so numMoves is not artificially inflated by repeat swaps)
    for a in [a1, a2]:
        if a not in self.moved:
            self.moved.append(a)
    # capture a1's locator BEFORE the stationary-block transfer and the first
    # moveTo mutate any state; a2 then moves into this saved location
    oldA1Location = a1.spatialLocator
    self._transferStationaryBlocks(a1, a2)
    a1.moveTo(a2.spatialLocator)
    a2.moveTo(oldA1Location)
def _transferStationaryBlocks(self, assembly1, assembly2):
    """
    Exchange the stationary blocks (e.g. grid plate) between the moving assemblies.

    These blocks in effect are not moved at all: each stationary block ends up in
    the other assembly at the same axial index, so it stays at its core position
    when the assemblies are subsequently swapped.

    Raises
    ------
    ValueError
        If the two assemblies do not have stationary blocks at the same axial indices.
    """
    # grab stationary block flags
    sBFList = self.r.core.stationaryBlockFlagsList

    # identify stationary blocks for assembly 1, as (block, axial index) pairs
    a1StationaryBlocks = [
        [block, block.spatialLocator.k] for block in assembly1 if any(block.hasFlags(sbf) for sbf in sBFList)
    ]
    # identify stationary blocks for assembly 2
    a2StationaryBlocks = [
        [block, block.spatialLocator.k] for block in assembly2 if any(block.hasFlags(sbf) for sbf in sBFList)
    ]

    # check for any inconsistencies in stationary blocks and ensure alignment:
    # comparing the axial-index lists enforces both the same count and the same positions
    if [block[1] for block in a1StationaryBlocks] != [block[1] for block in a2StationaryBlocks]:
        raise ValueError(
            """Different number and/or locations of stationary blocks
            between {} (Stationary Blocks: {}) and {} (Stationary Blocks: {}).""".format(
                assembly1, a1StationaryBlocks, assembly2, a2StationaryBlocks
            )
        )

    if a1StationaryBlocks and a2StationaryBlocks:
        # a mismatch in top elevation is tolerated but flagged loudly
        if a1StationaryBlocks[-1][0].p.ztop != a2StationaryBlocks[-1][0].p.ztop:
            runLog.warning(
                """Difference in top elevation of stationary blocks
                between {} (Stationary Blocks: {}, Elevation at top of stationary blocks {})
                and {} (Stationary Blocks: {}, Elevation at top of stationary blocks {}))""".format(
                    assembly1,
                    a1StationaryBlocks,
                    a1StationaryBlocks[-1][0].p.ztop,
                    assembly2,
                    a2StationaryBlocks,
                    a2StationaryBlocks[-1][0].p.ztop,
                )
            )

    # swap stationary blocks pairwise; indices line up because the axial-index
    # lists were verified equal above
    for (assem1Block, assem1BlockIndex), (assem2Block, assem2BlockIndex) in zip(
        a1StationaryBlocks, a2StationaryBlocks
    ):
        # remove stationary blocks
        assembly1.remove(assem1Block)
        assembly2.remove(assem2Block)
        # insert stationary blocks
        assembly1.insert(assem1BlockIndex, assem2Block)
        assembly2.insert(assem2BlockIndex, assem1Block)
@staticmethod
def validateLoc(loc, cycle):
    """Validate a location label from a shuffle YAML file.

    The special discharge labels (see ``DISCHARGE_LOCS``) are always accepted;
    anything else must be parseable by the grid locator machinery.

    Parameters
    ----------
    loc : str
        Location label to validate.
    cycle : int
        Cycle currently being processed, used for context in error messages.

    Raises
    ------
    InputError
        If ``loc`` is neither a discharge label nor a parseable location label.
    """
    if loc not in FuelHandler.DISCHARGE_LOCS:
        try:
            grids.locatorLabelToIndices(loc)
        except Exception:
            raise InputError(
                f"Invalid location label {loc!r} in cycle {cycle} in shuffle YAML. "
                "Location labels must be non-empty and contain integers."
            )
def dischargeSwap(self, incoming, outgoing, toSfp=False):
    """Removes one assembly from the core and replace it with another assembly.

    .. impl:: User-specified blocks can be left in place for the discharge swap.
        :id: I_ARMI_SHUFFLE_STATIONARY1
        :implements: R_ARMI_SHUFFLE_STATIONARY

        Before assemblies are moved, the ``_transferStationaryBlocks`` class method is called to
        check if there are any block types specified by the user as stationary via the
        ``stationaryBlockFlags`` case setting. Using these flags, blocks are gathered from each
        assembly which should remain stationary and checked to make sure that both assemblies
        have the same number and same height of stationary blocks. If not, return an error.

        If all checks pass, the :py:meth:`~armi.reactor.assemblies.Assembly.remove` and
        :py:meth:`~armi.reactor.assemblies.Assembly.insert` methods are used to swap the
        stationary blocks between the two assemblies.

        Once this process is complete, the actual assembly movement can take place. Through this
        process, the stationary blocks from the outgoing assembly remain in the original core
        position, while the stationary blocks from the incoming assembly are discharged with the
        outgoing assembly.

    Parameters
    ----------
    incoming : :py:class:`Assembly `
        The assembly getting swapped into the core.
    outgoing : :py:class:`Assembly `
        The assembly getting discharged out the core.
    toSfp : bool, optional
        If True, store the discharged assembly in the SFP regardless of the
        ``trackAssems`` setting.

    See Also
    --------
    swapAssemblies : swaps assemblies that are already in the core
    """
    runLog.debug("Discharge swapping {} for {}.".format(incoming, outgoing))
    if incoming is None or outgoing is None:
        runLog.warning("Cannot discharge swap None assemblies. Check your findAssembly calls. Skipping")
        return

    # add assemblies into the moved location
    # keep it unique so we don't get artificially inflated numMoves
    for a in [incoming, outgoing]:
        if a not in self.moved:
            self.moved.append(a)

    self._transferStationaryBlocks(incoming, outgoing)

    # replace the goingOut guy. Capture its locator first: the incoming assembly
    # is added at this exact location at the end.
    loc = outgoing.spatialLocator
    # NOTE(review): an older revision recorded this removal at EOC of the previous
    # cycle by passing cycle-1; removeAssembly is no longer given that argument —
    # confirm the timing bookkeeping happens inside removeAssembly now.
    self.r.core.removeAssembly(outgoing, addToSFP=toSfp)

    # adjust the assembly multiplicity so that it does not forget how many it really
    # represents. This allows us to discharge an assembly from any location in
    # fractional-core models where the central location may only be one assembly,
    # whereas other locations are more, and keep proper track of things. In the
    # future, this mechanism may be used to handle symmetry in general.
    outgoing.p.multiplicity = len(loc.getSymmetricEquivalents()) + 1

    if self.r.excore.get("sfp") is not None:
        if incoming in self.r.excore["sfp"].getChildren():
            # pull it out of the sfp if it's in there.
            runLog.extra("removing {0} from the sfp".format(incoming))
            self.r.excore["sfp"].remove(incoming)

    # the incoming assembly represents exactly one assembly once it is in the core
    incoming.p.multiplicity = 1
    self.r.core.add(incoming, loc)
def swapCascade(self, assemList):
"""
Perform swaps on a list of assemblies.
Parameters
----------
assemList: list
A list of assemblies to be shuffled.
Notes
-----
[goingOut,inter1,inter2,goingIn] will go to
[inter1, inter2, goingIn, goingOut] in terms of positions
or, in ASCII art::
>---------------v
| |
[A <- B <- C <- D]
"""
# first check for duplicates
for assem in assemList:
if assemList.count(assem) != 1:
runLog.warning(f"{assem} is in the cascade more than once.")
# now swap
levels = len(assemList)
for level in range(levels - 1):
if not assemList[level + 1]:
runLog.info(
f"Skipping level {level + 1} in the cascade because it is None. Be careful, "
"this might cause an unexpected shuffling order."
)
continue
self.swapAssemblies(assemList[0], assemList[level + 1])
def performShuffle(self, shuffleFile, yaml=False):
    """
    Execute shuffling instructions from a previous run or YAML file.

    Parameters
    ----------
    shuffleFile : str
        Path to the shuffle sequence file.
    yaml : bool, optional
        If True, interpret ``shuffleFile`` as a YAML shuffle sequence.

    Returns
    -------
    moved : list
        List of assemblies that moved this cycle.

    Notes
    -----
    Typically the shuffle file from a previous run will be ``caseTitle``-"SHUFFLES.txt".

    See Also
    --------
    doRepeatShuffle : Performs moves as processed by this method
    processMoveList : Converts a stored list of moves into a functional list of assemblies to swap
    makeShuffleReport : Creates the file that is processed here
    """
    # read moves file
    cycle = self.r.p.cycle
    if cycle == 0:
        # if cycle is 0, we are at the beginning of the first cycle
        # this is a special case where we don't have any moves
        # so we return an empty list
        return []

    if yaml:
        moves, swaps = self.readMovesYaml(shuffleFile)
    else:
        # legacy text format has no swap/rotation info
        moves = self.readMoves(shuffleFile)
        swaps = {}

    # setup the load and loop chains to be run per cycle
    moveList = moves[cycle]
    swapList = swaps.get(cycle, [])
    moveData = self.processMoveList(moveList)

    # Now have the move locations; execute the cascades
    moved = self.doRepeatShuffle(
        moveData.loadChains,
        moveData.loopChains,
        moveData.enriches,
        moveData.loadChargeTypes,
        moveData.ringPosCycles,
        moveData.dischargeDests,
    )

    # Apply any swaps after performing cascades
    for loc1, loc2 in swapList:
        a1 = self.r.core.getAssemblyWithStringLocation(loc1)
        a2 = self.r.core.getAssemblyWithStringLocation(loc2)
        if a1 is None or a2 is None:
            runLog.warning(f"Could not perform swap between {loc1} and {loc2}")
            continue
        self.swapAssemblies(a1, a2)
        moved.extend([a1, a2])

    # queue rotations for outage() to apply after any rotation algorithm runs
    self.pendingRotations = moveData.rotations
    return moved
@staticmethod
def readMoves(fname):
r"""
Reads a shuffle output file and sets up the moves dictionary.
Parameters
----------
fname : str
The shuffles file to read
Returns
-------
moves : dict
A dictionary of all the moves. Keys are the cycle number. Values are a list
of :class:`~armi.physics.fuelCycle.fuelHandlers.AssemblyMove` objects, one for each individual
move that happened in the cycle. ``oldLoc`` and ``newLoc`` are string
representations of the locations and ``enrichList`` is a list of mass
enrichments from bottom to top.
See Also
--------
performShuffle : reads this file and executes the shuffling
outage : creates the moveList in the first place.
makeShuffleReport : writes the file that is read here.
"""
try:
f = open(fname)
except OSError:
raise RuntimeError(
"Could not find/open repeat shuffle file {} in working directory {}".format(fname, os.getcwd())
)
moves = {}
numMoves = 0
for line in f:
if "ycle " in line:
# Used to say "Cycle 1 at 0.0 years". Now says: "Before cycle 1 at 0.0 years" to be more specific.
# This RE allows backwards compatibility.
# Later, we removed the at x years
m = re.search(r"ycle (\d+)", line)
cycle = int(m.group(1))
moves[cycle] = []
elif "assembly" in line:
# this is the new load style where an actual assembly type is written to the shuffle logic
# due to legacy reasons, the assembly type will be put into group 4
pat = (
r"([A-Za-z0-9!\-]+) moved to ([A-Za-z0-9!\-]+) with assembly type "
+ r"([A-Za-z0-9!\s]+)\s*(ringPosCycle=\[.*\])?\s*with enrich list: (.+)"
)
m = re.search(pat, line)
if not m:
raise InputError('Failed to parse line "{0}" in shuffle file'.format(line))
oldLoc = m.group(1)
newLoc = m.group(2)
assemType = m.group(3).strip() # take off any possible trailing whitespace
ringPosCycle = m.group(4) # will be None for legacy shuffleLogic files. (pre 2013-08)
if ringPosCycle:
ringPosCycle = eval(ringPosCycle.split("=")[1]) # extract the assembly ring, position and cycle.
enrichList = [float(i) for i in m.group(5).split()]
moves[cycle].append(AssemblyMove(oldLoc, newLoc, enrichList, assemType, ringPosCycle))
numMoves += 1
elif "moved" in line:
# very old shuffleLogic file.
runLog.warning(
"Using old *.SHUFFLES.txt loading file",
single=True,
label="Using old shuffles file",
)
m = re.search(
"([A-Za-z0-9!]+) moved to ([A-Za-z0-9!]+) with enrich list: (.+)",
line,
)
if not m:
raise InputError('Failed to parse line "{0}" in shuffle file'.format(line))
oldLoc = m.group(1)
newLoc = m.group(2)
enrichList = [float(i) for i in m.group(3).split()]
# old loading style, just assume that there is a booster as our surrogate
moves[cycle].append(AssemblyMove(oldLoc, newLoc, enrichList))
numMoves += 1
f.close()
runLog.info("Read {0} moves over {1} cycles".format(numMoves, len(moves.keys())))
return moves
@staticmethod
def readMovesYaml(fname):
r"""
Read a shuffle file in YAML format.
A cascade with no explicit final location deletes the assembly
by default.
Parameters
----------
fname : str
Path to the YAML-formatted shuffle file.
Returns
-------
moves : dict
Mapping of cycle numbers to lists of
:class:`~armi.physics.fuelCycle.fuelHandlers.AssemblyMove` objects that
describe the shuffle sequence.
swaps : dict
Mapping of cycle numbers to lists of location-pair tuples describing
assemblies to be swapped.
"""
# 1. load YAML file
try:
with open(fname, "r") as stream:
yaml = YAML(typ="safe")
data = yaml.load(stream)
except DuplicateKeyError as e:
raise InputError(str(e)) from e
except OSError as ee:
raise RuntimeError(
f"Could not find/open repeat shuffle file {fname!r} in working directory {os.getcwd()}: {ee}"
) from ee
# 2. perform various validation tests on the YAML data
if "sequence" not in data:
raise InputError("Shuffle YAML missing required 'sequence' mapping")
moves = {}
swaps = defaultdict(list)
# cycles may be provided in any order; verify only that there are no gaps
cycleNums = {int(c) for c in data["sequence"].keys()}
if cycleNums:
expected = set(range(min(cycleNums), max(cycleNums) + 1))
missing = sorted(expected - cycleNums)
if missing:
if len(missing) == 1:
raise InputError(f"Missing cycle {missing[0]} in shuffle sequence")
raise InputError(f"Missing cycles {missing} in shuffle sequence")
# 3. parse YAML file into shuffle data
for cycleKey, actions in data["sequence"].items():
cycle = int(cycleKey)
moves[cycle] = []
seenLocs = set()
if actions is None and cycle != 0:
runLog.warning(f"Cycle {cycleKey} has no shuffle actions defined, skipping.")
continue
elif cycle == 0:
raise InputError(
"Cycle 0 is not allowed in shuffle YAML. "
"This cycle is reserved for the initial core loading."
"Shuffling is available at the beginning of cycle 1"
)
for action in actions:
allowed = {"cascade", "fuelEnrichment", "extraRotations", "swap", "ringPosCycle"}
unknown = set(action) - allowed
if unknown:
raise InputError(f"Unknown action keys {unknown} in shuffle YAML")
if "cascade" in action:
chain = list(action["cascade"])
if len(chain) < 2:
raise InputError("cascade must contain at least two entries")
if any(not isinstance(item, str) for item in chain):
raise InputError("cascade entries must be strings")
if chain[0] == "SFP":
# move an assembly from the SFP into the Core
assemType = None
locs = chain
if len(locs) < 2:
raise InputError("cascade starting with SFP must include a destination location")
else:
# move an assembly around the Core
assemType = chain[0]
locs = chain[1:]
if not locs:
raise InputError("cascade must contain at least one location after the assembly type")
for loc in locs:
FuelHandler.validateLoc(loc, cycle)
if loc not in FuelHandler.DISCHARGE_LOCS and loc in seenLocs:
raise InputError(f"Location {loc} appears in multiple cascades in cycle {cycle}")
seenLocs.add(loc)
enrich = []
enrichList = action.get("fuelEnrichment", [])
try:
enrich = [float(e) for e in enrichList]
except (TypeError, ValueError):
raise InputError("fuelEnrichment values must be numeric. Got {enrichList}")
if any(e < 0 or e > 1 for e in enrich):
raise InputError("fuelEnrichment values must be between 0 and 1. Got {enrich}")
ringPosCycle = action.get("ringPosCycle")
if locs[0] == "SFP":
if ringPosCycle is None:
raise InputError("ringPosCycle required when loading from SFP")
moves[cycle].append(AssemblyMove("SFP", locs[1], [], None, ringPosCycle))
startIdx = 1
else:
if ringPosCycle is not None:
raise InputError("ringPosCycle is only valid when loading from SFP")
moves[cycle].append(AssemblyMove("LoadQueue", locs[0], enrich, assemType))
startIdx = 0
for i in range(startIdx, len(locs) - 1):
moves[cycle].append(AssemblyMove(locs[i], locs[i + 1]))
if locs[-1] not in FuelHandler.DISCHARGE_LOCS:
moves[cycle].append(AssemblyMove(locs[-1], "Delete"))
elif "swap" in action:
swap = action["swap"]
if not isinstance(swap, list) or len(swap) != 2:
raise InputError("swap must be a list of two location labels, got {swap}")
if any(not isinstance(item, str) for item in swap):
raise InputError("swap entries must be strings, got {swap}")
for loc in swap:
FuelHandler.validateLoc(loc, cycle)
loc1, loc2 = swap
swaps[cycle].append((loc1, loc2))
elif "extraRotations" in action:
for loc, angle in action.get("extraRotations", {}).items():
FuelHandler.validateLoc(loc, cycle)
moves[cycle].append(AssemblyMove(loc, loc, rotation=float(angle)))
else:
raise InputError(f"Unable to process {action} in {cycle}")
return moves, dict(swaps)
    @staticmethod
    def trackChain(moveList, startingAt, alreadyDone=None):
        r"""
        Builds a chain of locations based on starting location.

        Notes
        -----
        Takes a moveList and extracts chains. Remembers all it touches.
        If A moved to B, C moved to D, and B moved to C, this returns
        A, B, C ,D.
        Used in some monte carlo physics writers and in performShuffle

        Parameters
        ----------
        moveList : list
            a list of :class:`~armi.physics.fuelCycle.fuelHandlers.AssemblyMove`
            objects that occurred at a single outage.
        startingAt : str
            A location label where the chain would start. This is important because the discharge
            moves are built when the SFP is found in a move. This method must find all
            assemblies in the chain leading up to this particular discharge.
        alreadyDone : list
            A list of locations that have already been tracked.

        Returns
        -------
        chain : list
            The chain as a location list in order
        enrich : list
            The axial enrichment distribution of the load assembly.
        assemType : str
            The type of the assembly
        loadName or ringPosCycle : [str, tuple[int, int, int]]
            The assembly name of the load assembly, or the ringPosHist identifier
        destination : str
            Location where the first assembly in the chain is discharged

        See Also
        --------
        performShuffle
        processMoveList
        """
        if alreadyDone is None:
            # avoid the mutable-default-argument trap; each call gets a fresh list
            alreadyDone = []

        enrich = None  # in case this is a load chain, prep for getting enrich.
        loadName = None
        assemType = None  # in case this is a load chain, prep for getting an assembly type
        destination = None
        for move in moveList:
            fromLoc = move.fromLoc
            toLoc = move.toLoc
            if toLoc in FuelHandler.DISCHARGE_LOCS and "LoadQueue" in fromLoc:
                # skip dummy moves
                continue
            elif (fromLoc, toLoc) in alreadyDone:
                # skip this pair
                continue
            elif startingAt in fromLoc:
                # looking for chain involving toLoc
                # back-track the chain of moves
                chain = [fromLoc]
                destination = toLoc
                safeCount = 0  # to break out of crazy loops.
                ringPosCycle = None
                complete = False
                # Walk backwards: repeatedly look for the move whose destination is the
                # last location appended, until we reach the charge point (LoadQueue or
                # a discharge location), close a non-charging loop, or hit the limit.
                while (
                    chain[-1] not in ({"LoadQueue"} | FuelHandler.DISCHARGE_LOCS) and not complete and safeCount < 100
                ):
                    # look for something going to where the previous one is from
                    lookingFor = chain[-1]
                    for innerMove in moveList:
                        cFromLoc = innerMove.fromLoc
                        cToLoc = innerMove.toLoc
                        cEnrichList = innerMove.enrichList
                        cAssemblyType = innerMove.assemType
                        cRingPosCycle = innerMove.ringPosCycle
                        if cToLoc == lookingFor:
                            chain.append(cFromLoc)
                            if cFromLoc in ({"LoadQueue"} | FuelHandler.DISCHARGE_LOCS):
                                # charge-discharge loop complete.
                                enrich = cEnrichList
                                ringPosCycle = cRingPosCycle
                                assemType = cAssemblyType
                            # break after finding the first predecessor to avoid duplicates
                            break
                    if chain[-1] == startingAt:
                        # non-charging loop complete
                        complete = True
                    safeCount += 1

                if not safeCount < 100:
                    # while-loop exited only because of the safety counter
                    raise RuntimeError("Chain tracking got too long. Check moves.\n{0}".format(chain))

                # delete the last item, it's loadqueue location or the startingFrom
                # location.
                chain.pop()
                # chain tracked. Can jump out of loop early.
                return chain, enrich, assemType, ringPosCycle, destination

        # if we get here, the startingAt location was not found.
        runLog.warning("No chain found starting at {0}".format(startingAt))
        return [], enrich, assemType, loadName, destination
def processMoveList(self, moveList) -> ProcessMoveListResult:
"""
Processes a move list and extracts fuel management loops and charges.
Parameters
----------
moveList : list
A list of :class:`~armi.physics.fuelCycle.fuelHandlers.AssemblyMove` objects describing each
move.
Returns
-------
ProcessMoveListResult
Structured information describing the move chains, enrichment
distributions, and other shuffle data. Attributes include:
loadChains : list[list[str]]
Moves that include discharges.
loopChains : list[list[str]]
Moves without discharges.
enriches : list[list[float]]
Axial enrichment distribution for each load assembly.
loadChargeTypes : list[Optional[str]]
Assembly types for each load chain.
loadNames : list[Optional[str]]
Assembly names of loads (e.g., from SFP).
dischargeDests : list[str]
Final destinations for discharged assemblies (e.g., ``SFP`` or ``Delete``).
rotations : list[tuple[str, float]]
Manual rotations to apply (location, degrees).
alreadyDone : list[str]
Locations already processed while tracking chains.
Notes
-----
Used in some Monte Carlo interfaces to convert ARMI moves to their format moves. Also used in
repeat shuffling.
See Also
--------
makeShuffleReport : writes the file that is being processed
performShuffle : uses this to repeat shuffles
"""
alreadyDone = []
loadChains = [] # moves that have discharges
loadChargeTypes = [] # the assembly types (str) to be used in a load chain.
loopChains = [] # moves that don't have discharges
enriches = [] # enrichments of each loadChain
ringPosCycles = [] # assembly ring, position, at cycle (to read from SFP)
dischargeDests = [] # final destinations for discharged assemblies
rotations = []
# first handle all charge/discharge chains by looking for things going to SFP/Delete
for move in moveList:
fromLoc = move.fromLoc
toLoc = move.toLoc
rot = move.rotation
if fromLoc == toLoc:
if rot is not None:
rotations.append((fromLoc, rot))
continue
if toLoc in self.DISCHARGE_LOCS and "LoadQueue" in fromLoc:
# skip dummy moves
continue
elif toLoc in self.DISCHARGE_LOCS:
# discharge. Track chain.
chain, enrichList, assemType, ringPosCycle, dest = FuelHandler.trackChain(moveList, startingAt=fromLoc)
runLog.extra("Load Chain with load assem {0}: {1}".format(assemType, chain))
loadChains.append(chain)
enriches.append(enrichList)
loadChargeTypes.append(assemType)
ringPosCycles.append(ringPosCycle)
dischargeDests.append(dest)
# track all the locations we saw already so we
# don't use them in the loop moves.
alreadyDone.extend(chain)
# go through again, looking for stuff that isn't in chains.
# put them in loop type 3 moves (arbitrary order)
for move in moveList:
fromLoc = move.fromLoc
toLoc = move.toLoc
if fromLoc == toLoc:
# rotation or no-op
continue
if toLoc in self.DISCHARGE_LOCS or fromLoc in ({"LoadQueue"} | self.DISCHARGE_LOCS):
# skip loads/discharges; they're already done.
continue
elif fromLoc in alreadyDone:
# skip repeats
continue
else:
# normal move
chain, _enrichList, _assemType, _loadAssemName, _dest = FuelHandler.trackChain(
moveList, startingAt=fromLoc
)
loopChains.append(chain)
alreadyDone.extend(chain)
runLog.extra("Loop Chain: {0}".format(chain))
return ProcessMoveListResult(
loadChains=loadChains,
loopChains=loopChains,
enriches=enriches,
loadChargeTypes=loadChargeTypes,
ringPosCycles=ringPosCycles,
dischargeDests=dischargeDests,
rotations=rotations,
alreadyDone=alreadyDone,
)
    def doRepeatShuffle(self, loadChains, loopChains, enriches, loadChargeTypes, ringPosCycles, dischargeDests):
        r"""
        Actually does the fuel movements required to repeat a shuffle order.

        Parameters
        ----------
        loadChains : list
            list of lists of location labels for each load chain (with charge/discharge)
        loopChains : list
            list of lists of location labels for each loop chain (no charge/discharge)
        enriches : list
            The block enrichment distribution of each load assembly
        loadChargeTypes : list
            The types of assemblies that get charged.
        ringPosCycles : list
            The ring, pos, and cycle of assemblies that get brought into the core (useful for pulling out
            of SFP for round 2, etc.)
        dischargeDests : list
            Final destination for each load chain (e.g., ``SFP`` or ``Delete``)

        Returns
        -------
        moved : list
            All assemblies that moved, including the freshly-charged ones.

        See Also
        --------
        performShuffle : coordinates the moves for this cycle
        processMoveList : builds the input lists

        Notes
        -----
        This is a helper function for performShuffle
        """
        moved = []
        # shuffle all of the load chain assemblies (These include discharges to SFP
        # and loads from Loadqueue)
        # build a lookup table of locations throughout the current core and cache it.
        locContents = self.r.core.makeLocationLookup(assemblyLevel=True)

        # perform load swaps (with charge/discharge)
        for assemblyChain, enrichList, assemblyType, ringPosCycle, dest in zip(
            loadChains, enriches, loadChargeTypes, ringPosCycles, dischargeDests
        ):
            # convert the labels into actual assemblies to be swapped
            assemblyList = self.r.core.getLocationContents(assemblyChain, assemblyLevel=True, locContents=locContents)
            moved.extend(assemblyList)
            # go through and swap the assemblies knowing that there is a discharge (first one)
            # and a new assembly brought it (last one).
            # i counts 0, -1, -2, ..., -(len-2): each step swaps an entry with its
            # predecessor, walking the chain backwards through the list.
            for i in range(0, -(len(assemblyList) - 1), -1):
                self.swapAssemblies(assemblyList[i], assemblyList[i - 1])
            # Now, everything has been set except the first assembly in the list, which must now be
            # replaced with a fresh assembly... but which one? The assemblyType string
            # tells us.
            # Sometimes enrichment is set on-the-fly by branch searches, so we must
            # not only use the proper assembly type but also adjust the enrichment.
            if ringPosCycle:
                # pull a specific historical assembly (e.g. back out of the SFP)
                ring, pos, cycle = ringPosCycle
                loadAssembly = self.r.core.getAssemblyWithRingPosHist(ring, pos, cycle)
                if not loadAssembly:
                    msg = f"The required assembly located at ring {ring} pos {pos} at cycle {cycle} is not found"
                    runLog.error(msg)
                    raise RuntimeError(msg)
            else:
                # create a new assembly from the BOL assem templates and adjust the enrichment
                loadAssembly = self.r.core.createAssemblyOfType(enrichList=enrichList, assemType=assemblyType)
            # replace the goingOut guy (for continual feed cases)
            runLog.debug("Calling discharge swap with {} and {}".format(loadAssembly, assemblyList[0]))
            self.dischargeSwap(loadAssembly, assemblyList[0], toSfp=(dest == "SFP"))
            moved.append(loadAssembly)

        # shuffle all of the loop chain assemblies (no charge/discharge)
        for assemblyChain in loopChains:
            # convert the labels into actual assemblies to be swapped
            assemblyList = self.r.core.getLocationContents(assemblyChain, assemblyLevel=True, locContents=locContents)
            for a in assemblyList:
                moved.append(a)
            # walk the loop chain; note this direction pairs each entry with its
            # successor (i + 1), unlike the predecessor pairing in the load chains.
            for i in range(0, -(len(assemblyList) - 1), -1):
                self.swapAssemblies(assemblyList[i], assemblyList[i + 1])

        return moved
def workerOperate(self, cmd):
"""Handle a mpi command on the worker nodes."""
pass
def prepShuffleMap(self):
"""Prepare a table of current locations for plotting shuffle maneuvers."""
self.oldLocations = {}
for a in self.r.core:
self.oldLocations[a.getName()] = a.spatialLocator.getGlobalCoordinates()
def makeShuffleArrows(self):
"""
Build data for plotting all the previous shuffles as arrows.
Returns
-------
arrows : list
Values are (currentCoords, oldCoords) tuples
"""
arrows = []
runLog.extra("Building list of shuffle arrows.")
for a in self.r.core:
currentCoords = a.spatialLocator.getGlobalCoordinates()
oldCoords = self.oldLocations.get(a.getName(), None)
if oldCoords is None:
oldCoords = np.array((-50, -50, 0))
elif any(currentCoords != oldCoords):
arrows.append((oldCoords, currentCoords))
return arrows
================================================
FILE: armi/physics/fuelCycle/hexAssemblyFuelMgmtUtils.py
================================================
# Copyright 2022 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This is a selection of fuel management utilities that seem generally useful enough to keep in ARMI, but they still only
apply to hex assembly reactors.
Notes
-----
We are keeping these in ARMI even if they appear unused internally.
"""
import math
import typing
import numpy as np
from armi import runLog
from armi.physics.fuelCycle.utils import maxBurnupBlock, maxBurnupLocator
from armi.utils.mathematics import findClosest
if typing.TYPE_CHECKING:
from armi.reactor.assemblies import HexAssembly
def getOptimalAssemblyOrientation(a: "HexAssembly", aPrev: "HexAssembly") -> int:
    """
    Get optimal hex assembly orientation/rotation to minimize peak burnup.

    The chosen rotation places the highest-burnup pin of ``a`` at whichever of its six
    possible rotated positions saw the lowest linear pin power in ``aPrev``, the
    assembly that previously occupied the destination location. Expected pin power is
    read from ``aPrev``'s ``linPowByPin`` block parameter; positions with no recorded
    power are treated as infinitely hot so they are never selected.

    This assumes ``linPowByPin[i]`` corresponds to ``getPinCoordinates()[i]``, that the
    fuel pin layout has at least 60-degree symmetry, and that pins in ``a`` occupy
    similar lattice sites to those in ``aPrev``.

    .. impl:: Provide an algorithm for rotating hexagonal assemblies to equalize burnup
        :id: I_ARMI_ROTATE_HEX_BURNUP
        :implements: R_ARMI_ROTATE_HEX_BURNUP

        This method will return a rotation such that the highest-burnup pin moves to the
        hex location with the lowest expected pin power. This rotation will be optimal
        in the sense that it will minimize peak burnup.

    Parameters
    ----------
    a : Assembly object
        The assembly that is being rotated.
    aPrev : Assembly object
        The assembly that previously occupied this location (before the last shuffle).
        If the assembly "a" was not shuffled, it's sufficient to pass ``a``.

    Returns
    -------
    int
        An integer from 0 to 5: the number of pi/3 (60 degree) counterclockwise
        rotations from the current orientation of ``a`` to the optimal orientation.

    Raises
    ------
    ValueError
        If there is insufficient information to determine the rotation of ``a``, e.g.,
        a missing spatial grid or inconsistent ``linPowByPin`` data.
    """
    maxBuBlock = maxBurnupBlock(a)
    if maxBuBlock.spatialGrid is None:
        msg = f"Block {maxBuBlock} in {a} does not have a spatial grid. Cannot rotate."
        runLog.error(msg)
        raise ValueError(msg)

    maxBuPinLocation = maxBurnupLocator(maxBuBlock)
    if maxBuPinLocation.i == 0 and maxBuPinLocation.j == 0:
        # the hottest pin sits at the center: rotation cannot move it, so don't rotate
        return 0

    # Use the block that previously sat at the destination location as the reference
    # for expected pin powers; if `a` wasn't shuffled, that's the max-burnup block itself.
    blockAtPreviousLocation = aPrev[a.index(maxBuBlock)] if aPrev is not a else maxBuBlock
    previousLocations = blockAtPreviousLocation.getPinLocations()
    previousPowers = blockAtPreviousLocation.p.linPowByPin
    if len(previousLocations) != len(previousPowers):
        msg = (
            f"Inconsistent pin powers and number of pins in {blockAtPreviousLocation}. "
            f"Found {len(previousLocations)} locations but {len(previousPowers)} powers."
        )
        runLog.error(msg)
        raise ValueError(msg)

    ringPowers = {(loc.i, loc.j): p for loc, p in zip(previousLocations, previousPowers)}
    targetGrid = blockAtPreviousLocation.spatialGrid

    def _expectedPower(rot: int) -> float:
        # previous power at the lattice site the hottest pin would occupy after
        # `rot` sixth-turns; unknown sites are treated as infinitely hot
        loc = maxBuPinLocation if rot == 0 else targetGrid.rotateIndex(maxBuPinLocation, rot)
        return ringPowers.get((loc.i, loc.j), math.inf)

    # min() keeps the first (smallest) rotation on ties, matching a strict-less-than search
    return min(range(6), key=_expectedPower)
def buildRingSchedule(
    maxRingInCore,
    chargeRing=None,
    dischargeRing=None,
    jumpRingFrom=None,
    jumpRingTo=None,
    coarseFactor=0.0,
):
    r"""
    Build a ring schedule for shuffling.

    Notes
    -----
    General enough to do convergent, divergent, or any combo, plus jumprings.

    The center of the core is ring 1, based on the DIF3D numbering scheme.

    Jump ring behavior can be generalized by first building a base ring list
    where assemblies get charged to H and discharge from A::

        [A, B, C, D, E, F, G, H]

    If a jump should be placed where it jumps from ring G to C, reversed back to F, and then discharges from A,
    we simply reverse the sublist [C,D,E,F], leaving us with::

        [A, B, F, E, D, C, G, H]

    A less-complex, more standard convergent-divergent scheme is a subcase of this, where the
    sublist [A,B,C,D,E] or so is reversed, leaving::

        [E, D, C, B, A, F, G, H]

    So the task of this function is simply to determine what subsection, if any, to reverse of
    the baselist.

    Parameters
    ----------
    maxRingInCore : int
        The number of rings in the hex assembly reactor.
    chargeRing : int, optional
        The peripheral ring into which an assembly enters the core. Default is outermost ring.
    dischargeRing : int, optional
        The last ring an assembly sits in before discharging. Default is jumpRing-1
    jumpRingFrom : int
        The last ring an assembly sits in before jumping to the center
    jumpRingTo : int, optional
        The inner ring into which a jumping assembly jumps. Default is 1.
    coarseFactor : float, optional
        A number between 0 and 1 where 0 hits all rings and 1 only hits the outer, rJ, center, and rD rings.
        This allows coarse shuffling, with large jumps. Default: 0

    Returns
    -------
    ringSchedule : list
        A list of rings in order from discharge to charge.
    ringWidths : list
        A list of integers corresponding to the ringSchedule determining the widths of each ring area

    Examples
    --------
    >>> f.buildRingSchedule(17, 1, jumpRingFrom=14)
    ([13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, 15, 16, 17],
    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    """
    # process arguments. Defaults must be resolved BEFORE the range checks below;
    # comparing None against an int would raise a TypeError.
    if dischargeRing is None:
        # No discharge ring given, so we default to converging from outside to inside
        # and therefore discharging from the center
        dischargeRing = 1
    if chargeRing is None:
        # Charge ring not specified. Since we default to convergent shuffling, we
        # must insert the fuel at the periphery.
        chargeRing = maxRingInCore

    # clamp out-of-core rings back to the outermost ring, with a warning
    if dischargeRing > maxRingInCore:
        runLog.warning(
            f"Discharge ring {dischargeRing} is outside the core (max {maxRingInCore}). Changing it to be the max ring"
        )
        dischargeRing = maxRingInCore
    if chargeRing > maxRingInCore:
        runLog.warning(
            f"Charge ring {chargeRing} is outside the core (max {maxRingInCore}). Changing it to be the max ring."
        )
        chargeRing = maxRingInCore

    if jumpRingFrom is not None and not (1 < jumpRingFrom < maxRingInCore):
        raise ValueError(f"JumpRingFrom {jumpRingFrom} is not in the core.")
    if jumpRingTo is not None and not (1 <= jumpRingTo < maxRingInCore):
        raise ValueError(f"JumpRingTo {jumpRingTo} is not in the core.")

    if chargeRing > dischargeRing and jumpRingTo is None:
        # a convergent shuffle with no jumping. By setting
        # jumpRingTo to be 1, no jumping will be activated
        # in the later logic.
        jumpRingTo = 1
    elif jumpRingTo is None:
        # divergent case. Disable jumpring by putting jumpring at periphery.
        jumpRingTo = maxRingInCore

    if chargeRing > dischargeRing and jumpRingFrom is not None and jumpRingFrom < jumpRingTo:
        raise RuntimeError("Cannot have outward jumps in convergent cases.")
    if chargeRing < dischargeRing and jumpRingFrom is not None and jumpRingFrom > jumpRingTo:
        raise RuntimeError("Cannot have inward jumps in divergent cases.")

    # step 1: build the base rings
    numSteps = int((abs(dischargeRing - chargeRing) + 1) * (1.0 - coarseFactor))
    # don't let it be smaller than 2 because linspace(1,5,1)= [1], linspace(1,5,2)= [1,5]
    numSteps = max(numSteps, 2)
    baseRings = [int(ring) for ring in np.linspace(dischargeRing, chargeRing, numSteps)]
    # eliminate duplicates.
    newBaseRings = []
    for br in baseRings:
        if br not in newBaseRings:
            newBaseRings.append(br)
    baseRings = newBaseRings

    # build widths
    widths = []
    for i, ring in enumerate(baseRings[:-1]):
        # 0 is the most restrictive, meaning don't even look in other rings.
        widths.append(abs(baseRings[i + 1] - ring) - 1)
    widths.append(0)  # add the last ring with width 0.

    # step 2: locate which rings should be reversed to give the jump-ring effect.
    if jumpRingFrom is not None:
        _closestRingFrom, jumpRingFromIndex = findClosest(baseRings, jumpRingFrom, indx=True)
        _closestRingTo, jumpRingToIndex = findClosest(baseRings, jumpRingTo, indx=True)
    else:
        jumpRingToIndex = 0

    # step 3: build the final ring list, potentially with a reversed section
    newBaseRings = []
    newWidths = []
    # add in the non-reversed section before the reversed section
    if jumpRingFrom is not None:
        newBaseRings.extend(baseRings[:jumpRingToIndex])
        newWidths.extend(widths[:jumpRingToIndex])
        # add in reversed section that is jumped
        newBaseRings.extend(reversed(baseRings[jumpRingToIndex:jumpRingFromIndex]))
        newWidths.extend(reversed(widths[jumpRingToIndex:jumpRingFromIndex]))
        # add the rest.
        newBaseRings.extend(baseRings[jumpRingFromIndex:])
        newWidths.extend(widths[jumpRingFromIndex:])
    else:
        # no jump section. Just fill in the rest.
        newBaseRings.extend(baseRings[jumpRingToIndex:])
        newWidths.extend(widths[jumpRingToIndex:])

    return newBaseRings, newWidths
def buildConvergentRingSchedule(chargeRing, dischargeRing=1, coarseFactor=0.0):
    r"""
    Builds a ring schedule for convergent shuffling from ``chargeRing`` to ``dischargeRing``.

    Parameters
    ----------
    chargeRing : int
        The peripheral ring into which an assembly enters the core. A good default is
        outermost ring: ``r.core.getNumRings()``.
    dischargeRing : int, optional
        The last ring an assembly sits in before discharging. If no discharge, this is the one that
        gets placed where the charge happens. Default: Innermost ring
    coarseFactor : float, optional
        A number between 0 and 1 where 0 hits all rings and 1 only hits the outer, rJ, center, and rD rings.
        This allows coarse shuffling, with large jumps. Default: 0

    Returns
    -------
    convergent : list
        A list of rings in order from discharge to charge.
    conWidths : list
        A list of integers corresponding to the ringSchedule determining the widths of each ring area
    """
    # Decide how many ring samples to take between discharge and charge.
    # Never fewer than 2: np.linspace(a, b, 1) would yield only [a].
    sampleCount = max(int((chargeRing - dischargeRing + 1) * (1.0 - coarseFactor)), 2)

    # Sample the ring span, truncate to ints, and deduplicate in ascending order.
    convergent = sorted({int(ring) for ring in np.linspace(dischargeRing, chargeRing, sampleCount)})

    # Each zone's width is the gap to the next sampled ring; the final zone has width 1.
    conWidths = [nxt - cur for cur, nxt in zip(convergent, convergent[1:])]
    conWidths.append(1)

    return convergent, conWidths
================================================
FILE: armi/physics/fuelCycle/settings.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Settings for generic fuel cycle code."""
import importlib.util
from armi.settings import setting, settingsValidation
# Setting-name constants for the generic fuel cycle settings constructed in
# getFuelCycleSettings() below. Reference these constants rather than the raw
# strings when reading/writing a case settings object.
CONF_ASSEM_ROTATION_STATIONARY = "assemblyRotationStationary"
CONF_ASSEMBLY_ROTATION_ALG = "assemblyRotationAlgorithm"
CONF_CIRCULAR_RING_MODE = "circularRingMode"
CONF_FUEL_HANDLER_NAME = "fuelHandlerName"
CONF_SHUFFLE_SEQUENCE_FILE = "shuffleSequenceFile"
CONF_JUMP_RING_NUM = "jumpRingNum"
CONF_LEVELS_PER_CASCADE = "levelsPerCascade"
CONF_PLOT_SHUFFLE_ARROWS = "plotShuffleArrows"
CONF_RUN_LATTICE_BEFORE_SHUFFLING = "runLatticePhysicsBeforeShuffling"
CONF_SHUFFLE_LOGIC = "shuffleLogic"
def getFuelCycleSettings():
    """Define settings for fuel cycle.

    Returns
    -------
    list of setting.Setting
        The setting objects this package contributes to the case settings.
    """
    settings = [
        setting.Setting(
            CONF_ASSEMBLY_ROTATION_ALG,
            default="",
            label="Assembly Rotation Algorithm",
            description="The algorithm to use to rotate the detail assemblies while shuffling",
            # Only these algorithms exist; reject anything else at input-validation time.
            options=["", "buReducingAssemblyRotation", "simpleAssemblyRotation"],
            enforcedOptions=True,
        ),
        setting.Setting(
            CONF_ASSEM_ROTATION_STATIONARY,
            default=False,
            label="Rotate stationary assems",
            # Fixed missing space after the period in this user-facing description.
            description=(
                "Whether or not to rotate assemblies that are not shuffled. This can only be True if 'rotation' is true."
            ),
        ),
        setting.Setting(
            CONF_CIRCULAR_RING_MODE,
            default=False,
            description="Toggle between circular ring definitions to hexagonal ring definitions",
            label="Use Circular Rings",
        ),
        setting.Setting(
            CONF_RUN_LATTICE_BEFORE_SHUFFLING,
            default=False,
            description=(
                "Forces the Generation of Cross Sections Prior to Shuffling the Fuel Assemblies. "
                "Note: This is recommended when performing equilibrium shuffling branching searches."
            ),
            label="Generate XS Prior to Fuel Shuffling",
        ),
        setting.Setting(
            CONF_SHUFFLE_LOGIC,
            default="",
            label="Shuffle Logic",
            description=(
                "Path to a Python script or dotted module path that handles the fuel shuffling "
                "for this case. This is user-defined per run as a dynamic input."
            ),
        ),
        setting.Setting(
            CONF_SHUFFLE_SEQUENCE_FILE,
            default="",
            label="Shuffle Sequence File",
            description="Path to a YAML file defining a custom shuffle sequence",
        ),
        setting.Setting(
            CONF_FUEL_HANDLER_NAME,
            default="",
            label="Fuel Handler Name",
            description="The name of the FuelHandler class in the shuffle logic module to activate",
        ),
        setting.Setting(
            CONF_PLOT_SHUFFLE_ARROWS,
            default=False,
            description="Make plots with arrows showing each move.",
            label="Plot shuffle arrows",
        ),
        setting.Setting(
            CONF_JUMP_RING_NUM,
            default=8,
            label="Jump Ring Number",
            description="The number of hex rings jumped when distributing the feed assemblies in "
            "the alternating concentric rings or checkerboard shuffle patterns (convergent / "
            "divergent shuffling).",
        ),
        setting.Setting(
            CONF_LEVELS_PER_CASCADE,
            default=14,
            label="Move per cascade",
            description="The number of moves made per cascade when performing convergent or "
            "divergent shuffle patterns.",
        ),
    ]
    return settings
def getFuelCycleSettingValidators(inspector):
    """Build the settings-inspection queries for fuel-cycle settings.

    Parameters
    ----------
    inspector : Inspector
        The settings inspector whose case settings (``inspector.cs``) are examined.

    Returns
    -------
    list of settingsValidation.Query
    """

    def _clearShufflingInput():
        # Corrective action: blank out both shuffle settings so the run proceeds
        # without attempting fuel shuffling.
        inspector._assignCS(CONF_SHUFFLE_LOGIC, "")
        inspector._assignCS(CONF_FUEL_HANDLER_NAME, "")

    # Either both the handler name and the shuffle logic must be given, or neither.
    handlerLogicPaired = settingsValidation.Query(
        lambda: bool(inspector.cs[CONF_SHUFFLE_LOGIC]) ^ bool(inspector.cs[CONF_FUEL_HANDLER_NAME]),
        "A value was provided for `fuelHandlerName` or `shuffleLogic`, but not "
        "the other. Either both `fuelHandlerName` and `shuffleLogic` should be "
        "defined, or neither of them.",
        "",
        inspector.NO_ACTION,
    )
    # Spaces in the shuffle logic path would break downstream path handling.
    noSpacesInLogic = settingsValidation.Query(
        lambda: " " in inspector.cs[CONF_SHUFFLE_LOGIC],
        "Spaces are not allowed in shuffleLogic file location. You have specified {0}. "
        "Shuffling will not occur.".format(inspector.cs[CONF_SHUFFLE_LOGIC]),
        "",
        inspector.NO_ACTION,
    )
    # A declared shuffle sequence file must actually exist relative to the case.
    sequenceFileFound = settingsValidation.Query(
        lambda: inspector.cs[CONF_SHUFFLE_SEQUENCE_FILE]
        and not inspector._csRelativePathExists(inspector.cs[CONF_SHUFFLE_SEQUENCE_FILE]),
        "The specified shuffle sequence file '{0}' cannot be found.".format(
            inspector.cs[CONF_SHUFFLE_SEQUENCE_FILE]
        ),
        "",
        inspector.NO_ACTION,
    )
    # The shuffle logic must resolve either as a file on disk or an importable module.
    logicResolvable = settingsValidation.Query(
        lambda: inspector.cs[CONF_SHUFFLE_LOGIC]
        and not inspector._csRelativePathExists(inspector.cs[CONF_SHUFFLE_LOGIC])
        and importlib.util.find_spec(inspector.cs[CONF_SHUFFLE_LOGIC]) is None,
        "The specified shuffle logic module or file '{0}' cannot be found. Shuffling will not occur.".format(
            inspector.cs[CONF_SHUFFLE_LOGIC]
        ),
        "Clear specified file value?",
        _clearShufflingInput,
    )
    return [handlerLogicPaired, noSpacesInLogic, sequenceFileFound, logicResolvable]
================================================
FILE: armi/physics/fuelCycle/tests/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
================================================
FILE: armi/physics/fuelCycle/tests/_customFuelHandlerModule.py
================================================
# Copyright 2025 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test utilities for :mod:`armi.physics.fuelCycle.fuelHandlerFactory`."""
class MockFileFuelHandler:
    """Stand-in handler exercised when shuffle logic is given as a file path."""

    def __init__(self, operator):
        # The factory hands the operator in; keep it so tests can assert on it.
        self.operator = operator
class MockModuleFuelHandler:
    """Stand-in handler exercised when shuffle logic is given as a dotted module path."""

    def __init__(self, operator):
        # Mirror the real FuelHandler construction signature: store the operator.
        self.operator = operator
================================================
FILE: armi/physics/fuelCycle/tests/test_assemblyRotationAlgorithms.py
================================================
# Copyright 2022 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for tools used to rotate hex assemblies.
Notes
-----
These algorithms are defined in assemblyRotationAlgorithms.py, but they are used in:
``FuelHandler.outage()``.
"""
import copy
import enum
import math
import typing
from unittest import TestCase, mock
import numpy as np
from armi.physics.fuelCycle import assemblyRotationAlgorithms as rotAlgos
from armi.physics.fuelCycle import fuelHandlers
from armi.physics.fuelCycle.hexAssemblyFuelMgmtUtils import (
getOptimalAssemblyOrientation,
)
from armi.physics.fuelCycle.settings import CONF_ASSEM_ROTATION_STATIONARY
from armi.reactor.assemblies import HexAssembly
from armi.reactor.blocks import HexBlock
from armi.reactor.flags import Flags
from armi.reactor.tests import test_reactors
class MockFuelHandler(fuelHandlers.FuelHandler):
    """Concrete FuelHandler whose swap-selection step is a deliberate no-op."""

    def chooseSwaps(self, *args, **kwargs):
        """Select no swaps; tests drive the ``moved`` list directly."""
        return None
class _PinLocations(enum.IntEnum):
    """Zero-indexed locations for specific points of interest.

    If a data vector has an entry to all ``self.N_PINS=169`` pins in the test model,
    then ``data[PIN_LOCATIONS.UPPER_RIGHT_VERTEX]`` will access the data for the pin
    along the upper right 60 symmetry line. Since we're dealing with rotations here, it
    does not need to literally be the pin at the vertex. Just along the symmetry line
    to help explain tests.

    The use case here is setting the pin or burnup array to be a constant value, but
    using a single max or minimum value to determine rotation.
    """

    # Index 0 is the central pin; indices 1-6 name positions along the six
    # 60-degree symmetry lines of the second hex ring (per the member names).
    CENTER = 0
    UPPER_RIGHT_VERTEX = 1
    UPPER_LEFT_VERTEX = 2
    DUE_LEFT_VERTEX = 3
    LOWER_LEFT_VERTEX = 4
    LOWER_RIGHT_VERTEX = 5
    DUE_RIGHT_VERTEX = 6
class ShuffleAndRotateTestHelper(TestCase):
    """Fixture class to assist in testing rotation of assemblies via the fuel handler."""

    # Number of pins in the test-model fuel assemblies.
    N_PINS = 169

    def setUp(self):
        self.o, self.r = test_reactors.loadTestReactor()
        self.r.core.locateAllAssemblies()

    @staticmethod
    def ensureBlockHasSpatialGrid(b: HexBlock):
        """If ``b`` does not have a spatial grid, auto create one."""
        if b.spatialGrid is not None:
            return
        # The auto grid builder needs a pin pitch; fake one for the test model.
        b.getPinPitch = mock.Mock(return_value=1.1)
        b.autoCreateSpatialGrids()

    def setAssemblyPinBurnups(self, a: HexAssembly, burnups: np.ndarray):
        """Prepare the assembly that will be shuffled and rotated."""
        peak = burnups.max()
        for block in a.getChildrenWithFlags(Flags.FUEL):
            self.ensureBlockHasSpatialGrid(block)
            block.p.percentBuPeak = peak
            for component in block.getChildrenWithFlags(Flags.FUEL):
                component.p.pinPercentBu = burnups

    def setAssemblyPinPowers(self, a: HexAssembly, pinPowers: np.ndarray):
        """Prep the assembly that existed at the site a shuffled assembly will occupy."""
        for block in a.getChildrenWithFlags(Flags.FUEL):
            self.ensureBlockHasSpatialGrid(block)
            block.p.linPowByPin = pinPowers

    def powerWithMinValue(self, minIndex: int) -> np.ndarray:
        """Return a pin-power vector of ones with a zero at ``minIndex``."""
        powers = np.ones(self.N_PINS)
        powers[minIndex] = 0
        return powers

    def burnupWithMaxValue(self, maxIndex: int) -> np.ndarray:
        """Return a pin-burnup vector of zeros with a 50 at ``maxIndex``."""
        burnups = np.zeros(self.N_PINS)
        burnups[maxIndex] = 50
        return burnups

    def compareMockedToExpectedRotation(self, nRotations: int, mRotate: mock.Mock, msg: typing.Optional[str] = None):
        """Assert the mocked ``rotate`` received ``nRotations`` 60-degree turns in radians."""
        (gotRadians,) = mRotate.call_args.args
        self.assertAlmostEqual(gotRadians, nRotations * math.pi / 3, msg=msg)
class TestOptimalAssemblyRotation(ShuffleAndRotateTestHelper):
    """Test the burnup dependent assembly rotation methods."""

    def setUp(self):
        super().setUp()
        self.assembly: HexAssembly = self.r.core.getFirstAssembly(Flags.FUEL)

    def test_flatPowerNoRotation(self):
        """If all pin powers are identical, no rotation is suggested."""
        burnups = self.burnupWithMaxValue(_PinLocations.UPPER_LEFT_VERTEX)
        self.setAssemblyPinBurnups(self.assembly, burnups)
        # Uniform powers: every orientation is equally good, so expect zero.
        self.setAssemblyPinPowers(self.assembly, np.ones_like(burnups))
        self.assertEqual(getOptimalAssemblyOrientation(self.assembly, self.assembly), 0)

    def test_maxBurnupAtCenterNoRotation(self):
        """If max burnup pin is at the center, no rotation is suggested."""
        burnups = self.burnupWithMaxValue(_PinLocations.CENTER)
        self.setAssemblyPinBurnups(self.assembly, burnups)
        # The center pin is rotation-invariant, so expect zero rotations.
        self.setAssemblyPinPowers(self.assembly, np.zeros_like(burnups))
        self.assertEqual(getOptimalAssemblyOrientation(self.assembly, self.assembly), 0)

    def test_oppositeRotation(self):
        """Test a 180 degree rotation is suggested when the max burnup pin is opposite the lowest power pin.

        Use the second ring of the hexagon because it's easier to write out pin locations
        and check work.

        .. test:: Test the burnup equalizing rotation algorithm.
            :id: T_ARMI_ROTATE_HEX_BURNUP
            :tests: R_ARMI_ROTATE_HEX_BURNUP
            :acceptance_criteria: After rotating a hexagonal assembly, confirm the pin with the highest burnup is
                in the same sector as pin with the lowest power in the high burnup pin's ring.

        Notes
        -----
        Use zero-indexed pin location not pin ID to assign burnups and powers. Since
        we have a single component, ``Block.p.linPowByPin[i] <-> Component.p.pinPercentBu[i]``
        """
        shuffledAssembly = self.assembly
        previousAssembly = copy.deepcopy(shuffledAssembly)
        # Each pair is (max-burnup vertex, diametrically opposite min-power vertex).
        maxMinPairs = [
            (_PinLocations.DUE_RIGHT_VERTEX, _PinLocations.DUE_LEFT_VERTEX),
            (_PinLocations.UPPER_LEFT_VERTEX, _PinLocations.LOWER_RIGHT_VERTEX),
            (_PinLocations.UPPER_RIGHT_VERTEX, _PinLocations.LOWER_LEFT_VERTEX),
            (_PinLocations.DUE_LEFT_VERTEX, _PinLocations.DUE_RIGHT_VERTEX),
            (_PinLocations.LOWER_RIGHT_VERTEX, _PinLocations.UPPER_LEFT_VERTEX),
            (_PinLocations.LOWER_LEFT_VERTEX, _PinLocations.UPPER_RIGHT_VERTEX),
        ]
        for startPin, oppositePin in maxMinPairs:
            self.setAssemblyPinBurnups(shuffledAssembly, self.burnupWithMaxValue(startPin))
            self.setAssemblyPinPowers(previousAssembly, self.powerWithMinValue(oppositePin))
            # 180 degrees is three 60 degree rotations
            self.assertEqual(
                getOptimalAssemblyOrientation(shuffledAssembly, previousAssembly),
                3,
                msg=f"{startPin=} :: {oppositePin=}",
            )

    def test_noBlocksWithBurnup(self):
        """Require at least one block to have burnup."""
        with self.assertRaisesRegex(ValueError, "Error finding max burnup"):
            getOptimalAssemblyOrientation(self.assembly, self.assembly)

    def test_mismatchPinPowersAndLocations(self):
        """Require pin powers and locations to be have the same length."""
        # One more power entry than there are pins: should be rejected.
        self.setAssemblyPinBurnups(self.assembly, np.arange(self.N_PINS))
        self.setAssemblyPinPowers(self.assembly, np.arange(self.N_PINS + 1))
        with self.assertRaisesRegex(ValueError, "Inconsistent pin powers and number of pins"):
            getOptimalAssemblyOrientation(self.assembly, self.assembly)
class TestFuelHandlerMgmtTools(ShuffleAndRotateTestHelper):
    """Exercise the rotation pathways of ``FuelHandler.outage``."""

    def test_buRotationWithFreshFeed(self):
        """Test that rotation works if a new assembly is swapped with fresh fuel.

        Fresh feed assemblies will not exist in the reactor, and various checks that
        try to the "previous" assembly's location can fail.
        """
        newSettings = {
            "fluxRecon": True,
            "assemblyRotationAlgorithm": "buReducingAssemblyRotation",
        }
        self.o.cs = self.o.cs.modified(newSettings=newSettings)
        # A fresh feed comes from the load queue, not from a core location.
        fresh = self.r.core.createFreshFeed(self.o.cs)
        self.assertEqual(fresh.lastLocationLabel, HexAssembly.LOAD_QUEUE)
        fh = MockFuelHandler(self.o)
        fh.chooseSwaps = mock.Mock(side_effect=lambda _: fh.moved.append(fresh))
        # Patch the rotation routine so we can observe whether it was invoked.
        with mock.patch(
            "armi.physics.fuelCycle.assemblyRotationAlgorithms.getOptimalAssemblyOrientation",
        ) as p:
            fh.outage()
        # The only moved assembly was most recently outside the core so we have no need to rotate
        # Make sure our fake chooseSwaps added the fresh assembly to the moved assemblies
        fh.chooseSwaps.assert_called_once()
        p.assert_not_called()

    def test_buRotationWithStationaryRotation(self):
        """Test that the burnup equalizing rotation algorithm works on non-shuffled assemblies."""
        newSettings = {
            CONF_ASSEM_ROTATION_STATIONARY: True,
            "fluxRecon": True,
            "assemblyRotationAlgorithm": "buReducingAssemblyRotation",
        }
        self.o.cs = self.o.cs.modified(newSettings=newSettings)
        # Grab two assemblies that were not moved. One of which will have the detailed information
        # needed for rotation
        detailedAssem, coarseAssem = self.o.r.core.getChildrenWithFlags(Flags.FUEL)[:2]
        self.setAssemblyPinBurnups(detailedAssem, burnups=np.arange(self.N_PINS))
        self.setAssemblyPinPowers(detailedAssem, pinPowers=np.arange(self.N_PINS))
        detailedAssem.rotate = mock.Mock()
        coarseAssem.rotate = mock.Mock()
        fh = MockFuelHandler(self.o)
        # Force the algorithm to report five 60-degree rotations.
        with mock.patch(
            "armi.physics.fuelCycle.assemblyRotationAlgorithms.getOptimalAssemblyOrientation",
            return_value=5,
        ) as p:
            fh.outage()
        # Stationary rotation compares the assembly against itself.
        p.assert_called_once_with(detailedAssem, detailedAssem)
        # Assembly with detailed pin powers and pin burnups will be rotated
        detailedAssem.rotate.assert_called_once()
        self.compareMockedToExpectedRotation(5, detailedAssem.rotate)
        # Assembly without pin level data will not be rotated
        coarseAssem.rotate.assert_not_called()

    def test_rotateInShuffleQueue(self):
        """Test for expected behavior when multiple assemblies are shuffled and rotated in one outage.

        Examine the behavior of three assemblies: ``first -> second -> third``

        1. ``first`` is moved to the location of ``second`` and rotated by comparing
           ``first`` burnup against ``second`` pin powers.
        2. ``second`` is moved to the location of ``third`` and rotated by comparing
           ``second`` burnup against ``third`` pin powers.

        where:

        * ``first`` burnup is maximized in the upper left direction.
        * ``second`` pin power is minimized along the lower left direction.
        * ``second`` burnup is maximized in the upper right direction.
        * ``third`` pin power is minimized in the direct right direction.

        We should expect:

        1. ``first`` is rotated from upper left to lower left => two 60 degree CCW rotations.
        2. ``second`` is rotated from upper right to direct right => five 60 degree CCW rotations.
        """
        newSettings = {
            CONF_ASSEM_ROTATION_STATIONARY: False,
            "fluxRecon": True,
            "assemblyRotationAlgorithm": "buReducingAssemblyRotation",
        }
        self.o.cs = self.o.cs.modified(newSettings=newSettings)
        first, second, third = self.r.core.getChildrenWithFlags(Flags.FUEL)[:3]
        firstBurnups = self.burnupWithMaxValue(_PinLocations.UPPER_LEFT_VERTEX)
        self.setAssemblyPinBurnups(first, firstBurnups)
        secondPowers = self.powerWithMinValue(_PinLocations.LOWER_LEFT_VERTEX)
        self.setAssemblyPinPowers(second, pinPowers=secondPowers)
        secondBurnups = self.burnupWithMaxValue(_PinLocations.UPPER_RIGHT_VERTEX)
        self.setAssemblyPinBurnups(second, burnups=secondBurnups)
        thirdPowers = self.powerWithMinValue(_PinLocations.DUE_RIGHT_VERTEX)
        self.setAssemblyPinPowers(third, thirdPowers)
        # Set the shuffling sequence
        # first -> second
        # second -> third
        second.lastLocationLabel = first.getLocation()
        third.lastLocationLabel = second.getLocation()
        first.rotate = mock.Mock()
        second.rotate = mock.Mock()
        third.rotate = mock.Mock()
        fh = MockFuelHandler(self.o)
        fh.chooseSwaps = mock.Mock(side_effect=lambda _: fh.moved.extend([second, third]))
        fh.outage()
        first.rotate.assert_called_once()
        self.compareMockedToExpectedRotation(2, first.rotate, "First")
        second.rotate.assert_called_once()
        self.compareMockedToExpectedRotation(5, second.rotate, "Second")
        # ``third`` was not displaced by anything, so it is never rotated.
        third.rotate.assert_not_called()
class SimpleRotationTests(ShuffleAndRotateTestHelper):
    """Test the simple rotation where assemblies are rotated a fixed amount."""

    def test_simpleAssemblyRotation(self):
        """Test rotating assemblies 120 degrees with two rotation events."""
        handler = fuelHandlers.FuelHandler(self.o)
        self.o.cs = self.o.cs.modified(newSettings={CONF_ASSEM_ROTATION_STATIONARY: True})
        hist = self.o.getInterface("history")
        # Mark a handful of fuel assemblies for detailed history tracking.
        for a in hist.o.r.core.getAssemblies(Flags.FUEL)[:5]:
            hist.detailAssemblyNames.append(a.getName())
        b = self.o.r.core.getFirstBlock(Flags.FUEL)
        startingRotation = b.getRotationNum()
        # Two rotation events of 60 degrees each advance the rotation index by 2.
        for _ in range(2):
            rotAlgos.simpleAssemblyRotation(handler)
        self.assertEqual(b.getRotationNum(), startingRotation + 2)
================================================
FILE: armi/physics/fuelCycle/tests/test_fuelHandlerFactory.py
================================================
# Copyright 2025 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for :mod:`armi.physics.fuelCycle.fuelHandlerFactory`."""
import unittest
from pathlib import Path
from armi.physics.fuelCycle import fuelHandlerFactory
from armi.physics.fuelCycle.settings import CONF_FUEL_HANDLER_NAME, CONF_SHUFFLE_LOGIC
from armi.physics.fuelCycle.tests import _customFuelHandlerModule
class _DummySettings(dict):
"""Minimal stand-in for :class:`armi.settings.Settings`."""
class _DummyOperator:
"""Operator stub that only exposes the settings object."""
def __init__(self, settings):
self.cs = settings
class FuelHandlerFactoryTests(unittest.TestCase):
    """Exercise the custom module import logic."""

    def setUp(self):
        # Anchor the dummy settings at the repository root so relative paths resolve.
        self.inputDirectory = Path(__file__).resolve().parents[3]
        self.settings = _DummySettings()
        self.settings.inputDirectory = str(self.inputDirectory)
        self.operator = _DummyOperator(self.settings)

    def test_filePath(self):
        """Custom handlers can still be loaded from explicit file paths."""
        logicFile = Path(__file__).resolve().with_name("_customFuelHandlerModule.py")
        self.settings[CONF_FUEL_HANDLER_NAME] = "MockFileFuelHandler"
        self.settings[CONF_SHUFFLE_LOGIC] = str(logicFile)
        handler = fuelHandlerFactory.fuelHandlerFactory(self.operator)
        self.assertEqual(type(handler).__name__, "MockFileFuelHandler")

    def test_modulePath(self):
        """Module-style paths are imported using :mod:`importlib`."""
        self.settings[CONF_FUEL_HANDLER_NAME] = "MockModuleFuelHandler"
        self.settings[CONF_SHUFFLE_LOGIC] = "armi.physics.fuelCycle.tests._customFuelHandlerModule"
        handler = fuelHandlerFactory.fuelHandlerFactory(self.operator)
        self.assertIsInstance(handler, _customFuelHandlerModule.MockModuleFuelHandler)
================================================
FILE: armi/physics/fuelCycle/tests/test_fuelHandlers.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests some capabilities of the fuel handling machine.
This test is high enough level that it requires input files to be present. The ones to use
are called armiRun.yaml which is located in armi.tests
"""
import collections
import copy
import os
import tempfile
import unittest
from unittest.mock import PropertyMock, patch
import numpy as np
from armi.physics.fuelCycle import fuelHandlers, settings
from armi.physics.fuelCycle.fuelHandlers import AssemblyMove
from armi.physics.fuelCycle.settings import (
CONF_ASSEM_ROTATION_STATIONARY,
CONF_ASSEMBLY_ROTATION_ALG,
CONF_PLOT_SHUFFLE_ARROWS,
CONF_RUN_LATTICE_BEFORE_SHUFFLING,
CONF_SHUFFLE_SEQUENCE_FILE,
)
from armi.physics.neutronics.crossSectionGroupManager import CrossSectionGroupManager
from armi.physics.neutronics.latticePhysics.latticePhysicsInterface import (
LatticePhysicsInterface,
)
from armi.reactor import assemblies, blocks, components, grids
from armi.reactor.flags import Flags
from armi.reactor.parameters import ParamLocation
from armi.reactor.tests import test_reactors
from armi.reactor.zones import Zone
from armi.settings import caseSettings
from armi.settings.fwSettings.globalSettings import CONF_TRACK_ASSEMS
from armi.testing import TESTING_ROOT
from armi.tests import TEST_ROOT, ArmiTestHelper, mockRunLogs
from armi.utils import directoryChangers
from armi.utils.customExceptions import InputError
class TestReadMovesYamlErrors(unittest.TestCase):
    """Ensure malformed YAML inputs raise informative ``InputError``."""

    def _run(self, text):
        """Write ``text`` to a temporary YAML file, parse it, and always clean up."""
        handle = tempfile.NamedTemporaryFile("w", suffix=".yaml", delete=False)
        with handle as stream:
            stream.write(text)
        try:
            fuelHandlers.FuelHandler.readMovesYaml(handle.name)
        finally:
            os.remove(handle.name)

    def test_missingSequence(self):
        with self.assertRaisesRegex(InputError, "sequence"):
            self._run("foo: []\n")

    def test_duplicateCycle(self):
        with self.assertRaisesRegex(InputError, r"(?i)\bduplicate key\b"):
            self._run("sequence:\n 1: []\n 1: []\n")

    def test_unknownActionKey(self):
        with self.assertRaisesRegex(InputError, "Unknown action"):
            self._run("sequence:\n 1:\n - badAction: []\n")

    def test_badCascade(self):
        # Cascades with too few entries or non-string members are rejected.
        badCascades = [
            "sequence:\n 1:\n - cascade: ['only']\n",
            "sequence:\n 1:\n - cascade: ['outer fuel', 1]\n",
        ]
        for yamlText in badCascades:
            with self.subTest(yaml_text=yamlText):
                with self.assertRaisesRegex(InputError, "cascade"):
                    self._run(yamlText)

    def test_badSwap(self):
        with self.assertRaisesRegex(InputError, "swap"):
            self._run("sequence:\n 1:\n - swap: ['009-045']\n")

    def test_badFuelEnrichment(self):
        # Enrichments must be numeric and within [0, 100] percent.
        badEnrichments = [
            """sequence:\n 1:\n - cascade: ['outer fuel', '009-045']\n fuelEnrichment: ['a']\n""",
            """sequence:\n 1:\n - cascade: ['outer fuel', '009-045']\n fuelEnrichment: [-1]\n""",
            """sequence:\n 1:\n - cascade: ['outer fuel', '009-045']\n fuelEnrichment: [101]\n""",
        ]
        for yamlText in badEnrichments:
            with self.subTest(yaml_text=yamlText):
                with self.assertRaisesRegex(InputError, "fuelEnrichment"):
                    self._run(yamlText)

    def test_rotationInvalidLocation(self):
        with self.assertRaisesRegex(InputError, "Invalid location"):
            self._run("sequence:\n 1:\n - extraRotations: {'badLoc': 30}\n")

    def test_duplicateCascadeLocation(self):
        # The same charge location may not start two cascades in one cycle.
        with self.assertRaisesRegex(InputError, "009-045"):
            self._run(
                "sequence:\n 1:\n - cascade: ['outer', '009-045', '008-001']\n"
                " - cascade: ['outer', '009-045', '007-002']\n"
            )

    def test_invalidCascadeLocation(self):
        with self.assertRaisesRegex(InputError, "Invalid location"):
            self._run("sequence:\n 1:\n - cascade: ['outer', 'badLoc']\n")

    def test_missingCycle(self):
        with self.assertRaisesRegex(InputError, "Missing cycle 2"):
            self._run("sequence:\n 1: []\n 3: []\n")
class TestReadMovesYamlFeatures(unittest.TestCase):
    """Miscellaneous behavior of :meth:`FuelHandler.readMovesYaml`."""

    def _read(self, text):
        """Parse ``text`` from a temporary YAML file and return the moves mapping."""
        handle = tempfile.NamedTemporaryFile("w", suffix=".yaml", delete=False)
        with handle as stream:
            stream.write(text)
        try:
            moves, _ = fuelHandlers.FuelHandler.readMovesYaml(handle.name)
        finally:
            os.remove(handle.name)
        return moves

    def test_cyclesOutOfOrder(self):
        # Cycle keys keep their file order even when numerically unsorted.
        moves = self._read("sequence:\n 1: []\n 2: []\n 4: []\n 3: []\n")
        self.assertEqual(list(moves), [1, 2, 4, 3])
class FuelHandlerTestHelper(ArmiTestHelper):
    """Shared fixture building a test reactor plus a standalone dummy assembly."""

    @classmethod
    def setUpClass(cls):
        # prepare the input files. This is important so the unit tests run from wherever
        # they need to run from.
        cls.directoryChanger = directoryChangers.DirectoryChanger(TEST_ROOT, dumpOnException=False)
        cls.directoryChanger.open()

    @classmethod
    def tearDownClass(cls):
        cls.directoryChanger.close()

    def setUp(self):
        """
        Build a dummy reactor without using input files.

        There are some igniters and feeds but none of these have any number densities.
        """
        self.o, self.r = test_reactors.loadTestReactor(
            self.directoryChanger.destination,
            customSettings={"nCycles": 4, "trackAssems": True},
        )
        # Spread fake burnup across fuel blocks so parameter-based searches have
        # something to discriminate on.
        allBlocks = self.r.core.getBlocks()
        fakeBu = 30.0 / len(allBlocks)
        for bi, b in enumerate(allBlocks):
            b.p.flux = 5e10
            if b.isFuel():
                b.p.percentBu = fakeBu * bi
        # Baseline counts used by tests to verify shuffling bookkeeping.
        self.nfeed = len(self.r.core.getAssemblies(Flags.FEED))
        self.nigniter = len(self.r.core.getAssemblies(Flags.IGNITER))
        self.nSfp = len(self.r.excore["sfp"])
        # generate a reactor with assemblies
        # generate components with materials
        nPins = 271
        fuelDims = {"Tinput": 273.0, "Thot": 273.0, "od": 1.0, "id": 0.0, "mult": nPins}
        fuel = components.Circle("fuel", "UZr", **fuelDims)
        cladDims = {"Tinput": 273.0, "Thot": 273.0, "od": 1.1, "id": 1.0, "mult": nPins}
        clad = components.Circle("clad", "HT9", **cladDims)
        interDims = {
            "Tinput": 273.0,
            "Thot": 273.0,
            "op": 16.8,
            "ip": 16.0,
            "mult": 1.0,
        }
        interSodium = components.Hexagon("interCoolant", "Sodium", **interDims)
        # generate a block
        self.block = blocks.HexBlock("TestHexBlock")
        self.block.setType("fuel")
        self.block.setHeight(10.0)
        self.block.add(fuel)
        self.block.add(clad)
        self.block.add(interSodium)
        # generate an assembly
        self.assembly = assemblies.HexAssembly("TestAssemblyType")
        self.assembly.spatialGrid = grids.AxialGrid.fromNCells(1)
        for _ in range(1):
            self.assembly.add(copy.deepcopy(self.block))
        # copy the assembly to make a list of assemblies and have a reference assembly
        self.aList = []
        for _ in range(6):
            self.aList.append(copy.deepcopy(self.assembly))
        self.refAssembly = copy.deepcopy(self.assembly)
        self.directoryChanger.open()
        self.r.core.locateAllAssemblies()

    def tearDown(self):
        # clean up the test
        self.block = None
        self.assembly = None
        self.aList = None
        self.refAssembly = None
        self.r = None
        self.o = None
        self.directoryChanger.close()
class MockLatticePhysicsInterface(LatticePhysicsInterface):
    """A mock lattice physics interface that does nothing for interactBOC."""

    name = "MockLatticePhysicsInterface"

    def _getExecutablePath(self):
        # Dummy path; the executable is never actually launched in these tests.
        return "/mock/"

    def interactBOC(self, cycle=None):
        """Skip lattice physics work at beginning of cycle."""
        return None
class MockXSGM(CrossSectionGroupManager):
    """A mock cross section group manager that does nothing for interactBOC."""

    def interactBOC(self, cycle=None):
        """Skip cross-section group management at beginning of cycle."""
        return None
class TestFuelHandler(FuelHandlerTestHelper):
@patch("armi.reactor.assemblies.Assembly.getSymmetryFactor")
def test_getParamMax(self, mockGetSymmetry):
a = self.assembly
mockGetSymmetry.return_value = 1
expectedValue = 0.5
a.p["kInf"] = expectedValue
for b in a:
b.p["kInf"] = expectedValue
with patch(
"armi.reactor.parameters.parameterDefinitions.Parameter.location", new_callable=PropertyMock
) as mock_assemblyParameterLocation:
mock_assemblyParameterLocation.return_value = ParamLocation.VOLUME_INTEGRATED
# symmetry factor == 1
res = fuelHandlers.FuelHandler._getParamMax(a, "kInf", True)
self.assertEqual(res, expectedValue)
res = fuelHandlers.FuelHandler._getParamMax(a, "kInf", False)
self.assertEqual(res, expectedValue)
# symmetry factor == 3
mockGetSymmetry.return_value = 3
res = fuelHandlers.FuelHandler._getParamMax(a, "kInf", True)
self.assertAlmostEqual(res, expectedValue * 3)
res = fuelHandlers.FuelHandler._getParamMax(a, "kInf", False)
self.assertAlmostEqual(res, expectedValue * 3)
# not volume integrated and symmetry factor == 3
mock_assemblyParameterLocation.return_value = ParamLocation.AVERAGE
res = fuelHandlers.FuelHandler._getParamMax(a, "kInf", True)
self.assertEqual(res, expectedValue)
res = fuelHandlers.FuelHandler._getParamMax(a, "kInf", False)
self.assertEqual(res, expectedValue)
def test_interactBOC(self):
# set up mock interface
self.o.addInterface(MockLatticePhysicsInterface(self.r, self.o.cs))
self.o.removeInterface(interfaceName="xsGroups")
self.o.addInterface(MockXSGM(self.r, self.o.cs))
# adjust case settings
self.o.cs[CONF_RUN_LATTICE_BEFORE_SHUFFLING] = True
# run fhi.interactBOC
fhi = self.o.getInterface("fuelHandler")
with mockRunLogs.BufferLog() as mock:
fhi.interactBOC()
self.assertIn("lattice physics before fuel management due to the", mock._outputStream)
def test_findHighBu(self):
loc = self.r.core.spatialGrid.getLocatorFromRingAndPos(5, 4)
a = self.r.core.childrenByLocator[loc]
# set burnup way over 1.0, which is otherwise the highest bu in the core
a[0].p.percentBu = 50
fh = fuelHandlers.FuelHandler(self.o)
a1 = fh.findAssembly(param="percentBu", compareTo=100, blockLevelMax=True, typeSpec=None)
self.assertIs(a, a1)
@patch("armi.physics.fuelCycle.fuelHandlers.FuelHandler.chooseSwaps")
def test_outage(self, mockChooseSwaps):
# mock up a fuel handler
fh = fuelHandlers.FuelHandler(self.o)
mockChooseSwaps.return_value = list(self.r.core.getAssemblies())
# edge case: cannot perform two outages on the same FuelHandler
fh.moved = [self.r.core.getFirstAssembly()]
with self.assertRaises(ValueError):
fh.outage(factor=1.0)
# edge case: fail if the shuffle file is missing
fh.moved = []
self.o.cs = self.o.cs.modified(newSettings={"explicitRepeatShuffles": "fakePath"})
with self.assertRaises(RuntimeError):
fh.outage(factor=1.0)
# a successful run
fh.moved = []
self.o.cs = self.o.cs.modified(
newSettings={
"explicitRepeatShuffles": "",
"fluxRecon": True,
CONF_ASSEMBLY_ROTATION_ALG: "simpleAssemblyRotation",
}
)
fh.outage(factor=1.0)
self.assertEqual(len(fh.moved), 0)
def test_outageEdgeCase(self):
"""Check that an error is raised if the list of moved assemblies is invalid."""
class MockFH(fuelHandlers.FuelHandler):
def chooseSwaps(self, factor=1.0):
self.moved = [None]
# mock up a fuel handler
fh = MockFH(self.o)
# test edge case
with self.assertRaises(AttributeError):
fh.outage(factor=1.0)
def test_isAssemblyInAZone(self):
    """Exercise isAssemblyInAZone: no zones, a zone miss, and a zone hit."""
    handler = fuelHandlers.FuelHandler(self.o)
    assem = self.r.core.getFirstAssembly()
    # with no zones supplied the check is trivially True
    self.assertTrue(handler.isAssemblyInAZone(None, assem))
    # an empty zone does not contain the assembly
    zone = Zone("test_isAssemblyInAZone")
    self.assertFalse(handler.isAssemblyInAZone([zone], assem))
    # once the assembly's location is registered, the zone contains it
    zone.addLoc(assem.getLocation())
    self.assertTrue(handler.isAssemblyInAZone([zone], assem))
def test_width(self):
    """Tests the width capability of findAssembly.

    Assigns an artificial power distribution that increases with ring number,
    then verifies that ``width=(n, direction)`` restricts the search window
    around ``targetRing`` as expected (direction 0 = both ways, +1 = outward
    only, -1 = inward only).
    """
    fh = fuelHandlers.FuelHandler(self.o)
    assemsByRing = collections.defaultdict(list)
    for a in self.r.core:
        assemsByRing[a.spatialLocator.getRingPos()[0]].append(a)
    # instantiate reactor power. more power in more outer rings
    for ring, power in zip(range(1, 8), range(10, 80, 10)):
        aList = assemsByRing[ring]
        for a in aList:
            sf = a.getSymmetryFactor()  # center assembly is only 1/3rd in the core
            for b in a:
                b.p.power = power / sf
    paramName = "power"
    # 1 ring outer and inner from ring 3
    a = fh.findAssembly(
        targetRing=3,
        width=(1, 0),
        param=paramName,
        blockLevelMax=True,
        compareTo=100,
    )
    ring = a.spatialLocator.getRingPos()[0]
    self.assertEqual(
        ring,
        4,
        "The highest power ring returned is {0}. It should be {1}".format(ring, 4),
    )
    a = fh.findAssembly(targetRing=3, width=(1, 0), param=paramName, blockLevelMax=True, compareTo=0)
    ring = a.spatialLocator.getRingPos()[0]
    self.assertEqual(
        ring,
        2,
        "The lowest power ring returned is {0}. It should be {1}".format(ring, 2),
    )
    # 2 rings outer from ring 3
    a = fh.findAssembly(
        targetRing=3,
        width=(2, 1),
        param=paramName,
        blockLevelMax=True,
        compareTo=100,
    )
    ring = a.spatialLocator.getRingPos()[0]
    self.assertEqual(
        ring,
        5,
        "The highest power ring returned is {0}. It should be {1}".format(ring, 5),
    )
    a = fh.findAssembly(targetRing=3, width=(2, 1), param=paramName, blockLevelMax=True, compareTo=0)
    ring = a.spatialLocator.getRingPos()[0]
    self.assertEqual(
        ring,
        3,
        "The lowest power ring returned is {0}. It should be {1}".format(ring, 3),
    )
    # 2 rings inner from ring 3
    a = fh.findAssembly(
        targetRing=3,
        width=(2, -1),
        param=paramName,
        blockLevelMax=True,
        compareTo=100,
    )
    ring = a.spatialLocator.getRingPos()[0]
    self.assertEqual(
        ring,
        3,
        "The highest power ring returned is {0}. It should be {1}".format(ring, 3),
    )
    a = fh.findAssembly(
        targetRing=3,
        width=(2, -1),
        param=paramName,
        blockLevelMax=True,
        compareTo=0,
    )
    ring = a.spatialLocator.getRingPos()[0]
    self.assertEqual(
        ring,
        1,
        "The lowest power ring returned is {0}. It should be {1}".format(ring, 1),
    )
def test_findMany(self):
    """Tests the ``findMany`` and type aspects of the fuel handler.

    Finds all igniter and all feed assemblies by flag, and verifies that
    ``maxNumAssems`` truncates the returned list.
    """
    fh = fuelHandlers.FuelHandler(self.o)
    igniters = fh.findAssembly(typeSpec=Flags.IGNITER | Flags.FUEL, findMany=True)
    feeds = fh.findAssembly(typeSpec=Flags.FEED | Flags.FUEL, findMany=True)
    fewFeeds = fh.findAssembly(typeSpec=Flags.FEED | Flags.FUEL, findMany=True, maxNumAssems=4)
    self.assertEqual(
        len(igniters),
        self.nigniter,
        "Found {0} igniters. Should have found {1}".format(len(igniters), self.nigniter),
    )
    self.assertEqual(
        len(feeds),
        self.nfeed,
        # bug fix: this message previously reported len(igniters) instead of len(feeds)
        "Found {0} feeds. Should have found {1}".format(len(feeds), self.nfeed),
    )
    self.assertEqual(
        len(fewFeeds),
        4,
        "Reduced findMany returned {0} assemblies instead of {1}".format(len(fewFeeds), 4),
    )
def test_findInSFP(self):
    """Tests ability to pull from the spent fuel pool."""
    handler = fuelHandlers.FuelHandler(self.o)
    spent = handler.findAssembly(
        findMany=True,
        findFromSfp=True,
        param="percentBu",
        compareTo=100,
        blockLevelMax=True,
    )
    self.assertEqual(
        len(spent),
        self.nSfp,
        "Found {0} assems in SFP. Should have found {1}".format(len(spent), self.nSfp),
    )
    # results are ordered best-match first, so index 0 must carry the peak burnup
    allBurnups = [assem.getMaxParam("percentBu") for assem in spent]
    topBu = spent[0].getMaxParam("percentBu")
    self.assertEqual(
        topBu,
        max(allBurnups),
        "First assembly does not have the highest burnup ({0}). It has ({1})".format(max(allBurnups), topBu),
    )
def test_findByCoords(self):
    """findAssembly can look an assembly up by its (x, y) coordinates."""
    handler = fuelHandlers.FuelHandler(self.o)
    found = handler.findAssembly(coords=(0, 0))
    # after ring-sorting, the center assembly is the first core element
    self.o.r.core.sortAssemsByRing()
    self.assertIs(found, self.o.r.core[0])
def test_findWithMinMax(self):
    """Test the complex min/max comparators."""
    fh = fuelHandlers.FuelHandler(self.o)
    # find the max-burnup assembly whose burnup is at least 0.1x its own
    # value (trivially true) and at most an absolute cap of 20%
    assem = fh.findAssembly(
        param="percentBu",
        compareTo=100,
        blockLevelMax=True,
        minParam="percentBu",
        minVal=("percentBu", 0.1),
        maxParam="percentBu",
        maxVal=20.0,
    )
    # the burnup should be the maximum bu within
    # up to a burnup of 20%, which by the simple
    # dummy data layout should be the 2/3rd block in the blocklist
    lastB = None
    for b in self.r.core.iterBlocks(Flags.FUEL):
        if b.p.percentBu > 20:
            break
        lastB = b
    expected = lastB.parent
    self.assertIs(assem, expected)
    # test the impossible: a block with burnup less than 110% of its own burnup
    assem = fh.findAssembly(
        param="percentBu",
        compareTo=100,
        blockLevelMax=True,
        minParam="percentBu",
        minVal=("percentBu", 1.1),
    )
    self.assertIsNone(assem)
def runShuffling(self, fh):
    """Shuffle fuel and write out a SHUFFLES.txt file.

    Runs four cycles of fuel management with the given fuel handler and
    verifies assembly position histories against a hard-coded answer key.

    Parameters
    ----------
    fh : FuelHandler
        The fuel handler (interface) that will perform the shuffling.
    """
    fh.attachReactor(self.o, self.r)
    # so we don't overwrite the version-controlled armiRun-SHUFFLES.txt
    self.o.cs.caseTitle = "armiRun2"
    fh.interactBOL()
    # expected assembly position history based on shuffling specification of this test.
    # do not blindly rebase these reference values. test failures using this dict
    # imply that the assembly shuffling definition has changed.
    expPosHist = {}
    # cycle 1 shuffle, (2, 1) moved to SFP
    expPosHist["A0005"] = [(2, 1), ("SFP", "SFP"), ("SFP", "SFP"), ("SFP", "SFP")]
    # cycle 1 shuffle, (3, 3) moved to (2, 1) in cascade
    # cycle 3 shuffle, (2, 1) moved to (5, 4)
    expPosHist["A0018"] = [(3, 3), (2, 1), (2, 1), (5, 4)]
    # cycle 1 shuffle, (4, 2) moved to (3, 3) in cascade
    expPosHist["A0019"] = [(4, 2), (3, 3), (3, 3), (3, 3)]
    # cycle 1 shuffle, (5, 1) moved to (4, 2) in cascade
    expPosHist["A0020"] = [(5, 1), (4, 2), (4, 2), (4, 2)]
    # cycle 1 shuffle, (6, 7) moved to (5, 1) in cascade
    expPosHist["A0044"] = [(6, 7), (5, 1), (5, 1), (5, 1)]
    # cycle 1 shuffle, fresh to (6, 7)
    # cycle 3 shuffle, (6, 7) moved to (5, 2) in cascade
    expPosHist["A0077"] = [("NotCreatedYet", "NotCreatedYet"), (6, 7), (6, 7), (5, 2)]
    # cycle 2 shuffle, (2, 2) moved to (5, 3)
    expPosHist["A0009"] = [(2, 2), (2, 2), (5, 3), (5, 3)]
    # cycle 2 shuffle, (3, 2) moved to (2, 2) in cascade
    expPosHist["A0014"] = [(3, 2), (3, 2), (2, 2), (2, 2)]
    # cycle 2 shuffle, (4, 1) moved to (3, 2) in cascade
    expPosHist["A0015"] = [(4, 1), (4, 1), (3, 2), (3, 2)]
    # cycle 2 shuffle, (5, 4) moved to (4, 1) in cascade
    expPosHist["A0034"] = [(5, 4), (5, 4), (4, 1), (4, 1)]
    # cycle 2 shuffle, (6, 4) moved to (5, 4) in cascade then discharged to SFP
    expPosHist["A0040"] = [(6, 4), (6, 4), (5, 4), ("SFP", "SFP")]
    # cycle 2 shuffle, fresh to (6, 4)
    expPosHist["A0078"] = [("NotCreatedYet", "NotCreatedYet"), ("NotCreatedYet", "NotCreatedYet"), (6, 4), (6, 4)]
    # cycle 1 shuffle, (5, 3) moved to SFP
    expPosHist["A0029"] = [(5, 3), (5, 3), ("SFP", "SFP"), ("SFP", "SFP")]
    # cycle 3 shuffle, (3, 1) moved to (2, 1) in cascade
    expPosHist["A0010"] = [(3, 1), (3, 1), (3, 1), (2, 1)]
    # cycle 3 shuffle, (4, 3) moved to (3, 1) in cascade
    expPosHist["A0024"] = [(4, 3), (4, 3), (4, 3), (3, 1)]
    # cycle 3 shuffle, (5, 2) moved to (4, 3) in cascade
    expPosHist["A0025"] = [(5, 2), (5, 2), (5, 2), (4, 3)]
    # cycle 3 shuffle, fresh to (6, 7)
    expPosHist["A0079"] = [
        ("NotCreatedYet", "NotCreatedYet"),
        ("NotCreatedYet", "NotCreatedYet"),
        ("NotCreatedYet", "NotCreatedYet"),
        (6, 7),
    ]
    # run four cycles of fuel management, checking SFP locations and
    # block mass params after each cycle
    for cycle in range(4):
        self.r.p.cycle = cycle
        fh.cycle = cycle
        fh.manageFuel(cycle)
        for a in self.r.excore["sfp"]:
            self.assertEqual(a.getLocation(), "SFP")
        for b in self.r.core.iterBlocks(Flags.FUEL):
            self.assertGreater(b.p.kgHM, 0.0, "b.p.kgHM not populated!")
            self.assertGreater(b.p.kgFis, 0.0, "b.p.kgFis not populated!")
    # check assemblies in core
    for a in self.r.core:
        self._checkAssemblyPositionHistory(a, expPosHist)
    # check assemblies in SFP
    for a in list(self.r.excore["sfp"]):
        self._checkAssemblyPositionHistory(a, expPosHist)
    # check getter methods based on assembly location history
    for aName, posList in expPosHist.items():
        for i, rp in enumerate(posList):
            if rp[0] is not None and rp[0] not in assemblies.Assembly.NOT_IN_CORE:
                r, p = rp
                self.assertEqual(self.r.core.getAssemblyWithRingPosHist(r, p, i).getName(), aName)
    fh.interactEOL()
def _checkAssemblyPositionHistory(self, a, answerKey):
    """Compare an assembly's ring/position history against the expected answers.

    Assemblies absent from ``answerKey`` are expected never to have moved,
    i.e. their recorded history collapses to a single unique location.
    """
    name = a.getName()
    if name in answerKey:
        self.assertListEqual(a.p.ringPosHist, answerKey[name])
    else:
        # unmoved assembly: every history entry is the same position
        self.assertEqual(len(set(a.p.ringPosHist)), 1)
def test_repeatShuffles(self):
    """Loads the ARMI test reactor with a custom shuffle logic file and shuffles assemblies twice.

    .. test:: Execute user-defined shuffle operations based on a reactor model.
        :id: T_ARMI_SHUFFLE
        :tests: R_ARMI_SHUFFLE

    Notes
    -----
    The custom shuffle logic is executed by
    :py:meth:`armi.physics.fuelCycle.fuelHandlerInterface.FuelHandlerInterface.manageFuel` in
    :py:meth:`armi.physics.fuelCycle.tests.test_fuelHandlers.TestFuelHandler.runShuffling`. There are two primary
    assertions: spent fuel pool assemblies are in the correct location and the assemblies were shuffled into their
    correct locations. This process is repeated twice to ensure repeatability.
    """
    # check labels before shuffling:
    for a in self.r.excore["sfp"]:
        self.assertEqual(a.getLocation(), "SFP")
    # do some shuffles
    fh = self.o.getInterface("fuelHandler")
    self.runShuffling(fh)  # changes caseTitle
    # Make sure the generated shuffles file matches the tracked one. This will need to be updated if/when more
    # assemblies are added to the test reactor but must be done carefully. Do not blindly rebaseline this file.
    self.compareFilesLineByLine(
        os.path.join(TESTING_ROOT, "resources", "armiRun-SHUFFLES.txt"), "armiRun2-SHUFFLES.txt"
    )
    # store locations of each assembly keyed by location string
    firstPassResults = {}
    for a in self.r.core:
        firstPassResults[a.getLocation()] = a.getName()
        self.assertNotIn(a.getLocation(), a.NOT_IN_CORE)
    # reset core to BOL state
    # reset assembly counter to get the same assem nums.
    self.setUp()
    newSettings = {CONF_PLOT_SHUFFLE_ARROWS: True}
    # now repeat shuffles, this time driven by the recorded shuffle file
    newSettings["explicitRepeatShuffles"] = os.path.join(TESTING_ROOT, "resources", "armiRun-SHUFFLES.txt")
    self.o.cs = self.o.cs.modified(newSettings=newSettings)
    fh = self.o.getInterface("fuelHandler")
    self.runShuffling(fh)
    # make sure the shuffle was repeated perfectly
    for a in self.r.core:
        self.assertEqual(a.getName(), firstPassResults[a.getLocation()])
    for a in self.r.excore["sfp"]:
        self.assertEqual(a.getLocation(), "SFP")
    # Do some cleanup, since the fuelHandler Interface has code that gets around the TempDirectoryChanger
    os.remove("armiRun2-SHUFFLES.txt")
    os.remove("armiRun2.shuffles_0.png")
    os.remove("armiRun2.shuffles_1.png")
    os.remove("armiRun2.shuffles_2.png")
    os.remove("armiRun2.shuffles_3.png")
def test_readMoves(self):
    """
    Depends on the ``shuffleLogic`` created by ``repeatShuffles``.

    See Also
    --------
    runShuffling : creates the shuffling file to be read in.
    """
    blocksPerAssem = len(self.r.core.getFirstAssembly())
    handler = fuelHandlers.FuelHandler(self.o)
    moves = handler.readMoves(os.path.join(TESTING_ROOT, "resources", "armiRun-SHUFFLES.txt"))
    # one entry per shuffled cycle
    self.assertEqual(len(moves), 4)
    # first move of cycle 1: (2, 1) goes to the spent fuel pool
    firstMove = moves[1][0]
    self.assertEqual(firstMove.fromLoc, "002-001")
    self.assertEqual(firstMove.toLoc, "SFP")
    self.assertEqual(len(firstMove.enrichList), blocksPerAssem)
    self.assertEqual(firstMove.assemType, "igniter fuel")
    self.assertIsNone(firstMove.ringPosCycle)
    # the last move of cycle 2 also discharges to the SFP
    sfpMove = moves[2][-1]
    self.assertEqual(sfpMove.fromLoc, "005-003")
    self.assertEqual(sfpMove.toLoc, "SFP")
    self.assertIsNone(sfpMove.ringPosCycle)
    # make sure we fail hard if the file doesn't exist
    with self.assertRaises(RuntimeError):
        handler.readMoves("totall_fictional_file.txt")
def test_readMovesYaml(self):
    """Read the YAML shuffle file and compare the full parsed move/swap structure."""
    fh = fuelHandlers.FuelHandler(self.o)
    moves, swaps = fh.readMovesYaml(os.path.join(TESTING_ROOT, "resources", "armiRun-SHUFFLES.yaml"))
    # show the full diff if the big dict comparison below fails
    self.maxDiff = None
    expected = {
        1: [
            AssemblyMove("LoadQueue", "009-045", [0.0, 0.12, 0.14, 0.15, 0.0], "igniter fuel"),
            AssemblyMove("009-045", "008-004"),
            AssemblyMove("008-004", "007-001"),
            AssemblyMove("007-001", "006-005"),
            AssemblyMove("006-005", "Delete"),
            AssemblyMove("009-045", "009-045", rotation=60.0),
            AssemblyMove("LoadQueue", "004-004", [0.0, 0.12, 0.14, 0.15, 0.0], "middle fuel"),
            AssemblyMove("004-004", "005-005"),
            AssemblyMove("005-005", "006-006"),
            AssemblyMove("006-006", "Delete"),
        ],
        2: [
            AssemblyMove("LoadQueue", "009-045", [0.0, 0.12, 0.14, 0.15, 0.0], "igniter fuel"),
            AssemblyMove("009-045", "008-004"),
            AssemblyMove("008-004", "007-001"),
            AssemblyMove("007-001", "006-005"),
            AssemblyMove("006-005", "Delete"),
            AssemblyMove("LoadQueue", "004-004", [0.0, 0.12, 0.14, 0.15, 0.0], "middle fuel"),
            AssemblyMove("004-004", "005-005"),
            AssemblyMove("005-005", "006-006"),
            AssemblyMove("006-006", "Delete"),
            AssemblyMove("009-045", "009-045", rotation=60.0),
            AssemblyMove("SFP", "005-003", ringPosCycle=[6, 5, 0]),
            AssemblyMove("005-003", "SFP"),
        ],
        3: [
            AssemblyMove("LoadQueue", "009-045", [0.0, 0.12, 0.14, 0.15, 0.0], "igniter fuel"),
            AssemblyMove("009-045", "008-004"),
            AssemblyMove("008-004", "007-001"),
            AssemblyMove("007-001", "006-005"),
            AssemblyMove("006-005", "Delete"),
            AssemblyMove("SFP", "002-002", ringPosCycle=[5, 3, 1]),
            AssemblyMove("002-002", "SFP"),
        ],
    }
    self.assertEqual(moves, expected)
    # cycle 3 is the only cycle defining explicit swaps
    self.assertEqual(swaps, {3: [("009-045", "008-004"), ("007-001", "006-005")]})
def test_performShuffleYamlIntegration(self):
    """End-to-end YAML shuffle: swap, cascade with fresh fuel, and an extra rotation."""
    fh = fuelHandlers.FuelHandler(self.o)
    # inline shuffle sequence: swap two locations, then run a cascade that
    # charges fresh igniter fuel, then rotate the assembly at 009-045
    yaml_text = """
sequence:
  1:
    - swap: ["009-045", "008-004"]
    - cascade: ["igniter fuel", "009-045", "008-004", "007-001", "006-005"]
      fuelEnrichment: [0, 0.12, 0.14, 0.15, 0]
    - extraRotations: {"009-045": 60}
"""
    with tempfile.NamedTemporaryFile("w", suffix=".yaml", delete=False) as tf:
        tf.write(yaml_text)
        fname = tf.name
    try:
        # record which assembly starts at each location of interest
        locs = ["009-045", "008-004", "007-001", "006-005"]
        before = {loc: self.r.core.getAssemblyWithStringLocation(loc).getName() for loc in locs}
        self.r.p.cycle = 1
        self.o.cs = self.o.cs.modified(newSettings={CONF_SHUFFLE_SEQUENCE_FILE: fname, CONF_TRACK_ASSEMS: False})
        self.r.core._trackAssems = False
        fh.outage()
        # cascade charged a fresh igniter assembly into 008-004
        fresh = self.r.core.getAssemblyWithStringLocation("008-004")
        self.assertEqual(fresh.getType(), "igniter fuel")
        self.assertNotIn(fresh.getName(), before.values())
        # the assembly at 009-045 stayed put but was rotated 60 degrees
        rotated = self.r.core.getAssemblyWithStringLocation("009-045")
        self.assertEqual(rotated.getName(), before["009-045"])
        self.assertAlmostEqual(rotated.p.orientation[2], 60.0)
        # the cascade shifted the remaining assemblies down the chain
        self.assertEqual(
            self.r.core.getAssemblyWithStringLocation("007-001").getName(),
            before["008-004"],
        )
        self.assertEqual(
            self.r.core.getAssemblyWithStringLocation("006-005").getName(),
            before["007-001"],
        )
        # with assembly tracking off, the discharged assembly is gone for good
        self.assertIsNone(self.r.excore["sfp"].getAssembly(before["006-005"]))
    finally:
        os.remove(fname)
def test_yamlSfpOverridesTrackAssems(self):
    """An explicit SFP destination in the YAML keeps the assembly even with tracking disabled."""
    fh = fuelHandlers.FuelHandler(self.o)
    # cascade explicitly discharges to the SFP rather than deleting
    yaml_text = """
sequence:
  1:
    - cascade: ["igniter fuel", "009-045", "SFP"]
      fuelEnrichment: [0, 0.12, 0.14, 0.15, 0]
"""
    with tempfile.NamedTemporaryFile("w", suffix=".yaml", delete=False) as tf:
        tf.write(yaml_text)
        fname = tf.name
    try:
        before = self.r.core.getAssemblyWithStringLocation("009-045").getName()
        self.r.p.cycle = 1
        self.o.cs = self.o.cs.modified(newSettings={CONF_SHUFFLE_SEQUENCE_FILE: fname, CONF_TRACK_ASSEMS: False})
        self.r.core._trackAssems = False
        fh.outage()
        # tracking stays off, but the explicit SFP target preserved the assembly
        self.assertFalse(self.r.core._trackAssems)
        self.assertIsNotNone(self.r.excore["sfp"].getAssembly(before))
    finally:
        os.remove(fname)
def test_readMovesYaml_loadFromSfp(self):
    """readMovesYaml parses a cascade that charges from the SFP via ringPosCycle."""
    assem = self.r.excore["sfp"].getChildren()[0]
    # fake the assembly location history
    assem.p.ringPosHist = [(2, 3), (4, 5), (5, 7)]
    yaml_text = """
sequence:
  1:
    - cascade: ["SFP", "005-003", "SFP"]
      ringPosCycle: [5, 7, 2]
"""
    with directoryChangers.TemporaryDirectoryChanger():
        fname = "moves.yaml"
        with open(fname, "w", encoding="utf-8") as stream:
            stream.write(yaml_text)
        # readMovesYaml is callable without an instance
        moves, _ = fuelHandlers.FuelHandler.readMovesYaml(fname)
    expected = {
        1: [
            AssemblyMove("SFP", "005-003", [], None, [5, 7, 2]),
            AssemblyMove("005-003", "SFP"),
        ]
    }
    self.assertEqual(moves, expected)
def test_performShuffleYaml_loadFromSfp(self):
    """A YAML cascade can pull a specific assembly out of the SFP by its position history."""
    fh = fuelHandlers.FuelHandler(self.o)
    sfpAssem = self.r.excore["sfp"].getChildren()[0]
    # fake the assembly location history; stored as bytes to mimic values
    # round-tripped through the database
    ringPosHistInts = [(2, 3), (4, 5), (5, 7)]
    sfpAssem.p.ringPosHist = [(str(x).encode(), str(y).encode()) for x, y in ringPosHistInts]
    yaml_text = """
sequence:
  1:
    - cascade: ["SFP", "009-045", "SFP"]
      ringPosCycle: [5, 7, 2]
"""
    with directoryChangers.TemporaryDirectoryChanger():
        fname = "moves.yaml"
        with open(fname, "w", encoding="utf-8") as stream:
            stream.write(yaml_text)
        before = self.r.core.getAssemblyWithStringLocation("009-045").getName()
        self.r.p.cycle = 1
        self.o.cs = self.o.cs.modified(newSettings={CONF_SHUFFLE_SEQUENCE_FILE: fname})
        fh.outage()
        # the SFP assembly matching ringPosCycle was charged into 009-045
        assem = self.r.core.getAssemblyWithStringLocation("009-045")
        self.assertEqual(assem.getName(), sfpAssem.getName())
        # the faked (bytes) cycle-0 entry survives untouched
        cycle0Loc = ("2".encode(), "3".encode())
        self.assertEqual(assem.p.ringPosHist[0], cycle0Loc)
        self.assertEqual(assem.p.ringPosHist[1], (9, 45))
        self.assertEqual(len(assem.p.ringPosHist), 2)  # truncated by logic in fuelHandlers
        # the displaced assembly went to the SFP and recorded its last core spot
        newSfpAssem = self.r.excore["sfp"].getAssembly(before)
        self.assertIsNotNone(newSfpAssem)
        self.assertEqual(newSfpAssem.p.ringPosHist[0], (9, 45))
def test_performShuffleYaml_loadFromSfp2(self):
    """Run four cycles of YAML-driven shuffling and verify SFP contents and position histories."""
    fh = fuelHandlers.FuelHandler(self.o)
    fname = os.path.join(TESTING_ROOT, "resources", "armiRun-SHUFFLES.yaml")
    self.o.cs = self.o.cs.modified(newSettings={CONF_SHUFFLE_SEQUENCE_FILE: fname})
    with directoryChangers.TemporaryDirectoryChanger():
        # remember the assemblies starting at these positions so we can track
        # them through the shuffle sequence
        before1 = self.r.core.getAssemblyWithStringLocation("005-003")
        before2 = self.r.core.getAssemblyWithStringLocation("006-005")
        for cycle in range(4):
            self.r.p.cycle = cycle
            fh.outage()
        # check that the following (ring, pos) @ cycle entries exist in the SFP
        inSfp = [
            [6, 6, 0],
            [6, 6, 1],
            [6, 5, 1],
            [6, 5, 2],
            [2, 2, 2],
        ]
        sfpAssems = self.r.excore["sfp"].getChildren()
        for ring, pos, cyc in inSfp:
            found = any(a.p.ringPosHist[cyc] == (ring, pos) for a in sfpAssems)
            self.assertTrue(found, f"ringPosHist == ({ring}, {pos}, {cyc}) not found in SFP!")
        # check that the assembly that ended up in 002-002 started in 005-003,
        # the one in 005-003 started in 006-005, and both passed through the SFP
        for loc, refA in [
            ("002-002", before1),
            ("005-003", before2),
        ]:
            a = self.r.core.getAssemblyWithStringLocation(loc)
            self.assertIn(("SFP", "SFP"), a.p.ringPosHist)
            # bug fix: this message was missing its f-prefix and never interpolated
            self.assertEqual(
                refA.getName(), a.getName(), f"Expected {a} to be the same assembly as {refA} based on shuffling!"
            )
def test_processMoveList(self):
    """Process a text-based move list and inspect the resulting structure."""
    handler = fuelHandlers.FuelHandler(self.o)
    moves = handler.readMoves(os.path.join(TESTING_ROOT, "resources", "armiRun-SHUFFLES.txt"))
    result = handler.processMoveList(moves[2])
    # text-format moves carry a placeholder (None) ring/pos cycle entry
    self.assertIn(None, result.ringPosCycles)
    # pool/queue pseudo-locations are stripped out of the load chains
    for chain in result.loadChains:
        self.assertNotIn("SFP", chain)
        self.assertNotIn("LoadQueue", chain)
    # the text format defines neither closed loops nor rotations
    self.assertFalse(result.loopChains)
    self.assertFalse(result.rotations)
def test_processMoveList_yaml(self):
    """Process a YAML-based move list, which also carries enrichments and rotations."""
    handler = fuelHandlers.FuelHandler(self.o)
    moves, _ = handler.readMovesYaml(os.path.join(TESTING_ROOT, "resources", "armiRun-SHUFFLES.yaml"))
    result = handler.processMoveList(moves[1])
    # cycle 1 defines two cascades, so two load chains result
    self.assertEqual(len(result.loadChains), 2)
    # fresh-fuel enrichments are captured from the YAML
    self.assertTrue(any(result.enriches))
    # unlike the text format, the YAML format supports rotations
    self.assertTrue(result.rotations)
def test_getFactorList(self):
    """The default factor list exposes the eqShuffles knob."""
    handler = fuelHandlers.FuelHandler(self.o)
    factors, _flags = handler.getFactorList(0)
    self.assertIn("eqShuffles", factors)
def test_linPowByPin(self):
    """Assigning linPowByPin from a list or an array always stores an ndarray."""
    _fh = fuelHandlers.FuelHandler(self.o)
    _hist = self.o.getInterface("history")
    self.o.cs = self.o.cs.modified(newSettings={CONF_ASSEM_ROTATION_STATIONARY: True})
    fuelBlock = next(self.o.r.core.getFirstAssembly(Flags.FUEL).iterBlocks(Flags.FUEL))
    # both plain lists and ndarrays must be coerced to np.ndarray by the param system
    for value in ([1, 2, 3], np.array([1, 2, 3])):
        fuelBlock.p.linPowByPin = value
        self.assertEqual(type(fuelBlock.p.linPowByPin), np.ndarray)
def test_linPowByPinNeutron(self):
    """Assigning linPowByPinNeutron from a list or an array always stores an ndarray."""
    _fh = fuelHandlers.FuelHandler(self.o)
    _hist = self.o.getInterface("history")
    self.o.cs = self.o.cs.modified(newSettings={CONF_ASSEM_ROTATION_STATIONARY: True})
    fuelBlock = next(self.o.r.core.getFirstAssembly(Flags.FUEL).iterBlocks(Flags.FUEL))
    # both plain lists and ndarrays must be coerced to np.ndarray by the param system
    for value in ([1, 2, 3], np.array([1, 2, 3])):
        fuelBlock.p.linPowByPinNeutron = value
        self.assertEqual(type(fuelBlock.p.linPowByPinNeutron), np.ndarray)
def test_linPowByPinGamma(self):
    """Assigning linPowByPinGamma from a list or an array always stores an ndarray."""
    _fh = fuelHandlers.FuelHandler(self.o)
    _hist = self.o.getInterface("history")
    self.o.cs = self.o.cs.modified(newSettings={CONF_ASSEM_ROTATION_STATIONARY: True})
    fuelBlock = next(self.o.r.core.getFirstAssembly(Flags.FUEL).iterBlocks(Flags.FUEL))
    # both plain lists and ndarrays must be coerced to np.ndarray by the param system
    for value in ([1, 2, 3], np.array([1, 2, 3])):
        fuelBlock.p.linPowByPinGamma = value
        self.assertEqual(type(fuelBlock.p.linPowByPinGamma), np.ndarray)
def test_transferStationaryBlocks(self):
    """Test the _transferStationaryBlocks method.

    .. test:: User-specified blocks can remain in place during shuffling
        :id: T_ARMI_SHUFFLE_STATIONARY0
        :tests: R_ARMI_SHUFFLE_STATIONARY
    """
    # grab stationary block flags
    sBFList = self.r.core.stationaryBlockFlagsList
    # grab the assemblies
    assems = self.r.core.getAssemblies(Flags.FUEL)
    # grab two arbitrary assemblies
    a1 = assems[1]
    a2 = assems[2]
    # grab the stationary blocks (name + axial index) pre swap
    a1PreSwapStationaryBlocks = [
        [block.getName(), block.spatialLocator.k] for block in a1 if any(block.hasFlags(sbf) for sbf in sBFList)
    ]
    a2PreSwapStationaryBlocks = [
        [block.getName(), block.spatialLocator.k] for block in a2 if any(block.hasFlags(sbf) for sbf in sBFList)
    ]
    # swap the stationary blocks
    fh = fuelHandlers.FuelHandler(self.o)
    fh._transferStationaryBlocks(a1, a2)
    # grab the stationary blocks post swap
    a1PostSwapStationaryBlocks = [
        [block.getName(), block.spatialLocator.k] for block in a1 if any(block.hasFlags(sbf) for sbf in sBFList)
    ]
    a2PostSwapStationaryBlocks = [
        [block.getName(), block.spatialLocator.k] for block in a2 if any(block.hasFlags(sbf) for sbf in sBFList)
    ]
    # validate the stationary blocks have swapped locations and are aligned
    self.assertEqual(a1PostSwapStationaryBlocks, a2PreSwapStationaryBlocks)
    self.assertEqual(a2PostSwapStationaryBlocks, a1PreSwapStationaryBlocks)
def test_transStatBlocksBadNumbers(self):
    """
    Test the _transferStationaryBlocks method for the case where the input assemblies have different numbers of
    stationary blocks.
    """
    sBFList = self.r.core.stationaryBlockFlagsList
    fuelAssems = self.r.core.getAssemblies(Flags.FUEL)
    # pick two arbitrary assemblies
    a1 = fuelAssems[1]
    a2 = fuelAssems[2]
    # retype the first non-stationary block of a1 so the stationary counts no longer match
    for block in a1:
        if any(block.hasFlags(sbf) for sbf in sBFList):
            continue
        a1[block.spatialLocator.k].setType(a1[block.spatialLocator.k].p.type, sBFList[0])
        self.assertTrue(any(block.hasFlags(sbf) for sbf in sBFList))
        break
    # mismatched stationary-block counts must be rejected
    handler = fuelHandlers.FuelHandler(self.o)
    with self.assertRaises(ValueError):
        handler._transferStationaryBlocks(a1, a2)
def test_transStatBlockUnaligned(self):
    """
    Test the _transferStationaryBlocks method for the case where the input assemblies have unaligned locations of
    stationary blocks.
    """
    # grab stationary block flags
    sBFList = self.r.core.stationaryBlockFlagsList
    # grab the assemblies
    assems = self.r.core.getAssemblies(Flags.FUEL)
    # grab two arbitrary assemblies
    a1 = assems[1]
    a2 = assems[2]
    # move location of a stationary flag in assembly 1
    for block in a1:
        if any(block.hasFlags(sbf) for sbf in sBFList):
            # change flag of first identified stationary block to fuel
            a1[block.spatialLocator.k].setType(a1[block.spatialLocator.k].p.type, Flags.FUEL)
            self.assertTrue(a1[block.spatialLocator.k].hasFlags(Flags.FUEL))
            # change next or previous block flag to stationary flag;
            # if indexing one block above fails (e.g. at the top of the
            # assembly), fall back to the block below instead
            try:
                a1[block.spatialLocator.k + 1].setType(a1[block.spatialLocator.k + 1].p.type, sBFList[0])
                self.assertTrue(any(a1[block.spatialLocator.k + 1].hasFlags(sbf) for sbf in sBFList))
            except Exception:
                a1[block.spatialLocator.k - 1].setType(a1[block.spatialLocator.k - 1].p.type, sBFList[0])
                self.assertTrue(any(a1[block.spatialLocator.k - 1].hasFlags(sbf) for sbf in sBFList))
            break
    # try to swap stationary blocks between assembly 1 and 2; the
    # misaligned stationary block must be rejected
    fh = fuelHandlers.FuelHandler(self.o)
    with self.assertRaises(ValueError):
        fh._transferStationaryBlocks(a1, a2)
def test_transStatBlockBadHeights(self):
    """
    Test the _transferStationaryBlocks method for the case where the total height of the stationary blocks is
    unequal between input assemblies.
    """
    # grab stationary block flags
    sBFList = self.r.core.stationaryBlockFlagsList
    # grab the assemblies
    assems = self.r.core.getAssemblies(Flags.FUEL)
    # grab two arbitrary assemblies
    a1 = assems[1]
    a2 = assems[2]
    # change height of a stationary block in assembly 1
    # NOTE(review): this loop has no break, so it perturbs EVERY stationary
    # block in a1, not just the first as the comment below implies — confirm intent
    for block in a1:
        if any(block.hasFlags(sbf) for sbf in sBFList):
            # change height of first identified stationary block
            nomHeight = block.getHeight()
            a1[block.spatialLocator.k].setHeight(nomHeight - 1e-5)
    # try to swap stationary blocks between assembly 1 and 2; the height
    # mismatch is tolerated but produces a warning in the log
    fh = fuelHandlers.FuelHandler(self.o)
    with mockRunLogs.BufferLog() as mock:
        fh._transferStationaryBlocks(a1, a2)
        self.assertIn("top elevation of stationary", mock.getStdout())
def test_dischargeSwap(self):
    """Remove an assembly from the core and replace it with one from the SFP.

    .. test:: User-specified blocks can remain in place during shuffling
        :id: T_ARMI_SHUFFLE_STATIONARY1
        :tests: R_ARMI_SHUFFLE_STATIONARY
    """
    # grab stationary block flags
    sBFList = self.r.core.stationaryBlockFlagsList
    # grab an arbitrary fuel assembly from the core and from the SFP
    a1 = self.r.core.getFirstAssembly(Flags.FUEL)
    a2 = self.r.excore["sfp"].getChildrenWithFlags(Flags.FUEL)[0]
    # grab the stationary blocks (name + axial index) pre swap
    a1PreSwapStationaryBlocks = [
        [block.getName(), block.spatialLocator.k] for block in a1 if any(block.hasFlags(sbf) for sbf in sBFList)
    ]
    a2PreSwapStationaryBlocks = [
        [block.getName(), block.spatialLocator.k] for block in a2 if any(block.hasFlags(sbf) for sbf in sBFList)
    ]
    # test discharging assembly 1 and replacing with assembly 2
    fh = fuelHandlers.FuelHandler(self.o)
    fh.dischargeSwap(a2, a1)
    # a1 left the core; a2 entered it
    self.assertTrue(a1.getLocation() in a1.NOT_IN_CORE)
    self.assertTrue(a2.getLocation() not in a2.NOT_IN_CORE)
    # grab the stationary blocks post swap
    a1PostSwapStationaryBlocks = [
        [block.getName(), block.spatialLocator.k] for block in a1 if any(block.hasFlags(sbf) for sbf in sBFList)
    ]
    a2PostSwapStationaryBlocks = [
        [block.getName(), block.spatialLocator.k] for block in a2 if any(block.hasFlags(sbf) for sbf in sBFList)
    ]
    # validate the stationary blocks have swapped locations correctly and are aligned
    self.assertEqual(a1PostSwapStationaryBlocks, a2PreSwapStationaryBlocks)
    self.assertEqual(a2PostSwapStationaryBlocks, a1PreSwapStationaryBlocks)
def test_dischargeSwapStationaryBlocks(self):
    """
    Test the _transferStationaryBlocks method for the case where the input assemblies have
    different numbers as well as unaligned locations of stationary blocks.
    """
    # grab stationary block flags
    sBFList = self.r.core.stationaryBlockFlagsList
    # grab an arbitrary fuel assembly from the core and from the SFP
    a1 = self.r.core.getFirstAssembly(Flags.FUEL)
    a2 = self.r.excore["sfp"].getChildren(Flags.FUEL)[0]
    # change a block in assembly 1 to be flagged as a stationary block
    # (mismatched stationary-block count vs. a2)
    for block in a1:
        if not any(block.hasFlags(sbf) for sbf in sBFList):
            a1[block.spatialLocator.k].setType(a1[block.spatialLocator.k].p.type, sBFList[0])
            self.assertTrue(any(block.hasFlags(sbf) for sbf in sBFList))
            break
    # try to discharge assembly 1 and replace with assembly 2
    fh = fuelHandlers.FuelHandler(self.o)
    with self.assertRaises(ValueError):
        fh.dischargeSwap(a2, a1)
    # re-initialize assemblies by rebuilding the whole test fixture
    self.setUp()
    a1 = self.r.core.getFirstAssembly(Flags.FUEL)
    a2 = self.r.excore["sfp"].getChildren(Flags.FUEL)[0]
    # move location of a stationary flag in assembly 1
    for block in a1:
        if any(block.hasFlags(sbf) for sbf in sBFList):
            # change flag of first identified stationary block to fuel
            a1[block.spatialLocator.k].setType(a1[block.spatialLocator.k].p.type, Flags.FUEL)
            self.assertTrue(a1[block.spatialLocator.k].hasFlags(Flags.FUEL))
            # change next or previous block flag to stationary flag; fall
            # back to the block below if indexing above fails (top block)
            try:
                a1[block.spatialLocator.k + 1].setType(a1[block.spatialLocator.k + 1].p.type, sBFList[0])
                self.assertTrue(any(a1[block.spatialLocator.k + 1].hasFlags(sbf) for sbf in sBFList))
            except Exception:
                a1[block.spatialLocator.k - 1].setType(a1[block.spatialLocator.k - 1].p.type, sBFList[0])
                self.assertTrue(any(a1[block.spatialLocator.k - 1].hasFlags(sbf) for sbf in sBFList))
            break
    # try to discharge assembly 1 and replace with assembly 2
    with self.assertRaises(ValueError):
        fh.dischargeSwap(a2, a1)
def test_getAssembliesInRings(self):
    """_getAssembliesInRings handles ring lists, SFP entries, and both trailing flags."""
    handler = fuelHandlers.FuelHandler(self.o)
    # a single ring yields a single assembly
    singleRing = handler._getAssembliesInRings([0], Flags.FUEL, False, None, False)
    self.assertEqual(len(singleRing), 1)
    # three rings yield three assemblies
    threeRings = handler._getAssembliesInRings([0, 1, 2], Flags.FUEL, False, None, False)
    self.assertEqual(len(threeRings), 3)
    # enabling the SFP-search flag alone does not change the count
    threeRingsSfp = handler._getAssembliesInRings([0, 1, 2], Flags.FUEL, True, None, False)
    self.assertEqual(len(threeRingsSfp), 3)
    # explicitly listing "SFP" as a ring pulls in one more assembly
    withSfpEntry = handler._getAssembliesInRings([0, 1, 2, "SFP"], Flags.FUEL, True, None, False)
    self.assertEqual(len(withSfpEntry), 4)
    # toggling the final flag also keeps three assemblies for three rings
    threeRingsAlt = handler._getAssembliesInRings([0, 1, 2], Flags.FUEL, False, None, True)
    self.assertEqual(len(threeRingsAlt), 3)
class TestFuelPlugin(unittest.TestCase):
    """Tests that make sure the plugin is being discovered well."""

    def test_settingsAreDiscovered(self):
        """A fresh Settings object exposes the fuel-cycle setting with its default."""
        cs = caseSettings.Settings()
        settingName = settings.CONF_JUMP_RING_NUM
        self.assertEqual(cs[settingName], 8)
================================================
FILE: armi/physics/fuelCycle/tests/test_hexAssemblyFuelMgmtUtils.py
================================================
# Copyright 2022 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests some fuel handling tools, specific to hex-assembly reactors."""
from armi.physics.fuelCycle import hexAssemblyFuelMgmtUtils as hexUtils
from armi.tests import ArmiTestHelper
from armi.utils import directoryChangers
class TestHexAssemMgmtTools(ArmiTestHelper):
    """Tests for hex-assembly fuel-management ring schedule builders."""

    def setUp(self):
        # run each test inside a throwaway working directory
        self.td = directoryChangers.TemporaryDirectoryChanger()
        self.td.__enter__()

    def tearDown(self):
        self.td.__exit__(None, None, None)

    def test_buildConvergentRingSchedule(self):
        """Convergent schedules collapse to [first, last] with matching widths."""
        cases = [
            ((1, 17, 0), [1, 17], [16, 1]),
            ((3, 17, 1), [3, 17], [14, 1]),
            ((12, 16, 0.5), [12, 16], [4, 1]),
        ]
        for args, expSchedule, expWidths in cases:
            schedule, widths = hexUtils.buildConvergentRingSchedule(*args)
            self.assertEqual(schedule, expSchedule)
            self.assertEqual(widths, expWidths)

    def test_buildRingSchedule(self):
        """Cover divergent, convergent, jump-ring, and invalid-jump schedules."""
        zeroWidths = [0] * 9
        # simple divergent
        schedule, widths = hexUtils.buildRingSchedule(9, 1, 9)
        self.assertEqual(schedule, [9, 8, 7, 6, 5, 4, 3, 2, 1])
        self.assertEqual(widths, zeroWidths)
        # simple with no jumps
        schedule, widths = hexUtils.buildRingSchedule(9, 9, 1, jumpRingTo=1)
        self.assertEqual(schedule, [1, 2, 3, 4, 5, 6, 7, 8, 9])
        self.assertEqual(widths, zeroWidths)
        # simple with 1 jump
        schedule, widths = hexUtils.buildRingSchedule(9, 9, 1, jumpRingFrom=6)
        self.assertEqual(schedule, [5, 4, 3, 2, 1, 6, 7, 8, 9])
        self.assertEqual(widths, zeroWidths)
        # 1 jump plus auto-correction to core size
        schedule, widths = hexUtils.buildRingSchedule(9, 1, 17, jumpRingFrom=5)
        self.assertEqual(schedule, [6, 7, 8, 9, 5, 4, 3, 2, 1])
        self.assertEqual(widths, zeroWidths)
        # crash on invalid jumpring
        with self.assertRaises(ValueError):
            schedule, widths = hexUtils.buildRingSchedule(9, 1, 17, jumpRingFrom=0)
        # mid-way jumping
        schedule, widths = hexUtils.buildRingSchedule(9, 1, 9, jumpRingTo=6, jumpRingFrom=3)
        self.assertEqual(schedule, [9, 8, 7, 4, 5, 6, 3, 2, 1])
        self.assertEqual(widths, zeroWidths)
================================================
FILE: armi/physics/fuelCycle/tests/test_utils.py
================================================
# Copyright 2024 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from unittest import TestCase
import numpy as np
from armi.physics.fuelCycle import utils
from armi.reactor.blocks import Block
from armi.reactor.components import Circle
from armi.reactor.flags import Flags
from armi.reactor.grids import IndexLocation, MultiIndexLocation
class FuelCycleUtilsTests(TestCase):
    """Tests for geometry indifferent fuel cycle routines."""

    # Pin count used both as the multiplicity of the fuel component and as the
    # length of the per-pin data arrays built in the tests below.
    N_PINS = 169

    def setUp(self):
        # Minimal block: one multi-pin fuel component plus a clad component.
        self.block = Block("test block")
        self.fuel = Circle(
            "test pin",
            material="UO2",
            Tinput=20,
            Thot=20,
            mult=self.N_PINS,
            id=0.0,
            od=1.0,
        )
        clad = Circle(
            "clad",
            material="HT9",
            Tinput=20,
            Thot=300,
            id=1.0,
            od=1.1,
        )
        self.block.add(self.fuel)
        self.block.add(clad)
        # Force no fuel flags
        self.fuel.p.flags = Flags.PIN

    def test_maxBurnupLocationFromComponents(self):
        """Test that the ``Component.p.pinPercentBu`` parameter can reveal max burnup location."""
        # Give every pin its own index location so burnup maps onto a position.
        self.fuel.spatialLocator = MultiIndexLocation(None)
        locations = []
        for i in range(self.N_PINS):
            loc = IndexLocation(i, 0, 0, None)
            self.fuel.spatialLocator.append(loc)
            locations.append(loc)
        self.fuel.p.pinPercentBu = np.ones(self.N_PINS, dtype=float)
        # Pick an arbitrary index for the pin with the most burnup
        maxBuIndex = self.N_PINS // 3
        self.fuel.p.pinPercentBu[maxBuIndex] *= 2
        expectedLoc = locations[maxBuIndex]
        actual = utils.maxBurnupLocator(self.block)
        self.assertEqual(actual, expectedLoc)

    def test_singleLocatorWithBurnup(self):
        """Test that a single component with burnup can be used to find the highest burnup."""
        freeComp = Circle("free fuel", material="UO2", Tinput=200, Thot=200, id=0, od=1, mult=1)
        # Single (non-multi) locator paired with a one-entry burnup list.
        freeComp.spatialLocator = IndexLocation(2, 4, 0, None)
        freeComp.p.pinPercentBu = [
            0.01,
        ]
        loc = utils.maxBurnupLocator([freeComp])
        self.assertIs(loc, freeComp.spatialLocator)

    def test_maxBurnupLocatorWithNoBurnup(self):
        """Ensure we catch an error if no burnup is found across components."""
        with self.assertRaisesRegex(ValueError, "No burnups found"):
            utils.maxBurnupLocator([])

    def test_maxBurnupLocatorMismatchedData(self):
        """Ensure pin burnup and locations must agree."""
        freeComp = Circle("free fuel", material="UO2", Tinput=200, Thot=200, id=0, od=1, mult=1)
        # One locator but two burnup entries -> should be rejected.
        freeComp.spatialLocator = IndexLocation(2, 4, 0, None)
        freeComp.p.pinPercentBu = [
            0.01,
            0.02,
        ]
        with self.assertRaisesRegex(ValueError, "Pin burnup.*pin locations.*differ"):
            utils.maxBurnupLocator([freeComp])

    def test_assemblyHasPinPower(self):
        """Test the ability to check if an assembly has fuel pin powers."""
        fakeAssem = [self.block]
        # No fuel blocks, no pin power on blocks => no pin powers
        # NOTE(review): this line checks pin *burnup*, not pin powers; it likely
        # should call assemblyHasFuelPinPowers — confirm intent.
        self.assertFalse(utils.assemblyHasFuelPinBurnup(fakeAssem))
        # Yes fuel blocks, no pin power on blocks => no pin powers
        self.block.p.flags |= Flags.FUEL
        self.assertFalse(utils.assemblyHasFuelPinPowers(fakeAssem))
        # Yes fuel blocks, yes pin power on blocks => yes pin powers
        self.block.p.linPowByPin = np.arange(self.N_PINS, dtype=float)
        self.assertTrue(utils.assemblyHasFuelPinPowers(fakeAssem))
        # Yes fuel blocks, yes pin power assigned but all zeros => no pin powers
        self.block.p.linPowByPin = np.zeros(self.N_PINS, dtype=float)
        self.assertFalse(utils.assemblyHasFuelPinPowers(fakeAssem))

    def test_assemblyHasPinBurnups(self):
        """Test the ability to check if an assembly has fuel pin burnup."""
        fakeAssem = [self.block]
        # No fuel components => no assembly burnups
        self.assertFalse(self.block.getChildrenWithFlags(Flags.FUEL))
        self.assertFalse(utils.assemblyHasFuelPinBurnup(fakeAssem))
        # No fuel with burnup => no assembly burnups
        self.block.p.flags |= Flags.FUEL
        self.fuel.p.flags |= Flags.FUEL
        self.assertFalse(utils.assemblyHasFuelPinBurnup(fakeAssem))
        # Fuel pin has burnup => yes assembly burnup
        self.fuel.p.pinPercentBu = np.arange(self.N_PINS, dtype=float)
        self.assertTrue(utils.assemblyHasFuelPinBurnup(fakeAssem))
        # Fuel pin has empty burnup => no assembly burnup
        self.fuel.p.pinPercentBu = np.zeros(self.N_PINS)
        self.assertFalse(utils.assemblyHasFuelPinBurnup(fakeAssem))
        # Yes burnup but no fuel flags => no assembly burnup
        # XOR strips the FUEL bit that was OR-ed in above, leaving PIN only.
        self.fuel.p.flags ^= Flags.FUEL
        self.assertFalse(self.fuel.hasFlags(Flags.FUEL))
        self.fuel.p.pinPercentBu = np.arange(self.N_PINS, dtype=float)
        self.assertFalse(utils.assemblyHasFuelPinBurnup(fakeAssem))

    def test_maxBurnupBlock(self):
        """Test the ability to find maximum burnup block in an assembly."""
        reflector = Block("reflector")
        assem = [reflector, self.block]
        self.block.p.percentBuPeak = 40
        expected = utils.maxBurnupBlock(assem)
        self.assertIs(expected, self.block)
        # add a new block with more burnup higher up the stack
        hotter = copy.deepcopy(self.block)
        hotter.p.percentBuPeak *= 2
        expected = utils.maxBurnupBlock([reflector, self.block, hotter, self.block, reflector])
        self.assertIs(expected, hotter)

    def test_maxBurnupBlockNoBlocks(self):
        """Ensure a more helpful error is provided for empty sequence."""
        with self.assertRaisesRegex(ValueError, "Error finding max burnup"):
            utils.maxBurnupBlock([])

    def test_maxBurnupBlockNoBurnup(self):
        """Ensure that we will not return a block with zero burnup."""
        self.block.p.percentBuPeak = 0.0
        with self.assertRaisesRegex(ValueError, "Error finding max burnup"):
            utils.maxBurnupBlock([self.block])
================================================
FILE: armi/physics/fuelCycle/utils.py
================================================
# Copyright 2024 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Geometric agnostic routines that are useful for fuel cycle analysis on pin-type reactors."""
import operator
import typing
import numpy as np
from armi import runLog
from armi.reactor.flags import Flags
from armi.reactor.grids import IndexLocation, MultiIndexLocation
if typing.TYPE_CHECKING:
from armi.reactor.blocks import Block
from armi.reactor.components import Component
def assemblyHasFuelPinPowers(a: typing.Iterable["Block"]) -> bool:
    """Determine if an assembly has pin powers.

    These are necessary for determining rotation and may or may
    not be present on all assemblies.

    Parameters
    ----------
    a : Assembly
        Assembly in question

    Returns
    -------
    bool
        If at least one fuel block in the assembly has nonzero pin powers.
    """
    # Avoid using Assembly.getChildrenWithFlags(Flags.FUEL) because that creates
    # an entire list when we may just need the first fuel block. The lazy
    # generator + any() stops at the first match. (The previous version
    # re-checked hasFlags(Flags.FUEL) inside any() after already filtering.)
    fuelBlocks = (b for b in a if b.hasFlags(Flags.FUEL))
    return any(np.any(b.p.linPowByPin) for b in fuelBlocks)
def assemblyHasFuelPinBurnup(a: typing.Iterable["Block"]) -> bool:
    """Determine if an assembly has pin burnups.

    These are necessary for determining rotation and may or may not
    be present on all assemblies.

    Parameters
    ----------
    a : Assembly
        Assembly in question

    Returns
    -------
    bool
        If a block with pin burnup was found.

    Notes
    -----
    Checks if any ``Component.p.pinPercentBu`` is set and contains non-zero data
    on a fuel component in the block.
    """

    # Avoid using Assembly.getChildrenWithFlags(Flags.FUEL)
    # because that creates an entire list where we may just need the first
    # fuel block. Same for avoiding Block.getChildrenWithFlags.
    def hasFuelFlags(o):
        # Shared predicate for both the block and component filters.
        return o.hasFlags(Flags.FUEL)

    for b in filter(hasFuelFlags, a):
        for c in filter(hasFuelFlags, b):
            if np.any(c.p.pinPercentBu):
                return True
    return False
def maxBurnupLocator(
    children: typing.Iterable["Component"],
) -> IndexLocation:
    """Find the location of the pin with highest burnup by looking at components.

    Parameters
    ----------
    children : iterable[Component]
        Iterator over children with a spatial locator and ``pinPercentBu`` parameter

    Returns
    -------
    IndexLocation
        Location of the pin with the highest burnup.

    Raises
    ------
    ValueError
        If no children have burnup, or the burnup and locators differ.
    """
    maxBu = 0
    maxLocation = None
    # Only consider children that carry both a locator and pin burnup data.
    withBurnupAndLocs = filter(
        lambda c: c.spatialLocator is not None and c.p.pinPercentBu is not None,
        children,
    )
    for child in withBurnupAndLocs:
        pinBu = child.p.pinPercentBu
        # A multi-pin component carries one location per pin; a single-pin
        # component has a lone locator.
        if isinstance(child.spatialLocator, MultiIndexLocation):
            locations = child.spatialLocator
        else:
            locations = [child.spatialLocator]
        if len(locations) != pinBu.size:
            # Bug fix: the counts in this message were previously swapped —
            # burnup count comes from the burnup array, location count from the
            # locator list.
            raise ValueError(
                f"Pin burnup (n={pinBu.size}) and pin locations (n={len(locations)}) "
                f"on {child} differ: {locations=} :: {pinBu=}"
            )
        myMaxIX = pinBu.argmax()
        myMaxBu = pinBu[myMaxIX]
        # Track the running maximum across all children.
        if myMaxBu > maxBu:
            maxBu = myMaxBu
            maxLocation = locations[myMaxIX]
    if maxLocation is not None:
        return maxLocation
    raise ValueError("No burnups found!")
def maxBurnupBlock(a: typing.Iterable["Block"]) -> "Block":
    """Find the block that contains the pin with the highest burnup.

    Parameters
    ----------
    a : iterable of Block
        Blocks to search, e.g. the children of an assembly.

    Returns
    -------
    Block
        The block with the largest nonzero ``p.percentBuPeak``.

    Raises
    ------
    ValueError
        If the sequence is empty or no block has nonzero peak burnup.
    """
    peakBu = operator.attrgetter("p.percentBuPeak")
    # Blocks with zero (or otherwise falsy) peak burnup are never candidates.
    candidates = (b for b in a if peakBu(b))
    try:
        return max(candidates, key=peakBu)
    except Exception as err:
        msg = f"Error finding max burnup block from {a}"
        runLog.error(msg)
        raise ValueError(msg) from err
================================================
FILE: armi/physics/fuelPerformance/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Generic fuel performance plugin package.
Fuel performance deals with addressing fuel system limits and predicting behaviors that are coupled
to other physics within the reactor. Often fuel performance models address chemical, thermal and
mechanical behaviors of the fuel system.
The following general phenomena fall into the fuel performance category of physics for solid fuel
(e.g., SFR, LWR, TRISO):
* chemical degradation on the inside of fuel cladding such as fuel-clad chemical interaction (FCCI)
* corrosion or erosion processes on the outside of the fuel cladding
* the fuel-clad mechanical interaction (FCMI) resulting in cladding stress and strain
* pressurization of the fuel pin due to released fission gases
* high temperatures of the fuel which affect material properties and feedback during accident
scenarios
Fuel performance is typically coupled with thermal analysis because the thermal conditions of the
fuel affects the performance and properties of the fuel change with temperature and burnup.
In many cases, fuel performance is coupled with neutronic analysis as well, because the fission
gases are strong neutron absorbers. In some reactors, significant composition changes during
irradiation can influence neutronics as well (e.g. sodium thermal bond being squeezed out of pins).
Finally, fuel temperatures impact the Doppler reactivity coefficient.
"""
from armi.physics.fuelPerformance.plugin import FuelPerformancePlugin # noqa: F401
================================================
FILE: armi/physics/fuelPerformance/executers.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Baseline fuel performance related executers and options.
These can be subclassed in fuel performance plugins to perform
fuel performance physics calculations.
Fuel performance is described in the
:py:mod:`Fuel Performance subpackage <armi.physics.fuelPerformance>`.
"""
from armi.physics import executers
from armi.physics.fuelPerformance.settings import (
CONF_AXIAL_EXPANSION,
CONF_BOND_REMOVAL,
CONF_CLADDING_STRAIN,
CONF_CLADDING_WASTAGE,
CONF_FGR_REMOVAL,
CONF_FGYF,
CONF_FUEL_PERFORMANCE_ENGINE,
)
class FuelPerformanceOptions(executers.ExecutionOptions):
    """Options relevant to all fuel performance engines.

    Attributes mirror the generic fuel performance settings and are filled in
    by :meth:`fromUserSettings`.
    """

    def __init__(self, label=None):
        executers.ExecutionOptions.__init__(self, label)
        self.fuelPerformanceEngine = None
        self.axialExpansion = None
        self.bondRemoval = None
        self.fissionGasRemoval = None
        self.claddingWastage = None
        self.claddingStrain = None
        # Bug fix: previously only assigned in fromUserSettings(), so the
        # attribute did not exist on freshly-constructed options objects.
        self.fissionGasYieldFraction = None

    def fromUserSettings(self, cs):
        """Copy relevant settings values from cs into this object."""
        self.fuelPerformanceEngine = cs[CONF_FUEL_PERFORMANCE_ENGINE]
        self.axialExpansion = cs[CONF_AXIAL_EXPANSION]
        self.bondRemoval = cs[CONF_BOND_REMOVAL]
        self.fissionGasRemoval = cs[CONF_FGR_REMOVAL]
        self.claddingWastage = cs[CONF_CLADDING_WASTAGE]
        self.claddingStrain = cs[CONF_CLADDING_STRAIN]
        self.fissionGasYieldFraction = cs[CONF_FGYF]

    def fromReactor(self, reactor):
        """Load options from reactor. The generic options read nothing from the reactor."""
class FuelPerformanceExecuter(executers.DefaultExecuter):
    """
    Prep, execute, and process a fuel performance solve.

    This uses the ``DefaultExecuter`` with the hope that most
    subclasses can use that run loop. As more fuel performance plugins are
    built we can reconsider this hierarchy.
    """

    def __init__(self, options, reactor):
        # Delegate straight to DefaultExecuter; no extra state is held here.
        executers.DefaultExecuter.__init__(self, options, reactor)
================================================
FILE: armi/physics/fuelPerformance/parameters.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parameter definitions for fuel performance plugins."""
from armi.reactor import parameters
from armi.reactor.blocks import Block
from armi.reactor.parameters import ParamLocation
from armi.utils import units
def getFuelPerformanceParameterDefinitions():
    """Return ParameterDefinitionCollections for each appropriate ArmiObject.

    Returns
    -------
    dict
        Maps the ``Block`` class to its fuel performance parameter definitions.
    """
    return {Block: _getFuelPerformanceBlockParams()}
def _getFuelPerformanceBlockParams():
    """Build the block-level parameter definitions for fuel performance.

    Returns
    -------
    parameters.ParameterDefinitionCollection
        Block parameters for gas release, bond removal, wastage, strain,
        growth, temperatures, and porosity.
    """
    pDefs = parameters.ParameterDefinitionCollection()
    with pDefs.createBuilder(default=0.0, location=ParamLocation.AVERAGE) as pb:
        pb.defParam(
            "fuelCladLocked",
            units=units.UNITLESS,
            default=False,
            description="Boolean to indicate if the fuel is locked with the clad."
            " This is used to determine the expansion constraints for the fuel during"
            " thermal and/or burn-up expansion of the fuel and cladding materials.",
        )

        # Setter validates the value is a physical fraction in [0.0, 1.0]
        # before storing it on the parameter collection.
        def gasReleaseFraction(self, value):
            if value < 0.0 or value > 1.0:
                raise ValueError(f"Cannot set a gas release fraction of {value} outside of the bounds of [0.0, 1.0]")
            self._p_gasReleaseFraction = value

        pb.defParam(
            "gasReleaseFraction",
            setter=gasReleaseFraction,
            units=units.UNITLESS,
            description="Fraction of generated fission gas that no longer exists in the block.",
            categories=["eq cumulative shift"],
        )

        # Same [0.0, 1.0] bounds check as gasReleaseFraction.
        def bondRemoved(self, value):
            if value < 0.0 or value > 1.0:
                raise ValueError(f"Cannot set a bond removed of {value} outside of the bounds of [0.0, 1.0]")
            self._p_bondRemoved = value

        pb.defParam(
            "bondRemoved",
            setter=bondRemoved,
            units=units.UNITLESS,
            description="Fraction of thermal bond between fuel and clad that has been pushed out.",
            categories=["eq cumulative shift"],
        )
        pb.defParam(
            "cladWastage",
            units=units.MICRONS,
            description="Total cladding wastage from inner and outer surfaces.",
            location=ParamLocation.AVERAGE,
            categories=["eq cumulative shift"],
        )
        pb.defParam(
            "totalCladStrain",
            units=units.PERCENT,
            description="Total diametral clad strain.",
            categories=["eq cumulative shift"],
        )
        pb.defParam(
            "axialGrowthPct",
            units=units.PERCENT,
            description="Axial growth percentage",
            categories=["eq cumulative shift"],
        )
        pb.defParam(
            "fpPeakFuelTemp",
            units=units.DEGC,
            description="Fuel performance calculated peak fuel temperature.",
            location=ParamLocation.AVERAGE,
        )
        pb.defParam(
            "fpAveFuelTemp",
            units=units.DEGC,
            description="Fuel performance calculated average fuel temperature.",
            location=ParamLocation.AVERAGE,
        )
        pb.defParam(
            "gasPorosity",
            units=units.UNITLESS,
            description="Fraction of fuel volume that is occupied by gas pores",
            default=0.0,
            categories=["eq cumulative shift"],
        )
        pb.defParam(
            "liquidPorosity",
            units=units.UNITLESS,
            description="Fraction of fuel volume that is occupied by liquid filled pores",
            default=0.0,
        )
    return pDefs
================================================
FILE: armi/physics/fuelPerformance/plugin.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic Fuel Performance Plugin."""
from armi import interfaces, plugins
from armi.physics.fuelPerformance import settings
# Position of this plugin's interfaces in the standard ARMI interface stack:
# they share the CROSS_SECTIONS slot.
ORDER = interfaces.STACK_ORDER.CROSS_SECTIONS
class FuelPerformancePlugin(plugins.ArmiPlugin):
    """Plugin for fuel performance."""

    @staticmethod
    @plugins.HOOKIMPL
    def exposeInterfaces(cs):
        """Expose the fuel performance interfaces."""
        # The generic plugin provides no interfaces of its own.
        return []

    @staticmethod
    @plugins.HOOKIMPL
    def defineSettings():
        """Define settings for fuel performance."""
        return settings.defineSettings()

    @staticmethod
    @plugins.HOOKIMPL
    def defineSettingsValidators(inspector):
        """Define settings inspections for fuel performance."""
        return settings.defineValidators(inspector)

    @staticmethod
    @plugins.HOOKIMPL
    def defineParameters():
        """Define parameters for the plugin."""
        # Local import defers loading the parameters module until the hook is
        # called — presumably to avoid import cycles; confirm before changing.
        from armi.physics.fuelPerformance import parameters

        return parameters.getFuelPerformanceParameterDefinitions()
================================================
FILE: armi/physics/fuelPerformance/settings.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Settings related to fuel performance."""
from armi.settings import setting
from armi.settings.settingsValidation import Query
# Names of the case settings defined by this module (see defineSettings below).
CONF_AXIAL_EXPANSION = "axialExpansion"
CONF_BOND_REMOVAL = "bondRemoval"
CONF_CLADDING_STRAIN = "claddingStrain"
CONF_CLADDING_WASTAGE = "claddingWastage"
CONF_FGR_REMOVAL = "fgRemoval"  # fission gas removal
CONF_FGYF = "fissionGasYieldFraction"
CONF_FUEL_PERFORMANCE_ENGINE = "fuelPerformanceEngine"
def defineSettings():
    """Define generic fuel performance settings.

    Returns
    -------
    list of setting.Setting
        Settings shared by all generic fuel performance engines.
    """
    return [
        setting.Setting(
            CONF_FUEL_PERFORMANCE_ENGINE,
            default="",
            options=[""],
            label="Fuel Performance Engine",
            description=(
                "Fuel performance engine that determines fission gas removal, bond removal, axial growth, "
                "wastage, and cladding strain."
            ),
        ),
        setting.Setting(
            CONF_FGYF,
            default=0.25,
            label="Fission Gas Yield Fraction",
            description=(
                "The fraction of gaseous atoms produced per fission event, assuming a fission product yield of 2.0"
            ),
        ),
        setting.Setting(
            CONF_AXIAL_EXPANSION,
            default=False,
            label="Fuel Axial Expansion",
            description="Perform axial fuel expansion. This will adjust fuel block lengths.",
        ),
        setting.Setting(
            CONF_BOND_REMOVAL,
            default=False,
            label="Thermal Bond Removal",
            description="Toggles fuel performance bond removal. This will remove thermal bond from the fuel.",
        ),
        setting.Setting(
            CONF_FGR_REMOVAL,
            default=False,
            label="Fission Gas Removal",
            description="Toggles fuel performance fission gas removal. This will remove fission gas from the fuel.",
        ),
        setting.Setting(
            CONF_CLADDING_WASTAGE,
            default=False,
            label="Cladding Wastage",
            description="Evaluate cladding wastage. ",
        ),
        setting.Setting(
            CONF_CLADDING_STRAIN,
            default=False,
            label="Cladding Strain",
            description="Evaluate cladding strain. ",
        ),
    ]
def defineValidators(inspector):
    """Define settings inspections for generic fuel performance.

    Warns when a fuel performance behavior is enabled without an engine.
    """

    def _behaviorSelectedWithoutEngine():
        # True when any behavior toggle is on but no engine has been chosen.
        behaviorSettings = (
            CONF_AXIAL_EXPANSION,
            CONF_BOND_REMOVAL,
            CONF_FGR_REMOVAL,
            CONF_CLADDING_WASTAGE,
            CONF_CLADDING_STRAIN,
        )
        anyBehavior = any(inspector.cs[name] for name in behaviorSettings)
        return anyBehavior and inspector.cs[CONF_FUEL_PERFORMANCE_ENGINE] == ""

    return [
        Query(
            _behaviorSelectedWithoutEngine,
            "A fuel performance behavior has been selected but no fuel performance engine is selected.",
            "",
            inspector.NO_ACTION,
        ),
    ]
================================================
FILE: armi/physics/fuelPerformance/tests/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
================================================
FILE: armi/physics/fuelPerformance/tests/test_executers.py
================================================
# Copyright 2021 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for generic fuel performance executers."""
import unittest
from armi.physics.fuelPerformance.executers import (
CONF_BOND_REMOVAL,
FuelPerformanceOptions,
)
from armi.settings.caseSettings import Settings
class TestFuelPerformanceOptions(unittest.TestCase):
    """Verify the generic fuel performance options object."""

    def test_fuelPerformanceOptions(self):
        """Options should carry their label and pick up values from case settings."""
        label = "test_fuelPerformanceOptions"
        options = FuelPerformanceOptions(label)
        self.assertEqual(options.label, label)

        cs = Settings()
        options.fromUserSettings(cs)
        self.assertEqual(options.bondRemoval, cs[CONF_BOND_REMOVAL])
================================================
FILE: armi/physics/fuelPerformance/tests/test_fuelPerformancePlugin.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for generic fuel performance plugin."""
from armi.physics.fuelPerformance.plugin import FuelPerformancePlugin
from armi.tests.test_plugins import TestPlugin
class TestFuelPerformancePlugin(TestPlugin):
    """Run the shared plugin test suite against the fuel performance plugin."""

    # The TestPlugin base class exercises whatever plugin class is set here.
    plugin = FuelPerformancePlugin
================================================
FILE: armi/physics/fuelPerformance/tests/test_fuelPerformanceSymmetry.py
================================================
# Copyright 2025 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Audit symmetry-aware parameters in fuel performance.
See Also
--------
armi.testing.symmetryTesting
"""
from armi.physics.fuelPerformance.parameters import getFuelPerformanceParameterDefinitions
from armi.reactor.blocks import Block
from armi.testing import symmetryTesting
class TestFPParamSymmetry(symmetryTesting.BasicArmiSymmetryTestHelper):
    """Audit symmetry awareness of the fuel performance block parameters."""

    def setUp(self):
        # Collect this plugin's block parameter definitions for the helper.
        pluginParameters = getFuelPerformanceParameterDefinitions()
        self.blockParamsToTest = pluginParameters[Block]
        # These two parameters have setters that reject values outside [0, 1],
        # so override the helper's default test value with a legal fraction.
        self.parameterOverrides = {
            "gasReleaseFraction": 0.5,
            "bondRemoved": 0.5,
        }
        # Base-class setUp must run after the attributes above are assigned.
        super().setUp()
================================================
FILE: armi/physics/fuelPerformance/tests/test_fuelPerformanceUtils.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for fuel performance utilities."""
import unittest
from armi.physics.fuelPerformance import utils
from armi.reactor.flags import Flags
from armi.reactor.tests import test_blocks
class TestFuelPerformanceUtils(unittest.TestCase):
    """Tests for the generic fuel performance utilities."""

    def test_applyFuelDisplacement(self):
        # Growing the radius by d grows the outer diameter by 2*d.
        displacement = 0.01
        block = test_blocks.loadTestBlock()
        fuel = block.getComponent(Flags.FUEL)
        originalHotODInCm = fuel.getDimension("od")
        utils.applyFuelDisplacement(block, displacement)
        finalHotODInCm = fuel.getDimension("od")
        self.assertAlmostEqual(finalHotODInCm, originalHotODInCm + 2 * displacement)

    def test_gasConductivityCorrection_morph0(self):
        temp = 500  # C
        porosity = 0.4
        # No correction
        chi = utils.gasConductivityCorrection(temp, porosity, 0)
        ref = 1.0
        self.assertAlmostEqual(chi, ref, 5)

    def test_gasConductivityCorrection_morph1(self):
        temp = 500  # C
        porosity = 0.4
        # Spherical porosity, Bauer equation (epsilon = 1.00)
        chi = utils.gasConductivityCorrection(temp, porosity, 1)
        ref = (1.0 - porosity) ** (1.5 * 1.00)
        self.assertAlmostEqual(chi, ref, 5)

    def test_gasConductivityCorrection_morph2(self):
        temp = 500  # C
        porosity = 0.4
        # Irregular Porosity, Bauer equation (epsilon = 1.72)
        chi = utils.gasConductivityCorrection(temp, porosity, 2)
        ref = (1.0 - porosity) ** (1.5 * 1.72)
        self.assertAlmostEqual(chi, ref, 5)

    def test_gasConductivityCorrection_morph3(self):
        temp = 500  # C
        porosity = 0.4
        # Mixed Morphology, low temp (below 660 C -> irregular exponent)
        chi = utils.gasConductivityCorrection(temp, porosity, 3)
        ref = (1.0 - porosity) ** (1.5 * 1.72)
        self.assertAlmostEqual(chi, ref, 5)
        # Mixed Morphology, high temp (above 660 C -> spherical exponent)
        temp = 700
        chi = utils.gasConductivityCorrection(temp, porosity, 3)
        ref = (1.0 - porosity) ** (1.5 * 1.00)
        self.assertAlmostEqual(chi, ref, 5)

    def test_gasConductivityCorrection_morph4(self):
        temp = 500  # C
        porosity = 0.4
        # maxwell-eucken
        chi = utils.gasConductivityCorrection(temp, porosity, 4)
        ref = (1.0 - porosity) / (1.0 + 1.5 * porosity)
        self.assertAlmostEqual(chi, ref, 5)
================================================
FILE: armi/physics/fuelPerformance/utils.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fuel performance utilities."""
from armi.reactor.flags import Flags
def applyFuelDisplacement(block, displacementInCm):
    r"""
    Expand the fuel radius in a pin by a number of cm.

    Assumes there's thermal bond in it to displace. The fuel dimension grows
    while its mass is conserved by scaling number densities. The bond mass is
    not conserved; it is assumed to be pushed up into the plenum but the
    modeling of this is not done yet by this method.

    .. warning:: A 0.5% buffer is included to avoid overlaps. This should be analyzed
        in detail as a methodology before using in any particular analysis.

    .. math::
        n V = n\prime V\prime

        n\prime = \frac{V}{V\prime} n
    """
    fuel = block.getComponent(Flags.FUEL)
    clad = block.getComponent(Flags.CLAD)
    startingHotOD = fuel.getDimension("od")
    # do not swell past cladding ID! (actually leave 0.5% buffer for thermal expansion)
    cladLimit = clad.getDimension("id") * 0.995
    grownOD = startingHotOD + displacementInCm * 2
    newHotOD = min(cladLimit, grownOD)
    fuel.setDimension("od", newHotOD, retainLink=True, cold=False)
    # reduce number density of fuel to conserve number of atoms (and mass):
    # density scales with the inverse of the area (OD squared) ratio.
    fuel.changeNDensByFactor(startingHotOD**2 / newHotOD**2)
def gasConductivityCorrection(tempInC: float, porosity: float, morphology: int = 2) -> float:
    """
    Calculate the correction to conductivity for a porous, gas-filled solid.

    Parameters
    ----------
    tempInC : float
        temperature in celsius (only used by the mixed-morphology model, option 3)
    porosity : float
        fraction of open/total volume
    morphology : int, optional
        correlation to use regarding pore morphology (default 2 is irregular
        porosity for conservatism)

    Returns
    -------
    chi : float
        correction to conductivity due to porosity (should be multiplied)

    Raises
    ------
    ValueError
        If ``morphology`` is not one of the supported options (0-4). Previously
        an unsupported value fell through every branch and crashed with an
        ``UnboundLocalError`` on ``chi``.

    Notes
    -----
    Morphology is treated different by different models:

        0, no porosity correction
        1, bauer equation, spherical porosity
        2, bauer equation, irregular porosity
        3, bauer equation, mixed morphology, above 660, spherical. Below 660, irregular
        4, maxwell-eucken equation, beta=1.5

    Source1 : In-Pile Measurement of the Thermal Conductivity of Irradiated Metallic Fuel, T.H. Bauer J.W. Holland.
    Nuclear Technology, Vol. 110, 1995. Pages 407-421
    Source2 : The Porosity Dependence of the Thermal Conductivity for Nuclear Fuels, G. Ondracek B. Schulz.
    Journal of Nuclear Materials, Vol. 46, 1973. Pages 253-258
    """
    if morphology == 0:
        chi = 1.0
    elif morphology == 1:
        # Bauer equation, spherical pore shape factor
        epsilon = 1.0
        chi = (1.0 - porosity) ** ((3.0 / 2.0) * epsilon)
    elif morphology == 2:
        # Bauer equation, irregular pore shape factor
        epsilon = 1.72
        chi = (1.0 - porosity) ** ((3.0 / 2.0) * epsilon)
    elif morphology == 3:
        # mixed morphology: irregular pores below 660 C, spherical at/above 660 C
        if tempInC < 660:
            epsilon = 1.72
        else:
            epsilon = 1.00
        chi = (1.0 - porosity) ** ((3.0 / 2.0) * epsilon)
    elif morphology == 4:
        # Maxwell-Eucken equation with beta = 1.5
        chi = (1.0 - porosity) / (1.0 + 1.5 * porosity)
    else:
        raise ValueError(f"Unrecognized porosity morphology option: {morphology}")
    return chi
================================================
FILE: armi/physics/neutronics/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The neutronics physics package in the ARMI framework.
Neutronics encompasses the modeling of nuclear chain reactions and their associated transmutation
and decay.
"""
# ruff: noqa: F401
from enum import IntEnum
from armi.physics.neutronics.const import (
ALL,
FLUXFILES,
GAMMA,
INPUTOUTPUT,
NEUTRON,
NEUTRONGAMMA,
RESTARTFILES,
)
from armi.physics.neutronics.plugin import NeutronicsPlugin
# ARC and CCCC cross section file format names
COMPXS = "COMPXS"
PMATRX = "PMATRX"
GAMISO = "GAMISO"
PMATRX_EXT = "pmatrx"
GAMISO_EXT = "gamiso"
ISOTXS = "ISOTXS"
DIF3D = "DIF3D"
# Constants for neutronics calculation types
ADJOINT_CALC = "adjoint"
REAL_CALC = "real"
ADJREAL_CALC = "both"
# Constants for boundary conditions
# All external boundary conditions are set to zero outward current
INFINITE = "Infinite"
# "Planar" external boundaries conditions are set to zero outward current
REFLECTIVE = "Reflective"
# Generalized boundary conditions D * PHI PRIME + A * PHI = 0 where A is user-specified constant,
# D is the diffusion coefficient, PHI PRIME and PHI are the outward current and flux at the
# external boundaries.
GENERAL_BC = "Generalized"
# The following boundary conditions are three approximations of the vacuum boundary condition
# in diffusion theory.
# 'Extrapolated': sets A to 0.4692 (in generalized BC) to have the flux vanishing at
# 0.7104*transport mean free path through linear extrapolation. Derived for plane
# geometries - should be valid for complex geometries unless radius of curvature is
# comparable to the mean free path.
# 'ZeroSurfaceFlux': flux vanishes at the external boundary.
# 'ZeroInwardCurrent': set A to 0.5 (in generalized BC) to have Jminus = 0 at the external boundaries.
EXTRAPOLATED = "Extrapolated"
ZEROFLUX = "ZeroSurfaceFlux"
ZERO_INWARD_CURRENT = "ZeroInwardCurrent"
# Common settings checks
def gammaTransportIsRequested(cs):
    """
    Check if gamma transport was requested by the user.

    Arguments
    ---------
    cs : ARMI settings object
        Object containing the default and user-specified ARMI settings controlling the simulation

    Returns
    -------
    flag : bool
        Returns true if gamma transport is requested.
    """
    # local import avoids a circular import at module load time
    from armi.physics.neutronics.settings import CONF_GLOBAL_FLUX_ACTIVE

    activeFluxSetting = cs[CONF_GLOBAL_FLUX_ACTIVE]
    return GAMMA in activeFluxSetting
def gammaXsAreRequested(cs):
    """
    Check if gamma cross-sections generation was requested by the user.

    Arguments
    ---------
    cs : ARMI settings object
        Object containing the default and user-specified ARMI settings controlling the simulation.

    Returns
    -------
    flag : bool
        Returns true if gamma cross section generation is requested.
    """
    # local import avoids a circular import at module load time
    from armi.physics.neutronics.settings import CONF_GEN_XS

    genXsSetting = cs[CONF_GEN_XS]
    return GAMMA in genXsSetting
def adjointCalculationRequested(cs):
    """Return true if an adjoint calculation is requested based on the ``CONF_NEUTRONICS_TYPE`` setting."""
    from armi.physics.neutronics.settings import CONF_NEUTRONICS_TYPE

    # adjoint-only or combined real+adjoint both require the adjoint solve
    return cs[CONF_NEUTRONICS_TYPE] in (ADJOINT_CALC, ADJREAL_CALC)
def realCalculationRequested(cs):
    """
    Return true if a real (forward) calculation is requested based on the
    ``CONF_NEUTRONICS_TYPE`` setting.

    Notes
    -----
    Uses the module-level constants ``REAL_CALC`` ("real") and ``ADJREAL_CALC``
    ("both") instead of bare string literals, for consistency with
    :func:`adjointCalculationRequested`. Behavior is unchanged.
    """
    from armi.physics.neutronics.settings import CONF_NEUTRONICS_TYPE

    return cs[CONF_NEUTRONICS_TYPE] in [REAL_CALC, ADJREAL_CALC]
class LatticePhysicsFrequency(IntEnum):
    """
    How often lattice physics (cross section) updates are automatically triggered.

    Members (ordered from least to most frequent):

    * ``never`` -- never automatically trigger lattice physics (a custom script could still trigger it)
    * ``BOL`` -- beginning-of-life only (c0n0)
    * ``BOC`` -- beginning of each cycle (c*n0)
    * ``everyNode`` -- every interaction node (c*n*)
    * ``firstCoupledIteration`` -- every node, plus the first coupled iteration at each node
    * ``all`` -- every node and every coupled iteration

    Notes
    -----
    ``firstCoupledIteration`` only updates the cross sections during the first coupled
    iteration, but not on any subsequent iterations. This may be an appropriate
    approximation in some cases to save compute time, but each individual user should
    give careful consideration to whether this is the behavior they want for a
    particular application. The main purpose of this setting is to capture a large
    change in temperature distribution when running a snapshot at a different
    power/flow condition than the original state being loaded from the database.
    """

    never = 0
    BOL = 1
    BOC = 2
    everyNode = 3
    firstCoupledIteration = 4
    all = 5
================================================
FILE: armi/physics/neutronics/const.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Constants and Enums.
In an independent file to minimize circular imports.
"""
CONF_CROSS_SECTION = "crossSectionControl"
#
# FAST_FLUX_THRESHOLD_EV is the energy threshold above which neutrons are considered "fast" [eV]
#
FAST_FLUX_THRESHOLD_EV = 100000.0 # eV
# CROSS SECTION LIBRARY GENERATION CONSTANTS
MAXIMUM_XS_LIBRARY_ENERGY = 1.4190675e7 # eV
ULTRA_FINE_GROUP_LETHARGY_WIDTH = 1.0 / 120.0
# LOWEST_ENERGY_EV cannot be zero due to integrating lethargy, and lethargy is undefined at 0.0
# The lowest lower boundary of many group structures such as any WIMS, SCALE or CASMO
# is 1e-5 eV, therefore it is chosen here. This number must be lower than all of the
# defined group structures. The chosen 1e-5 eV is rather arbitrary but expected to be low
# enough to support other group structures. For fast reactors, there will be
# no sensitivity at all to this value since there is no flux in this region.
LOWEST_ENERGY_EV = 1.0e-5
# Highest energy will typically depend on what physics code is being run, but this is
# a decent round number to use.
HIGH_ENERGY_EV = 1.5e07
# Particle types constants
GAMMA = "Gamma"
NEUTRON = "Neutron"
NEUTRONGAMMA = "Neutron and Gamma"
# Constants for neutronics setting controlling saving of files after neutronics calculation
# See setting 'neutronicsOutputsToSave'
ALL = "All"
RESTARTFILES = "Restart files"
INPUTOUTPUT = "Input/Output"
FLUXFILES = "Flux files"
================================================
FILE: armi/physics/neutronics/crossSectionGroupManager.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Cross section group manager handles burnup-dependent properties of microscopic cross sections.
Blocks are specified to be in a certain *cross section type* and *burnup group*. Together,
these form the *cross section group*. By advancing blocks by their burnup into
different groups, we capture some of the physical effects related to depletion.
XS types are typically single capital letters like A
BU groups are also capital letters.
A XS group of AB is in XS type ``A`` and burnup group ``B``.
This module groups the blocks according to their XS groups and can determine
which block is to be deemed **representative** of an entire set of blocks in a particular xs group.
Then the representative block is sent to a lattice physics kernel for actual physics
calculations.
Generally, the cross section manager is a attribute of the lattice physics code interface
Examples
--------
csm = CrossSectionGroupManager()
csm._setBuGroupBounds(cs['buGroups'])
csm._setTempGroupBounds(cs['tempGroups']) # or empty list
csm._addXsGroupsFromBlocks(blockList)
csm.createRepresentativeBlocks()
representativeBlockList = csm.representativeBlocks.values()
blockThatRepresentsBA = csm.representativeBlocks['BA']
The class diagram is provided in `xsgm-class-diagram`_
.. _xsgm-class-diagram:
.. pyreverse:: armi.physics.neutronics.crossSectionGroupManager
:align: center
:alt: XSGM class diagram
:width: 90%
Class inheritance diagram for :py:mod:`crossSectionGroupManager`.
"""
import collections
import copy
import os
import string
import sys
import numpy as np
from armi import context, interfaces, runLog
from armi.physics.neutronics import LatticePhysicsFrequency
from armi.physics.neutronics.const import CONF_CROSS_SECTION
from armi.reactor import flags
from armi.reactor.components import basicShapes
from armi.reactor.flags import Flags
from armi.utils import safeCopy
from armi.utils.units import C_TO_K, TRACE_NUMBER_DENSITY
ORDER = interfaces.STACK_ORDER.BEFORE + interfaces.STACK_ORDER.CROSS_SECTIONS
def describeInterfaces(cs):
    """Function for exposing interface(s) to other code."""
    from armi.physics.neutronics.settings import CONF_NEUTRONICS_KERNEL

    # MCNP runs do not use the cross section group manager
    if "MCNP" in cs[CONF_NEUTRONICS_KERNEL]:
        return None
    return (CrossSectionGroupManager, {})
_ALLOWABLE_XS_TYPE_LIST = list(string.ascii_uppercase + string.ascii_lowercase)
def getXSTypeNumberFromLabel(xsTypeLabel: str) -> int:
    """
    Convert a XSID label (e.g. 'AA') to an integer.

    Each character becomes its two-digit ASCII ordinal and the digits are
    concatenated (e.g. 'AA' -> 6565). Useful for visualizing XS type in XTVIEW.
    2-digit labels are supported when there is only one burnup group.
    """
    digitPairs = [f"{ord(character):02d}" for character in xsTypeLabel]
    return int("".join(digitPairs))
def getXSTypeLabelFromNumber(xsTypeNumber: int) -> str:
    """
    Convert a XSID number (e.g. 65) to an XS label (e.g. 'A').

    Useful for visualizing XS type in XTVIEW.
    2-digit labels are supported when there is only one burnup group.
    """
    try:
        if xsTypeNumber < ord("A"):
            raise ValueError(
                f"Cannot convert invalid xsTypeNumber `{xsTypeNumber}` to char. "
                "The number must be >= 65 (corresponding to 'A')."
            )
        if xsTypeNumber > ord("Z"):
            # two-character label: split the digit string into two 2-digit ordinals
            asDigits = str(xsTypeNumber)
            return chr(int(asDigits[:2])) + chr(int(asDigits[2:]))
        return chr(xsTypeNumber)
    except ValueError:
        runLog.error("Error converting {} to label.".format(xsTypeNumber))
        raise
def _checkConsistentNuclides(thisComp, repComp):
    """
    Check that thisComp has the same set of nuclides as the analogous component in the
    representative block.

    This check is somewhat permissive in that it allows for the two components to differ
    in nuclides where one of them is at a zero number density.

    Parameters
    ----------
    thisComp
        Component from a candidate block being compared.
    repComp
        The analogous component from the representative block.

    Raises
    ------
    ValueError
        If the components differ in any of the key nuclides at non-zero density.

    Warning
    -------
    This only checks ``consistentNucs`` for ones that are important in SFRs.
    """
    consistentNucs = {"PU239", "U238", "U235", "U234", "FE56", "NA23", "O16"}
    # ignore anything with zero number density
    theseNucs = set(nuc for nuc, ndens in thisComp.getNumberDensities().items() if ndens > 0.0)
    thoseNucs = set(nuc for nuc, ndens in repComp.getNumberDensities().items() if ndens > 0.0)
    # in the nuclide list of the component, but at a number density of 0.0
    # treat this more permissively -- i.e., it could be considered as either having or not having it
    theseNucsAtZero = set(nuc for nuc, ndens in thisComp.getNumberDensities().items() if ndens == 0.0)
    thoseNucsAtZero = set(nuc for nuc, ndens in repComp.getNumberDensities().items() if ndens == 0.0)
    # check for any differences between which `consistentNucs` the components have
    diffNucsNonZero = theseNucs.symmetric_difference(thoseNucs).intersection(consistentNucs)
    diffNucsAtZero = theseNucsAtZero.symmetric_difference(thoseNucsAtZero).intersection(consistentNucs)
    diffNucs = diffNucsNonZero - diffNucsAtZero
    if diffNucs:
        # NOTE: message fixed -- it previously printed ``thisComp`` twice and
        # labeled the representative *component* as a block.
        raise ValueError(
            f"Component {thisComp} in block {thisComp.parent} and component {repComp} in block "
            f"{repComp.parent} are in the same location, but nuclides differ by {diffNucs}. "
            f"\n{theseNucs} \n{thoseNucs}"
        )
class BlockCollection(list):
    """
    Controls which blocks are representative of a particular cross section type/BU group.

    This is a list with special methods.
    """

    def __init__(self, allNuclidesInProblem, validBlockTypes=None, averageByComponent=False):
        """
        Parameters
        ----------
        allNuclidesInProblem : list
            Names of all nuclides tracked in the problem; defines the ordering
            used in nuclide-temperature averaging.
        validBlockTypes : list of str, optional
            Block-type flag names eligible to represent this group; when falsy,
            all block types are considered valid candidates.
        averageByComponent : bool
            When True, averaging may be done per-component rather than smeared
            over the whole block (see subclasses).
        """
        list.__init__(self)
        self.allNuclidesInProblem = allNuclidesInProblem
        # name of the block parameter used to weight blocks (set by client code)
        self.weightingParam = None
        self.averageByComponent = averageByComponent
        # allowed to be independent of fuel component temperatures b/c Doppler
        self.avgNucTemperatures = {}
        self._validRepresentativeBlockTypes = None
        if validBlockTypes:
            self._validRepresentativeBlockTypes = []
            for t in validBlockTypes:
                self._validRepresentativeBlockTypes.append(Flags.fromString(t))

    def __repr__(self):
        return "<{} with {} blocks>".format(self.__class__.__name__, len(self))

    def _getNewBlock(self):
        """
        Create a new block instance.

        The new block is a deep copy of the first candidate block, renamed with
        an ``AVG_`` prefix and the collection's micro suffix.

        Notes
        -----
        Should only be used by average because of name (which may not matter)
        """
        newBlock = copy.deepcopy(self.getCandidateBlocks()[0])
        newBlock.name = "AVG_" + newBlock.getMicroSuffix()
        return newBlock

    def createRepresentativeBlock(self):
        """Generate a block that best represents all blocks in group."""
        # validate weighting before building; mixed zero/non-zero weights are an error
        self._checkValidWeightingFactors()
        representativeBlock = self._makeRepresentativeBlock()
        return representativeBlock

    def _makeRepresentativeBlock(self):
        # subclasses define what "representative" means for their collection
        raise NotImplementedError

    def _checkValidWeightingFactors(self):
        """
        Verify the validity of the weighting parameter.

        .. warning:: Don't mix unweighted blocks (flux=0) w/ weighted ones
        """
        if self.weightingParam is None:
            # no weighting parameter: treat all candidates as uniformly unweighted
            weights = [0.0] * len(self.getCandidateBlocks())
        else:
            weights = [block.p[self.weightingParam] for block in self.getCandidateBlocks()]
        anyNonZeros = any(weights)
        if anyNonZeros and not all(weights):
            # we have at least one non-zero entry and at least one zero. This is bad.
            # find the non-zero ones for debugging
            zeros = [block for block in self if not block.p[self.weightingParam]]
            runLog.error("Blocks with zero `{0}` include: {1}".format(self.weightingParam, zeros))
            raise ValueError(
                "{0} has a mixture of zero and non-zero weighting factors (`{1}`)\nSee stdout for details".format(
                    self, self.weightingParam
                )
            )

    def calcAvgNuclideTemperatures(self):
        r"""
        Calculate the average nuclide temperatures in this collection based on the blocks in the collection.

        If a nuclide is in multiple components, that's taken into consideration.

        .. math::
            T = \frac{\sum{n_i v_i T_i}}{\sum{n_i v_i}}

        where :math:`n_i` is a number density, :math:`v_i` is a volume, and :math:`T_i` is a temperature
        """
        self.avgNucTemperatures = {}
        # numerator (n*v*T) and denominator (n*v) arrays, one entry per nuclide
        nvt, nv = self._getNucTempHelper()
        for i, nuclide in enumerate(self.allNuclidesInProblem):
            nvtCurrent = nvt[i]
            nvCurrent = nv[i]
            # avoid dividing by zero for nuclides absent from this collection
            avgTemp = 0.0 if nvCurrent == 0.0 else nvtCurrent / nvCurrent
            self.avgNucTemperatures[nuclide] = avgTemp

    def _getNucTempHelper(self):
        """
        Get temperature averaging numerator and denominator for block collection.

        This is abstract; you must override it.
        """
        raise NotImplementedError

    def getWeight(self, block):
        """Get value of weighting function for this block."""
        # fall back to 1.0 so a zero-volume block still contributes
        vol = block.getVolume() or 1.0
        if not self.weightingParam:
            weight = 1.0
        else:
            # don't return 0
            weight = block.p[self.weightingParam] or 1.0
        return weight * vol

    def getCandidateBlocks(self):
        """
        Get blocks in this collection that are the valid representative type.

        Often, peripheral non-fissile blocks (reflectors, control, shields) need cross sections but
        cannot produce them alone. You can approximate their cross sections by placing them in certain cross
        section groups. However, we do not want these blocks to be included in the spectrum
        calculations that produce cross sections. Therefore the subset of valid representative
        blocks are used to compute compositions, temperatures, etc.

        .. tip:: The proper way to treat non-fuel blocks is to apply a leakage spectrum from fuel onto them.
        """
        return [b for b in self if b.hasFlags(self._validRepresentativeBlockTypes)]

    def _calcWeightedBurnup(self):
        """
        For a blockCollection that represents fuel, calculate the weighted average burnup.

        Notes
        -----
        - Only used for logging purposes
        - Burnup needs to be weighted by heavy metal mass instead of volume
        """
        weightedBurnup = 0.0
        totalWeight = 0.0
        for b in self:
            # self.getWeight(b) incorporates the volume as does mass, so divide by volume not to double-count
            weighting = b.p.massHmBOL * self.getWeight(b) / b.getVolume()
            totalWeight += weighting
            weightedBurnup += weighting * b.p.percentBu
        return 0.0 if totalWeight == 0.0 else weightedBurnup / totalWeight
class MedianBlockCollection(BlockCollection):
    """Returns the median burnup block. This is a simple and often accurate approximation."""

    def _makeRepresentativeBlock(self):
        """Get the median burnup block."""
        medianBlock = self._getMedianBlock()
        # copy so we can adjust LFPs w/o changing the global ones
        newBlock = copy.deepcopy(medianBlock)
        lfpCollection = medianBlock.getLumpedFissionProductCollection()
        if lfpCollection:
            # duplicate before setting the gas-removed fraction so the original
            # block's lumped fission products are left untouched
            lfpCollection = lfpCollection.duplicate()
            lfpCollection.setGasRemovedFrac(newBlock.p.gasReleaseFraction)
            newBlock.setLumpedFissionProducts(lfpCollection)
        else:
            runLog.warning("Representative block {0} has no LFPs".format(medianBlock))
        self.calcAvgNuclideTemperatures()
        return newBlock

    def _getNucTempHelper(self):
        """
        Return the Median block nuclide temperature terms.

        In this case, there's only one block to average, so return its averaging terms.

        See Also
        --------
        calcAvgNuclideTemperatures
        """
        medianBlock = self._getMedianBlock()
        return getBlockNuclideTemperatureAvgTerms(medianBlock, self.allNuclidesInProblem)

    def _getMedianBlock(self):
        """
        Return the median burnup Block.

        Build list of items for each block when sorted gives desired order

        Last item in each tuple is always the block itself (for easy retrieval).
        For instance, if you want the median burnup, this list would contain
        tuples of (burnup, blockName, block). Blockname is included so
        the order is consistent between runs when burnups are equal (e.g. 0).
        """
        info = []
        for b in self.getCandidateBlocks():
            # sort key: weighted burnup, tie-broken by block name for determinism
            info.append((b.p.percentBu * self.getWeight(b), b.getName(), b))
        info.sort()
        medianBlockData = info[len(info) // 2]
        return medianBlockData[-1]
class AverageBlockCollection(BlockCollection):
    """
    Block collection that builds a new block based on others in collection.

    Averages number densities, fission product yields, and fission gas
    removal fractions.

    .. impl:: Create representative blocks using volume-weighted averaging.
        :id: I_ARMI_XSGM_CREATE_REPR_BLOCKS0
        :implements: R_ARMI_XSGM_CREATE_REPR_BLOCKS

        This class constructs new blocks from an existing block list based on a volume-weighted
        average. Inheriting functionality from the abstract
        :py:class:`Reactor `
        object, this class will construct representative blocks using averaged parameters of all
        blocks in the given collection. Number density averages can be computed at a component level
        or at a block level by default. Average nuclide temperatures and burnup are also included
        when constructing a representative block.
    """

    def _makeRepresentativeBlock(self):
        """Generate a block that best represents all blocks in group."""
        newBlock = self._getNewBlock()
        lfpCollection = self._getLFP()
        newBlock.setLumpedFissionProducts(lfpCollection)
        # check if components are similar
        if self._performAverageByComponent():
            # set number densities and temperatures on a component basis
            for compIndex, c in enumerate(sorted(newBlock.getComponents())):
                c.setNumberDensities(self._getAverageComponentNumberDensities(compIndex))
                c.temperatureInC = self._getAverageComponentTemperature(compIndex)
        else:
            # smear the average composition over the whole block
            newBlock.setNumberDensities(self._getAverageNumberDensities())
        newBlock.p.percentBu = self._calcWeightedBurnup()
        newBlock.clearCache()
        self.calcAvgNuclideTemperatures()
        return newBlock

    def _getAverageNumberDensities(self):
        """
        Get weighted average number densities of the collection.

        Returns
        -------
        numberDensities : dict
            nucName, ndens data (atoms/bn-cm)
        """
        nuclides = self.allNuclidesInProblem
        blocks = self.getCandidateBlocks()
        weights = np.array([self.getWeight(b) for b in blocks])
        weights /= weights.sum()  # normalize by total weight
        ndens = weights.dot([b.getNuclideNumberDensities(nuclides) for b in blocks])
        return dict(zip(nuclides, ndens))

    def _getLFP(self):
        """Find lumped fission product collection (taken from the first candidate block)."""
        b = self.getCandidateBlocks()[0]
        return b.getLumpedFissionProductCollection()

    def _getNucTempHelper(self):
        """All candidate blocks are used in the average."""
        nvt = np.zeros(len(self.allNuclidesInProblem))
        nv = np.zeros(len(self.allNuclidesInProblem))
        for block in self.getCandidateBlocks():
            wt = self.getWeight(block)
            nvtBlock, nvBlock = getBlockNuclideTemperatureAvgTerms(block, self.allNuclidesInProblem)
            nvt += nvtBlock * wt
            nv += nvBlock * wt
        return nvt, nv

    def _getAverageComponentNumberDensities(self, compIndex):
        """
        Get weighted average number densities of a component in the collection.

        Returns
        -------
        numberDensities : dict
            nucName, ndens data (atoms/bn-cm)
        """
        blocks = self.getCandidateBlocks()
        weights = np.array([self.getWeight(b) for b in blocks])
        weights /= weights.sum()  # normalize by total weight
        # components are matched across blocks by their sorted position
        components = [sorted(b.getComponents())[compIndex] for b in blocks]
        nuclides = self._getAllNucs(components)
        ndens = weights.dot([c.getNuclideNumberDensities(nuclides) for c in components])
        return dict(zip(nuclides, ndens))

    def _getAverageComponentTemperature(self, compIndex):
        """
        Get weighted average component temperature for the collection.

        Notes
        -----
        Weighting is both by the block weight within the collection and the relative mass of the
        Component. The block weight is already scaled by the block volume, so we need to pull that
        out of the block weighting because it would effectively be double-counted in the component
        mass. b.getHeight() is proportional to block volume, so it is used here as a computationally
        cheaper proxy for scaling by block volume.

        Returns
        -------
        averageTemperature : float
            mass-and-weight-averaged component temperature (degrees C)
        """
        blocks = self.getCandidateBlocks()
        weights = np.array([self.getWeight(b) / b.getHeight() for b in blocks])
        weights /= weights.sum()  # normalize by total weight
        components = [sorted(b.getComponents())[compIndex] for b in blocks]
        weightedAvgComponentMass = sum(w * c.getMass() for w, c in zip(weights, components))
        if weightedAvgComponentMass == 0.0:
            # if there is no component mass (e.g., gap), do a regular average
            return np.mean(np.array([c.temperatureInC for c in components]))
        else:
            return (
                weights.dot(np.array([c.temperatureInC * c.getMass() for c in components])) / weightedAvgComponentMass
            )

    def _performAverageByComponent(self):
        """
        Check if block collection averaging can/should be performed by component.

        If the components of blocks in the collection are similar and the user has requested
        Component-level averaging, return True. Otherwise, return False.
        """
        if not self.averageByComponent:
            return False
        else:
            return self._checkBlockSimilarity()

    def _checkBlockSimilarity(self):
        """
        Check if blocks in the collection have similar components.

        Returns True only if every candidate block's sorted component flags match
        those of the reference (last candidate) block; otherwise a warning is
        logged and False is returned so densities are smeared at the block level.

        Notes
        -----
        The previous implementation used a ``for``/``else`` on the inner loop,
        which returned True after examining only the first block in the
        collection; mismatches in any later block were never detected. Now all
        blocks are compared before declaring similarity.
        """
        cFlags = dict()
        for b in self.getCandidateBlocks():
            cFlags[b] = [c.p.flags for c in sorted(b.getComponents())]
        # use the last candidate block as the comparison reference
        refB = b
        refFlags = cFlags[refB]
        for b, compFlags in cFlags.items():
            for c, refC in zip(compFlags, refFlags):
                if c != refC:
                    runLog.warning(
                        "Non-matching block in AverageBlockCollection!\n"
                        f"{refC} component flags in {refB} does not match {c} in {b}.\n"
                        f"Number densities will be smeared in representative block."
                    )
                    return False
        return True

    @staticmethod
    def _getAllNucs(components):
        """Iterate through components and get all unique nuclides."""
        nucs = set()
        for c in components:
            nucs = nucs.union(c.getNuclides())
        return sorted(list(nucs))
def getBlockNuclideTemperature(block, nuclide):
    """Return the average temperature for 1 nuclide."""
    nvt, nv = getBlockNuclideTemperatureAvgTerms(block, [nuclide])
    if nv > 0:
        return nvt / nv
    # nuclide is absent from the block; no meaningful temperature to report
    return 0.0
def getBlockNuclideTemperatureAvgTerms(block, allNucNames):
    """
    Compute terms (numerator, denominator) of average for this block.

    This volume-weights the densities by component volume fraction.

    It's important to count zero-density nuclides (i.e. ones like AM242 that are expected to build
    up) as trace values at the proper component temperatures.

    Parameters
    ----------
    block : Block
        Block whose component compositions and temperatures are combined.
    allNucNames : list of str
        Nuclide names fixing the ordering of the returned arrays.

    Returns
    -------
    nvt : np.ndarray
        Per-nuclide sum over components of number density * volume * temperature.
    nv : np.ndarray
        Per-nuclide sum over components of number density * volume.
    """

    def getNumberDensitiesWithTrace(component, allNucNames):
        """Needed to make sure temperature of 0-density nuclides in fuel get fuel temperature."""
        if component.p.nuclides is None:
            # no composition data on this component: contributes nothing
            return [0.0 for _nuc in allNucNames]
        # component.p.nuclides holds encoded (byte) names; encode for lookup
        allByteNucs = [nucName.encode() for nucName in allNucNames]
        ndens = []
        nucCopy = np.array(component.p.nuclides)
        nDensCopy = np.array(component.p.numberDensities)
        reverseIndex = {nuc: i for i, nuc in enumerate(nucCopy)}
        for nuc in allByteNucs:
            i = reverseIndex.get(nuc, -1)
            if i >= 0:
                # floor at trace density so 0-density nuclides present in the
                # component still pick up its temperature in the average
                ndens.append(max(nDensCopy[i], TRACE_NUMBER_DENSITY))
            else:
                ndens.append(0.0)
        return ndens

    vol = block.getVolume()
    components, volFracs = zip(*block.getVolumeFractions())
    # D = CxN matrix of number densities
    ndens = np.array([getNumberDensitiesWithTrace(c, allNucNames) for c in components])
    # C-length temperature array
    temperatures = np.array([c.temperatureInC for c in components])
    # multiply each component's values by volume frac, now NxC
    nvBlock = ndens.T * np.array(volFracs) * vol
    nvt = sum((nvBlock * temperatures).T)  # N-length array summing over components.
    nv = sum(nvBlock.T)  # N-length array
    return nvt, nv
class CylindricalComponentsAverageBlockCollection(AverageBlockCollection):
"""
Creates a representative block for the purpose of cross section generation with a one-
dimensional cylindrical model.
.. impl:: Create representative blocks using custom cylindrical averaging.
:id: I_ARMI_XSGM_CREATE_REPR_BLOCKS1
:implements: R_ARMI_XSGM_CREATE_REPR_BLOCKS
This class constructs representative blocks based on a volume-weighted average using
cylindrical blocks from an existing block list. Inheriting functionality from the abstract
:py:class:`Reactor `
object, this class will construct representative blocks using averaged parameters of all
blocks in the given collection. Number density averages are computed at a component level.
Nuclide temperatures from a median block-average temperature are used and the average burnup
is evaluated across all blocks in the block list.
Notes
-----
When generating the representative block within this collection, the geometry is checked against
all other blocks to ensure that the number of components are consistent. This implementation is
intended to be opinionated, so if a user attempts to put blocks that have geometric differences
then this will fail.
This selects a representative block based on the collection of candidates based on the median
Block-average temperatures as an assumption.
"""
def _getNewBlock(self):
newBlock = copy.deepcopy(self._selectCandidateBlock())
newBlock.name = "1D_CYL_AVG_" + newBlock.getMicroSuffix()
return newBlock
def _selectCandidateBlock(self):
"""Selects the candidate block with the median block-average temperature."""
info = []
for b in self.getCandidateBlocks():
info.append((b.getAverageTempInC(), b.getName(), b))
info.sort()
medianBlockData = info[len(info) // 2]
return medianBlockData[-1]
def _makeRepresentativeBlock(self):
"""Build a representative fuel block based on component number densities."""
repBlock = self._getNewBlock()
bWeights = [self.getWeight(b) for b in self.getCandidateBlocks()]
repBlock.p.percentBu = self._calcWeightedBurnup()
componentsInOrder = self._orderComponentsInGroup(repBlock)
for i, (c, allSimilarComponents) in enumerate(zip(sorted(repBlock), componentsInOrder)):
allNucsNames, densities = self._getAverageComponentNucs(allSimilarComponents, bWeights)
for nuc, aDensity in zip(allNucsNames, densities):
c.setNumberDensity(nuc, aDensity)
c.temperatureInC = self._getAverageComponentTemperature(i)
repBlock.clearCache()
self.calcAvgNuclideTemperatures()
return repBlock
@staticmethod
def _checkComponentConsistency(b, repBlock):
"""
Verify that all components being homogenized have same multiplicity and nuclides.
Raises
------
ValueError
When the components in a candidate block do not align with the components in the
representative Block. This check includes component area, component multiplicity, and
nuclide composition.
"""
if len(b) != len(repBlock):
raise ValueError(f"Blocks {b} and {repBlock} have differing number of components and cannot be homogenized")
# NOTE: We are using Fe-56 as a proxy for structure and Na-23 as proxy for coolant, which
# is undesirably SFR-centric. This should be generalized in the future, if possible.
for c, repC in zip(sorted(b), sorted(repBlock)):
_checkConsistentNuclides(c, repC)
if c.p.mult != repC.p.mult:
raise ValueError(
f"Component {repC} in block {repBlock} and component {c} in block {b} must have the same "
f"multiplicity, but they have {repC.p.mult} and {c.p.mult}, respectively."
)
def _getAverageComponentNucs(self, components, bWeights):
"""Compute average nuclide densities by block weights and component area fractions."""
allNucNames = self._getAllNucs(components)
densities = np.zeros(len(allNucNames))
totalWeight = 0.0
for c, bWeight in zip(components, bWeights):
weight = bWeight * c.getArea()
totalWeight += weight
densities += weight * np.array(c.getNuclideNumberDensities(allNucNames))
if totalWeight > 0.0:
weightedDensities = densities / totalWeight
else:
weightedDensities = np.zeros_like(densities)
return allNucNames, weightedDensities
def _orderComponentsInGroup(self, repBlock):
"""Order the components based on dimension and material type within the representative
Block.
"""
for b in self.getCandidateBlocks():
self._checkComponentConsistency(b, repBlock)
componentLists = [list(sorted(b)) for b in self.getCandidateBlocks()]
return [list(comps) for comps in zip(*componentLists)]
def _getNucTempHelper(self):
    """Accumulate nuclide temperature-averaging terms over all candidate blocks."""
    numNucs = len(self.allNuclidesInProblem)
    nvt = np.zeros(numNucs)
    nv = np.zeros(numNucs)
    for candidate in self.getCandidateBlocks():
        weight = self.getWeight(candidate)
        nvtTerm, nvTerm = getBlockNuclideTemperatureAvgTerms(candidate, self.allNuclidesInProblem)
        nvt += weight * nvtTerm
        nv += weight * nvTerm
    return nvt, nv
class CylindricalComponentsDuctHetAverageBlockCollection(CylindricalComponentsAverageBlockCollection):
    """
    Representative-block collection for a 1D cylindrical model in which all material
    inside the duct is homogenized.

    .. impl:: Create partially heterogeneous representative blocks.
        :id: I_ARMI_XSGM_CREATE_REPR_BLOCKS2
        :implements: R_ARMI_XSGM_CREATE_REPR_BLOCKS

        This class constructs representative blocks based on a volume-weighted average using
        cylindrical blocks from an existing block list, inheriting its behavior from
        ``CylindricalComponentsAverageBlockCollection``. Number density averages are computed
        at a component level, nuclide temperatures come from a median block-average
        temperature, and the average burnup is evaluated across all blocks in the block list.

        The average nuclide temperatures are calculated only for the homogenized region
        inside of the duct. For the non-homogenized regions, the MC2 writer uses the
        component temperatures.

    Notes
    -----
    The representative block itself is built exactly as in the parent collection; the only
    difference is that average nuclide temperatures are computed from the in-duct region only.
    """

    def _getNewBlock(self):
        # deep copy so the representative block never aliases an in-core block
        newBlock = copy.deepcopy(self._selectCandidateBlock())
        newBlock.name = "1D_CYL_DUCT_HET_AVG_" + newBlock.getMicroSuffix()
        return newBlock

    def _makeRepresentativeBlock(self):
        """Build a representative fuel block based on component number densities."""
        self.calcAvgNuclideTemperatures()
        return CylindricalComponentsAverageBlockCollection._makeRepresentativeBlock(self)

    def _getNucTempHelper(self):
        """Accumulate temperature-averaging terms using only the region inside the duct."""
        from armi.reactor.converters.blockConverters import stripComponents

        numNucs = len(self.allNuclidesInProblem)
        nvt = np.zeros(numNucs)
        nv = np.zeros(numNucs)
        for candidate in self.getCandidateBlocks():
            weight = self.getWeight(candidate)
            # strip the duct and intercoolant before averaging nuclide temperatures
            strippedBlock, _mixtureFlags = stripComponents(candidate, Flags.DUCT)
            nvtTerm, nvTerm = getBlockNuclideTemperatureAvgTerms(strippedBlock, self.allNuclidesInProblem)
            nvt += weight * nvtTerm
            nv += weight * nvTerm
        return nvt, nv
class SlabComponentsAverageBlockCollection(BlockCollection):
    """
    Creates a representative 1D slab block.

    Notes
    -----
    - Ignores lumped fission products since there is no foreseeable need for burn calculations in 1D
      slab geometry since it is used for low power neutronic validation.
    - Checks for consistent component dimensions for all blocks in a group and then creates a new
      Block.
    - Iterates through components of all blocks and calculates component average number densities.
      This calculation takes the first component of each block, averages the number densities, and
      applies this to the number density to the representative block.
    """

    def _getNewBlock(self):
        # The first candidate block serves as the geometric template; a deep copy keeps
        # the in-core block untouched.
        newBlock = copy.deepcopy(self.getCandidateBlocks()[0])
        newBlock.name = "1D_SLAB_AVG_" + newBlock.getMicroSuffix()
        return newBlock

    def _makeRepresentativeBlock(self):
        """Build a representative fuel block based on component number densities."""
        repBlock = self._getNewBlock()
        bWeights = [self.getWeight(b) for b in self.getCandidateBlocks()]
        repBlock.p.percentBu = self._calcWeightedBurnup()
        componentsInOrder = self._orderComponentsInGroup(repBlock)
        # Average the number densities of each group of like components onto the
        # corresponding component of the representative block.
        for c, allSimilarComponents in zip(repBlock, componentsInOrder):
            allNucsNames, densities = self._getAverageComponentNucs(allSimilarComponents, bWeights)
            for nuc, aDensity in zip(allNucsNames, densities):
                c.setNumberDensity(nuc, aDensity)
        # The lattice component is zero-area void and is not needed for XS generation.
        newBlock = self._removeLatticeComponents(repBlock)
        return newBlock

    def _getNucTempHelper(self):
        # Nuclide temperature averaging is intentionally unsupported for 1D slab collections.
        raise NotImplementedError

    @staticmethod
    def _getAllNucs(components):
        """Iterate through components and get all unique nuclides."""
        nucs = set()
        for c in components:
            nucs = nucs.union(c.getNuclides())
        return sorted(list(nucs))

    @staticmethod
    def _checkComponentConsistency(b, repBlock, components=None):
        """
        Verify that all components being homogenized are rectangular and have consistent dimensions.

        Parameters
        ----------
        b : Block
            Candidate block being checked against the representative block.
        repBlock : Block
            The representative block.
        components : list, optional
            An alternative (e.g., reversed) ordering of ``b``'s components to check instead
            of ``b``'s natural order. See :meth:`_reverseComponentOrder`.

        Raises
        ------
        ValueError
            When the components in a candidate block do not align with the components in the
            representative block. This check includes component area, component multiplicity, and
            nuclide composition.
        TypeError
            When the shape of the component is not a rectangle.
        """
        comps = b if components is None else components
        for c, repC in zip(comps, repBlock):
            if not isinstance(c, basicShapes.Rectangle):
                raise TypeError(
                    "The shape of component {} in block {} is invalid and must be a rectangle.".format(c, b)
                )
            compString = "Component {} in block {} and component {} in block {}".format(repC, repBlock, c, b)
            if c.getArea() != repC.getArea():
                raise ValueError(
                    "{} are in the same location, but have differing thicknesses. Check that the "
                    "thicknesses are defined correctly. Note: This could also be due to "
                    "thermal expansion".format(compString)
                )
            _checkConsistentNuclides(c, repC)
            if c.p.mult != repC.p.mult:
                raise ValueError("{} must have the same multiplicity to homogenize".format(compString))

    @staticmethod
    def _reverseComponentOrder(block):
        """Move the lattice component to the end of the components list."""
        latticeComponents = [c for c in block if c.isLatticeComponent()]
        components = [c for c in reversed(block) if not c.isLatticeComponent()]
        if len(latticeComponents) > 1:
            raise ValueError(
                "Block {} contains multiple `lattice` components: {}. Remove the additional "
                "lattice components in the reactor blueprints.".format(block, latticeComponents)
            )
        # NOTE(review): assumes exactly one lattice component exists; if there were none this
        # would raise IndexError -- confirm candidate blocks always carry a lattice component.
        components.append(latticeComponents[0])
        return components

    @staticmethod
    def _removeLatticeComponents(repBlock):
        """
        Remove the lattice component from the representative block.

        Notes
        -----
        - This component does not serve any purpose for XS generation as it contains void material
          with zero area.
        - Removing this component does not modify the blocks within the reactor.
        """
        for c in repBlock.iterComponents():
            if c.isLatticeComponent():
                repBlock.remove(c)
        return repBlock

    def _getAverageComponentNucs(self, components, bWeights):
        """Compute average nuclide densities by block weights and component area fractions."""
        allNucNames = self._getAllNucs(components)
        densities = np.zeros(len(allNucNames))
        totalWeight = 0.0
        for c, bWeight in zip(components, bWeights):
            # each component contributes proportionally to (block weight x component area)
            weight = bWeight * c.getArea()
            totalWeight += weight
            densities += weight * np.array(c.getNuclideNumberDensities(allNucNames))
        if totalWeight > 0.0:
            weightedDensities = densities / totalWeight
        else:
            # avoid division by zero when all weights vanish
            weightedDensities = np.zeros_like(densities)
        return allNucNames, weightedDensities

    def _orderComponentsInGroup(self, repBlock):
        """Order the components based on dimension and material type within the representative block."""
        orderedComponents = [[] for _ in repBlock]
        for b in self.getCandidateBlocks():
            if len(b) != len(repBlock):
                raise ValueError(
                    "Blocks {} and {} have differing number of components and cannot be homogenized".format(b, repBlock)
                )
            try:
                # First try the natural component ordering.
                self._checkComponentConsistency(b, repBlock)
                componentsToAdd = [c for c in b]
            except ValueError:
                # Fall back: some blocks list their components in reverse order relative
                # to the representative block.
                runLog.extra(
                    "Checking if components in block {} are in the reverse order of the components in the "
                    "representative block {}".format(b, repBlock)
                )
                reversedComponentOrder = self._reverseComponentOrder(b)
                self._checkComponentConsistency(b, repBlock, components=reversedComponentOrder)
                componentsToAdd = [c for c in reversedComponentOrder]
            for i, c in enumerate(componentsToAdd):
                orderedComponents[i].append(c)  # group similar components
        return orderedComponents
class FluxWeightedAverageBlockCollection(AverageBlockCollection):
    """An ``AverageBlockCollection`` whose block weights come from the ``flux`` parameter."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.weightingParam = "flux"
class CrossSectionGroupManager(interfaces.Interface):
    """
    Looks at the reactor and updates burnup group information based on current burnup.

    Contains a :py:class:`BlockCollection` for each cross section group.

    Notes
    -----
    The representative blocks created in the CrossSectionGroupManager are ordered
    alphabetically by key.
    """

    # interface name used for registration/lookup
    name = "xsGroups"

    # classification labels returned by _getXsIDGroup and used in _summarizeGroups
    _REPR_GROUP = "represented"
    _NON_REPR_GROUP = "non-represented"
    _PREGEN_GROUP = "pre-generated"
def __init__(self, r, cs):
    """Set up empty group containers and parse the burnup/temperature group settings."""
    interfaces.Interface.__init__(self, r, cs)
    self.representativeBlocks = collections.OrderedDict()
    self.avgNucTemperatures = {}
    self._unrepresentedXSIDs = []
    # When disabled, env-group updates are skipped -- e.g., when core changes are made but
    # XS should not be re-evaluated (such as lattice physics running only once per cycle).
    self._envGroupUpdatesEnabled = True
    self._buGroupBounds = []
    self._setBuGroupBounds(self.cs["buGroups"])
    self._tempGroupBounds = []
    self._setTempGroupBounds(self.cs["tempGroups"])
def interactBOL(self):
"""Called at the Beginning-of-Life of a run, before any cycles start.
.. impl:: The lattice physics interface and cross-section group manager are connected at
BOL.
:id: I_ARMI_XSGM_FREQ0
:implements: R_ARMI_XSGM_FREQ
This method sets the cross-section block averaging method and and logic for whether all
blocks in a cross section group should be used when generating a representative block.
Furthermore, if the control logic for lattice physics frequency updates is set at
beginning-of-life (`BOL`) through the :py:class:`LatticePhysicsInterface
`, the cross-section group manager will
construct representative blocks for each cross-section IDs at the beginning of the
reactor state.
"""
# now that all cs settings are loaded, apply defaults to compound XS settings
from armi.physics.neutronics.settings import (
CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION,
CONF_LATTICE_PHYSICS_FREQUENCY,
CONF_XS_BLOCK_REPRESENTATION,
)
self.cs[CONF_CROSS_SECTION].setDefaults(
self.cs[CONF_XS_BLOCK_REPRESENTATION],
self.cs[CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION],
)
self._latticePhysicsFrequency = LatticePhysicsFrequency[self.cs[CONF_LATTICE_PHYSICS_FREQUENCY]]
if self._latticePhysicsFrequency == LatticePhysicsFrequency.BOL:
self.createRepresentativeBlocks()
def interactBOC(self, cycle=None):
    """
    Update representative blocks and block burnup groups.

    .. impl:: The lattice physics interface and cross-section group manager are connected at
        BOC.
        :id: I_ARMI_XSGM_FREQ1
        :implements: R_ARMI_XSGM_FREQ

        If the lattice physics frequency is ``BOC``, representative blocks and block
        burnups are rebuilt for each cross-section ID from the current reactor state at
        the beginning of each cycle.

    Notes
    -----
    The block list of each block collection cannot be emptied since it is used to derive
    nuclide temperatures.
    """
    if self._latticePhysicsFrequency != LatticePhysicsFrequency.BOC:
        return
    self.createRepresentativeBlocks()
def interactEOC(self, cycle=None):
    """EOC interaction.

    Clears the big dictionary of all blocks to avoid memory issues and stale representers.
    """
    self.clearRepresentativeBlocks()
def interactEveryNode(self, cycle=None, tn=None):
    """Interaction at every time node.

    .. impl:: The lattice physics interface and cross-section group manager are connected at
        every time node.
        :id: I_ARMI_XSGM_FREQ2
        :implements: R_ARMI_XSGM_FREQ

        If the lattice physics frequency is ``everyNode`` (or more frequent),
        representative blocks and block burnups are rebuilt for each cross-section ID
        from the current reactor state at every time node.
    """
    if self._latticePhysicsFrequency < LatticePhysicsFrequency.everyNode:
        return
    self.createRepresentativeBlocks()
def interactCoupled(self, iteration):
    """Update cross-section groups on physics coupling iterations to pick up the latest
    temperatures.

    .. impl:: The lattice physics interface and cross-section group manager are connected
        during coupling.
        :id: I_ARMI_XSGM_FREQ3
        :implements: R_ARMI_XSGM_FREQ

        If the lattice physics frequency is ``firstCoupledIteration``, representative
        blocks and block burnups are rebuilt on the first iteration of every time node;
        with ``all``, they are rebuilt on every coupled iteration.

    Notes
    -----
    Updating cross sections only on the first coupled iteration (iteration == 0) can be a
    reasonable approximation: new cross sections get some temperature updates without
    running lattice physics on every iteration. Users who want updates on every coupling
    iteration should use the ``latticePhysicsFrequency: all`` option.

    See Also
    --------
    :py:meth:`~armi.physics.neutronics.latticePhysics.latticePhysics.LatticePhysicsInterface.interactCoupled`
    """
    freq = self._latticePhysicsFrequency
    shouldUpdate = freq == LatticePhysicsFrequency.all or (
        freq == LatticePhysicsFrequency.firstCoupledIteration and iteration == 0
    )
    if shouldUpdate:
        self.createRepresentativeBlocks()
def clearRepresentativeBlocks(self):
    """Drop all representative blocks and their cached average nuclide temperatures."""
    runLog.extra("Clearing representative blocks")
    self.avgNucTemperatures = {}
    self.representativeBlocks = collections.OrderedDict()
def _setBuGroupBounds(self, buGroupBounds):
    """
    Set the burnup group structure.

    Parameters
    ----------
    buGroupBounds : list
        List of upper burnup values in percent.

    Raises
    ------
    ValueError
        If any bound is outside (0, 100] or the bounds are not ascending.
    """
    previous = 0.0
    for bound in buGroupBounds:
        if not 0 < bound <= 100:
            raise ValueError("Burnup group upper bound {0} is invalid".format(bound))
        if bound < previous:
            raise ValueError("Burnup groups must be ascending")
        previous = bound
    # trailing +inf catches any burnup above the last user-specified bound
    self._buGroupBounds = buGroupBounds + [float("inf")]
def _setTempGroupBounds(self, tempGroupBounds):
    """
    Set the temperature group structure.

    Parameters
    ----------
    tempGroupBounds : list
        List of upper temperature bounds in degrees C.

    Raises
    ------
    ValueError
        If a bound is below absolute zero or the bounds are not ascending.
    """
    lastTemp = -C_TO_K  # absolute zero in degrees C
    # validate structure
    for upperTemp in tempGroupBounds:
        if upperTemp < -C_TO_K:
            # BUG FIX: the ``.format(upperTemp)`` call was previously inside the string
            # literal, so the offending temperature was never interpolated into the message.
            raise ValueError("Temperature boundary is below absolute zero {0}.".format(upperTemp))
        if upperTemp < lastTemp:
            raise ValueError("Temp groups must be ascending")
        lastTemp = upperTemp
    # trailing +inf catches any temperature above the last user-specified bound
    self._tempGroupBounds = tempGroupBounds + [float("inf")]
def _updateEnvironmentGroups(self, blockList):
    """
    Update the environment group of each block based on its burnup and temperature.

    If only one burnup group exists, then this is skipped so as to accommodate the possibility
    of 2-character xsGroup values (useful for detailed V&V models w/o depletion).

    Parameters
    ----------
    blockList : list
        Blocks whose ``envGroupNum`` parameter should be refreshed.

    See Also
    --------
    armi.reactor.blocks.Block.getMicroSuffix
    """
    if not self._envGroupUpdatesEnabled:
        runLog.debug("Skipping burnup group update of {0} blocks because it is disabled".format(len(blockList)))
        return
    numBuGroups = len(self._buGroupBounds)
    if numBuGroups == 1 and len(self._tempGroupBounds) == 1:
        # dont set block.p.envGroupNum since all 1 group and we want to support 2 char xsGroup
        return
    runLog.debug("Updating env groups of {0} blocks".format(len(blockList)))
    for block in blockList:
        bu = block.p.percentBu
        # both bounds lists end with +inf, so each search always finds a group
        for buIndex, upperBu in enumerate(self._buGroupBounds):
            if bu <= upperBu:
                buGroupVal = buIndex
                tempGroupVal = 0
                isotope = self._initializeXsID(block.getMicroSuffix()).xsTempIsotope
                if isotope and len(self._tempGroupBounds) > 1:
                    # if statement saves this somewhat expensive calc if we are not doing temp groups
                    tempC = getBlockNuclideTemperature(block, isotope)
                    for tempIndex, upperTemp in enumerate(self._tempGroupBounds):
                        if tempC <= upperTemp:
                            tempGroupVal = tempIndex
                            break
                # this ordering groups like-temperatures together in group number
                block.p.envGroupNum = tempGroupVal * numBuGroups + buGroupVal
                break
def _addXsGroupsFromBlocks(self, blockCollectionsByXsGroup, blockList):
    """
    Build all the cross section groups based on their XS type and env group.

    Also ensures that each block's env group is up to date with its environment.

    Parameters
    ----------
    blockCollectionsByXsGroup : dict
        Mapping of XS ID -> BlockCollection to populate (modified in place and returned).
    blockList : list
        Blocks to sort into collections.

    Returns
    -------
    dict
        The updated ``blockCollectionsByXsGroup`` mapping.
    """
    self._updateEnvironmentGroups(blockList)
    for b in blockList:
        xsID = b.getMicroSuffix()
        xsSettings = self._initializeXsID(xsID)
        if self.cs["tempGroups"] and xsSettings.blockRepresentation == MEDIAN_BLOCK_COLLECTION:
            runLog.warning(
                "Median block currently only consider median burnup block, and "
                "not median temperature block in group"
            )
        # PERF FIX: only construct a new collection when the group does not exist yet;
        # previously a throwaway BlockCollection was built for every block.
        if xsID not in blockCollectionsByXsGroup:
            blockCollectionsByXsGroup[xsID] = blockCollectionFactory(
                xsSettings, self.r.blueprints.allNuclidesInProblem
            )
        blockCollectionsByXsGroup[xsID].append(b)
    return blockCollectionsByXsGroup
def _initializeXsID(self, xsID):
    """Return the XS settings for ``xsID``, logging first-time initialization."""
    if xsID not in self.cs[CONF_CROSS_SECTION]:
        runLog.debug("Initializing XS ID {}".format(xsID), single=True)
    # indexing a missing ID initializes it on access
    return self.cs[CONF_CROSS_SECTION][xsID]
def xsTypeIsPregenerated(self, xsID):
    """Return True if the cross sections for the given ``xsID`` are pre-generated."""
    xsSetting = self.cs[CONF_CROSS_SECTION][xsID]
    return xsSetting.xsIsPregenerated
def fluxSolutionIsPregenerated(self, xsID):
    """Return True if an external flux solution file for the given ``xsID`` is pre-generated."""
    xsSetting = self.cs[CONF_CROSS_SECTION][xsID]
    return xsSetting.fluxIsPregenerated
def _copyPregeneratedXSFile(self, xsID):
    """Copy pre-generated XS files for ``xsID`` into the working directory (rank 0 only)."""
    # Only the primary MPI rank copies, avoiding a race between processors.
    if context.MPI_RANK != 0:
        return
    for sourcePath, fileName in self._getPregeneratedXsFileLocationData(xsID):
        destination = os.path.join(os.getcwd(), fileName)
        runLog.extra(
            "Copying pre-generated XS file {} from {} for XS ID {}".format(
                fileName, os.path.dirname(sourcePath), xsID
            )
        )
        # Copying a file onto itself would raise, so skip identical paths.
        if sourcePath != destination:
            safeCopy(sourcePath, destination)
def _copyPregeneratedFluxSolutionFile(self, xsID):
    """Copy the pre-generated flux solution file for ``xsID`` into the working directory (rank 0 only)."""
    # Only the primary MPI rank copies, avoiding a race between processors.
    if context.MPI_RANK != 0:
        return
    sourcePath, fileName = self._getPregeneratedFluxFileLocationData(xsID)
    destination = os.path.join(os.getcwd(), fileName)
    runLog.extra(
        "Copying pre-generated flux solution file {} from {} for XS ID {}".format(
            fileName, os.path.dirname(sourcePath), xsID
        )
    )
    # Copying a file onto itself would raise, so skip identical paths.
    if sourcePath != destination:
        safeCopy(sourcePath, destination)
def _getPregeneratedXsFileLocationData(self, xsID):
    """
    Gather the pre-generated cross section file data and check that the files exist.

    Notes
    -----
    The `file location` setting may list multiple files for a single XS ID; every one of
    them must exist. Returns a list of (absolute file path, file name) tuples.
    """
    fileData = []
    for location in self.cs[CONF_CROSS_SECTION][xsID].xsFileLocation:
        fullPath = os.path.abspath(location)
        if os.path.isdir(fullPath) or not os.path.exists(fullPath):
            raise ValueError(
                "External cross section path for XS ID {} is not a valid file location {}".format(xsID, fullPath)
            )
        fileData.append((fullPath, os.path.basename(fullPath)))
    return fileData
def _getPregeneratedFluxFileLocationData(self, xsID):
    """Gather the pre-generated flux solution file data and check that the file exists.

    Returns
    -------
    tuple
        (absolute file path, file name) of the pre-generated flux solution file.

    Raises
    ------
    ValueError
        If the configured path does not point at an existing file.
    """
    filePath = os.path.abspath(self.cs[CONF_CROSS_SECTION][xsID].fluxFileLocation)
    if not os.path.exists(filePath) or os.path.isdir(filePath):
        # Fixed copy-paste error: this message previously said "cross section" although
        # it refers to the external flux solution file.
        raise ValueError(
            "External flux solution path for XS ID {} is not a valid file location {}".format(xsID, filePath)
        )
    return (filePath, os.path.basename(filePath))
def createRepresentativeBlocks(self):
    """Get a representative block from each cross-section ID managed here.

    .. impl:: Create collections of blocks based on cross-section type and burn-up group.
        :id: I_ARMI_XSGM_CREATE_XS_GROUPS
        :implements: R_ARMI_XSGM_CREATE_XS_GROUPS

        Constructs the representative blocks and block burnups for each cross-section ID
        in the reactor model: cross-section groups are built first, then candidate blocks
        are found and a representative block is created from each non-empty selection.
    """
    representativeBlocks = {}
    self.avgNucTemperatures = {}
    runLog.extra("Generating representative blocks for XS")
    blockCollectionsByXsGroup = self.makeCrossSectionGroups()
    for xsID, collection in blockCollectionsByXsGroup.items():
        if self.xsTypeIsPregenerated(xsID):
            # pre-generated XS only need their files copied into the working directory
            self._copyPregeneratedXSFile(xsID)
            continue
        if not collection.getCandidateBlocks():
            continue
        runLog.debug("Creating representative block for {}".format(xsID))
        if self.fluxSolutionIsPregenerated(xsID):
            self._copyPregeneratedFluxSolutionFile(xsID)
        representativeBlocks[xsID] = collection.createRepresentativeBlock()
        self.avgNucTemperatures[xsID] = collection.avgNucTemperatures
    # keep the mapping alphabetized by XS ID
    self.representativeBlocks = collections.OrderedDict(sorted(representativeBlocks.items()))
    self._checkForUnrepresentedXSIDs(blockCollectionsByXsGroup)
    self._modifyUnrepresentedXSIDs(blockCollectionsByXsGroup)
    self._summarizeGroups(blockCollectionsByXsGroup)
def createRepresentativeBlocksUsingExistingBlocks(self, blockList, originalRepresentativeBlocks):
    """
    Create a new set of representative blocks using provided blocks.

    This uses an input list of blocks and creates new representative blocks for these blocks based on the
    compositions and temperatures of their original representative blocks.

    Notes
    -----
    This is required for computing Doppler, Voided-Doppler, Temperature, and Voided-Temperature reactivity
    coefficients, where the composition of the representative block must remain the same, but only the
    temperatures within the representative blocks are to be modified.

    Parameters
    ----------
    blockList : list
        A list of blocks defined within the core
    originalRepresentativeBlocks : dict
        A dict of unperturbed representative blocks that the new representative blocks are formed from
        keys: XS group ID (e.g., "AA")
        values: representative block for the XS group

    Returns
    -------
    blockCollectionByXsGroup : dict
        Mapping between XS IDs and the new block collections
    modifiedReprBlocks : dict
        Mapping between XS IDs and the new representative blocks
    origXSIDsFromNew : dict
        Mapping of original XS IDs to new XS IDs. New XS IDs are created to
        represent a modified state (e.g., a Doppler temperature perturbation).

    Raises
    ------
    ValueError
        If passed list arguments are empty
    """
    if not blockList:
        raise ValueError("A block list was not supplied to create new representative blocks")
    if not originalRepresentativeBlocks:
        raise ValueError(
            "New representative blocks cannot be created because a list of unperturbed "
            "representative blocks was not provided"
        )
    newBlockCollectionsByXsGroup = collections.OrderedDict()
    blockCollectionByXsGroup = self.makeCrossSectionGroups()
    modifiedReprBlocks, origXSIDsFromNew = self._getModifiedReprBlocks(blockList, originalRepresentativeBlocks)
    # NOTE(review): returns a bare None here rather than the documented 3-tuple;
    # callers must guard for this -- confirm this asymmetry is intentional.
    if not modifiedReprBlocks:
        return None
    for newXSID in modifiedReprBlocks:
        oldXSID = origXSIDsFromNew[newXSID]
        oldBlockCollection = blockCollectionByXsGroup[oldXSID]
        # create a new block collection that inherits all of the properties
        # and settings from oldBlockCollection.
        validBlockTypes = oldBlockCollection._validRepresentativeBlockTypes
        if validBlockTypes is not None and len(validBlockTypes) > 0:
            # convert flags back to their string names for the new collection's constructor
            validBlockTypes = [
                flags._toString(Flags, flag) for flag in oldBlockCollection._validRepresentativeBlockTypes
            ]
        newBlockCollection = oldBlockCollection.__class__(
            oldBlockCollection.allNuclidesInProblem,
            validBlockTypes=validBlockTypes,
            averageByComponent=oldBlockCollection.averageByComponent,
        )
        newBlockCollectionsByXsGroup[newXSID] = newBlockCollection
    # clean up any unrepresented XS IDs
    self._checkForUnrepresentedXSIDs(blockCollectionByXsGroup)
    self._modifyUnrepresentedXSIDs(blockCollectionByXsGroup)
    return newBlockCollectionsByXsGroup, modifiedReprBlocks, origXSIDsFromNew
def _getModifiedReprBlocks(self, blockList, originalRepresentativeBlocks):
    """
    Create a new representative block for each unique XS ID on blocks to be modified.

    Returns
    -------
    modifiedReprBlocks : dict
        Mapping between the new XS IDs and the new representative blocks
    origXSIDsFromNew : dict
        Mapping between the new representative block XS IDs and the original representative block XS IDs
    """
    modifiedBlockXSTypes = collections.OrderedDict()
    modifiedReprBlocks = collections.OrderedDict()
    origXSIDsFromNew = collections.OrderedDict()
    # First pass: allocate one fresh XS type per original XS type encountered.
    for b in blockList:
        origXSID = b.getMicroSuffix()
        # Filter out the pre-generated XS IDs
        if origXSID not in originalRepresentativeBlocks:
            if self.xsTypeIsPregenerated(origXSID):
                runLog.warning(
                    "A modified representative block for XS ID `{}` cannot be created because it is "
                    "mapped to a pre-generated cross section set. Please ensure that this "
                    "approximation is valid for the analysis.".format(origXSID),
                    single=True,
                )
        else:
            origXSType = origXSID[0]
            if origXSType not in modifiedBlockXSTypes.keys():
                # exclude already-chosen new types so each original type maps uniquely
                nextXSType = self.getNextAvailableXsTypes(excludedXSTypes=modifiedBlockXSTypes.values())[0]
                modifiedBlockXSTypes[origXSType] = nextXSType
            newXSID = modifiedBlockXSTypes[origXSType] + origXSID[1]  # New XS Type + Old Burnup Group
            origXSIDsFromNew[newXSID] = origXSID
    # Create new representative blocks based on the original XS IDs
    for newXSID, origXSID in origXSIDsFromNew.items():
        runLog.extra(
            "Creating representative block `{}` with composition from representative block `{}`".format(
                newXSID, origXSID
            )
        )
        newXSType = newXSID[0]
        # deep copy keeps the unperturbed representative block intact
        newReprBlock = copy.deepcopy(originalRepresentativeBlocks[origXSID])
        newReprBlock.p.xsType = newXSType
        newReprBlock.name = "AVG_{}".format(newXSID)
        modifiedReprBlocks[newXSID] = newReprBlock
        # Update the XS types of the blocks that will be modified
        for b in blockList:
            if b.getMicroSuffix() == origXSID:
                b.p.xsType = newXSType
        # copy XS settings to new XS ID
        self.cs[CONF_CROSS_SECTION][newXSID] = copy.deepcopy(self.cs[CONF_CROSS_SECTION][origXSID])
        self.cs[CONF_CROSS_SECTION][newXSID].xsID = newXSID
    return modifiedReprBlocks, origXSIDsFromNew
def _checkForUnrepresentedXSIDs(self, blockCollectionsByXsGroup):
    """
    Record the XS IDs that ended up without a valid representative block.

    Parameters
    ----------
    blockCollectionsByXsGroup: dict[str, BlockCollection]
        Dict of BlockCollection keyed by the XS group they belong to.

    Notes
    -----
    This should be run after :meth:`CrossSectionGroupManager._updateEnvironmentGroups`, which resets
    ``b.p.envGroup`` and can result in unrepresented cross section IDs. This is usually invoked
    as a result of a call to :meth:`CrossSectionGroupManager.makeCrossSectionGroups`
    """
    self._unrepresentedXSIDs = []
    for xsID, collection in blockCollectionsByXsGroup.items():
        if self.xsTypeIsPregenerated(xsID):
            continue
        if collection.getCandidateBlocks():
            continue
        runLog.debug(
            "No candidate blocks in group for {} (with a valid representative block flag). "
            "Will apply different environment group".format(xsID)
        )
        self._unrepresentedXSIDs.append(xsID)
def getNextAvailableXsTypes(self, howMany=1, excludedXSTypes=None):
    """Return the next however many available xs types.

    Parameters
    ----------
    howMany : int, optional
        The number of requested xs types
    excludedXSTypes : list, optional
        A list of cross section types to exclude from using

    Raises
    ------
    ValueError
        If there are no available XS types to be allocated
    """
    allocatedXSTypes = {b.p.xsType for b in self.r.core.getBlocks(includeAll=True)}
    if excludedXSTypes is not None:
        allocatedXSTypes.update(excludedXSTypes)
    availableXsTypes = sorted(set(_ALLOWABLE_XS_TYPE_LIST) - allocatedXSTypes)
    if len(availableXsTypes) < howMany:
        raise ValueError(
            "There are not enough available xs types. {} have been allocated, {} are available, and "
            "{} have been requested.".format(len(allocatedXSTypes), len(availableXsTypes), howMany)
        )
    chosen = availableXsTypes[:howMany]
    # Windows filesystems are case-insensitive, so mixed-case XS types can collide in file names.
    if sys.platform.startswith("win"):
        allXSTypes = allocatedXSTypes | set(chosen)
        if len({t.capitalize() for t in allXSTypes}) != len(allXSTypes):
            runLog.warning(
                "Mixing upper and lower-case XS group types on a Windows system, which is not "
                "case-sensitive. There is a chance that ARMI could overwrite previously "
                "generated XS files, which could cause mysterious and/or unpredictable errors."
            )
    return chosen
def _getMissingBlueprintBlocks(self, blockCollectionsByXsGroup):
    """
    Get deep copies of all blueprint blocks whose XS suffix is not yet represented
    (i.e., blocks in assemblies in the blueprints but not in the core).

    Notes
    -----
    Certain cases (ZPPR validation cases) need to run cross sections for assemblies not in
    the core to get by region cross sections and flux factors.
    """
    blockList = [b for a in self.r.blueprints.assemblies.values() for b in a]
    self._updateEnvironmentGroups(blockList)
    # deep-copy so later modifications never touch the blueprint blocks themselves
    return [copy.deepcopy(b) for b in blockList if b.getMicroSuffix() not in blockCollectionsByXsGroup]
def makeCrossSectionGroups(self):
    """Make cross section groups for all blocks in reactor and unrepresented blocks from blueprints."""
    # start fresh in case previous groups no longer exist
    groups = self._addXsGroupsFromBlocks({}, self.r.core.getBlocks())
    # fold in blocks that are defined in blueprints but absent from the core
    groups = self._addXsGroupsFromBlocks(groups, self._getMissingBlueprintBlocks(groups))
    return collections.OrderedDict(sorted(groups.items()))
def _getAlternateEnvGroup(self, missingXsType):
    """Return the env group of a represented XS ID that shares ``missingXsType``, if any."""
    # XS IDs are 2-character strings, so they unpack into (type, env group)
    for xsType, envGroup in self.representativeBlocks:
        if xsType == missingXsType:
            return envGroup
    return None
def _modifyUnrepresentedXSIDs(self, blockCollectionsByXsGroup):
    """
    Adjust the xsID of blocks in the groups that are not represented.

    Try to just adjust the burnup group up to something that is represented
    (can happen to structure in AA when only AB, AC, AD still remain,
    but if some fresh AA happened to be added it might be needed).
    """
    for xsID in self._unrepresentedXSIDs:
        # No blocks in this ID had a valid representative block flag (such as `fuel` for
        # default), so there is nothing valid to run lattice physics on.
        missingXsType, _missingEnvGroup = xsID
        nonRepBlocks = blockCollectionsByXsGroup.get(xsID)
        if not nonRepBlocks:
            continue
        newEnvGroup = self._getAlternateEnvGroup(missingXsType)
        if not newEnvGroup:
            runLog.warning(
                "No representative blocks with XS type {0} exist in the core. "
                "There were also no similar blocks to use. "
                "These XS cannot be generated and must exist in the working "
                "directory or the run will fail.".format(xsID)
            )
            continue
        # Re-point these blocks at an env group of the same XS type that IS represented.
        runLog.warning(
            "Changing XSID of {0} blocks from {1} to {2}".format(
                len(nonRepBlocks), xsID, missingXsType[0] + newEnvGroup
            )
        )
        for b in nonRepBlocks:
            b.p.envGroup = newEnvGroup
def _summarizeGroups(self, blockCollectionsByXsGroup):
    """Log a summary of the current contents of the XS groups."""
    from armi.physics.neutronics.settings import CONF_XS_BLOCK_REPRESENTATION

    runLog.extra("Cross section group manager summary")
    runLog.extra("Averaging performed by `{0}`".format(self.cs[CONF_XS_BLOCK_REPRESENTATION]))
    for xsID, blocks in blockCollectionsByXsGroup.items():
        if not blocks:
            continue
        xsIDGroup = self._getXsIDGroup(xsID)
        if xsIDGroup == self._REPR_GROUP:
            reprBlock = self.representativeBlocks.get(xsID)
            xsSettings = self._initializeXsID(reprBlock.getMicroSuffix())
            # "N/A" when no average temperature exists for the configured isotope
            temp = self.avgNucTemperatures[xsID].get(xsSettings.xsTempIsotope, "N/A")
            runLog.extra(
                (
                    "XS ID {} contains {:4d} blocks, with avg burnup {} "
                    "and avg fuel temp {}, represented by: {:65s}"
                ).format(xsID, len(blocks), reprBlock.p.percentBu, temp, reprBlock)
            )
        elif xsIDGroup == self._NON_REPR_GROUP:
            runLog.extra(
                "XS ID {} contains {:4d} blocks, but no representative block.".format(xsID, len(blocks))
            )
        elif xsIDGroup == self._PREGEN_GROUP:
            xsFileNames = [name for _path, name in self._getPregeneratedXsFileLocationData(xsID)]
            runLog.extra(
                "XS ID {} contains {:4d} blocks, represented by: {}".format(xsID, len(blocks), xsFileNames)
            )
        else:
            raise ValueError("No valid group for XS ID {}".format(xsID))
def _getXsIDGroup(self, xsID):
    """Classify ``xsID`` as pregenerated, represented, or unrepresented (None if none apply)."""
    if self.xsTypeIsPregenerated(xsID):
        return self._PREGEN_GROUP
    if xsID in self.representativeBlocks:
        return self._REPR_GROUP
    if xsID in self._unrepresentedXSIDs:
        return self._NON_REPR_GROUP
    return None
def disableEnvGroupUpdates(self):
    """
    Stop updating env groups based on environment.

    Useful during reactivity coefficient calculations to be consistent with ref. run.

    Returns
    -------
    bool
        Whether updates were enabled before this call.

    See Also
    --------
    enableEnvGroupUpdates
    """
    runLog.extra("Environment xs group updating disabled")
    previousState = self._envGroupUpdatesEnabled
    self._envGroupUpdatesEnabled = False
    return previousState
def enableEnvGroupUpdates(self):
    """
    Turn on updating Env groups based on environment.

    Re-enables the updates previously turned off by ``disableEnvGroupUpdates``.

    See Also
    --------
    disableEnvGroupUpdates
    """
    runLog.extra("Environment xs group updating enabled")
    self._envGroupUpdatesEnabled = True
def getNucTemperature(self, xsID, nucName):
    """
    Return the temperature (in C) of ``nucName`` in the group with the specified ``xsID``.

    Notes
    -----
    Returns None if either the xsID or the nucName is absent from the average
    nuclide temperature dictionary ``self.avgNucTemperatures``.
    """
    groupTemps = self.avgNucTemperatures.get(xsID)
    if groupTemps is None:
        return None
    return groupTemps.get(nucName)
def updateNuclideTemperatures(self, blockCollectionByXsGroup=None):
    """
    Recompute nuclide temperatures for the block collections within the core.

    Parameters
    ----------
    blockCollectionByXsGroup : dict, optional
        Mapping between the XS IDs in the core and the block collections. Note that
        providing this as an argument will only update the average temperatures of
        these XS IDs/block collections and will result in other XS ID average
        temperatures not included to be discarded.

    Notes
    -----
    This method does not update any properties of the representative blocks.
    Temperatures are obtained from the BlockCollection class rather than the
    representative block.
    """
    # Reset first: only the IDs processed below will retain temperature data.
    self.avgNucTemperatures = {}
    collectionsByXsID = blockCollectionByXsGroup or self.makeCrossSectionGroups()
    runLog.info(
        "Updating representative block average nuclide temperatures for the following XS IDs: {}".format(
            collectionsByXsID.keys()
        )
    )
    for xsID, blockCollection in collectionsByXsID.items():
        blockCollection.calcAvgNuclideTemperatures()
        self.avgNucTemperatures[xsID] = blockCollection.avgNucTemperatures
        runLog.extra("XS ID: {}, Collection: {}".format(xsID, blockCollection))
# String constants: the user-facing names of the block-averaging strategies.
MEDIAN_BLOCK_COLLECTION = "Median"
AVERAGE_BLOCK_COLLECTION = "Average"
FLUX_WEIGHTED_AVERAGE_BLOCK_COLLECTION = "FluxWeightedAverage"
SLAB_COMPONENTS_BLOCK_COLLECTION = "ComponentAverage1DSlab"
CYLINDRICAL_COMPONENTS_BLOCK_COLLECTION = "ComponentAverage1DCylinder"
CYLINDRICAL_COMPONENTS_DUCT_HET_BLOCK_COLLECTION = "ComponentAverage1DCylinderDuctHeterogeneous"
# Mapping between block collection string constants and their
# respective block collection classes; consumed by ``blockCollectionFactory``.
BLOCK_COLLECTIONS = {
    MEDIAN_BLOCK_COLLECTION: MedianBlockCollection,
    AVERAGE_BLOCK_COLLECTION: AverageBlockCollection,
    FLUX_WEIGHTED_AVERAGE_BLOCK_COLLECTION: FluxWeightedAverageBlockCollection,
    SLAB_COMPONENTS_BLOCK_COLLECTION: SlabComponentsAverageBlockCollection,
    CYLINDRICAL_COMPONENTS_BLOCK_COLLECTION: CylindricalComponentsAverageBlockCollection,
    CYLINDRICAL_COMPONENTS_DUCT_HET_BLOCK_COLLECTION: CylindricalComponentsDuctHetAverageBlockCollection,
}
def blockCollectionFactory(xsSettings, allNuclidesInProblem):
    """
    Construct the block collection selected by the user's XS settings.

    The collection class is looked up in ``BLOCK_COLLECTIONS`` by the
    ``blockRepresentation`` name; a duct-heterogeneous 1D cylinder run is
    redirected to the specialized duct-het collection.
    """
    collectionName = xsSettings.blockRepresentation
    if xsSettings.ductHeterogeneous and collectionName == CYLINDRICAL_COMPONENTS_BLOCK_COLLECTION:
        collectionName = CYLINDRICAL_COMPONENTS_DUCT_HET_BLOCK_COLLECTION
    collectionClass = BLOCK_COLLECTIONS[collectionName]
    return collectionClass(
        allNuclidesInProblem,
        validBlockTypes=xsSettings.validBlockTypes,
        averageByComponent=xsSettings.averageByComponent,
    )
================================================
FILE: armi/physics/neutronics/crossSectionSettings.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The data structures and schema of the cross section modeling options.
These are advanced/compound settings that are carried along in the normal cs
object but aren't simple key/value pairs.
The cs object could either hold the base data (dicts) and create instances
of these data structure objects as needed, or the settings system could actually
hold instances of these data structures. It is most convenient to let the cs
object hold actual instances of these data.
See detailed docs in :doc:`Lattice Physics`.
"""
from enum import Enum
from typing import Dict, Union
import voluptuous as vol
from armi import context, runLog
from armi.physics.neutronics import crossSectionGroupManager
from armi.physics.neutronics.crossSectionGroupManager import BLOCK_COLLECTIONS
from armi.settings import Setting
# Setting keys recognized inside a single ``crossSectionControl`` entry.
# See the ``XSModelingOptions`` attribute docs for the meaning of each option.
CONF_BLOCK_REPRESENTATION = "blockRepresentation"
CONF_MEMORY_REQUIREMENT = "requiredRAM"
CONF_BLOCKTYPES = "validBlockTypes"
CONF_BUCKLING = "criticalBuckling"
CONF_DRIVER = "driverID"
CONF_EXTERNAL_DRIVER = "externalDriver"
CONF_EXTERNAL_RINGS = "numExternalRings"
CONF_XS_FILE_LOCATION = "xsFileLocation"
CONF_EXTERNAL_FLUX_FILE_LOCATION = "fluxFileLocation"
CONF_GEOM = "geometry"
CONF_HOMOGBLOCK = "useHomogenizedBlockComposition"
CONF_INTERNAL_RINGS = "numInternalRings"
CONF_MERGE_INTO_CLAD = "mergeIntoClad"
CONF_MERGE_INTO_FUEL = "mergeIntoFuel"
CONF_MESH_PER_CM = "meshSubdivisionsPerCm"
CONF_REACTION_DRIVER = "nuclideReactionDriver"
CONF_XSID = "xsID"
CONF_XS_EXECUTE_EXCLUSIVE = "xsExecuteExclusive"
CONF_XS_PRIORITY = "xsPriority"
CONF_COMPONENT_AVERAGING = "averageByComponent"
CONF_XS_MAX_ATOM_NUMBER = "xsMaxAtomNumber"
CONF_MIN_DRIVER_DENSITY = "minDriverDensity"
CONF_DUCT_HETEROGENEOUS = "ductHeterogeneous"
CONF_TRACE_ISOTOPE_THRESHOLD = "traceIsotopeThreshold"
CONF_XS_TEMP_ISOTOPE = "xsTempIsotope"
class XSGeometryTypes(Enum):
    """Geometry modeling approximations available for lattice physics within the framework."""

    ZERO_DIMENSIONAL = 1
    ONE_DIMENSIONAL_SLAB = 2
    ONE_DIMENSIONAL_CYLINDER = 4
    TWO_DIMENSIONAL_HEX = 8

    @classmethod
    def _mapping(cls):
        """Map each member to its human-readable label."""
        return {
            cls.ZERO_DIMENSIONAL: "0D",
            cls.ONE_DIMENSIONAL_SLAB: "1D slab",
            cls.ONE_DIMENSIONAL_CYLINDER: "1D cylinder",
            cls.TWO_DIMENSIONAL_HEX: "2D hex",
        }

    @classmethod
    def getStr(cls, typeSpec: Enum):
        """
        Return a string representation of the given ``typeSpec``.

        Examples
        --------
        XSGeometryTypes.getStr(XSGeometryTypes.ZERO_DIMENSIONAL) == "0D"
        XSGeometryTypes.getStr(XSGeometryTypes.TWO_DIMENSIONAL_HEX) == "2D hex"
        """
        geometryTypes = list(cls)
        if typeSpec not in geometryTypes:
            raise TypeError(f"{typeSpec} not in {geometryTypes}")
        # ``typeSpec`` is guaranteed to be a member at this point, so index directly.
        return cls._mapping()[typeSpec]
# The set of geometry label strings accepted in user input (e.g., "0D", "2D hex").
XS_GEOM_TYPES = {
    XSGeometryTypes.getStr(XSGeometryTypes.ZERO_DIMENSIONAL),
    XSGeometryTypes.getStr(XSGeometryTypes.ONE_DIMENSIONAL_SLAB),
    XSGeometryTypes.getStr(XSGeometryTypes.ONE_DIMENSIONAL_CYLINDER),
    XSGeometryTypes.getStr(XSGeometryTypes.TWO_DIMENSIONAL_HEX),
}
# This dictionary defines the valid set of inputs based on
# the geometry type within the ``XSModelingOptions``.
# ``XSModelingOptions.validate`` reports (at debug level) any option supplied
# for a geometry that is not listed here.
_VALID_INPUTS_BY_GEOMETRY_TYPE = {
    XSGeometryTypes.getStr(XSGeometryTypes.ZERO_DIMENSIONAL): {
        CONF_XSID,
        CONF_GEOM,
        CONF_BUCKLING,
        CONF_DRIVER,
        CONF_BLOCKTYPES,
        CONF_BLOCK_REPRESENTATION,
        CONF_EXTERNAL_FLUX_FILE_LOCATION,
        CONF_COMPONENT_AVERAGING,
        CONF_XS_EXECUTE_EXCLUSIVE,
        CONF_XS_PRIORITY,
        CONF_XS_MAX_ATOM_NUMBER,
        CONF_XS_TEMP_ISOTOPE,
    },
    XSGeometryTypes.getStr(XSGeometryTypes.ONE_DIMENSIONAL_SLAB): {
        CONF_XSID,
        CONF_GEOM,
        CONF_MESH_PER_CM,
        CONF_BLOCKTYPES,
        CONF_BLOCK_REPRESENTATION,
        CONF_EXTERNAL_FLUX_FILE_LOCATION,
        CONF_COMPONENT_AVERAGING,
        CONF_XS_EXECUTE_EXCLUSIVE,
        CONF_XS_PRIORITY,
        CONF_XS_MAX_ATOM_NUMBER,
        CONF_MIN_DRIVER_DENSITY,
        CONF_XS_TEMP_ISOTOPE,
    },
    XSGeometryTypes.getStr(XSGeometryTypes.ONE_DIMENSIONAL_CYLINDER): {
        CONF_XSID,
        CONF_GEOM,
        CONF_MERGE_INTO_CLAD,
        CONF_MERGE_INTO_FUEL,
        CONF_DRIVER,
        CONF_HOMOGBLOCK,
        CONF_INTERNAL_RINGS,
        CONF_EXTERNAL_RINGS,
        CONF_MESH_PER_CM,
        CONF_BLOCKTYPES,
        CONF_BLOCK_REPRESENTATION,
        CONF_EXTERNAL_FLUX_FILE_LOCATION,
        CONF_COMPONENT_AVERAGING,
        CONF_XS_EXECUTE_EXCLUSIVE,
        CONF_XS_PRIORITY,
        CONF_XS_MAX_ATOM_NUMBER,
        CONF_MIN_DRIVER_DENSITY,
        CONF_DUCT_HETEROGENEOUS,
        CONF_TRACE_ISOTOPE_THRESHOLD,
        CONF_XS_TEMP_ISOTOPE,
    },
    XSGeometryTypes.getStr(XSGeometryTypes.TWO_DIMENSIONAL_HEX): {
        CONF_XSID,
        CONF_GEOM,
        CONF_BUCKLING,
        CONF_EXTERNAL_DRIVER,
        CONF_DRIVER,
        CONF_REACTION_DRIVER,
        CONF_EXTERNAL_RINGS,
        CONF_BLOCK_REPRESENTATION,
        CONF_EXTERNAL_FLUX_FILE_LOCATION,
        CONF_COMPONENT_AVERAGING,
        CONF_XS_EXECUTE_EXCLUSIVE,
        CONF_XS_PRIORITY,
        CONF_XS_MAX_ATOM_NUMBER,
        CONF_MIN_DRIVER_DENSITY,
        CONF_XS_TEMP_ISOTOPE,
    },
}
# Voluptuous schema for the options of a single XS ID. Every key is optional;
# values are type-checked, and numeric values are coerced where ``vol.Coerce`` is used.
_SINGLE_XS_SCHEMA = vol.Schema(
    {
        vol.Optional(CONF_GEOM): vol.All(str, vol.In(XS_GEOM_TYPES)),
        vol.Optional(CONF_BLOCK_REPRESENTATION): vol.All(
            str,
            vol.In(
                set(BLOCK_COLLECTIONS.keys()),
            ),
        ),
        vol.Optional(CONF_DRIVER): str,
        vol.Optional(CONF_BUCKLING): bool,
        vol.Optional(CONF_REACTION_DRIVER): str,
        vol.Optional(CONF_BLOCKTYPES): [str],
        vol.Optional(CONF_HOMOGBLOCK): bool,
        vol.Optional(CONF_EXTERNAL_DRIVER): bool,
        vol.Optional(CONF_INTERNAL_RINGS): vol.Coerce(int),
        vol.Optional(CONF_EXTERNAL_RINGS): vol.Coerce(int),
        vol.Optional(CONF_MERGE_INTO_CLAD): [str],
        vol.Optional(CONF_MERGE_INTO_FUEL): [str],
        vol.Optional(CONF_XS_FILE_LOCATION): [str],
        vol.Optional(CONF_EXTERNAL_FLUX_FILE_LOCATION): str,
        vol.Optional(CONF_MESH_PER_CM): vol.Coerce(float),
        vol.Optional(CONF_XS_EXECUTE_EXCLUSIVE): bool,
        vol.Optional(CONF_XS_PRIORITY): vol.Coerce(float),
        vol.Optional(CONF_XS_MAX_ATOM_NUMBER): vol.Coerce(int),
        vol.Optional(CONF_MIN_DRIVER_DENSITY): vol.Coerce(float),
        vol.Optional(CONF_COMPONENT_AVERAGING): bool,
        vol.Optional(CONF_DUCT_HETEROGENEOUS): bool,
        vol.Optional(CONF_TRACE_ISOTOPE_THRESHOLD): vol.Coerce(float),
        vol.Optional(CONF_XS_TEMP_ISOTOPE): str,
        vol.Optional(CONF_MEMORY_REQUIREMENT): vol.Coerce(float),
    }
)

# Top-level schema: maps a 1- or 2-character XS ID string to its option dict.
_XS_SCHEMA = vol.Schema({vol.All(str, vol.Length(min=1, max=2)): _SINGLE_XS_SCHEMA})
class XSSettings(dict):
    """
    Container for holding multiple cross section settings based on their XSID.

    This is intended to be stored as part of a case settings and to be
    used for cross section modeling within a run.

    Notes
    -----
    This is a specialized dictionary that functions in a similar manner as a
    defaultdict where if a key (i.e., XSID) is missing then a default will
    be set. If a missing key is being added before the ``setDefaults`` method
    is called then this will produce an error.

    This cannot just be a defaultdict because the creation of new cross
    section settings are dependent on user settings.
    """

    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
        # Populated by ``setDefaults``; must be set before a missing key is requested.
        self._blockRepresentation = None
        self._validBlockTypes = None

    def __repr__(self):
        return f"<{self.__class__.__name__} with XS IDs {self.keys()}>"

    def __getitem__(self, xsID):
        """
        Return the stored settings of the same xs type and the lowest burnup group if they exist.

        Notes
        -----
        1. If ``AA`` and ``AB`` exist, but ``AC`` is created, then the intended behavior
           is that ``AC`` settings will be set to the settings in ``AA``.

        2. If only ``YZ`` exists and ``YA`` is created, then the intended behavior is that
           ``YA`` settings will NOT be set to the settings in ``YZ``

        3. Requirements for using the existing cross section settings:

           a. The existing XS ID must match the current XS ID.
           b. The current xs burnup group must be larger than the lowest burnup group for the
              existing XS ID
           c. If 3a. and 3b. are not met, then the default cross section settings will be
              set for the current XS ID
        """
        if xsID in self:
            return dict.__getitem__(self, xsID)

        # Exact key not present, so use the lowest env group key (e.g. AA or BA) of the
        # same type as the source for settings, since users do not typically provide all
        # combinations of second chars explicitly.
        xsType = xsID[0]
        envGroup = xsID[1]
        existingXsOpts = [xsOpt for xsOpt in self.values() if xsOpt.xsType == xsType and xsOpt.envGroup < envGroup]
        # Plain truthiness tests list emptiness; ``any()`` would wrongly test the
        # truthiness of the stored option objects themselves.
        if not existingXsOpts:
            return self._getDefault(xsID)
        return min(existingXsOpts, key=lambda xsOpt: xsOpt.envGroup)

    def setDefaults(self, blockRepresentation, validBlockTypes):
        """
        Set defaults for current and future xsIDs based user settings.

        This must be delayed after read-time since the settings affecting this may not be
        loaded yet and could still be at their own defaults when this input is being
        processed. Thus, defaults are set at a later time.

        Parameters
        ----------
        blockRepresentation : str
            Valid options are provided in ``CrossSectionGroupManager.BLOCK_COLLECTIONS``
        validBlockTypes : list of str or bool
            This configures which blocks (by their type) the cross section group manager
            will merge together to create a representative block. If set to ``None`` or
            ``True`` then all block types in the XS ID will be considered. If set to
            ``False`` then a default of ["fuel"] will be used. If set to a list of strings
            then the specific list will be used. A typical input may be ["fuel"] to just
            consider the fuel blocks.

        See Also
        --------
        armi.physics.neutronics.crossSectionGroupManager.CrossSectionGroupManager.interactBOL : calls this
        """
        self._blockRepresentation = blockRepresentation
        self._validBlockTypes = validBlockTypes
        for _xsId, xsOpt in self.items():
            xsOpt.setDefaults(blockRepresentation, validBlockTypes)
            xsOpt.validate()

    def _getDefault(self, xsID):
        """
        Process the optional ``crossSectionControl`` setting.

        This input allows users to override global defaults for specific cross section IDs
        (xsID). To simplify downstream handling of the various XS controls, we build a full
        data structure here that should fully define the settings for each individual cross
        section ID.
        """
        # Only check since the state of the underlying cross section dictionary does not
        # get broadcasted to worker nodes. This check is only relevant for the first time
        # this is called and when called by the head node.
        if context.MPI_RANK == 0:
            if self._blockRepresentation is None:
                raise ValueError(
                    f"The defaults of {self} have not been set. Call ``setDefaults`` first "
                    "before attempting to add a new XS ID."
                )

        xsOpt = XSModelingOptions(xsID, geometry=XSGeometryTypes.getStr(XSGeometryTypes.ZERO_DIMENSIONAL))
        xsOpt.setDefaults(self._blockRepresentation, self._validBlockTypes)
        xsOpt.validate()
        return xsOpt
class XSModelingOptions:
    """
    Cross section modeling options for a particular XS ID.

    Attributes
    ----------
    xsID : str
        Cross section ID that is two characters maximum (i.e., AA).
    geometry: str
        The geometry modeling approximation for regions of the core with
        this assigned xsID. This is required if the ``xsFileLocation``
        attribute is not provided. This cannot be set if the ``xsFileLocation``
        is provided.
    xsFileLocation: list of str or None
        This should be a list of paths where the cross sections for this
        xsID can be copied from. This is required if the ``geometry``
        attribute is not provided. This cannot be set if the ``geometry``
        is provided.
    fluxFileLocation: str or None
        This should be a path where a pre-calculated flux solution
        for this xsID can be copied from. The ``geometry`` attribute
        must be provided with this input.
    validBlockTypes: str or None
        This is a configuration option for how the cross section group manager
        determines which blocks/regions to manage as part of the same collection
        for the current xsID. If this is set to ``None`` then all blocks/regions
        with the current xsID will be considered.
    blockRepresentation : str
        This is a configuration option for how the cross section group manager
        will select how to create a representative block based on the collection
        within the same xsID. See: ``crossSectionGroupManager.BLOCK_COLLECTIONS``.
    driverID : str
        This is a lattice physics configuration option used to determine which
        representative block can be used as a "fixed source" driver for another
        composition. This is particularly useful for non-fuel or highly subcritical
        regions.
    criticalBuckling : bool
        This is a lattice physics configuration option used to enable or disable
        the critical buckling search option.
    nuclideReactionDriver : str
        This is a lattice physics configuration option that is similar to the
        ``driverID``, but rather than applying the source from a specific
        representative block, the neutron source is taken from a single
        nuclides fission spectrum (i.e., U235). This is particularly useful
        for configuring SERPENT 2 lattice physics calculations.
    externalDriver : bool
        This is a lattice physics configuration option that can be used
        to determine if the fixed source problem is internally driven
        or externally driven by the ``driverID`` region. Externally
        driven means that the region will be placed on the outside of the
        current xsID block/region. If this is False then the driver
        region will be "inside" (i.e., an inner ring in a cylindrical
        model).
    useHomogenizedBlockComposition : bool
        This is a lattice physics configuration option that is useful for
        modeling spatially dependent problems (i.e., 1D/2D). If this is
        True then the representative block for the current xsID will be
        be a homogenized region. If this is False then the block will be
        represented in the geometry type selected. This is mainly used for
        1D cylindrical problems.
    numInternalRings : int
        This is a lattice physics configuration option that is used to
        specify the number of grid-based rings for the representative block.
    numExternalRings : int
        This is a lattice physics configuration option that is used to
        specify the number of grid-based rings for the driver block.
    mergeIntoClad : list of str
        This is a lattice physics configuration option that is a list of component
        names to merge into a "clad" component. This is highly-design specific
        and is sometimes used to merge a "gap" or low-density region into
        a "clad" region to avoid numerical issues.
    mergeIntoFuel : list of str
        This is a lattice physics configuration option that is a list of component
        names to merge into a "fuel" component. This is highly-design specific
        and is sometimes used to merge a "gap" or low-density region into
        a "fuel" region to avoid numerical issues.
    meshSubdivisionsPerCm : float
        This is a lattice physics configuration option that can be used to control
        subregion meshing of the representative block in 1D problems.
    xsExecuteExclusive : bool
        The mpi task that results from this xsID will reserve a full processor and
        no others will allocate to it. This is useful for time balancing when you
        have one task that takes much longer than the others.
    xsPriority: int
        The priority of the mpi tasks that results from this xsID. Lower priority
        will execute first. starting longer jobs first is generally more efficient.
    xsMaxAtomNumber : int
        The maximum atom number to model for infinite dilute isotopes in lattice physics.
        This is used to avoid modeling isotopes with a large atomic number
        (e.g., fission products) as a depletion product of an isotope with a much
        smaller atomic number.
    averageByComponent: bool
        Controls whether the representative block averaging is performed on a
        component-by-component basis or on the block as a whole. If True, the
        resulting representative block will have component compositions that
        largely reflect those of the underlying blocks in the collection. If
        False, the number densities of some nuclides in the individual
        components may not be reflective of those of the underlying components
        due to the block number density "dehomogenization".
    minDriverDensity: float
        The minimum number density for nuclides included in driver material for a 1D
        lattice physics model.
    ductHeterogeneous : bool
        This is a lattice physics configuration option used to enable a partially
        heterogeneous approximation for a 1D cylindrical model. Everything inside of the
        duct will be treated as homogeneous.
    traceIsotopeThreshold : float
        This is a lattice physics configuration option used to enable a separate 0D fuel
        cross section calculation for trace fission products when using a 1D cross section
        model. This can significantly reduce the memory and run time required for the 1D
        model. The setting takes a float value that represents the number density cutoff
        for isotopes to be considered "trace". If no value is provided, the default is 0.0.
    xsTempIsotope: str
        The isotope whose temperature is interrogated when placing a block in a temperature
        cross section group. See `tempGroups`. "U238" is default since it tends to be
        dominant doppler isotope in most reactors.
    requiredRAM: float
        The amount of available memory needed to run this cross section model.

    Notes
    -----
    Not all default attributes may be useful for your specific application and you may
    require other types of configuration options. These are provided as examples since
    the base ``latticePhysicsInterface`` does not implement models that use these. For
    additional options, consider subclassing the base ``Setting`` object and using this
    model as a template.
    """

    def __init__(
        self,
        xsID,
        geometry=None,
        xsFileLocation=None,
        fluxFileLocation=None,
        validBlockTypes=None,
        blockRepresentation=None,
        driverID=None,
        criticalBuckling=None,
        nuclideReactionDriver=None,
        externalDriver=None,
        useHomogenizedBlockComposition=None,
        numInternalRings=None,
        numExternalRings=None,
        mergeIntoClad=None,
        mergeIntoFuel=None,
        meshSubdivisionsPerCm=None,
        xsExecuteExclusive=None,
        xsPriority=None,
        xsMaxAtomNumber=None,
        averageByComponent=False,
        minDriverDensity=0.0,
        ductHeterogeneous=False,
        traceIsotopeThreshold=0.0,
        xsTempIsotope="U238",
        requiredRAM=0.0,
    ):
        self.xsID = xsID
        self.geometry = geometry
        self.xsFileLocation = xsFileLocation
        self.validBlockTypes = validBlockTypes
        self.blockRepresentation = blockRepresentation
        # These are application specific, feel free use them
        # in your own lattice physics plugin(s).
        self.fluxFileLocation = fluxFileLocation
        self.driverID = driverID
        self.criticalBuckling = criticalBuckling
        self.nuclideReactionDriver = nuclideReactionDriver
        self.externalDriver = externalDriver
        self.useHomogenizedBlockComposition = useHomogenizedBlockComposition
        self.numInternalRings = numInternalRings
        self.numExternalRings = numExternalRings
        self.mergeIntoClad = mergeIntoClad
        self.mergeIntoFuel = mergeIntoFuel
        self.meshSubdivisionsPerCm = meshSubdivisionsPerCm
        self.xsMaxAtomNumber = xsMaxAtomNumber
        self.minDriverDensity = minDriverDensity
        self.averageByComponent = averageByComponent
        self.ductHeterogeneous = ductHeterogeneous
        self.traceIsotopeThreshold = traceIsotopeThreshold
        # these are related to execution
        self.xsExecuteExclusive = xsExecuteExclusive
        self.xsPriority = xsPriority
        self.xsTempIsotope = xsTempIsotope
        self.requiredRAM = requiredRAM

    def __repr__(self):
        if self.xsIsPregenerated:
            suffix = f"Pregenerated: {self.xsIsPregenerated}"
        else:
            suffix = f"Geometry Model: {self.geometry}"
            if self.fluxIsPregenerated:
                suffix = f"{suffix}, External Flux Solution: {self.fluxFileLocation}"
        return f"<{self.__class__.__name__}, XSID: {self.xsID}, {suffix}>"

    def __iter__(self):
        # Iterate over (attributeName, value) pairs; used by ``serialize`` and ``validate``.
        return iter(self.__dict__.items())

    @property
    def xsType(self):
        """Return the single-char cross section type indicator."""
        return self.xsID[0]

    @property
    def envGroup(self):
        """Return the single-char burnup group indicator."""
        return self.xsID[1]

    @property
    def xsIsPregenerated(self):
        """True if this points to a pre-generated XS file."""
        return self.xsFileLocation is not None

    @property
    def fluxIsPregenerated(self):
        """True if this points to a pre-generated flux solution file."""
        return self.fluxFileLocation is not None

    def serialize(self):
        """Return as a dictionary without ``CONF_XSID`` and with ``None`` values excluded."""
        doNotSerialize = [CONF_XSID]
        return {key: val for key, val in self if key not in doNotSerialize and val is not None}

    def validate(self):
        """
        Performs validation checks on the inputs and provides warnings for option inconsistencies.

        Raises
        ------
        ValueError
            When the mutually exclusive ``xsFileLocation`` and ``geometry`` attributes
            are provided or when neither are provided.
        """
        # Check for valid inputs when the file location is supplied.
        if self.xsFileLocation:
            if self.geometry is not None:
                runLog.warning(
                    f"Either file location or geometry inputs in {self} should be given, but not both. "
                    "The file location setting will take precedence over the geometry inputs. "
                    "Remove one or the other in the `crossSectionSettings` input to fix this warning."
                )

        if self.xsFileLocation is None or self.fluxFileLocation is not None:
            if self.geometry is None:
                raise ValueError(f"{self} is missing a geometry input or a file location.")

        invalids = []
        if self.xsFileLocation is not None:
            for var, val in self:
                # Skip these attributes since they are valid options
                # when the ``xsFileLocation`` attribute`` is set.
                if var in [CONF_XSID, CONF_XS_FILE_LOCATION, CONF_BLOCK_REPRESENTATION]:
                    continue
                if val is not None:
                    invalids.append((var, val))

        if invalids:
            runLog.debug(f"The following inputs in {self} are not valid when the file location is set:")
            for var, val in invalids:
                runLog.debug(f"\tAttribute: {var}, Value: {val}")

        # Check for valid inputs when the geometry is supplied.
        invalids = []
        if self.geometry is not None:
            validOptions = _VALID_INPUTS_BY_GEOMETRY_TYPE[self.geometry]
            for var, val in self:
                if var not in validOptions and val is not None:
                    invalids.append((var, val))

            if invalids:
                runLog.debug(f"The following inputs in {self} are not valid when `{self.geometry}` geometry type is set:")
                for var, val in invalids:
                    runLog.debug(f"\tAttribute: {var}, Value: {val}")
                runLog.debug(f"The valid options for the `{self.geometry}` geometry are: {validOptions}")

    def setDefaults(self, blockRepresentation, validBlockTypes):
        """
        This sets the defaults based on some recommended values based on the geometry type.

        Parameters
        ----------
        blockRepresentation : str
            Valid options are provided in ``CrossSectionGroupManager.BLOCK_COLLECTIONS``
        validBlockTypes : list of str or bool
            This configures which blocks (by their type) the cross section group manager
            will merge together to create a representative block. If set to ``None`` or
            ``True`` then all block types in the XS ID will be considered. If set to
            ``False`` then a default of ["fuel"] will be used. If set to a list of strings
            then the specific list will be used. A typical input may be ["fuel"] to just
            consider the fuel blocks.

        Notes
        -----
        These defaults are application-specific and design specific. They are included to
        provide an example and are tuned to fit the internal needs of TerraPower. Consider
        a separate implementation/subclass if you would like different behavior.
        """
        # A bool is a shorthand: True means "all block types" (None), False means fuel only.
        # A list passes through unchanged.
        if isinstance(validBlockTypes, bool):
            validBlockTypes = None if validBlockTypes else ["fuel"]

        defaults = {}
        if self.xsIsPregenerated:
            allowableBlockCollections = [
                crossSectionGroupManager.MEDIAN_BLOCK_COLLECTION,
                crossSectionGroupManager.AVERAGE_BLOCK_COLLECTION,
                crossSectionGroupManager.FLUX_WEIGHTED_AVERAGE_BLOCK_COLLECTION,
            ]
            defaults = {
                CONF_XS_FILE_LOCATION: self.xsFileLocation,
                CONF_BLOCK_REPRESENTATION: blockRepresentation,
            }

        elif self.geometry == XSGeometryTypes.getStr(XSGeometryTypes.ZERO_DIMENSIONAL):
            allowableBlockCollections = [
                crossSectionGroupManager.MEDIAN_BLOCK_COLLECTION,
                crossSectionGroupManager.AVERAGE_BLOCK_COLLECTION,
                crossSectionGroupManager.FLUX_WEIGHTED_AVERAGE_BLOCK_COLLECTION,
            ]
            # A pregenerated flux solution removes the need for a buckling search.
            bucklingSearch = not self.fluxIsPregenerated
            defaults = {
                CONF_GEOM: self.geometry,
                CONF_BUCKLING: bucklingSearch,
                CONF_DRIVER: "",
                CONF_BLOCK_REPRESENTATION: blockRepresentation,
                CONF_BLOCKTYPES: validBlockTypes,
                CONF_EXTERNAL_FLUX_FILE_LOCATION: self.fluxFileLocation,
            }
        elif self.geometry == XSGeometryTypes.getStr(XSGeometryTypes.ONE_DIMENSIONAL_SLAB):
            allowableBlockCollections = [
                crossSectionGroupManager.SLAB_COMPONENTS_BLOCK_COLLECTION,
            ]
            defaults = {
                CONF_GEOM: self.geometry,
                CONF_MESH_PER_CM: 1.0,
                CONF_BLOCK_REPRESENTATION: crossSectionGroupManager.SLAB_COMPONENTS_BLOCK_COLLECTION,
                CONF_BLOCKTYPES: validBlockTypes,
            }
        elif self.geometry == XSGeometryTypes.getStr(XSGeometryTypes.ONE_DIMENSIONAL_CYLINDER):
            allowableBlockCollections = [crossSectionGroupManager.CYLINDRICAL_COMPONENTS_BLOCK_COLLECTION]
            defaults = {
                CONF_GEOM: self.geometry,
                CONF_DRIVER: "",
                CONF_MERGE_INTO_CLAD: ["gap"],
                CONF_MERGE_INTO_FUEL: [],
                CONF_MESH_PER_CM: 1.0,
                CONF_INTERNAL_RINGS: 0,
                CONF_EXTERNAL_RINGS: 1,
                CONF_HOMOGBLOCK: False,
                CONF_BLOCK_REPRESENTATION: crossSectionGroupManager.CYLINDRICAL_COMPONENTS_BLOCK_COLLECTION,
                CONF_BLOCKTYPES: validBlockTypes,
                CONF_DUCT_HETEROGENEOUS: False,
                CONF_TRACE_ISOTOPE_THRESHOLD: 0.0,
            }
        elif self.geometry == XSGeometryTypes.getStr(XSGeometryTypes.TWO_DIMENSIONAL_HEX):
            allowableBlockCollections = [
                crossSectionGroupManager.MEDIAN_BLOCK_COLLECTION,
                crossSectionGroupManager.AVERAGE_BLOCK_COLLECTION,
                crossSectionGroupManager.FLUX_WEIGHTED_AVERAGE_BLOCK_COLLECTION,
            ]
            defaults = {
                CONF_GEOM: self.geometry,
                CONF_BUCKLING: False,
                CONF_EXTERNAL_DRIVER: True,
                CONF_DRIVER: "",
                CONF_REACTION_DRIVER: None,
                CONF_EXTERNAL_RINGS: 1,
                CONF_BLOCK_REPRESENTATION: blockRepresentation,
            }

        # Execution-related defaults apply to every geometry/pregenerated case.
        defaults[CONF_XS_EXECUTE_EXCLUSIVE] = False
        defaults[CONF_XS_PRIORITY] = 5
        defaults[CONF_COMPONENT_AVERAGING] = False
        defaults[CONF_MEMORY_REQUIREMENT] = 0.0

        for attrName, defaultValue in defaults.items():
            currentValue = getattr(self, attrName)
            if currentValue is None:
                setattr(self, attrName, defaultValue)
            else:
                if attrName == CONF_BLOCK_REPRESENTATION:
                    if currentValue not in allowableBlockCollections:
                        raise ValueError(
                            f"Invalid block collection type `{currentValue}` assigned "
                            f"for {self.xsID}. Expected one of the "
                            f"following: {allowableBlockCollections}"
                        )

        self.validate()
def serializeXSSettings(xsSettingsDict: Union[XSSettings, Dict]) -> Dict[str, Dict]:
    """
    Return a serialized form of the ``XSSettings`` as a dictionary.

    Notes
    -----
    Attributes that are not set (i.e., set to None) will be skipped, as will
    entries whose value is None or empty.
    """
    if not isinstance(xsSettingsDict, dict):
        raise TypeError(f"Expected a dictionary for {xsSettingsDict}")

    output = {}
    for xsID, xsOpts in xsSettingsDict.items():
        # Drop entries whose value is None or an empty dictionary.
        if not xsOpts:
            continue
        if isinstance(xsOpts, XSModelingOptions):
            serialized = xsOpts.serialize()
        elif isinstance(xsOpts, dict):
            serialized = {}
            for config, confVal in xsOpts.items():
                # The XS ID is implied by the key; unset values are omitted.
                if config == CONF_XSID or confVal is None:
                    continue
                serialized[config] = confVal
        else:
            raise TypeError(
                f"{xsOpts} was expected to be a ``dict`` or "
                f"``XSModelingOptions`` options type but is type {type(xsOpts)}"
            )
        output[str(xsID)] = serialized
    return output
class XSSettingDef(Setting):
    """
    Custom setting object to manage the cross section dictionary-like inputs.

    Notes
    -----
    This uses the ``xsSettingsValidator`` schema to validate the inputs
    and will automatically coerce the value into a ``XSSettings`` dictionary.
    """

    def __init__(self, name):
        # All arguments are passed positionally in the base class's order;
        # only the default, schema, description, and label are non-trivial.
        super().__init__(
            name,
            XSSettings(),  # default
            "Data structure defining how cross sections are created",  # description
            "Cross section control",  # label
            None,  # options
            xsSettingsValidator,  # schema
            False,  # enforcedOptions
            None,  # subLabels
            False,  # isEnvironment
            None,  # oldNames
        )

    def dump(self):
        """Return a serialized version of the ``XSSetting`` object."""
        return serializeXSSettings(self._value)
def xsSettingsValidator(xsSettingsDict: Dict[str, Dict]) -> XSSettings:
    """
    Returns a ``XSSettings`` object if validation is successful.

    Notes
    -----
    This provides two levels of checks. The first check is that the attributes
    provided as user input contains the correct key/values and the values are
    of the correct type. The second check uses the ``XSModelingOptions.validate``
    method to check for input inconsistencies and provides warnings if there
    are any issues.
    """
    # Normalize to plain dicts, then run the schema-level type/key check.
    normalized = _XS_SCHEMA(serializeXSSettings(xsSettingsDict))

    validatedSettings = XSSettings()
    for xsID, inputParams in normalized.items():
        # Nothing was provided for this cross section ID; skip it.
        if not inputParams:
            continue
        xsOpt = XSModelingOptions(xsID, **inputParams)
        # Second-level check: warn on inconsistent option combinations.
        xsOpt.validate()
        validatedSettings[xsID] = xsOpt
    return validatedSettings
================================================
FILE: armi/physics/neutronics/diffIsotxs.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to compare ISOTXS files."""
from armi import runLog
from armi.cli.entryPoint import EntryPoint
class CompareIsotxsLibraries(EntryPoint):
    """Compare two ISOTXS files."""

    name = "diff-isotxs"

    def addOptions(self):
        """Define the command line arguments for the ``diff-isotxs`` entry point."""
        self.parser.add_argument(
            "reference",
            help="Reference ISOTXS for comparison. Percent differences are given in relation to this file.",
        )
        self.parser.add_argument(
            "comparisonFiles",
            nargs="+",
            help="ISOTXS files to compare to the reference",
        )
        self.parser.add_argument(
            "--nuclidesNames",
            "-n",
            nargs="+",
            help="For the interaction types identified only compare these nuclides.",
        )
        self.parser.add_argument(
            "--interactions",
            "-i",
            nargs="+",
            # Fixed typo in the user-facing help text: "interactins" -> "interactions".
            help="Compare the cross sections for these interactions and specified nuclides.",
        )
        self.parser.add_argument(
            "--fluxFile",
            "-f",
            help="Mcc3 file containing flux_bg (broad group flux) for single-group comparison.",
        )

    def invoke(self):
        """Read the reference library and report differences against each comparison file."""
        # Imported locally to keep CLI startup fast; nuclearDataIO is only
        # needed when this entry point actually runs.
        from armi.nuclearDataIO import isotxs, xsLibraries

        runLog.setVerbosity(0)
        refIsotxs = isotxs.readBinary(self.args.reference)
        for fname in self.args.comparisonFiles:
            cmpIsotxs = isotxs.readBinary(fname)
            xsLibraries.compare(refIsotxs, cmpIsotxs)
================================================
FILE: armi/physics/neutronics/energyGroups.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Energy group structures for multigroup neutronics calculations."""
import copy
import itertools
import math
import numpy as np
from armi import runLog
from armi.physics.neutronics.const import (
FAST_FLUX_THRESHOLD_EV,
HIGH_ENERGY_EV,
MAXIMUM_XS_LIBRARY_ENERGY,
ULTRA_FINE_GROUP_LETHARGY_WIDTH,
)
from armi.utils.mathematics import findNearestValue
def getFastFluxGroupCutoff(eGrpStruc):
    """
    Given a constant "fast" energy threshold, return which ARMI energy group
    index contains this threshold.

    .. impl:: Return the energy group index which contains a given energy threshold.
        :id: I_ARMI_EG_FE
        :implements: R_ARMI_EG_FE

        This function returns the energy group within a given group structure
        that contains the fast flux threshold energy. The threshold energy is
        imported from :py:mod:`armi.physics.neutronics.const`, where it is
        defined as 100 keV. This is a standard definition for fast flux. This
        function also calculates and returns the fraction of the threshold
        energy group that is above the 100 keV threshold.
    """
    # Index of the first group whose upper bound drops below the threshold;
    # -1 when no bound falls below it (matching the original loop behavior).
    gThres = next(
        (g for g, eV in enumerate(eGrpStruc) if eV < FAST_FLUX_THRESHOLD_EV),
        -1,
    )
    dE = eGrpStruc[gThres - 1] - eGrpStruc[gThres]  # eV
    fastFluxFracInG = (eGrpStruc[gThres - 1] - FAST_FLUX_THRESHOLD_EV) / dE
    return gThres - 1, fastFluxFracInG
def _flatten(*numbers):
result = []
for item in numbers:
if isinstance(item, int):
result.append(item)
else:
result.extend(item)
return result
def _create_anl_energies_with_group_lethargies(*group_lethargies):
    """Build descending group upper-bound energies (eV) from per-group lethargy widths."""
    upperBound = MAXIMUM_XS_LIBRARY_ENERGY
    energies = []
    for lethargyWidth in _flatten(*group_lethargies):
        energies.append(upperBound)
        # Step down by this group's width, measured in ultra-fine lethargy units.
        upperBound *= math.e ** (-lethargyWidth * ULTRA_FINE_GROUP_LETHARGY_WIDTH)
    return energies
def getGroupStructure(name):
    """
    Return descending neutron energy group upper bounds in eV for a given
    structure name.

    .. impl:: Provide the neutron energy group bounds for a given group structure.
        :id: I_ARMI_EG_NE
        :implements: R_ARMI_EG_NE

        There are several built-in group structures that are defined in this
        module, which are stored in a dictionary. This function takes a group
        structure name as an input parameter, which it uses as a key for the
        group structure dictionary. If the group structure name is valid, it
        returns a copy of the energy group structure resulting from the
        dictionary lookup. Otherwise, it throws an error.

    Notes
    -----
    A copy of the group structure is returned so that modifications of the
    energy bounds do not propagate back to the ``GROUP_STRUCTURE`` dictionary.
    """
    try:
        structure = GROUP_STRUCTURE[name]
    except KeyError:
        # Help the user recover by listing every known structure name.
        runLog.error(
            'Could not find groupStructure with the name "{}".\nChoose one of: {}'.format(
                name, ", ".join(GROUP_STRUCTURE.keys())
            )
        )
        raise
    return copy.copy(structure)
def getGroupStructureType(neutronEnergyBoundsInEv):
    """Return neutron energy group structure name for a given set of neutron energy group bounds in eV."""
    candidateBounds = np.array(neutronEnergyBoundsInEv)
    for structureName in GROUP_STRUCTURE:
        referenceBounds = np.array(getGroupStructure(structureName))
        # Only structures with the same number of groups can possibly match.
        if len(referenceBounds) != len(candidateBounds):
            continue
        if np.allclose(referenceBounds, candidateBounds, 1e-5):
            return structureName
    raise ValueError(
        "Neutron energy group structure type does not exist for the given neutron energy bounds: {}".format(
            candidateBounds
        )
    )
GROUP_STRUCTURE = {}
"""
Energy groups for use in multigroup neutronics.
Values are the upper bound of each energy in eV from highest energy to lowest
(because neutrons typically downscatter...)
:meta hide-value:
"""
GROUP_STRUCTURE["2"] = [HIGH_ENERGY_EV, 6.25e-01]
# for calculating fast flux
GROUP_STRUCTURE["FastFlux"] = [HIGH_ENERGY_EV, FAST_FLUX_THRESHOLD_EV]
# Nuclear Reactor Engineering: Reactor Systems Engineering, Vol. 1
GROUP_STRUCTURE["4gGlasstoneSesonske"] = [HIGH_ENERGY_EV, 5.00e04, 5.00e02, 6.25e-01]
# http://serpent.vtt.fi/mediawiki/index.php/CASMO_4-group_structure
GROUP_STRUCTURE["CASMO4"] = [HIGH_ENERGY_EV, 8.21e05, 5.53e03, 6.25e-01]
GROUP_STRUCTURE["CASMO12"] = [
HIGH_ENERGY_EV,
2.23e06,
8.21e05,
5.53e03,
4.81e01,
4.00e00,
6.25e-01,
3.50e-01,
2.80e-01,
1.40e-01,
5.80e-02,
3.00e-02,
]
# For typically for use with MCNP will need conversion to MeV, and ordering from low to high.
# reference: https://www.sciencedirect.com/science/article/pii/S0149197022003778
# reference: https://mcnp.lanl.gov/pdf_files/TechReport_2017_LANL_LA-UR-17-29981_WernerArmstrongEtAl.pdf
GROUP_STRUCTURE["CINDER63"] = [
2.5000e7,
2.0000e7,
1.6905e7,
1.4918e7,
1.0000e7,
6.0650e6,
4.9658e6,
3.6788e6,
2.8651e6,
2.2313e6,
1.7377e6,
1.3534e6,
1.1080e6,
8.2085e5,
6.3928e5,
4.9790e5,
3.8870e5,
3.0200e5,
1.8320e5,
1.1110e5,
6.7380e4,
4.0870e4,
2.5540e4,
1.9890e4,
1.5030e4,
9.1190e3,
5.5310e3,
3.3550e3,
2.8400e3,
2.4040e3,
2.0350e3,
1.2340e3,
7.4850e2,
4.5400e2,
2.7540e2,
1.6700e2,
1.0130e2,
6.1440e1,
3.7270e1,
2.2600e1,
1.3710e1,
8.3150,
5.0430,
3.0590,
1.8550,
1.1250,
6.8300e-1,
4.1400e-1,
2.5100e-1,
1.5200e-1,
1.0000e-1,
8.0000e-2,
6.7000e-2,
5.8000e-2,
5.0000e-2,
4.2000e-2,
3.5000e-2,
3.0000e-2,
2.5000e-2,
2.0000e-2,
1.5000e-2,
1.0000e-2,
5.0000e-3,
]
# Group structures below here are derived from Appendix E in
# https://www.osti.gov/biblio/1483949-mc2-multigroup-cross-section-generation-code-fast-reactor-analysis-nuclear
GROUP_STRUCTURE["ANL9"] = _create_anl_energies_with_group_lethargies(222, 120, itertools.repeat(180, 5), 540, 300)
GROUP_STRUCTURE["ANL33"] = _create_anl_energies_with_group_lethargies(42, itertools.repeat(60, 28), 90, 240, 29, 1)
GROUP_STRUCTURE["ANL70"] = _create_anl_energies_with_group_lethargies(42, itertools.repeat(30, 67), 29, 1)
# fmt: off
GROUP_STRUCTURE["ANL116"] = _create_anl_energies_with_group_lethargies(
15*[6] + [3] + 2*[6] + [3] + [12] + 3*[6] + 3*[12] + 2*[6] + 2*[12] + [4] + [6] + [2] +
[12] + 2*[6] + [12] + 2*[6] +2*[12] + [6] + [12] + 2*[6] + 6*[12] + [6] + 4*[12] + 4*[6] +
5*[12] + [6] + 3*[12] + [6] + 2*[30] + 2*[15] + [30] + 4*[15] + [18] + [12] + 5*[30] +
[24] + [12] + [24] + [19] + [11] + [18] + [24] + 3*[18] + 2*[12] + 14*[60] + 2*[30] + [29] + [1]
)
GROUP_STRUCTURE["ANL230"] = _create_anl_energies_with_group_lethargies(
[
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 1, 3, 3, 3, 3, 3,
6, 6, 6, 3, 3, 3, 3, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 6, 6,
6, 6, 2, 2, 1, 1, 2, 2, 2, 6, 6, 3, 3, 3, 3, 6, 6, 3, 3,
3, 3, 6, 6, 6, 6, 3, 3, 6, 6, 6, 3, 2, 1, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, 6,
3, 3, 3, 3, 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3,
6, 6, 6, 6, 6, 6, 6, 15, 15, 15, 15, 9, 6, 6, 9, 15, 15, 15, 3,
3, 9, 15, 9, 6, 3, 3, 9, 3, 12, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 12, 12, 6, 6, 12, 12, 12, 7, 5, 6, 6, 12, 12, 12, 12, 6, 6, 12,
12, 6, 6, 6, 6, 6, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30,
30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 6, 24, 10, 20,
29, 1,
]
)
# Reactor agnostic. Similar to ANL1041 but with 6 UFGs grouped together.
# More likely to not error out on memory than 703
GROUP_STRUCTURE["348"] = _create_anl_energies_with_group_lethargies(itertools.repeat(6, 346), 5, 1)
# Note that at one point the MC2 manual was inconsistent with the code itself
GROUP_STRUCTURE["ANL703"] = _create_anl_energies_with_group_lethargies(
[
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 1, 1, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 1, 1, 2, 2,
2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 2, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 1, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 1, 3, 3, 3, 3, 3, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2,
1,
]
)
# fmt: on
GROUP_STRUCTURE["ANL1041"] = _create_anl_energies_with_group_lethargies(itertools.repeat(2, 1041))
GROUP_STRUCTURE["ANL2082"] = _create_anl_energies_with_group_lethargies(itertools.repeat(1, 2082))
def _create_multigroup_structures_on_finegroup_energies(multigroup_energy_bounds, finegroup_energy_bounds):
    """Set energy group bounds to the nearest ultra-fine group boundaries."""
    # The top bound always snaps to the highest fine-group energy; every other
    # bound snaps to whichever fine-group boundary is closest. Using a set
    # collapses any bounds that snap to the same boundary.
    snappedBounds = {max(finegroup_energy_bounds)}
    for energyBound in multigroup_energy_bounds[1:]:
        snappedBounds.add(findNearestValue(finegroup_energy_bounds, energyBound))
    return sorted(snappedBounds, reverse=True)
def _create_anl_energies_with_group_energies(group_energy_bounds):
    """Set energy group bounds to the nearest ANL ultra-fine group boundaries."""
    # The ANL ultra-fine structure is 2082 groups of one lethargy unit each.
    fineGroupEnergies = _create_anl_energies_with_group_lethargies(itertools.repeat(1, 2082))
    return _create_multigroup_structures_on_finegroup_energies(group_energy_bounds, fineGroupEnergies)
"""
Taken from Section A3.1 SHEM-361 in
Ngeleka, Tholakele Prisca. "Examination and improvement of the SHEM energy
group structure for HTR and deep burn HTR design and analysis." (2012).
"""
GROUP_STRUCTURE["SHEM361"] = [
19640300,
14918200,
13840300,
11618300,
9999990,
9048360,
8187300,
7408170,
6703190,
6065300,
4965850,
4065690,
3328710,
2725310,
2231300,
1901390,
1636540,
1405770,
1336940,
1286960,
1162050,
1051150,
951119,
860006,
706511,
578443,
494002,
456021,
412501,
383884,
320646,
267826,
230014,
195008,
164999,
140000,
122773,
115624,
94664.5,
82297.4,
67379.4,
55165.6,
49915.9,
40867.7,
36978.6,
33459.6,
29281,
27394.4,
26100.1,
24999.1,
22699.4,
18584.7,
16200.5,
14899.7,
13603.7,
11137.7,
9118.81,
7465.85,
6112.52,
5004.51,
4097.35,
3481.07,
2996.18,
2700.24,
2397.29,
2084.1,
1811.83,
1586.2,
1343.58,
1134.67,
1064.32,
982.494,
909.681,
832.218,
748.517,
677.287,
646.837,
612.834,
600.099,
592.941,
577.146,
539.204,
501.746,
453.999,
419.094,
390.76,
371.703,
353.575,
335.323,
319.928,
295.922,
288.327,
284.888,
276.468,
268.297,
256.748,
241.796,
235.59,
224.325,
212.108,
200.958,
195.996,
193.078,
190.204,
188.877,
187.559,
186.251,
184.952,
183.295,
175.229,
167.519,
163.056,
154.176,
146.657,
139.504,
132.701,
126.229,
120.554,
117.577,
116.524,
115.48,
112.854,
110.288,
105.646,
103.038,
102.115,
101.605,
101.098,
100.594,
97.3287,
93.3256,
88.7741,
83.9393,
79.3679,
76.3322,
73.5595,
71.8869,
69.0682,
66.8261,
66.4929,
66.1612,
65.8312,
65.5029,
65.046,
64.5923,
63.6306,
62.3083,
59.925,
57.0595,
54.06,
52.9895,
51.7847,
49.2591,
47.5173,
46.2053,
45.2904,
44.1721,
43.1246,
42.1441,
41.227,
39.7295,
38.7874,
37.7919,
37.3038,
36.8588,
36.4191,
36.0568,
35.698,
34.5392,
33.0855,
31.693,
27.8852,
24.6578,
22.5356,
22.3788,
22.1557,
22.0011,
21.7018,
21.4859,
21.336,
21.2296,
21.1448,
21.0604,
20.9763,
20.7676,
20.6847,
20.6021,
20.5199,
20.4175,
20.2751,
20.0734,
19.5974,
19.3927,
19.1997,
19.0848,
17.9591,
17.759,
17.5648,
17.4457,
16.8305,
16.5501,
16.0498,
15.7792,
14.8662,
14.7301,
14.5952,
14.4702,
14.2505,
14.0496,
13.546,
13.3297,
12.6,
12.4721,
12.3086,
12.1302,
11.9795,
11.8153,
11.7094,
11.5894,
11.2694,
11.0529,
10.8038,
10.5793,
9.50002,
9.14031,
8.97995,
8.80038,
8.67369,
8.52407,
8.30032,
8.13027,
7.97008,
7.83965,
7.73994,
7.60035,
7.38015,
7.13987,
6.99429,
6.91778,
6.87021,
6.83526,
6.8107,
6.79165,
6.77605,
6.75981,
6.74225,
6.71668,
6.63126,
6.60611,
6.58829,
6.57184,
6.55609,
6.53907,
6.51492,
6.48178,
6.43206,
6.35978,
6.28016,
6.16011,
6.05991,
5.96014,
5.80021,
5.72015,
5.61979,
5.53004,
5.48817,
5.41025,
5.38003,
5.32011,
5.21008,
5.10997,
4.93323,
4.76785,
4.4198,
4.30981,
4.21983,
4,
3.88217,
3.71209,
3.54307,
3.14211,
2.88405,
2.77512,
2.74092,
2.7199,
2.70012,
2.64004,
2.62005,
2.59009,
2.55,
2.46994,
2.33006,
2.27299,
2.21709,
2.15695,
2.0701,
1.98992,
1.90008,
1.77997,
1.66895,
1.58803,
1.51998,
1.44397,
1.41001,
1.38098,
1.33095,
1.29304,
1.25094,
1.21397,
1.16999,
1.14797,
1.12997,
1.11605,
1.10395,
1.09198,
1.07799,
1.03499,
1.02101,
1.00904,
0.996501,
0.981959,
0.96396,
0.944022,
0.919978,
0.880024,
0.800371,
0.719999,
0.624999,
0.594993,
0.55499,
0.520011,
0.475017,
0.431579,
0.390001,
0.352994,
0.325008,
0.305012,
0.279989,
0.254997,
0.231192,
0.20961,
0.190005,
0.161895,
0.137999,
0.119995,
0.104298,
0.0897968,
0.0764969,
0.0651999,
0.0554982,
0.0473019,
0.0402999,
0.0343998,
0.0292989,
0.0249394,
0.0200104,
0.01483,
0.0104505,
0.00714526,
0.00455602,
0.0024999,
]
# Energy bounds of ARMI33 and ARMI45 are modified to the nearest ultra-fine group boundaries
GROUP_STRUCTURE["ARMI33"] = _create_anl_energies_with_group_energies(
    [
        1.4190e07, 1.0000e07, 6.0650e06, 3.6780e06, 2.2313e06, 1.3530e06,
        8.2080e05, 4.9787e05, 3.0190e05, 1.8310e05, 1.1109e05, 6.7370e04,
        4.0860e04, 2.4788e04, 1.5030e04, 9.1180e03, 5.5308e03, 3.3540e03,
        2.0340e03, 1.2341e03, 7.4850e02, 4.5390e02, 3.0432e02, 1.4860e02,
        9.1660e01, 6.7904e01, 4.0160e01, 2.2600e01, 1.3709e01, 8.3150e00,
        4.0000e00, 5.4000e-01, 4.1400e-01,
    ]
)

# Energy bounds of SHEM33_361 is ANL33 modified to the nearest SHEM361 fine group boundaries
GROUP_STRUCTURE["SHEM33_361"] = _create_multigroup_structures_on_finegroup_energies(
    GROUP_STRUCTURE["ANL33"], GROUP_STRUCTURE["SHEM361"]
)

GROUP_STRUCTURE["ARMI45"] = _create_anl_energies_with_group_energies(
    [
        1.419e07, 1.000e07, 6.065e06, 4.966e06, 3.679e06, 2.865e06, 2.231e06,
        1.738e06, 1.353e06, 1.108e06, 8.209e05, 6.393e05, 4.979e05, 3.887e05,
        3.020e05, 1.832e05, 1.111e05, 6.738e04, 4.087e04, 2.554e04, 1.989e04,
        1.503e04, 9.119e03, 5.531e03, 3.355e03, 2.840e03, 2.404e03, 2.035e03,
        1.234e03, 7.485e02, 4.540e02, 2.754e02, 1.670e02, 1.013e02, 6.144e01,
        3.727e01, 2.260e01, 1.371e01, 8.315e00, 5.043e00, 3.059e00, 1.855e00,
        1.125e00, 6.830e-01, 4.140e-01,
    ]
)
"""
Taken from Table 5.1 of "GAMSOR: Gamma Source Preparation and DIF3D Flux Solution",
ANL/NE-16/50 Rev 2.0, M.A. Smith, C.H. Lee, R.N. Hill, Aug 30 2022.
"""
GROUP_STRUCTURE["ANL21G"] = [
2.0e7,
1.0e7,
8.0e6,
7.0e6,
6.0e6,
5.0e6,
4.0e6,
3.0e6,
2.5e6,
2.0e6,
1.5e6,
1.0e6,
7.0e5,
4.5e5,
3.0e5,
1.5e5,
1.0e5,
7.5e4,
4.5e4,
3.0e4,
2.0e4,
]
"""
Taken from Table 5.2 of "GAMSOR: Gamma Source Preparation and DIF3D Flux Solution",
ANL/NE-16/50 Rev 2.0, M.A. Smith, C.H. Lee, R.N. Hill, Aug 30 2022.
"""
GROUP_STRUCTURE["ANL94G"] = [
2.000e07,
1.400e07,
1.200e07,
1.100e07,
1.060e07,
1.000e07,
9.500e06,
9.000e06,
8.500e06,
8.000e06,
7.750e06,
7.500e06,
7.250e06,
7.000e06,
6.750e06,
6.500e06,
6.250e06,
6.000e06,
5.750e06,
5.500e06,
5.400e06,
5.200e06,
5.000e06,
4.700e06,
4.500e06,
4.400e06,
4.200e06,
4.000e06,
3.900e06,
3.800e06,
3.650e06,
3.500e06,
3.333e06,
3.166e06,
3.000e06,
2.833e06,
2.666e06,
2.500e06,
2.333e06,
2.166e06,
2.000e06,
1.875e06,
1.750e06,
1.660e06,
1.600e06,
1.500e06,
1.420e06,
1.330e06,
1.250e06,
1.200e06,
1.125e06,
1.000e06,
9.000e05,
8.650e05,
8.250e05,
8.000e05,
7.500e05,
7.000e05,
6.750e05,
6.500e05,
6.250e05,
6.000e05,
5.750e05,
5.500e05,
5.250e05,
5.000e05,
4.500e05,
4.250e05,
4.000e05,
3.750e05,
3.500e05,
3.250e05,
3.000e05,
2.600e05,
2.200e05,
1.900e05,
1.600e05,
1.500e05,
1.400e05,
1.200e05,
1.000e05,
9.000e04,
8.000e04,
7.500e04,
6.500e04,
6.000e04,
5.500e04,
4.500e04,
4.000e04,
3.500e04,
3.000e04,
2.000e04,
1.500e04,
1.000e04,
]
================================================
FILE: armi/physics/neutronics/fissionProductModel/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Fission product model subpackage."""
import os
from armi.context import RES
# Default lumped fission product data file shipped in the ARMI resources directory.
REFERENCE_LUMPED_FISSION_PRODUCT_FILE = os.path.join(RES, "referenceFissionProducts.dat")
================================================
FILE: armi/physics/neutronics/fissionProductModel/fissionProductModel.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the implementation of the ``FissionProductModel`` interface.
This ``FissionProductModel`` class implements the management of fission products within
the reactor core and can be extended to support more general applications. Currently, the
fission product model supports explicit modeling of fission products in each of the
blocks/components, independent management of lumped fission products for each
blocks/components within the core, or global management of lumped fission products
where the fission products between all blocks/components are shared and are modified
together.
Within the framework, there is a coupling between the management of the fission products
through this model to neutronics evaluations of flux and depletion calculations.
When using a Monte Carlo solver, such as MCNP (i.e., there is an interface that is attached
to the operator that has a name of "mcnp"), the fission products will always be treated
independently and fission products (either explicit or lumped) will be added to all
blocks/components in the core. The reason for this is that Monte Carlo solvers, like MCNP,
may implement their own coupling between flux and depletion evaluations and having the
initialization of these fission products in each block/component independently will
allow that solver to manage the inventory over time.
When determining which fission product model to use (either explicit or lumped) it is
important to consider which cross section data is available to the flux and/or depletion
solvers, and what level of fidelity is required for the analysis. This is where decisions
as a developer/user need to be made, and the implementation of this specific model may
not be, in general, accurate for any reactor system. It is dependent on which plugins
are implemented and the requirements of the individual flux/depletion solver.
Lumped fission products are generally useful for fast reactor applications, especially
in fuel cycle calculations or scoping evaluations where the tracking of the detailed
nuclide inventory would not have substantial impacts on core reactivity predictions.
This is typically done by collapsing all fission products into lumped nuclides, like
``LFP35``, ``LFP38``, ``LFP39``, ``LFP40``, and ``LFP41``. This is the implementation
in the framework, which is discussed a bit more in the ``fpModel`` setting. These
lumped fission products are separated into different bins that represent the fission
product yields from U-235, U-238, Pu-239, Pu-240, and Pu-241/Am-241, respectively. The
exact binning of which fission events from which target nuclides is specified by the
``burn-chain.yaml`` file, which can be modified by a user/developer. When selecting this
modeling option, the blocks/components will have these ``LFP`` nuclides in the number
density dictionaries. The key thing here is that these lumped nuclides do not exist
in nature and therefore do not have nuclear data directly available in cross section
evaluations, like ENDF/B. If the user wishes to consider these nuclides in the flux/depletion
evaluations, then cross sections for these ``LFP`` nuclides will need to be prepared. Generally
speaking, the ``crossSectionGroupManager`` and the ``latticePhysicsInterface`` could be
used to implement this for cross section generation codes, like NJOY, CASMO, MC2-3, Serpent,
etc.
.. warning::
The lumped fission product model and the ``burn-chain.yaml`` data may not be directly
applicable to light water reactor systems, especially if there are strong reactivity
impacts with fission products like ``Xe`` and ``Sm`` that need to be tracked independently.
A user/developer may update the ``referenceFissionProducts.dat`` data file to exclude
these important nuclides from the lumped fission product models if need be, but this
would also require updating the ``burn-chain.yaml`` file as well as updating the
``nuclideFlags`` specification within the reactor blueprints input.
A further simplified option for lumped fission product treatment that is available is to
treat all fission products explicitly as ``Mo-99``. This is not guaranteed to be an accurate
treatment of the fission products from a reactivity/depletion perspective, but it is
available for quick scoping evaluations and model building.
Finally, the explicit fission product modeling aims to include as many nuclides on the
blocks/components as the user wishes to consider, but the nuclides that are modeled
must be compatible with the plugins that are implemented for the application. When using this
option, the user should look to set the ``fpModelLibrary`` setting.
- If this setting is not set, then it is expected that the user will need to manually add
all nuclides to the ``nuclideFlags`` section of the reactor core blueprints.
- If the ``fpModelLibrary`` is selected then this will automatically add to the
``nuclideFlags`` input using :py:func:`isotopicOptions.autoUpdateNuclideFlags`
and this class will initialize all added nuclides to have zero number densities.
.. warning::
The explicit fission product model is being implemented with the vision of using
generating multi-group cross sections for nuclides that are added with the
``fpModelLibrary`` setting with follow-on depletion calculations that will be managed by
a detailed depletion solver, like ORIGEN. There are many caveats to how this model
is initialized and may not be an out-of-the-box general solution.
"""
from armi import interfaces, runLog
from armi.physics.neutronics.fissionProductModel import lumpedFissionProduct
from armi.physics.neutronics.fissionProductModel.fissionProductModelSettings import (
CONF_FP_MODEL,
CONF_MAKE_ALL_BLOCK_LFPS_INDEPENDENT,
)
from armi.reactor.flags import Flags
# Each fission event yields two fission fragments, so one lumped fission
# product atom represents 2.0 fission product atoms.
NUM_FISSION_PRODUCTS_PER_LFP = 2.0

# Place this interface after the preprocessing stage in the interface stack.
ORDER = interfaces.STACK_ORDER.AFTER + interfaces.STACK_ORDER.PREPROCESSING
def describeInterfaces(_cs):
    """Function for exposing interface(s) to other code."""
    # The settings object is unused; this interface is always offered.
    interfaceInfo = (FissionProductModel, {})
    return interfaceInfo
class FissionProductModel(interfaces.Interface):
"""Coordinates the fission product model on the reactor."""
name = "fissionProducts"
def __init__(self, r, cs):
interfaces.Interface.__init__(self, r, cs)
self._globalLFPs = lumpedFissionProduct.lumpedFissionProductFactory(self.cs)
@property
def _explicitFissionProducts(self):
return self.cs[CONF_FP_MODEL] == "explicitFissionProducts"
@property
def _useGlobalLFPs(self):
return not (self.cs[CONF_MAKE_ALL_BLOCK_LFPS_INDEPENDENT] or self._explicitFissionProducts)
@property
def _fissionProductBlockType(self):
"""
Set the block type that the fission products will be applied to.
Notes
-----
Some Monte Carlo codes require all nuclides to be consistent in all
materials when assemblies are shuffled. This requires that fission
products be consistent across all blocks, even if fission products are
not generated when the block is depleted.
"""
return None if self.getInterface("mcnp") is not None else Flags.FUEL
def interactBOL(self):
interfaces.Interface.interactBOL(self)
if self._explicitFissionProducts:
self.setAllComponentFissionProducts()
else:
self.setAllBlockLFPs()
def setAllComponentFissionProducts(self):
"""
Initialize all nuclides for each ``DEPLETABLE`` component in the core.
Notes
-----
This should be called when explicit fission product modeling is enabled to
ensure that all isotopes are initialized on the depletable components within
the reactor data model so that there is some density as a starting point.
When explicit fission products are enabled and the user has not already included
all fission products in the blueprints (in ``nuclideFlags``), the ``fpModelLibrary`` setting is used
to autofill all the nuclides in a given library into the ``blueprints.allNuclidesInProblem``
list. All nuclides that were not manually initialized by the user are added to
the ``DEPLETABLE`` components throughout every block in the core.
The ``DEPLETABLE`` flag is based on the user adding this explicitly in the blueprints,
or is based on the user setting a nuclide to ``burn: true`` in the blueprint ``nuclideFlags``.
See Also
--------
armi.reactor.blueprints.isotopicOptions.autoUpdateNuclideFlags
armi.reactor.blueprints.isotopicOptions.getAllNuclideBasesByLibrary
"""
for b in self.r.core.getBlocks(includeAll=True):
b.setLumpedFissionProducts(None)
for c in b.getComponents(Flags.DEPLETABLE):
# Add all isotopes in problem at 0.0 density
updatedNDens = c.getNumberDensities()
# self.r.blueprints.allNuclidesInProblem contains ~everything in ENDF if _explicitFissionProducts
for nuc in self.r.blueprints.allNuclidesInProblem:
if nuc in updatedNDens:
continue
updatedNDens[nuc] = 0.0
c.updateNumberDensities(updatedNDens)
def setAllBlockLFPs(self):
"""
Sets all the block lumped fission products attributes.
See Also
--------
armi.reactor.components.Component.setLumpedFissionProducts
"""
for b in self.r.core.getBlocks(self._fissionProductBlockType, includeAll=True):
if self._useGlobalLFPs:
b.setLumpedFissionProducts(self.getGlobalLumpedFissionProducts())
else:
independentLFPs = self.getGlobalLumpedFissionProducts().duplicate()
b.setLumpedFissionProducts(independentLFPs)
def getGlobalLumpedFissionProducts(self):
r"""
Lookup the detailed fission product object associated with a xsType and burnup group.
See Also
--------
armi.physics.neutronics.isotopicDepletion.depletion.DepletionInterface.buildFissionProducts
armi.reactor.blocks.Block.getLumpedFissionProductCollection : same thing, but block-level compatible. Use this
"""
return self._globalLFPs
def setGlobalLumpedFissionProducts(self, lfps):
    """
    Store ``lfps`` as the global lumped fission product collection.

    Note: the previous docstring ("lookup by xsType and burnup group") was a
    copy-paste from elsewhere; this is a plain setter for ``self._globalLFPs``.

    See Also
    --------
    armi.reactor.blocks.Block.getLumpedFissionProductCollection : same thing, but block-level compatible. Use this
    """
    self._globalLFPs = lfps
def interactBOC(self, cycle=None):
    """Re-apply the configured fission product treatment at beginning of cycle."""
    # Select the initializer matching the configured fission product model.
    if self._explicitFissionProducts:
        initialize = self.setAllComponentFissionProducts
    else:
        initialize = self.setAllBlockLFPs
    initialize()
def interactDistributeState(self):
    """Re-apply the configured fission product treatment after state distribution."""
    # Same dispatch as interactBOC: explicit model works per-component,
    # otherwise lumped fission products are applied per-block.
    if not self._explicitFissionProducts:
        self.setAllBlockLFPs()
    else:
        self.setAllComponentFissionProducts()
def getAllFissionProductNames(self):
    """
    Find all fission product names from the lumped fission product collection.

    Notes
    -----
    This considers all LFP collections, whether they are global, block-level,
    or a mix of these.
    """
    runLog.debug("Gathering all possible fission products that are modeled.")
    # Gather each distinct LFP collection exactly once (dedupe by equality,
    # preserving first-seen order, as the original loop did).
    uniqueCollections = []
    for block in self.r.core.getBlocks(Flags.FUEL, includeAll=True):
        collection = block.getLumpedFissionProductCollection()
        if collection and collection not in uniqueCollections:
            uniqueCollections.append(collection)
    # Order-preserving de-duplication of fission product names.
    names = {}
    for collection in uniqueCollections:
        for fpName in collection.getAllFissionProductNames():
            names.setdefault(fpName, None)
    return list(names)
def removeFissionGasesFromBlocks(self):
    """
    Return False to indicate that no fission products are being removed.

    Notes
    -----
    This should be implemented on an application-specific model.
    """
    # Base-class hook: warn so users know gas removal silently did nothing.
    message = f"Fission gas removal is not implemented in {self}"
    runLog.warning(message)
    return False
================================================
FILE: armi/physics/neutronics/fissionProductModel/fissionProductModelSettings.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Settings related to the fission product model."""
from armi.physics.neutronics import fissionProductModel
from armi.settings import setting
CONF_FP_MODEL = "fpModel"
CONF_MAKE_ALL_BLOCK_LFPS_INDEPENDENT = "makeAllBlockLFPsIndependent"
CONF_LFP_COMPOSITION_FILE_PATH = "lfpCompositionFilePath"
CONF_FISSION_PRODUCT_LIBRARY_NAME = "fpModelLibrary"
def defineSettings():
    """
    Define settings for the plugin.

    Returns
    -------
    list of setting.Setting
        The fission-product-model settings registered by this plugin.
    """
    settings = [
        setting.Setting(
            CONF_FP_MODEL,
            default="infinitelyDilute",
            label="Fission Product Model",
            description=(
                "This setting is used to determine how fission products are treated in an "
                "analysis. By choosing `noFissionProducts`, no fission products will be added. By "
                "selecting, `infinitelyDilute`, lumped fission products will be initialized to a "
                "very small number on the blocks/components that require them. By choosing `MO99`, "
                "the fission products will be represented only by Mo-99. This is a simplistic "
                "assumption that is commonly used by fast reactor analyses in scoping calculations "
                "and is not necessarily a great assumption for depletion evaluations. Finally, by "
                "choosing `explicitFissionProducts` the fission products will be added explicitly "
                "to the blocks/components that are depletable. This is useful for detailed tracking "
                "of fission products."
            ),
            options=[
                "noFissionProducts",
                "infinitelyDilute",
                "MO99",
                "explicitFissionProducts",
            ],
        ),
        setting.Setting(
            CONF_FISSION_PRODUCT_LIBRARY_NAME,
            default="",
            label="Fission Product Library",
            description=(
                f"This setting should be used when `{CONF_FP_MODEL}` is set to "
                "`explicitFissionProducts`. It is used in conjunction with any nuclideFlags "
                "defined in the blueprints to configure all the nuclides that are modeled within "
                "the core. Selecting any library option will add all nuclides from the selected "
                "library to the model so that analysts do not need to change their inputs when "
                "modifying the fission product treatment for calculations."
            ),
            options=[
                "",
                "MC2-3",
            ],
        ),
        setting.Setting(
            CONF_MAKE_ALL_BLOCK_LFPS_INDEPENDENT,
            default=False,
            label="Use Independent LFPs",
            description=(
                # Typo fix: previously read "is on registered on the operator stack".
                "Flag to make all blocks have independent lumped fission products. Note that this "
                "is forced to be True when the `explicitFissionProducts` modeling option is "
                "selected or an interface named `mcnp` is registered on the operator stack."
            ),
        ),
        setting.Setting(
            CONF_LFP_COMPOSITION_FILE_PATH,
            default=fissionProductModel.REFERENCE_LUMPED_FISSION_PRODUCT_FILE,
            label="LFP Definition File",
            description=(
                "Path to the file that contains lumped fission product composition definitions "
                "(e.g. equilibrium yields). This is unused when the `explicitFissionProducts` or "
                "`MO99` modeling options are selected."
            ),
        ),
    ]
    return settings
def getFissionProductModelSettingValidators(inspector):
    """
    The standard helper method, to provide validators to the fission product model.

    Parameters
    ----------
    inspector : armi.settings.settingsValidation.Inspector
        Inspector whose case settings (``inspector.cs``) are checked; fixes are
        applied through ``inspector._assignCS``.

    Returns
    -------
    list of Query
        One query per inconsistent combination of fission-product settings.
    """
    # Import the Query class here to avoid circular imports.
    from armi.settings.settingsValidation import Query

    queries = []
    # Lumped models require an initialized burn chain.
    queries.append(
        Query(
            lambda: inspector.cs[CONF_FP_MODEL] != "explicitFissionProducts"
            and not bool(inspector.cs["initializeBurnChain"]),
            (
                "The burn chain is not being initialized and the fission product model is not set "
                "to `explicitFissionProducts`. This will likely fail."
            ),
            f"Would you like to set the `{CONF_FP_MODEL}` to `explicitFissionProducts`?",
            lambda: inspector._assignCS(CONF_FP_MODEL, "explicitFissionProducts"),
        )
    )
    # The FP library only matters for the explicit model.
    queries.append(
        Query(
            lambda: inspector.cs[CONF_FP_MODEL] != "explicitFissionProducts"
            and inspector.cs[CONF_FISSION_PRODUCT_LIBRARY_NAME] != "",
            (
                "The explicit fission product model is disabled and the fission product model "
                "library is set. This will have no impact on the results, but it is best to "
                f"disable the `{CONF_FISSION_PRODUCT_LIBRARY_NAME}` option."
            ),
            "Would you like to do this?",
            lambda: inspector._assignCS(CONF_FISSION_PRODUCT_LIBRARY_NAME, ""),
        )
    )
    # The explicit model conflicts with burn chain initialization.
    queries.append(
        Query(
            lambda: inspector.cs[CONF_FP_MODEL] == "explicitFissionProducts"
            and bool(inspector.cs["initializeBurnChain"]),
            (
                "The explicit fission product model is enabled, but initializing the burn chain is "
                "also enabled. This will likely fail."
            ),
            "Would you like to disable the burn chain initialization?",
            lambda: inspector._assignCS("initializeBurnChain", False),
        )
    )
    # The explicit model without a library may leave the case with no FPs.
    queries.append(
        Query(
            lambda: inspector.cs[CONF_FP_MODEL] == "explicitFissionProducts"
            and inspector.cs[CONF_FISSION_PRODUCT_LIBRARY_NAME] == "",
            (
                # Grammar fix: previously read "unless these have manually added".
                "The explicit fission product model is enabled and the fission product model "
                "library is disabled. May result in no fission product nuclides being added to the "
                "case, unless these have been manually added in `nuclideFlags`."
            ),
            (
                # Punctuation fix: previously ended "MC2-3?.".
                f"Would you like to set the `{CONF_FISSION_PRODUCT_LIBRARY_NAME}` option to be "
                "equal to the default implementation of MC2-3?"
            ),
            lambda: inspector._assignCS(CONF_FISSION_PRODUCT_LIBRARY_NAME, "MC2-3"),
        )
    )
    return queries
================================================
FILE: armi/physics/neutronics/fissionProductModel/lumpedFissionProduct.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The lumped fission product (LFP) module deals with representing LFPs and loading
them from files.
These are generally managed by the
:py:mod:`~armi.physics.neutronics.fissionProductModel.fissionProductModel.FissionProductModel`
"""
import os
from armi import runLog
from armi.nucDirectory import elements, nuclideBases
from armi.physics.neutronics.fissionProductModel.fissionProductModelSettings import (
CONF_FP_MODEL,
CONF_LFP_COMPOSITION_FILE_PATH,
)
class LumpedFissionProduct:
    r"""
    Lumped fission product.

    The yields are in number fraction and they sum to 2.0 in general so a
    fission of an actinide results in one LFP, which represents 2 real FPs.

    This object is a data structure and works a lot like a dictionary in terms
    of accessing and modifying the data.

    The yields are indexed by nuclideBase -- in self.yld the yield fraction is
    indexed by nuclideBases of the individual fission product isotopes

    Examples
    --------
    >>> fpd = FissionProductDefinitionFile(stream)
    >>> lfp = fpd.createSingleLFPFromFile("LFP39")
    >>> lfp[nuclideBases.byName["XE135"]]
    2.9773e-05

    See Also
    --------
    armi.reactor.blocks.Block.getLumpedFissionProductCollection : how you should access these.
    """

    def __init__(self, name=None):
        """
        Make an LFP.

        Parameters
        ----------
        name : str, optional
            A name for the LFP. Will be overwritten if you load from file. Provide only
            if you are spinning your own custom LFPs.
        """
        self.name = name
        # Yield fractions indexed by nuclide base.
        self.yld = {}

    def duplicate(self):
        """Make a copy of this w/o using deepcopy."""
        new = self.__class__(self.name)
        for key, val in self.yld.items():
            new.yld[key] = val
        return new

    def __getitem__(self, fissionProduct):
        """
        Return the yield of a particular fission product.

        This allows the LFP to be accessed via indexing, like this: ``lfp[fp]``

        Returns
        -------
        yld : yield of the fission product. 0.0 if the product is not in this lump.
        """
        return self.yld.get(fissionProduct, 0.0)

    def __setitem__(self, key, val):
        # Local import to avoid a circular import at module load time.
        from armi.physics.neutronics.fissionProductModel.fissionProductModel import (
            NUM_FISSION_PRODUCTS_PER_LFP,
        )

        if val < 0.0:
            raise ValueError(f"Cannot set the yield of {key} in {self} to be less than zero as this is non-physical.")
        if val > NUM_FISSION_PRODUCTS_PER_LFP:
            raise ValueError(
                f"Cannot set the yield of {key} in {self} to be greater than {NUM_FISSION_PRODUCTS_PER_LFP}."
            )
        self.yld[key] = val

    def __contains__(self, item):
        return item in self.yld

    def __repr__(self):
        # Bug fix: this previously returned an empty f-string, which made
        # any debugging/log output of LFPs useless.
        return f"<LumpedFissionProduct {self.name}>"

    def keys(self):
        return self.yld.keys()

    def values(self):
        return self.yld.values()

    def items(self):
        for nuc in self.keys():
            yield nuc, self[nuc]

    def getGaseousYieldFraction(self):
        """Return the yield fraction of the gaseous nuclides."""
        yld = 0.0
        for nuc in self.keys():
            if not isGas(nuc):
                continue
            yld += self[nuc]
        return yld

    def getTotalYield(self):
        """
        Get the fractional yield of all nuclides in this lumped fission product.

        Accounts for any fission gas that may be removed.

        Returns
        -------
        total yield of all fps
        """
        # Generator expression avoids building an intermediate list.
        return sum(self[nuc] for nuc in self.yld)

    def getMassFracs(self):
        """
        Return a dictionary of mass fractions indexed by nuclide.

        Returns
        -------
        massFracs : dict
            mass fractions (floats) of LFP masses
        """
        massFracs = {}
        for nuc in self.keys():
            massFracs[nuc] = self.getMassFrac(nuclideBase=nuc)
        return massFracs

    def getMassFrac(self, nucName=None, nuclideBase=None):
        """
        Return the mass fraction of the given nuclide.

        Parameters
        ----------
        nucName : str, optional
            Name to look up in ``nuclideBases.byName`` when ``nuclideBase`` is not given.
        nuclideBase : optional
            The nuclide base object itself; takes precedence over ``nucName``.

        Returns
        -------
        nuclide mass fraction (float)
        """
        massFracDenom = self.getMassFracDenom()
        if not nuclideBase:
            nuclideBase = nuclideBases.byName[nucName]
        return self.__getitem__(nuclideBase) * (nuclideBase.weight / massFracDenom)

    def getMassFracDenom(self):
        """
        Return the yield-weighted total mass of this lump (normalization for mass fractions).

        See Also
        --------
        armi.physics.neutronics.fissionProductModel.lumpedFissionProduct.LumpedFissionProduct.getMassFrac
        """
        massFracDenom = 0.0
        for nuc in self.keys():
            massFracDenom += self[nuc] * nuc.weight
        return massFracDenom
class LumpedFissionProductCollection(dict):
    """
    A set of lumped fission products.

    Maps LFP name (e.g. ``"LFP35"``) -> :class:`LumpedFissionProduct`.
    Typically there would be one of these on a block or on a global level.
    """

    def __init__(self):
        # Explicitly initialize the dict base class (idiom; no behavior change).
        super().__init__()
        # NOTE(review): consumers appear to toggle this to enable collapsing
        # the collection -- confirm semantics against callers.
        self.collapsible = False

    def duplicate(self):
        """Return a copy of the collection with each contained LFP duplicated."""
        new = self.__class__()
        for lfpName, lfp in self.items():
            new[lfpName] = lfp.duplicate()
        return new

    def getLumpedFissionProductNames(self):
        return self.keys()

    def getAllFissionProductNames(self):
        """Gets names of all fission products in this collection."""
        fpNames = set()
        for lfp in self.values():
            for fp in lfp.keys():
                fpNames.add(fp.name)
        return sorted(fpNames)

    def getAllFissionProductNuclideBases(self):
        # Docstring fix: this returns nuclide base objects, not names.
        """Gets the nuclide bases of all fission products in this collection."""
        nucs = set()
        # Only the values are needed here (was iterating .items() and discarding keys).
        for lfp in self.values():
            for fp in lfp.keys():
                nucs.add(fp)
        return sorted(nucs)

    def getNumberDensities(self, objectWithParentDensities=None, densFunc=None):
        """
        Gets all FP number densities in collection.

        Parameters
        ----------
        objectWithParentDensities : ArmiObject
            object (probably block) that can be called with getNumberDensity('LFP35'), etc. to get densities of LFPs.
        densFunc : function, optional
            Optional method to extract LFP densities

        Returns
        -------
        fpDensities : dict
            keys are fp names, vals are fission product number density in atoms/bn-cm.
        """
        if not densFunc:
            # Default: look the lump density up on the parent object.
            # (def instead of assigning a lambda -- E731.)
            def densFunc(lfpName):
                return objectWithParentDensities.getNumberDensity(lfpName)

        fpDensities = {}
        for lfpName, lfp in self.items():
            lfpDens = densFunc(lfpName)
            for fp, fpFrac in lfp.items():
                fpDensities[fp.name] = fpDensities.get(fp.name, 0.0) + fpFrac * lfpDens
        return fpDensities

    def getMassFrac(self, oldMassFrac=None):
        """
        Returns the mass fraction vector of the collection of lumped fission products.

        Parameters
        ----------
        oldMassFrac : dict
            Mass fraction of each lump, keyed by LFP name. Required.

        Raises
        ------
        ValueError
            If ``oldMassFrac`` is not provided.
        """
        if not oldMassFrac:
            raise ValueError("You must define a massFrac vector")
        massFrac = {}
        for lfpName, lfp in self.items():
            lfpMFrac = oldMassFrac[lfpName]
            for nuc, mFrac in lfp.getMassFracs().items():
                # dict.get with default replaces the try/except KeyError accumulation.
                massFrac[nuc] = massFrac.get(nuc, 0.0) + lfpMFrac * mFrac
        return massFrac
class FissionProductDefinitionFile:
    """
    Reads a file that has definitions of one or more LFPs in it to produce LFPs.

    The format for this file is as follows::

        LFP35 GE73 5.9000E-06
        LFP35 GE74 1.4000E-05
        LFP35 GE76 1.6000E-04
        LFP35 AS75 8.9000E-05

    and so on

    Examples
    --------
    >>> fpd = FissionProductDefinitionFile(stream)
    >>> lfps = fpd.createLFPsFromFile()

    The path to this file is specified by the `lfpCompositionFilePath` user setting.
    """

    def __init__(self, stream):
        # Any readable object providing ``readlines`` (open file, StringIO, ...).
        self.stream = stream

    def createLFPsFromFile(self):
        """
        Read the file and create LFPs from the contents.

        Returns
        -------
        lfps : list
            List of LumpedFissionProducts contained in the file
        """
        collection = LumpedFissionProductCollection()
        for lfpLines in self._splitIntoIndividualLFPLines():
            parsed = self._readOneLFP(lfpLines)
            collection[parsed.name] = parsed
        return collection

    def createSingleLFPFromFile(self, name):
        """Read one LFP from the file."""
        matching = self._splitIntoIndividualLFPLines(name)
        # Only one LFP is expected; parse and return it.
        return self._readOneLFP(matching[0])

    def _splitIntoIndividualLFPLines(self, lfpName=None):
        """
        The lfp file can contain one or more LFPs. This splits them.

        Ignores DUMPs.

        Parameters
        ----------
        lfpName : str, optional
            Restrict to just these names if desired.

        Returns
        -------
        allLFPLines : list of list
            each entry is a list of lines that define one LFP
        """
        groups = []
        currentLabel = None
        for line in self.stream.readlines():
            label = line.split()[0]
            # Skip DUMP entries and, when filtering, labels not matching lfpName.
            if "DUMP" in label:
                continue
            if lfpName and lfpName not in label:
                continue
            # A label change starts a new group of lines.
            if label != currentLabel:
                groups.append([])
                currentLabel = label
            groups[-1].append(line)
        return groups

    def _readOneLFP(self, linesOfOneLFP):
        """Parse the lines that define a single LFP into a LumpedFissionProduct."""
        lfp = LumpedFissionProduct()
        totalYield = 0.0
        for line in linesOfOneLFP:
            parentName, nucLibId, yieldStr = line.split()[:3]
            nuc = nuclideBases.byName[nucLibId]
            yld = float(yieldStr)
            lfp.yld[nuc] = yld
            totalYield += yld
            lfp.name = parentName  # e.g. LFP38
        runLog.debug("Loaded {0} {1} nuclides for a total yield of {2}".format(len(lfp.yld), lfp.name, totalYield))
        return lfp
def lumpedFissionProductFactory(cs):
    """Build lumped fission products appropriate for the ``fpModel`` setting."""
    fpModel = cs[CONF_FP_MODEL]
    # Explicit modeling tracks individual nuclides; no lumps are needed.
    if fpModel == "explicitFissionProducts":
        return None
    # Simplistic single-nuclide approximation for fast-running cases.
    if fpModel == "MO99":
        return _buildMo99LumpedFissionProduct()
    lfpPath = cs[CONF_LFP_COMPOSITION_FILE_PATH]
    if not (lfpPath and os.path.exists(lfpPath)):
        raise ValueError(
            f"The fission product reference file does not exist or is not a valid path. Path provided: {lfpPath}"
        )
    runLog.extra(f"Loading global lumped fission products (LFPs) from {lfpPath}")
    with open(lfpPath) as lfpStream:
        return FissionProductDefinitionFile(lfpStream).createLFPsFromFile()
def _buildMo99LumpedFissionProduct():
    """
    Build a dummy MO-99 LFP collection.

    This is a very bad FP approximation from a physics standpoint but can be very useful
    for rapid-running test cases.
    """
    mo99 = nuclideBases.byName["MO99"]
    collection = LumpedFissionProductCollection()
    for lumpNb in nuclideBases.where(lambda nb: isinstance(nb, nuclideBases.LumpNuclideBase)):
        # Not all lump nuclide bases defined are fission products; keep only
        # the fission product (and region) lumps.
        if "FP" in lumpNb.name or "REGN" in lumpNb.name:
            lump = LumpedFissionProduct(lumpNb.name)
            lump[mo99] = 2.0
            collection[lumpNb.name] = lump
    return collection
def isGas(nuc):
    """True if nuclide is considered a gas."""
    # Membership test against the elements whose chemical phase is GAS at STP.
    gasElements = elements.getElementsByChemicalPhase(elements.ChemicalPhase.GAS)
    return nuc.element in gasElements
================================================
FILE: armi/physics/neutronics/fissionProductModel/tests/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
================================================
FILE: armi/physics/neutronics/fissionProductModel/tests/test_fissionProductModel.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the fission product module to ensure all FP are available."""
import unittest
from armi.physics.neutronics.fissionProductModel import fissionProductModel
from armi.physics.neutronics.fissionProductModel.fissionProductModelSettings import (
CONF_FISSION_PRODUCT_LIBRARY_NAME,
CONF_FP_MODEL,
)
from armi.physics.neutronics.fissionProductModel.tests import test_lumpedFissionProduct
from armi.physics.neutronics.isotopicDepletion.isotopicDepletionInterface import (
isDepletable,
)
from armi.reactor.flags import Flags
from armi.reactor.tests.test_reactors import (
buildOperatorOfEmptyHexBlocks,
loadTestReactor,
)
class TestFPMLumpedFP(unittest.TestCase):
    """
    Tests the fission product model interface behavior when lumped fission products are enabled.

    Notes
    -----
    This loads the global fission products from a file stream.
    """

    def setUp(self):
        o = buildOperatorOfEmptyHexBlocks()
        o.removeAllInterfaces()
        self.fpModel = fissionProductModel.FissionProductModel(o.r, o.cs)
        o.addInterface(self.fpModel)
        # Load the fission products from a file stream.
        dummyLFPs = test_lumpedFissionProduct.getDummyLFPFile()
        self.fpModel.setGlobalLumpedFissionProducts(dummyLFPs.createLFPsFromFile())
        # Set up the global LFPs and check that they are setup.
        self.fpModel.interactBOL()
        self.assertTrue(self.fpModel._useGlobalLFPs)

    def test_loadGlobalLFPsFromFile(self):
        """Tests loading lumped fission products from a file."""
        self.assertEqual(len(self.fpModel._globalLFPs), 3)
        lfps = self.fpModel.getGlobalLumpedFissionProducts()
        self.assertIn("LFP39", lfps)

    def test_getAllFissionProductNames(self):
        """Tests retrieval of the fission product names within all the lumped fission products of the core."""
        fissionProductNames = self.fpModel.getAllFissionProductNames()
        self.assertGreater(len(fissionProductNames), 5)
        self.assertIn("XE135", fissionProductNames)

    def test_fpApplication(self):
        """Tests that LFPs land only on fuel blocks and that not every block gets all nuclides."""
        o, r = loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml")
        fpModel = fissionProductModel.FissionProductModel(o.r, o.cs)
        # Set up the global LFPs and check that they are setup.
        self.assertTrue(fpModel._useGlobalLFPs)
        fpModel.interactBOL()
        for b in r.core.iterBlocks():
            # assertIsNotNone/assertIsNone give clearer failure messages than
            # assertTrue(x is not None).
            if b.isFuel():
                self.assertIsNotNone(b._lumpedFissionProducts)
            else:
                self.assertIsNone(b._lumpedFissionProducts)
        # now check if all depletable blocks do not have all nuclides if not detailedAxialExpansion
        fpModel.allBlocksNeedAllNucs = False
        fpModel.interactBOL()
        allNucsInProblem = set(r.blueprints.allNuclidesInProblem)
        for b in r.core.iterBlocks():
            if isDepletable(b) and allNucsInProblem - set(b.getNuclides()):
                break
        else:
            # self.fail replaces assertTrue(False, ...) for an unconditional failure.
            self.fail("All blocks have all nuclides!")
class TestFPMExplicitMC2Lib(unittest.TestCase):
    """
    Tests the fission product model interface behavior when explicit fission products are enabled.

    These tests can use a smaller test reactor, and so will be faster.
    """

    def setUp(self):
        o, r = loadTestReactor(
            customSettings={
                CONF_FP_MODEL: "explicitFissionProducts",
                CONF_FISSION_PRODUCT_LIBRARY_NAME: "MC2-3",
            },
            inputFileName="smallestTestReactor/armiRunSmallest.yaml",
        )
        self.r = r
        self.nuclideBases = self.r.nuclideBases
        self.fpModel = fissionProductModel.FissionProductModel(o.r, o.cs)
        # Explicit fission products should disable the global LFP treatment.
        self.assertFalse(self.fpModel._useGlobalLFPs)

    def test_nuclideFlags(self):
        """Test that the nuclide flags contain the set of MC2-3 modeled nuclides."""
        # ``interactBOL`` triggers setting up the fission products in the
        # reactor data model.
        self.fpModel.interactBOL()
        flagNames = self.r.blueprints.nuclideFlags.keys()
        for base in self.nuclideBases.byMcc3Id.values():
            self.assertIn(base.name, flagNames)

    def test_nuclidesInModelFuel(self):
        """Test that the fuel blocks contain all the MC2-3 modeled nuclides."""
        # ``interactBOL`` triggers setting up the fission products in the
        # reactor data model.
        self.fpModel.interactBOL()
        fuelBlock = self.r.core.getFirstBlock(Flags.FUEL)
        modeledNuclides = fuelBlock.getNuclides()
        for base in self.nuclideBases.byMcc3Id.values():
            self.assertIn(base.name, modeledNuclides)
class TestFPMExplicitMC2LibSlow(unittest.TestCase):
    """
    Tests the fission product model interface behavior when explicit fission products are enabled.

    These tests require a large test reactor, and will lead to slower tests.
    """

    def setUp(self):
        o, r = loadTestReactor(
            customSettings={
                CONF_FP_MODEL: "explicitFissionProducts",
                CONF_FISSION_PRODUCT_LIBRARY_NAME: "MC2-3",
            }
        )
        self.r = r
        self.nuclideBases = self.r.nuclideBases
        self.fpModel = fissionProductModel.FissionProductModel(o.r, o.cs)
        # Explicit fission products should disable the global LFP treatment.
        self.assertFalse(self.fpModel._useGlobalLFPs)

    def test_nuclidesInModelAllDepletableBlocks(self):
        """Test that the depletable blocks contain all the MC2-3 modeled nuclides."""
        # There must be both fuel and control blocks in the core model.
        fuelBlocks = self.r.core.getBlocks(Flags.FUEL)
        controlBlocks = self.r.core.getBlocks(Flags.CONTROL)
        self.assertGreater(len(fuelBlocks), 0)
        self.assertGreater(len(controlBlocks), 0)
        # Control blocks start out non-depletable...
        for block in controlBlocks:
            self.assertFalse(isDepletable(block))
        # ...and, as a corollary, so do all of their components.
        for block in controlBlocks:
            for component in block.getComponents():
                self.assertFalse(isDepletable(component))
        # Force the first component in each control block to be labeled as
        # depletable so explicit fission product assignment can be checked.
        for block in controlBlocks:
            block.getComponents()[0].p.flags |= Flags.DEPLETABLE
        # Now each control block should read as depletable...
        for block in controlBlocks:
            self.assertTrue(isDepletable(block))
        # ...but, as a corollary, only through its first component.
        for block in controlBlocks:
            firstComponent, *otherComponents = block.getComponents()
            self.assertTrue(isDepletable(firstComponent))
            for component in otherComponents:
                self.assertFalse(isDepletable(component))
        # ``interactBOL`` triggers setting up the fission products in the
        # reactor data model.
        self.fpModel.interactBOL()
        # Depletable blocks carry every explicit fission product; others do not.
        for block in self.r.core.iterBlocks():
            nuclideList = block.getNuclides()
            if isDepletable(block):
                for base in self.nuclideBases.byMcc3Id.values():
                    self.assertIn(base.name, nuclideList)
            else:
                self.assertLess(len(block.getNuclides()), len(self.nuclideBases.byMcc3Id))
================================================
FILE: armi/physics/neutronics/fissionProductModel/tests/test_lumpedFissionProduct.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for lumpedFissionProduce module."""
import io
import math
import os
import unittest
from armi.context import RES
from armi.nucDirectory.nuclideBases import NuclideBases
from armi.physics.neutronics.fissionProductModel import (
REFERENCE_LUMPED_FISSION_PRODUCT_FILE,
lumpedFissionProduct,
)
from armi.physics.neutronics.fissionProductModel.fissionProductModelSettings import (
CONF_FP_MODEL,
CONF_LFP_COMPOSITION_FILE_PATH,
)
from armi.reactor.flags import Flags
from armi.reactor.tests.test_reactors import buildOperatorOfEmptyHexBlocks
from armi.settings import Settings
# Dummy LFP definition data shared by these tests. Each line uses the same
# whitespace-delimited format as the reference LFP file:
# "<lump name> <nuclide library id> <yield fraction>".
LFP_TEXT = """LFP35 GE73 5.9000E-06
LFP35 GE74 1.4000E-05
LFP35 GE76 1.6000E-04
LFP35 AS75 8.9000E-05
LFP35 KR85 8.9000E-05
LFP35 MO99 8.9000E-05
LFP35 SM150 8.9000E-05
LFP35 XE135 8.9000E-05
LFP39 XE135 8.9000E-05
LFP38 XE135 8.9000E-05
"""
def getDummyLFPFile():
    """Return a ``FissionProductDefinitionFile`` wrapping the dummy ``LFP_TEXT`` data."""
    return lumpedFissionProduct.FissionProductDefinitionFile(io.StringIO(LFP_TEXT))
class TestFissionProductDefinitionFile(unittest.TestCase):
    """Test of the fission product model."""

    def setUp(self):
        self.fpd = getDummyLFPFile()
        self.nuclideBases = NuclideBases()

    def test_createLFPs(self):
        """Test of the fission product model creation."""
        lfps = self.fpd.createLFPsFromFile()
        xe135 = self.nuclideBases.fromName("XE135")
        self.assertEqual(len(lfps), 3)
        self.assertIn("LFP35", lfps)
        for lfp in lfps.values():
            self.assertIn(xe135, lfp)

    def test_createReferenceLFPs(self):
        """Test of the reference fission product model creation."""
        # Use a lowercase local name: the original shadowed the module-level
        # ``LFP_TEXT`` fixture (and used ALL_CAPS for locals).
        with open(REFERENCE_LUMPED_FISSION_PRODUCT_FILE, "r") as lfpFile:
            refLfpText = lfpFile.read()
        fpd = lumpedFissionProduct.FissionProductDefinitionFile(io.StringIO(refLfpText))
        fpd.fName = REFERENCE_LUMPED_FISSION_PRODUCT_FILE
        lfps = fpd.createLFPsFromFile()
        self.assertEqual(len(lfps), 5)
        lfpIds = [
            "LFP35",
            "LFP38",
            "LFP39",
            "LFP40",
            "LFP41",
        ]
        for lfpId in lfpIds:
            self.assertIn(lfpId, lfps)
        mo99 = self.nuclideBases.fromName("MO99")
        # Reference Mo-99 yields for each lump, in the same order as ``lfpIds``.
        refMo99Yields = [0.00091, 0.00112, 0.00099, 0.00108, 0.00101]
        for refYield, lfpId in zip(refMo99Yields, lfpIds):
            lfp = lfps[lfpId]
            self.assertIn(mo99, lfp)
            relativeError = math.fabs(refYield - lfp[mo99]) / refYield
            self.assertLess(relativeError, 1e-6)
class TestLFP(unittest.TestCase):
    """Test of the lumped fission product yields."""

    def setUp(self):
        self.fpd = lumpedFissionProduct.FissionProductDefinitionFile(io.StringIO(LFP_TEXT))
        self.nuclideBases = NuclideBases()

    def test_getYield(self):
        """Test of the yield of a fission product."""
        xe135 = self.nuclideBases.fromName("XE135")
        lfp = self.fpd.createSingleLFPFromFile("LFP39")
        # Overwrite the file-loaded yield and read it back through indexing.
        lfp[xe135] = 1.2
        self.assertEqual(lfp[xe135], 1.2)
        # Unknown keys fall back to a zero yield.
        self.assertEqual(lfp[5], 0.0)

    def test_gaseousYieldFraction(self):
        lfp = self.fpd.createSingleLFPFromFile("LFP39")
        # This is equal to the Xe yield set in the dummy ``LFP_TEXT``
        # data for these tests.
        self.assertEqual(lfp.getGaseousYieldFraction(), 8.9000e-05)

    def test_isGas(self):
        """Tests that a nuclide is a gas or not at STP based on its chemical phase."""
        expectations = [
            ("H1", True),
            ("H2", True),
            ("H3", True),
            ("U235", False),
            ("O16", True),
            ("XE135", True),
        ]
        for nucName, expected in expectations:
            nb = self.nuclideBases.byName[nucName]
            self.assertEqual(lumpedFissionProduct.isGas(nb), expected)
class TestLFPCollection(unittest.TestCase):
    """Test of the fission product collection."""

    def setUp(self):
        definitionFile = lumpedFissionProduct.FissionProductDefinitionFile(io.StringIO(LFP_TEXT))
        self.lfps = definitionFile.createLFPsFromFile()
        self.nuclideBases = NuclideBases()

    def test_getAllFissionProductNames(self):
        """Test to ensure the fission product names are present."""
        allNames = self.lfps.getAllFissionProductNames()
        for expected in ("XE135", "KR85"):
            self.assertIn(expected, allNames)

    def test_getAllFissionProductNuclideBases(self):
        """Test to ensure the fission product nuclide bases are present."""
        allBases = self.lfps.getAllFissionProductNuclideBases()
        self.assertIn(self.nuclideBases.fromName("XE135"), allBases)
        self.assertIn(self.nuclideBases.fromName("KR85"), allBases)

    def test_duplicate(self):
        """Test to ensure that when we duplicate, we don't adjust the original file."""
        copiedLfps = self.lfps.duplicate()
        xe135 = self.nuclideBases.fromName("XE135")
        originalLfp = self.lfps["LFP39"]
        copiedLfp = copiedLfps["LFP39"]
        yieldBefore = originalLfp[xe135]
        # Mutating the original must not ripple into the copy.
        originalLfp[xe135] += 1.3
        self.assertEqual(yieldBefore, copiedLfp[xe135])

    def test_getNumberDensities(self):
        o = buildOperatorOfEmptyHexBlocks()
        fuelBlock = next(o.r.core.iterBlocks(Flags.FUEL))
        fpDensities = self.lfps.getNumberDensities(objectWithParentDensities=fuelBlock)
        # The basic test reactor has no fission products in it, so all
        # densities come back zero.
        for fpName in ("GE73", "GE74", "GE76", "AS75", "KR85", "MO99", "SM150", "XE135"):
            self.assertEqual(fpDensities[fpName], 0.0)

    def test_getMassFrac(self):
        with self.assertRaises(ValueError):
            self.lfps.getMassFrac(oldMassFrac=None)
        oldMassFrac = {
            "LFP35": 0.5,
            "LFP38": 0.2,
            "LFP39": 0.3,
        }
        newMassFracs = self.lfps.getMassFrac(oldMassFrac)
        refMassFrac = {
            "GE73": 0.0034703064077030933,
            "GE74": 0.00834728937688672,
            "GE76": 0.09797894499881823,
            "AS75": 0.053783069618403435,
            "KR85": 0.0609551394006646,
            "MO99": 0.07100169460812283,
            "SM150": 0.1076193196365748,
            "XE135": 0.5968442359528263,
        }
        for nuclide, newMassFrac in newMassFracs.items():
            self.assertAlmostEqual(newMassFrac, refMassFrac[nuclide.name])
class TestLFPFromRefFile(unittest.TestCase):
    """Tests loading from the `referenceFissionProducts.dat` file."""

    def test_fissionProductYields(self):
        """Test that the fission product yields for the lumped fission products sums to 2.0."""
        cs = Settings()
        cs[CONF_FP_MODEL] = "infinitelyDilute"
        cs[CONF_LFP_COMPOSITION_FILE_PATH] = os.path.join(RES, "referenceFissionProducts.dat")
        self.lfps = lumpedFissionProduct.lumpedFissionProductFactory(cs)
        # Each lump represents roughly two fission products per fission.
        for lump in self.lfps.values():
            self.assertAlmostEqual(lump.getTotalYield(), 2.0, places=3)
class TestLFPExplicit(unittest.TestCase):
    """Tests loading fission products with explicit modeling."""

    def test_explicitFissionProducts(self):
        """No lumped fission products are created under the `explicitFissionProducts` model."""
        caseSettings = Settings()
        caseSettings[CONF_FP_MODEL] = "explicitFissionProducts"
        self.lfps = lumpedFissionProduct.lumpedFissionProductFactory(caseSettings)
        self.assertIsNone(self.lfps)
class TestMo99LFP(unittest.TestCase):
    """Test of the fission product model from Mo99."""

    def setUp(self):
        self.lfps = lumpedFissionProduct._buildMo99LumpedFissionProduct()

    def test_getAllFissionProductNames(self):
        """Mo99 is present, other fission products are not, and total yield is 2.0."""
        productNames = self.lfps.getAllFissionProductNames()
        self.assertIn("MO99", productNames)
        self.assertNotIn("KR85", productNames)
        self.assertAlmostEqual(self.lfps["LFP35"].getTotalYield(), 2.0)
================================================
FILE: armi/physics/neutronics/globalFlux/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Global flux solvers."""
RX_ABS_MICRO_LABELS = ["nGamma", "fission", "nalph", "np", "nd", "nt"]
RX_PARAM_NAMES = ["rateCap", "rateFis", "rateProdN2n", "rateProdFis", "rateAbs"]
================================================
FILE: armi/physics/neutronics/globalFlux/globalFluxInterface.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Global flux interface provide a base class for all neutronics tools that compute the neutron
and/or photon flux.
"""
import math
from typing import Dict, Optional
import numpy as np
from armi import interfaces, runLog
from armi.physics import constants, executers, neutronics
from armi.physics.neutronics.globalFlux import RX_ABS_MICRO_LABELS, RX_PARAM_NAMES
from armi.reactor import geometry, reactors
from armi.reactor.blocks import Block
from armi.reactor.converters import geometryConverters, uniformMesh
from armi.reactor.flags import Flags
from armi.settings.caseSettings import Settings
from armi.utils import getBurnSteps, getMaxBurnSteps, units
# Position of this interface in the interface stack (see armi.interfaces.STACK_ORDER).
ORDER = interfaces.STACK_ORDER.FLUX
class GlobalFluxInterface(interfaces.Interface):
    """
    A general abstract interface for global flux-calculating modules.

    Should be subclassed by more specific implementations.
    """

    name = "GlobalFlux"  # make sure to set this in subclasses
    purpose = "globalFlux"

    # relative tolerance used by checkEnergyBalance
    _ENERGY_BALANCE_REL_TOL = 1e-5

    def __init__(self, r, cs):
        interfaces.Interface.__init__(self, r, cs)
        # pick zero-padding widths for I/O file names based on problem size
        if self.cs["nCycles"] > 1000:
            self.cycleFmt = "04d"  # produce ig0001.inp
        else:
            self.cycleFmt = "03d"  # produce ig001.inp

        if getMaxBurnSteps(self.cs) > 10:
            self.nodeFmt = "03d"  # produce ig001_001.inp
        else:
            self.nodeFmt = "1d"  # produce ig001_1.inp.
        self._bocKeff = None  # for tracking rxSwing
        self._setTightCouplingDefaults()

    def _setTightCouplingDefaults(self):
        """Enable tight coupling defaults for the interface.

        - allows users to set tightCoupling: true in settings without
          having to specify the specific tightCouplingSettings for this interface.
        - this is split off from self.__init__ for testing
        """
        if self.coupler is None and self.cs["tightCoupling"]:
            self.coupler = interfaces.TightCoupler("keff", 1.0e-4, self.cs["tightCouplingMaxNumIters"])

    @staticmethod
    def getHistoryParams():
        """Return parameters that will be added to assembly versus time history printouts."""
        return ["detailedDpa", "detailedDpaPeak", "detailedDpaPeakRate"]

    def interactBOC(self, cycle=None):
        """Zero out cycle-cumulative parameters at the beginning of a cycle."""
        interfaces.Interface.interactBOC(self, cycle)
        self.r.core.p.rxSwing = 0.0  # zero out rxSwing until last time node.
        self.r.core.p.maxDetailedDpaThisCycle = 0.0  # zero out cumulative params
        self.r.core.p.dpaFullWidthHalfMax = 0.0
        self.r.core.p.elevationOfACLP3Cycles = 0.0
        self.r.core.p.elevationOfACLP7Cycles = 0.0
        for b in self.r.core.iterBlocks():
            b.p.detailedDpaThisCycle = 0.0
            b.p.newDPA = 0.0

    def interactEveryNode(self, cycle, node):
        """
        Calculate flux, power, and keff for this cycle and node.

        Flux, power, and keff are generally calculated at every timestep to ensure flux
        is up to date with the reactor state.
        """
        interfaces.Interface.interactEveryNode(self, cycle, node)
        self._setRxSwingRelatedParams()

    def interactCoupled(self, iteration):
        """Runs during a tightly-coupled physics iteration to update the flux and power."""
        interfaces.Interface.interactCoupled(self, iteration)
        self._setRxSwingRelatedParams()

    def _setRxSwingRelatedParams(self):
        """Set params related to reactivity swing over the cycle (reported in pcm)."""
        if self.r.p.timeNode == 0:
            # track boc uncontrolled keff for rxSwing param.
            self._bocKeff = self.r.core.p.keffUnc or self.r.core.p.keff

        # A 1 burnstep cycle would have 2 nodes, and the last node would be node index 1 (first is zero)
        lastNodeInCycle = getBurnSteps(self.cs)[self.r.p.cycle]

        if self.r.p.timeNode == lastNodeInCycle and self._bocKeff is not None:
            eocKeff = self.r.core.p.keffUnc or self.r.core.p.keff
            # reactivity difference: (1/k_boc - 1/k_eoc) == (k_eoc - k_boc)/(k_eoc*k_boc)
            swing = (eocKeff - self._bocKeff) / (eocKeff * self._bocKeff)
            self.r.core.p.rxSwing = swing * units.ABS_REACTIVITY_TO_PCM
            runLog.info(
                f"BOC Uncontrolled keff: {self._bocKeff}, "
                f"EOC Uncontrolled keff: {self.r.core.p.keffUnc}, "
                f"Cycle Reactivity Swing: {self.r.core.p.rxSwing} pcm"
            )

    def checkEnergyBalance(self):
        """Check that there is energy balance between the power generated and the specified power.

        .. impl:: Validate the energy generation matches user specifications.
            :id: I_ARMI_FLUX_CHECK_POWER
            :implements: R_ARMI_FLUX_CHECK_POWER

            This method checks that the global power computed from flux
            evaluation matches the global power specified from the user within a
            tolerance; if it does not, a ``ValueError`` is raised. The
            global power from the flux solve is computed by summing the
            block-wise power in the core. This value is then compared to the
            user-specified power and raises an error if relative difference is
            above :math:`10^{-5}`.
        """
        powerGenerated = (
            self.r.core.calcTotalParam("power", calcBasedOnFullObj=False, generationNum=2) / units.WATTS_PER_MW
        )
        self.r.core.setPowerIfNecessary()
        specifiedPower = self.r.core.p.power / units.WATTS_PER_MW / self.r.core.powerMultiplier

        if not math.isclose(powerGenerated, specifiedPower, rel_tol=self._ENERGY_BALANCE_REL_TOL):
            raise ValueError(
                "The power generated in {} is {} MW, but the user specified power is {} MW.\n"
                "This indicates a software bug. Please report to the developers.".format(
                    self.r.core, powerGenerated, specifiedPower
                )
            )

    def getIOFileNames(self, cycle, node, coupledIter=None, additionalLabel=""):
        """
        Return the input and output file names for this run.

        Parameters
        ----------
        cycle : int
            The cycle number
        node : int
            The burn node number (e.g. 0 for BOC, 1 for MOC, etc.)
        coupledIter : int, optional
            Coupled iteration number (for tightly-coupled cases)
        additionalLabel : str, optional
            An optional tag to the file names to differentiate them
            from another case.

        Returns
        -------
        inName : str
            Input file name
        outName : str
            Output file name
        stdName : str
            Standard output file name
        """
        timeId = "{0:" + self.cycleFmt + "}_{1:" + self.nodeFmt + "}"  # build names with proper number of zeros
        if coupledIter is not None:
            timeId += "_{0:03d}".format(coupledIter)

        inName = self.cs.caseTitle + timeId.format(cycle, node) + "{}.{}.inp".format(additionalLabel, self.name)
        outName = self.cs.caseTitle + timeId.format(cycle, node) + "{}.{}.out".format(additionalLabel, self.name)
        # BUGFIX: ``outName.strip(".out")`` removed any of the characters '.', 'o',
        # 'u', 't' from *both* ends of the name (str.strip takes a character set,
        # not a suffix), which could mangle case titles or interface names that
        # begin/end with those characters. ``outName`` always ends with ".out" by
        # construction above, so slice the extension off instead.
        stdName = outName[: -len(".out")] + ".stdout"
        return inName, outName, stdName

    def calculateKeff(self, label="keff"):
        """
        Runs neutronics tool and returns keff without applying it to the reactor.

        Used for things like direct-eigenvalue reactivity coefficients and CR worth iterations.
        For anything more complicated than getting keff, clients should
        call ``getExecuter`` to build their case.
        """
        raise NotImplementedError()
class GlobalFluxInterfaceUsingExecuters(GlobalFluxInterface):
    """
    A global flux interface that makes use of the ARMI Executer system to run.

    Using Executers is optional but seems to allow easy interoperability between
    the myriad global flux solvers in the world.

    If a new global flux solver does not fit easily into the Executer pattern, then
    it will be best to just start from the base GlobalFluxInterface rather than
    trying to adjust the Executer pattern to fit.

    Notes
    -----
    This points library users to the Executer object, which is intended to
    provide commonly-used structure useful for many global flux plugins.
    """

    def interactEveryNode(self, cycle, node):
        """
        Calculate flux, power, and keff for this cycle and node.

        Flux, power, and keff are generally calculated at every timestep to ensure flux
        is up to date with the reactor state.
        """
        runLabel = self.getLabel(self.cs.caseTitle, cycle, node)
        self.getExecuter(label=runLabel).run()
        GlobalFluxInterface.interactEveryNode(self, cycle, node)

    def interactCoupled(self, iteration):
        """Runs during a tightly-coupled physics iteration to update the flux and power."""
        runLabel = self.getLabel(self.cs.caseTitle, self.r.p.cycle, self.r.p.timeNode, iteration)
        self.getExecuter(label=runLabel).run()
        GlobalFluxInterface.interactCoupled(self, iteration)

    def getTightCouplingValue(self):
        """Return the value of the parameter watched by the tight-coupling convergence check."""
        watchedParam = self.coupler.parameter
        if watchedParam == "keff":
            return self.r.core.p.keff
        if watchedParam == "power":
            # per-assembly block power fractions (each assembly's list sums to 1)
            scaledCorePowerDistribution = []
            for assem in self.r.core:
                assemPower = sum(b.p.power for b in assem)
                scaledCorePowerDistribution.append([b.p.power / assemPower for b in assem])
            return scaledCorePowerDistribution
        return None

    @staticmethod
    def getOptionsCls():
        """
        Get a blank options object.

        Subclass this to allow generic updating of options.
        """
        return GlobalFluxOptions

    @staticmethod
    def getExecuterCls():
        """Return the Executer class used to drive the flux solve."""
        return GlobalFluxExecuter

    def getExecuterOptions(self, label=None):
        """
        Get an executer options object populated from current user settings and reactor.

        If you want to set settings more deliberately (e.g. to specify a cross section
        library rather than use an auto-derived name), use ``getOptionsCls`` and build
        your own.
        """
        executerOptions = self.getOptionsCls()(label)
        executerOptions.fromUserSettings(self.cs)
        executerOptions.fromReactor(self.r)
        return executerOptions

    def getExecuter(self, options=None, label=None):
        """
        Get executer object for performing custom client calcs.

        This allows plugins to update options in a somewhat generic way. For
        example, a reactivity coefficients plugin may want to request adjoint flux.
        """
        if options and label:
            raise ValueError(
                f"Cannot supply a label (`{label}`) and options at the same time. Apply label to options object first."
            )
        resolvedOptions = options or self.getExecuterOptions(label)
        return self.getExecuterCls()(options=resolvedOptions, reactor=self.r)

    def calculateKeff(self, label="keff"):
        """
        Run global flux with current user options and just return keff without applying it.

        Used for things like direct-eigenvalue reactivity coefficients and CR worth iterations.
        """
        executer = self.getExecuter(label=label)
        executer.options.applyResultsToReactor = False
        executer.options.calcReactionRatesOnMeshConversion = False
        return executer.run().getKeff()

    @staticmethod
    def getLabel(caseTitle, cycle, node, iteration=None):
        """
        Make a label (input/output file name) for the executer based on cycle, node, iteration.

        Parameters
        ----------
        caseTitle : str, required
            The caseTitle for the ARMI run
        cycle : int, required
            The cycle number
        node : int, required
            The time node index
        iteration : int, optional
            The coupled iteration index
        """
        iterTag = "" if iteration is None else f"i{iteration}"
        return f"{caseTitle}-flux-c{cycle}n{node}{iterTag}"
class GlobalFluxOptions(executers.ExecutionOptions):
    """Data structure representing common options in Global Flux Solvers.

    .. impl:: Options for neutronics solvers.
        :id: I_ARMI_FLUX_OPTIONS
        :implements: R_ARMI_FLUX_OPTIONS

        This class functions as a data structure for setting and retrieving
        execution options for performing flux evaluations; these options
        involve:

        * What sort of problem is to be solved, i.e. real/adjoint,
          eigenvalue/fixed-source, neutron/gamma, boundary conditions
        * Convergence criteria for iterative algorithms
        * Geometry type and mesh conversion details
        * Specific parameters to be calculated after flux has been evaluated

        These options can be retrieved by directly accessing class members. The
        options are set by specifying a
        :py:class:`Settings <armi.settings.caseSettings.Settings>` object and
        optionally specifying a
        :py:class:`Reactor <armi.reactor.reactors.Reactor>` object.

    Attributes
    ----------
    adjoint : bool
        True if the ``CONF_NEUTRONICS_TYPE`` setting is set to ``adjoint`` or ``real``.
    calcReactionRatesOnMeshConversion : bool
        This option is used to recalculate reaction rates after a mesh
        conversion and remapping of neutron flux. This can be disabled
        in certain global flux implementations if reaction rates are not
        required, but by default it is enabled.
    eigenvalueProblem : bool
        Whether this is an eigenvalue problem or a fixed source problem
    includeFixedSource : bool
        This can happen in eig if Fredholm Alternative satisfied.
    photons : bool
        Run the photon/gamma uniform mesh converter?
    real : bool
        True if ``CONF_NEUTRONICS_TYPE`` setting is set to ``real``.
    aclpDoseLimit : float
        Dose limit in dpa used to position the above-core load pad (if one exists)
    boundaries : str
        External Neutronic Boundary Conditions. Reflective does not include axial.
    cs : Settings
        Settings for this run
    detailedAxialExpansion : bool
        Turn on detailed axial expansion? from settings
    dpaPerFluence : float
        A quick and dirty conversion that is used to get dpaPeak
    energyDepoCalcMethodStep : str
        For gamma transport/normalization
    epsEigenvalue : float
        Convergence criteria for calculating the eigenvalue in the global flux solver
    epsFissionSourceAvg : float
        Convergence criteria for average fission source, from settings
    epsFissionSourcePoint : float
        Convergence criteria for point fission source, from settings
    geomType : geometry.GeomType
        Reactor Core geometry type (HEX, RZ, RZT, etc)
    hasNonUniformAssems : bool
        Has any non-uniform assembly flags, from settings
    isRestart : bool
        Restart global flux case using outputs from last time as a guess
    kernelName : str
        The neutronics / depletion solver for global flux solve.
    loadPadElevation : float
        The elevation of the bottom of the above-core load pad (ACLP) from
        the bottom of the upper grid plate (in cm).
    loadPadLength : float
        The length of the load pad. Used to compute average and peak dose.
    maxOuters : int
        XY and Axial partial current sweep max outer iterations.
    savePhysicsFilesList : bool
        Is this timestamp in the list of savePhysicsFiles in the settings?
    symmetry : str
        Reactor symmetry: full core, third-core, etc
    xsKernel : str
        Lattice Physics Kernel, from settings
    """

    def __init__(self, label: Optional[str] = None):
        executers.ExecutionOptions.__init__(self, label)
        # have defaults
        self.adjoint: bool = False
        self.calcReactionRatesOnMeshConversion: bool = True
        self.eigenvalueProblem: bool = True
        self.includeFixedSource: bool = False
        self.photons: bool = False
        self.real: bool = True

        # no defaults: these must be filled in via fromUserSettings/fromReactor
        # (or programmatically) before the options object is used
        self.aclpDoseLimit: Optional[float] = None
        self.boundaries: Optional[str] = None
        self.cs: Optional[Settings] = None
        self.detailedAxialExpansion: Optional[bool] = None
        self.dpaPerFluence: Optional[float] = None
        self.energyDepoCalcMethodStep: Optional[str] = None
        self.epsEigenvalue: Optional[float] = None
        self.epsFissionSourceAvg: Optional[float] = None
        self.epsFissionSourcePoint: Optional[float] = None
        self.geomType: Optional[geometry.GeomType] = None
        self.hasNonUniformAssems: Optional[bool] = None
        self.isRestart: Optional[bool] = None
        self.kernelName: Optional[str] = None
        self.loadPadElevation: Optional[float] = None
        self.loadPadLength: Optional[float] = None
        self.maxOuters: Optional[int] = None
        self.savePhysicsFilesList: Optional[bool] = None
        self.symmetry: Optional[str] = None
        self.xsKernel: Optional[str] = None

    def fromUserSettings(self, cs: Settings):
        """
        Map user input settings from cs to a set of specific global flux options.

        This is not required; these options can alternatively be set programmatically.
        """
        # imports are local to avoid circular imports at module load time
        from armi.physics.neutronics.settings import (
            CONF_ACLP_DOSE_LIMIT,
            CONF_BOUNDARIES,
            CONF_DPA_PER_FLUENCE,
            CONF_EIGEN_PROB,
            CONF_LOAD_PAD_ELEVATION,
            CONF_LOAD_PAD_LENGTH,
            CONF_NEUTRONICS_KERNEL,
            CONF_RESTART_NEUTRONICS,
            CONF_XS_KERNEL,
        )
        from armi.settings.fwSettings.globalSettings import (
            CONF_DETAILED_AXIAL_EXPANSION,
            CONF_NON_UNIFORM_ASSEM_FLAGS,
            CONF_PHYSICS_FILES,
        )

        self.kernelName = cs[CONF_NEUTRONICS_KERNEL]
        self.setRunDirFromCaseTitle(cs.caseTitle)
        self.isRestart = cs[CONF_RESTART_NEUTRONICS]
        self.adjoint = neutronics.adjointCalculationRequested(cs)
        self.real = neutronics.realCalculationRequested(cs)
        self.detailedAxialExpansion = cs[CONF_DETAILED_AXIAL_EXPANSION]
        self.hasNonUniformAssems = any([Flags.fromStringIgnoreErrors(f) for f in cs[CONF_NON_UNIFORM_ASSEM_FLAGS]])
        self.eigenvalueProblem = cs[CONF_EIGEN_PROB]

        # dose/dpa specific (should be separate subclass?)
        self.dpaPerFluence = cs[CONF_DPA_PER_FLUENCE]
        self.aclpDoseLimit = cs[CONF_ACLP_DOSE_LIMIT]
        self.loadPadElevation = cs[CONF_LOAD_PAD_ELEVATION]
        self.loadPadLength = cs[CONF_LOAD_PAD_LENGTH]
        self.boundaries = cs[CONF_BOUNDARIES]
        self.xsKernel = cs[CONF_XS_KERNEL]
        self.cs = cs
        self.savePhysicsFilesList = cs[CONF_PHYSICS_FILES]

    def fromReactor(self, reactor: reactors.Reactor):
        """Set options derived from the current state of the reactor model."""
        self.geomType = reactor.core.geomType
        self.symmetry = reactor.core.symmetry
        # stamp like "001002" (cycle 1, node 2); presumably matches entries in the
        # physics-files setting list — verify against CONF_PHYSICS_FILES docs
        cycleNodeStamp = f"{reactor.p.cycle:03d}{reactor.p.timeNode:03d}"
        if self.savePhysicsFilesList:
            self.savePhysicsFiles = cycleNodeStamp in self.savePhysicsFilesList
        else:
            self.savePhysicsFiles = False
class GlobalFluxExecuter(executers.DefaultExecuter):
    """
    A short-lived object that coordinates the prep, execution, and processing of a flux solve.

    There are many forms of global flux solves:

    * Eigenvalue/Fixed source
    * Adjoint/real
    * Diffusion/PN/SN/MC
    * Finite difference/nodal

    There are also many reasons someone might need a flux solve:

    * Update multigroup flux and power on reactor and compute keff
    * Just compute keff in a temporary perturbed state
    * Just compute flux and adjoint flux on an existing state

    There may also be some required transformations when a flux solve is done:

    * Add/remove edge assemblies
    * Apply a uniform axial mesh

    There are also I/O performance complexities, including running on fast local paths
    and copying certain user-defined files back to the working directory on error
    or completion. Given all these options and possible needs for information from
    global flux, this class provides a unified interface to everything.

    .. impl:: Ensure the mesh in the reactor model is appropriate for neutronics solver execution.
        :id: I_ARMI_FLUX_GEOM_TRANSFORM
        :implements: R_ARMI_FLUX_GEOM_TRANSFORM

        The primary purpose of this class is to perform geometric and mesh
        transformations on the reactor model to ensure a flux evaluation can
        properly perform. This includes:

        * Applying a uniform axial mesh for the 3D flux solve
        * Expanding symmetrical geometries to full-core if necessary
        * Adding/removing edge assemblies if necessary
        * Undoing any transformations that might affect downstream calculations
    """

    def __init__(self, options: GlobalFluxOptions, reactor):
        executers.DefaultExecuter.__init__(self, options, reactor)
        # annotation-only narrowing: DefaultExecuter stores generic options
        self.options: GlobalFluxOptions
        # geometry converters applied before the solve, keyed by kind ("axial",
        # "edgeAssems"); tracked so they can be undone afterwards
        self.geomConverters: Dict[str, geometryConverters.GeometryConverter] = {}

    def _performGeometryTransformations(self, makePlots=False):
        """
        Apply geometry conversions to make reactor work in neutronics.

        There are two conditions where things must happen:

        1. If you are doing finite-difference, you need to add the edge assemblies (fast).
           For this, we just modify the reactor in place

        2. If you are doing detailed axial expansion, you need to average out the axial mesh (slow!)
           For this we need to create a whole copy of the reactor and use that.

        In both cases, we need to undo the modifications between reading the output
        and applying the result to the data model.

        See Also
        --------
        _undoGeometryTransformations
        """
        # a non-empty converter dict means a previous transform was never undone
        if any(self.geomConverters):
            raise RuntimeError(
                "The reactor has been transformed, but not restored to the original.\n"
                + "Geometry converter is set to {} \n.".format(self.geomConverters)
                + "This is a programming error and requires further investigation."
            )
        neutronicsReactor = self.r
        converter = self.geomConverters.get("axial")
        if not converter:
            if self.options.detailedAxialExpansion or self.options.hasNonUniformAssems:
                # uniform-mesh conversion builds a separate copy of the reactor
                converter = uniformMesh.converterFactory(self.options)
                converter.convert(self.r)
                neutronicsReactor = converter.convReactor
                if makePlots:
                    converter.plotConvertedReactor()
                self.geomConverters["axial"] = converter
        if self.edgeAssembliesAreNeeded():
            # edge assemblies are added to the (possibly converted) reactor in place
            converter = self.geomConverters.get("edgeAssems", geometryConverters.EdgeAssemblyChanger())
            converter.addEdgeAssemblies(neutronicsReactor.core)
            self.geomConverters["edgeAssems"] = converter
        self.r = neutronicsReactor

    def _undoGeometryTransformations(self):
        """
        Restore original data model state and/or apply results to it.

        Notes
        -----
        These transformations occur in the opposite order than that which they were applied in.
        Otherwise, the uniform mesh guy would try to add info to assem's on the source reactor
        that don't exist.

        See Also
        --------
        _performGeometryTransformations
        """
        geomConverter = self.geomConverters.get("edgeAssems")
        if geomConverter:
            geomConverter.scaleParamsRelatedToSymmetry(
                self.r.core, paramsToScaleSubset=self.options.paramsToScaleSubset
            )

            # Resets the reactor core model to the correct symmetry and removes
            # stored attributes on the converter to ensure that there is no
            # state data that is long-lived on the object in case the garbage
            # collector does not remove it. Additionally, this will reset the
            # global assembly counter.
            geomConverter.removeEdgeAssemblies(self.r.core)

        meshConverter = self.geomConverters.get("axial")

        if meshConverter:
            if self.options.applyResultsToReactor or self.options.hasNonUniformAssems:
                meshConverter.applyStateToOriginal()
            # swap back to the original (pre-conversion) reactor
            self.r = meshConverter._sourceReactor

            # Resets the stored attributes on the converter to ensure that there
            # is no state data that is long-lived on the object in case the
            # garbage collector does not remove it. Additionally, this will
            # reset the global assembly counter.
            meshConverter.reset()

        # clear the converters in case this function gets called twice
        self.geomConverters = {}

    def edgeAssembliesAreNeeded(self) -> bool:
        """
        True if edge assemblies are needed in this calculation.

        We only need them in finite difference cases that are not full core.
        """
        return (
            "FD" in self.options.kernelName
            and self.options.symmetry.domain == geometry.DomainType.THIRD_CORE
            and self.options.symmetry.boundary == geometry.BoundaryType.PERIODIC
            and self.options.geomType == geometry.GeomType.HEX
        )
class GlobalFluxResultMapper(interfaces.OutputReader):
"""
A short-lived class that maps neutronics output data to a reactor mode.
Neutronics results can come from a file or a pipe or in memory.
This is always subclassed for specific neutronics runs but contains
some generic methods that are universally useful for
any global flux calculation. These are mostly along the lines of
information that can be derived from other information, like
dpa rate coming from dpa deltas and cycle length.
"""
def getKeff(self):
    """Return the eigenvalue read from solver output; must be implemented by subclasses."""
    raise NotImplementedError()
def clearFlux(self):
    """Delete flux on all blocks. Needed to prevent stale flux when partially reloading."""
    for b in self.r.core.iterBlocks():
        # reset every flux-like multigroup parameter to an empty list
        b.p.mgFlux = []
        b.p.adjMgFlux = []
        b.p.mgFluxGamma = []
        b.p.extSrc = []
def _renormalizeNeutronFluxByBlock(self, renormalizationCorePower):
"""
Normalize the neutron flux within each block to meet the renormalization power.
Parameters
----------
renormalizationCorePower: float
Specified power to renormalize the neutron flux for using the isotopic energy
generation rates on the cross section libraries (in Watts)
See Also
--------
getTotalEnergyGenerationConstants
"""
# update the block power param here as well so
# the ratio/multiplications below are consistent
currentCorePower = 0.0
for b in self.r.core.iterBlocks():
# The multi-group flux is volume integrated, so J/cm * n-cm/s gives units of Watts
b.p.power = np.dot(b.getTotalEnergyGenerationConstants(), b.getIntegratedMgFlux())
b.p.flux = sum(b.getMgFlux())
currentCorePower += b.p.power
powerRatio = renormalizationCorePower / currentCorePower
runLog.info(
"Renormalizing the neutron flux in {: 0:
a.p.kInf = totalSrc / totalAbs # assembly average k-inf.
def computeDpaRate(mgFlux, dpaXs):
    r"""
    Compute the DPA rate incurred by exposure of a certain flux spectrum.

    .. impl:: Compute DPA rates.
        :id: I_ARMI_FLUX_DPA
        :implements: R_ARMI_FLUX_DPA

        This method calculates DPA rates using the inputted multigroup flux and
        DPA cross sections. Displacements calculated by displacement cross-section:

        .. math::
            :nowrap:

            \begin{aligned}
            \text{Displacement rate} &= \phi N_{\text{HT9}} \sigma \\
            &= (\#/\text{cm}^2/s) \cdot (1/\text{cm}^3) \cdot (\text{barn})\\
            &= (\#/\text{cm}^5/s) \cdot \text{(barn)} \cdot 10^{-24} \text{cm}^2/\text{barn} \\
            &= \#/\text{cm}^3/s
            \end{aligned}

        ::

            DPA rate = displacement density rate / (number of atoms/cc)
                     = dr [#/cm^3/s] / (nHT9) [1/cm^3]
                     = flux * barn * 1e-24

        .. math::

            \frac{\text{dpa}}{s} = \frac{\phi N \sigma}{N} = \phi \sigma

        The number density of the structural material cancels out: it appears in
        the macroscopic cross-section and in the original number of atoms.

    Parameters
    ----------
    mgFlux : list
        multigroup neutron flux in #/cm^2/s
    dpaXs : list
        DPA cross section in barns to convolute with flux to determine DPA rate

    Returns
    -------
    dpaPerSecond : float
        The dpa/s in this material due to this flux

    Raises
    ------
    RuntimeError
        Negative dpa rate.
    """
    if len(mgFlux) != len(dpaXs):
        # incompatible group structures: warn once and report zero rather than crash
        # (message typo fixed: "set do 0.0" -> "set to 0.0")
        runLog.warning(
            "Multigroup flux of length {} is incompatible with dpa cross section of length {};"
            "dpa rate will be set to 0.0".format(len(mgFlux), len(dpaXs)),
            single=True,
        )
        return 0.0

    # convolve group fluxes with the displacement cross section, then barns -> cm^2
    displacements = sum(flux * barns for flux, barns in zip(mgFlux, dpaXs))
    dpaPerSecond = displacements * units.CM2_PER_BARN

    if dpaPerSecond < 0:
        runLog.warning(
            "Negative DPA rate calculated at {}".format(dpaPerSecond),
            single=True,
            label="negativeDpaPerSecond",
        )
        # ensure physical meaning of dpaPerSecond; it is likely just slightly negative
        if dpaPerSecond < -1.0e-10:
            raise RuntimeError("Calculated DPA rate is substantially negative at {}".format(dpaPerSecond))
        dpaPerSecond = 0.0

    return dpaPerSecond
def calcReactionRates(obj, keff, lib):
    r"""
    Compute 1-group reaction rates for this object (usually a block).

    .. impl:: Return the reaction rates for a given ArmiObject
        :id: I_ARMI_FLUX_RX_RATES
        :implements: R_ARMI_FLUX_RX_RATES

        This method computes 1-group reaction rates for the inputted
        :py:class:`ArmiObject <armi.reactor.composites.ArmiObject>`. These
        reaction rates include:

        * fission
        * nufission
        * n2n
        * absorption

        Scatter could be added as well. This function is quite slow so it is
        skipped for now as it is uncommonly needed.

    Reaction rates are accumulated over nuclides and energy groups:

    .. math::

        \Sigma \phi = \sum_{\text{nuclides}} \sum_{\text{energy}} N \sigma \phi

    The units of :math:`N \sigma \phi` are::

        [#/bn-cm] * [bn] * [#/cm^2/s] = [#/cm^3/s]

    The group-averaged microscopic cross section is:

    .. math::

        \sigma_g = \frac{\int_{E_g}^{E_{g+1}} \phi(E) \sigma(E) dE}
                        {\int_{E_g}^{E_{g+1}} \phi(E) dE}

    Parameters
    ----------
    obj : Block
        The object to compute reaction rates on. Notionally this could be upgraded to be
        any kind of ArmiObject but with params defined as they are it currently is only
        implemented for a block.
    keff : float
        The keff of the core. This is required to get the neutron production rate correct
        via the neutron balance statement (since nuSigF has a 1/keff term).
    lib : XSLibrary
        Microscopic cross sections to use in computing the reaction rates.
    """
    rate = {simple: 0.0 for simple in RX_PARAM_NAMES}

    # the flux is the same for every nuclide; hoisted out of the loop below
    # (previously re-fetched once per nuclide)
    mgFlux = obj.getMgFlux()

    for nucName, numberDensity in obj.getNumberDensities().items():
        if numberDensity == 0.0:
            continue
        nucrate = {simple: 0.0 for simple in RX_PARAM_NAMES}

        micros = lib.getNuclide(nucName, obj.getMicroSuffix()).micros

        # absorption is fission + capture (no n2n here)
        for name in RX_ABS_MICRO_LABELS:
            for g, (groupFlux, xs) in enumerate(zip(mgFlux, micros[name])):
                # dphi has units [#/bn-cm] * [#/cm^2/s]
                dphi = numberDensity * groupFlux
                nucrate["rateAbs"] += dphi * xs
                if name != "fission":
                    nucrate["rateCap"] += dphi * xs
                else:
                    nucrate["rateFis"] += dphi * xs
                    # scale nu by keff (neutron balance: production carries 1/keff)
                    nucrate["rateProdFis"] += dphi * xs * micros.neutronsPerFission[g] / keff

        for groupFlux, n2nXs in zip(mgFlux, micros.n2n):
            # this n2n xs is reaction based. Multiply by 2.
            nucrate["rateProdN2n"] += 2.0 * numberDensity * groupFlux * n2nXs

        for simple in RX_PARAM_NAMES:
            if nucrate[simple]:
                rate[simple] += nucrate[simple]

    for paramName, val in rate.items():
        obj.p[paramName] = val  # put in #/cm^3/s

    # homogenize fission density over the fuel component area only (if fissioning)
    vFuel = obj.getComponentAreaFrac(Flags.FUEL) if rate["rateFis"] > 0.0 else 1.0
    obj.p.fisDens = rate["rateFis"] / vFuel
    obj.p.fisDensHom = rate["rateFis"]
================================================
FILE: armi/physics/neutronics/globalFlux/tests/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
================================================
FILE: armi/physics/neutronics/globalFlux/tests/test_globalFluxInterface.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for generic global flux interface."""
import unittest
from unittest.mock import patch
import numpy as np
from armi import settings
from armi.nuclearDataIO.cccc import isotxs
from armi.physics.neutronics.globalFlux import globalFluxInterface
from armi.physics.neutronics.settings import (
CONF_GRID_PLATE_DPA_XS_SET,
CONF_XS_KERNEL,
)
from armi.reactor import geometry
from armi.reactor.blocks import HexBlock
from armi.reactor.flags import Flags
from armi.reactor.tests import test_blocks, test_reactors
from armi.tests import ISOAA_PATH
class MockReactorParams:
    """Minimal stand-in for reactor-level parameters used by these tests."""

    def __init__(self):
        # arbitrary but fixed cycle/node so label and file-name logic is deterministic
        self.cycle = 1
        self.timeNode = 2
class MockCoreParams:
    """Empty parameter container; tests assign attributes (e.g. ``keff``) on it ad hoc."""
    pass
class MockCore:
    """Minimal core double exposing the attributes the global flux options read."""

    def __init__(self):
        self.p = MockCoreParams()
        self.symmetry = "full"
        # just pick a random geomType; tests only check it is passed through
        self.geomType = geometry.GeomType.CARTESIAN
class MockReactor:
    """Reactor double: a mock core plus the reactor params the interface inspects."""

    def __init__(self):
        self.p = MockReactorParams()
        self.o = None
        self.core = MockCore()
class MockGlobalFluxInterface(globalFluxInterface.GlobalFluxInterface):
    """
    Add a fake keff calc to the general global flux interface.

    This simulates a 1000 pcm keff increase over 1 step.
    """

    def interactBOC(self, cycle=None):
        super().interactBOC(cycle=cycle)
        self.r.core.p.keff = 1.00

    def interactEveryNode(self, cycle, node):
        super().interactEveryNode(cycle, node)
        self.r.core.p.keff = 1.01
class MockGlobalFluxWithExecuters(globalFluxInterface.GlobalFluxInterfaceUsingExecuters):
    """Executer-based interface whose executer class is replaced by a mock."""

    def getExecuterCls(self):
        return MockGlobalFluxExecuter
class MockGlobalFluxWithExecutersNonUniform(MockGlobalFluxWithExecuters):
    """Variant of the mock interface that forces the non-uniform-assembly code path."""

    def getExecuterOptions(self, label=None):
        """Return executer options modified to flag non-uniform assemblies."""
        opts = super().getExecuterOptions(label=label)
        opts.hasNonUniformAssems = True  # to increase test coverage
        return opts
class MockGlobalFluxExecuter(globalFluxInterface.GlobalFluxExecuter):
    """Tests for code that uses Executers, which rely on OutputReaders to update state."""

    def _readOutput(self):
        class FakeOutputReader:
            """Output-reader double: bumps keff on apply and reports a fixed keff."""

            def apply(self, r):
                r.core.p.keff += 0.01

            def getKeff(self):
                return 1.05

        return FakeOutputReader()
class TestGlobalFluxOptions(unittest.TestCase):
    """Tests for GlobalFluxOptions."""

    def test_readFromSettings(self):
        """Test reading global flux options from case settings.

        .. test:: Tests GlobalFluxOptions.
            :id: T_ARMI_FLUX_OPTIONS_CS
            :tests: R_ARMI_FLUX_OPTIONS
        """
        caseSettings = settings.Settings()
        options = globalFluxInterface.GlobalFluxOptions("neutronics-run")
        options.fromUserSettings(caseSettings)
        self.assertFalse(options.adjoint)

    def test_readFromReactors(self):
        """Test reading global flux options from reactor objects.

        .. test:: Tests GlobalFluxOptions.
            :id: T_ARMI_FLUX_OPTIONS_R
            :tests: R_ARMI_FLUX_OPTIONS
        """
        mockReactor = MockReactor()
        options = globalFluxInterface.GlobalFluxOptions("neutronics-run")
        options.fromReactor(mockReactor)
        self.assertEqual(options.geomType, geometry.GeomType.CARTESIAN)
        self.assertFalse(options.savePhysicsFiles)

    def test_savePhysicsFiles(self):
        mockReactor = MockReactor()
        options = globalFluxInterface.GlobalFluxOptions("neutronics-run")

        # savePhysicsFilesList matches MockReactor parameters (cycle 1, node 2)
        options.savePhysicsFilesList = ["001002"]
        options.fromReactor(mockReactor)
        self.assertTrue(options.savePhysicsFiles)

        # savePhysicsFilesList does not match MockReactor parameters
        options.savePhysicsFilesList = ["001000"]
        options.fromReactor(mockReactor)
        self.assertFalse(options.savePhysicsFiles)
class TestGFI(unittest.TestCase):
    """Tests of the basic GlobalFluxInterface hooks and helpers."""

    def test_computeDpaRate(self):
        """
        Compute DPA and DPA rates from multi-group neutron flux and cross sections.

        .. test:: Compute DPA rates.
            :id: T_ARMI_FLUX_DPA
            :tests: R_ARMI_FLUX_DPA
        """
        xs = [1, 2, 3]
        flx = [0.5, 0.75, 2]
        res = globalFluxInterface.computeDpaRate(flx, xs)
        # expected value is the flux/xs dot product times the barns -> cm^2 factor
        self.assertEqual(res, 10**-24 * (0.5 + 1.5 + 6))

    def test_interaction(self):
        """
        Ensure the basic interaction hooks work.

        Check that a 1000 pcm rx swing is observed due to the mock.
        """
        cs = settings.Settings()
        cs["burnSteps"] = 2
        _o, r = test_reactors.loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml")
        gfi = MockGlobalFluxInterface(r, cs)
        bocKeff = 1.1
        r.core.p.keffUnc = 1.1
        gfi.interactBOC()
        r.p.cycle, r.p.timeNode = 0, 0
        gfi.interactEveryNode(0, 0)
        # at the first node of the cycle the interface captures keffUnc as the BOC keff
        self.assertAlmostEqual(gfi._bocKeff, r.core.p.keffUnc)
        r.core.p.keffUnc = 1.05
        r.p.cycle, r.p.timeNode = 0, 1
        gfi.interactEveryNode(0, 1)
        # doesn't change since its not the first node
        self.assertAlmostEqual(gfi._bocKeff, bocKeff)
        r.core.p.keffUnc = 1.01
        r.p.cycle, r.p.timeNode = 0, 2
        gfi.interactEveryNode(0, 2)
        self.assertAlmostEqual(gfi._bocKeff, bocKeff)
        # reactivity swing (in pcm) between BOC keff (1.1) and current keff (1.01)
        self.assertAlmostEqual(r.core.p.rxSwing, -1e5 * (1.1 - 1.01) / (1.1 * 1.01))
        gfi.interactBOC(0)
        # now its zeroed at BOC
        self.assertAlmostEqual(r.core.p.rxSwing, 0)

    def test_getIOFileNames(self):
        # file names are built from cycle/node/label; expect zero-padded pieces
        cs = settings.Settings()
        gfi = MockGlobalFluxInterface(MockReactor(), cs)
        inf, _outf, _stdname = gfi.getIOFileNames(1, 2, 1)
        self.assertEqual(inf, "armi001_2_001.GlobalFlux.inp")

    def test_getHistoryParams(self):
        params = globalFluxInterface.GlobalFluxInterface.getHistoryParams()
        self.assertEqual(len(params), 3)
        self.assertIn("detailedDpa", params)

    def test_checkEnergyBalance(self):
        """Test energy balance check.

        .. test:: Block-wise power is consistent with reactor data model power.
            :id: T_ARMI_FLUX_CHECK_POWER
            :tests: R_ARMI_FLUX_CHECK_POWER
        """
        cs = settings.Settings()
        _o, r = test_reactors.loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml")
        gfi = MockGlobalFluxInterface(r, cs)
        # balanced case returns None (no exception)
        self.assertEqual(gfi.checkEnergyBalance(), None)

        # Test when nameplate power doesn't equal sum of block power
        r.core.p.power = 1e-10
        with self.assertRaises(ValueError):
            gfi.checkEnergyBalance()
class TestGFIWithExecuters(unittest.TestCase):
    """Tests for the default global flux execution."""

    @classmethod
    def setUpClass(cls):
        cls.cs = settings.Settings()
        cls.r = test_reactors.loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml")[1]

    def setUp(self):
        # reset keff and build a fresh mocked interface for each test
        self.r.core.p.keff = 1.0
        self.gfi = MockGlobalFluxWithExecuters(self.r, self.cs)

    @patch("armi.physics.neutronics.globalFlux.globalFluxInterface.GlobalFluxExecuter._execute")
    @patch("armi.physics.neutronics.globalFlux.globalFluxInterface.GlobalFluxExecuter._performGeometryTransformations")
    def test_executerInteraction(self, mockGeometryTransform, mockExecute):
        """Run the global flux interface and executer though one time now.

        .. test:: Run the global flux interface to check that the mesh converter is called before the neutronics solver.
            :id: T_ARMI_FLUX_GEOM_TRANSFORM_ORDER
            :tests: R_ARMI_FLUX_GEOM_TRANSFORM
        """
        # record the order in which the two mocked steps are invoked
        call_order = []
        mockGeometryTransform.side_effect = lambda *a, **kw: call_order.append(mockGeometryTransform)
        mockExecute.side_effect = lambda *a, **kw: call_order.append(mockExecute)
        gfi = self.gfi
        gfi.interactBOC()
        gfi.interactEveryNode(0, 0)
        # the geometry transformation must run before the solver executes
        self.assertEqual([mockGeometryTransform, mockExecute], call_order)

    def test_calculateKeff(self):
        self.assertEqual(self.gfi.calculateKeff(), 1.05)  # set in mock

    def test_getExecuterCls(self):
        class0 = globalFluxInterface.GlobalFluxInterfaceUsingExecuters.getExecuterCls()
        self.assertEqual(class0, globalFluxInterface.GlobalFluxExecuter)

    def test_setTightCouplingDefaults(self):
        """Assert that tight coupling defaults are only set if cs["tightCoupling"]=True."""
        self.assertIsNone(self.gfi.coupler)
        self._setTightCouplingTrue()
        self.assertEqual(self.gfi.coupler.parameter, "keff")
        self._setTightCouplingFalse()

    def test_getTightCouplingValue(self):
        """Test getTightCouplingValue returns the correct value for keff and type for power."""
        self._setTightCouplingTrue()
        self.assertEqual(self.gfi.getTightCouplingValue(), 1.0)  # set in setUp
        self.gfi.coupler.parameter = "power"
        for a in self.r.core:
            for b in a:
                b.p.power = 10.0
        self.assertEqual(
            self.gfi.getTightCouplingValue(),
            self._getCouplingPowerDistributions(self.r.core),
        )
        self._setTightCouplingFalse()

    @staticmethod
    def _getCouplingPowerDistributions(core):
        # expected value: per-assembly block powers normalized by the assembly total
        scaledPowers = []
        for a in core:
            assemblyPower = sum(b.p.power for b in a)
            scaledPowers.append([b.p.power / assemblyPower for b in a])
        return scaledPowers

    def _setTightCouplingTrue(self):
        # enable the setting, then re-run default assignment on the interface
        self.cs["tightCoupling"] = True
        self.gfi._setTightCouplingDefaults()

    def _setTightCouplingFalse(self):
        # reset the shared settings object so later tests see the default
        self.cs["tightCoupling"] = False
class TestGFIWithExecutersNonUniform(unittest.TestCase):
    """Tests for global flux execution with non-uniform assemblies."""

    @classmethod
    def setUpClass(cls):
        caseSettings = settings.Settings()
        _o, cls.r = test_reactors.loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml")
        cls.r.core.p.keff = 1.0
        cls.gfi = MockGlobalFluxWithExecutersNonUniform(cls.r, caseSettings)

    @patch("armi.reactor.converters.uniformMesh.converterFactory")
    def test_executerInteractionNonUniformAssems(self, mockConverterFactory):
        """Run the global flux interface with non-uniform assemblies.

        This will serve as a broad end-to-end test of the interface, and also
        stress test the mesh issues with non-uniform assemblies.

        .. test:: Run the global flux interface to show the geometry converter is called when the
            nonuniform mesh option is used.
            :id: T_ARMI_FLUX_GEOM_TRANSFORM_CONV
            :tests: R_ARMI_FLUX_GEOM_TRANSFORM
        """
        interface = self.gfi
        interface.interactBOC()
        interface.interactEveryNode(0, 0)
        self.assertTrue(interface.getExecuterOptions().hasNonUniformAssems)
        mockConverterFactory.assert_called()

    def test_calculateKeff(self):
        # the mocked output reader always reports 1.05
        self.assertEqual(self.gfi.calculateKeff(), 1.05)

    def test_getExecuterCls(self):
        executerCls = globalFluxInterface.GlobalFluxInterfaceUsingExecuters.getExecuterCls()
        self.assertEqual(executerCls, globalFluxInterface.GlobalFluxExecuter)
class TestGlobalFluxResultMapper(unittest.TestCase):
    """
    Test that global flux result mappings run.

    Notes
    -----
    This does not test that the flux mapping is correct. That has to be done
    at another level.
    """

    def test_mapper(self):
        # Switch to MC2v2 setting to make sure the isotopic/elemental expansions are compatible with
        # actually doing some math using the ISOAA test microscopic library
        o, r = test_reactors.loadTestReactor(
            customSettings={CONF_XS_KERNEL: "MC2v2"},
            inputFileName="smallestTestReactor/armiRunSmallest.yaml",
        )
        applyDummyFlux(r)
        r.core.lib = isotxs.readBinary(ISOAA_PATH)
        mapper = globalFluxInterface.GlobalFluxResultMapper(cs=o.cs)
        mapper.r = r
        # renormalize so the fueled core power totals 100 W
        mapper._renormalizeNeutronFluxByBlock(100)
        self.assertAlmostEqual(r.core.calcTotalParam("power", generationNum=2), 100)

        # derived params (peak power density, peak flux) should now be positive
        mapper._updateDerivedParams()
        self.assertGreater(r.core.p.maxPD, 0.0)
        self.assertGreater(r.core.p.maxFlux, 0.0)

        # dpa *rate* gets set, but accumulated dpa stays zero until burn steps occur
        mapper.updateDpaRate()
        block = r.core.getFirstBlock()
        self.assertGreater(block.p.detailedDpaRate, 0)
        self.assertEqual(block.p.detailedDpa, 0)

        # clearing flux should empty the multigroup flux parameter
        mapper.clearFlux()
        self.assertEqual(len(block.p.mgFlux), 0)

    def test_getDpaXs(self):
        cs = settings.Settings()
        mapper = globalFluxInterface.GlobalFluxResultMapper(cs=cs)

        # test fuel block (default dpa XS set)
        b = HexBlock("fuel", height=10.0)
        vals = mapper.getDpaXs(b)
        self.assertEqual(len(vals), 33)
        self.assertAlmostEqual(vals[0], 2345.69, 1)

        # build a grid plate block
        b = HexBlock("grid_plate", height=10.0)
        b.p.flags = Flags.GRID_PLATE
        self.assertTrue(b.hasFlags(Flags.GRID_PLATE))

        # test grid plate block: uses the CONF_GRID_PLATE_DPA_XS_SET setting instead
        mapper.cs[CONF_GRID_PLATE_DPA_XS_SET] = "dpa_EBRII_PE16"
        vals = mapper.getDpaXs(b)
        self.assertEqual(len(vals), 33)
        self.assertAlmostEqual(vals[0], 2478.95, 1)

        # test null case: an unknown XS set name raises KeyError
        mapper.cs[CONF_GRID_PLATE_DPA_XS_SET] = "fake"
        with self.assertRaises(KeyError):
            mapper.getDpaXs(b)

    def test_getBurnupPeakingFactor(self):
        cs = settings.Settings()
        mapper = globalFluxInterface.GlobalFluxResultMapper(cs=cs)
        # zero out the user-specified factor so it is computed from flux data
        mapper.cs["burnupPeakingFactor"] = 0.0

        # test fuel block: expect fluxPeak / flux = 250 / 100 = 2.5
        b = HexBlock("fuel", height=10.0)
        b.p.flux = 100.0
        b.p.fluxPeak = 250.0
        factor = mapper.getBurnupPeakingFactor(b)
        self.assertEqual(factor, 2.5)

    def test_getBurnupPeakingFactorZero(self):
        cs = settings.Settings()
        mapper = globalFluxInterface.GlobalFluxResultMapper(cs=cs)
        # test fuel block without any peaking factor set; no flux data yields 0.0
        b = HexBlock("fuel", height=10.0)
        factor = mapper.getBurnupPeakingFactor(b)
        self.assertEqual(factor, 0.0)
class TestGlobalFluxUtils(unittest.TestCase):
    """Tests for module-level global flux helper functions."""

    def test_calcReactionRates(self):
        """
        Test that the reaction rate code executes and sets a param > 0.0.

        .. test:: Return the reaction rates for a given ArmiObject.
            :id: T_ARMI_FLUX_RX_RATES
            :tests: R_ARMI_FLUX_RX_RATES
        """
        block = test_blocks.loadTestBlock()
        test_blocks.applyDummyData(block)
        self.assertAlmostEqual(block.p.rateAbs, 0.0)

        globalFluxInterface.calcReactionRates(block, 1.01, block.core.lib)
        self.assertGreater(block.p.rateAbs, 0.0)

        # fission density should be the fission rate scaled by the fuel area fraction
        fuelAreaFrac = block.getComponentAreaFrac(Flags.FUEL)
        self.assertEqual(block.p.fisDens, block.p.rateFis / fuelAreaFrac)
        self.assertEqual(block.p.fisDensHom, block.p.rateFis)
def applyDummyFlux(r, ng=33):
    """Set an arbitrary, nonzero flux distribution on every block of a Reactor."""
    for blk in r.core.iterBlocks():
        blk.p.power = 1.0
        # each block gets its own array so later per-block mutation is safe
        blk.p.mgFlux = np.arange(ng, dtype=np.float64)
================================================
FILE: armi/physics/neutronics/isotopicDepletion/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This package houses helper tools that allow ARMI to communicate with external isotopic depletion programs."""
================================================
FILE: armi/physics/neutronics/isotopicDepletion/crossSectionTable.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module containing the CrossSectionTable class.
The CrossSectionTable is useful for performing isotopic depletion analysis by storing one-group cross sections of
interest to such an analysis. This used to live alongside the isotopicDepletionInterface, but that proved to be an
unpleasant coupling between the ARMI composite model and the physics code contained therein. Separating it out at least
means that the composite model doesn't need to import the isotopicDepletionInterface to function.
"""
import collections
from typing import List
import numpy as np
from armi.nucDirectory import nucDir
class CrossSectionTable(collections.OrderedDict):
    """
    This is a set of one group cross sections for use with isotopicDepletion analysis.

    It can also double as a reaction rate table.

    The table is keyed by the integer MCNP nuclide ID; each entry maps the reaction
    types (nG), (nF), (n2n), (nA), (nP) and (n3n) to one-group cross sections in barns.
    """

    # supported one-group reaction types, in the canonical output order
    rateTypes = ("nG", "nF", "n2n", "nA", "nP", "n3n")

    def __init__(self, *args, **kwargs):
        collections.OrderedDict.__init__(self, *args, **kwargs)
        self._name = None

    def setName(self, name):
        """Set the table's name (typically the name of the associated composite)."""
        self._name = name

    def getName(self):
        """Return the table's name."""
        return self._name

    def add(self, nucName, nG=0.0, nF=0.0, n2n=0.0, nA=0.0, nP=0.0, n3n=0.0):
        """
        Add one group cross sections to the table.

        Parameters
        ----------
        nucName : str
            nuclide name -- e.g. 'U235'
        nG : float
            (n,gamma) cross section in barns
        nF : float
            (n,fission) cross section in barns
        n2n : float
            (n,2n) cross section in barns
        nA : float
            (n,alpha) cross section in barns
        nP : float
            (n,proton) cross section in barns
        n3n : float
            (n,3n) cross section in barns
        """
        xsData = {rateType: xs for rateType, xs in zip(self.rateTypes, [nG, nF, n2n, nA, nP, n3n])}
        nb = nucDir.nuclideBases.byName[nucName]
        # key the table on the integer MCNP ID rather than the ARMI nuclide name
        mcnpNucName = int(nb.getMcnpId())
        self[mcnpNucName] = xsData

    def addMultiGroupXS(self, nucName, microMultiGroupXS, mgFlux, totalFlux=None):
        """
        Perform group collapse to one group cross sections and add to table.

        Parameters
        ----------
        nucName : str
            nuclide name -- e.g. 'U235'
        microMultiGroupXS : XSCollection
            micro cross sections, typically a XSCollection from an ISOTXS
        mgFlux : list like
            The flux in each energy group
        totalFlux : float
            The total flux. Optional argument for increased speed if already available.
        """
        totalFlux = totalFlux if totalFlux is not None else sum(mgFlux)
        xsTypes = ("nG", "nF", "n2n", "nA", "nP")
        mgCrossSections = (
            microMultiGroupXS.nGamma,
            microMultiGroupXS.fission,
            microMultiGroupXS.n2n,
            microMultiGroupXS.nalph,
            microMultiGroupXS.np,
        )
        # flux-weighted collapse: one-group xs = sum_g(xs_g * flux_g) / totalFlux
        oneGroupXS = np.asarray(mgCrossSections).dot(mgFlux) / totalFlux
        oneGroupXSbyName = {xsType: xs for xsType, xs in zip(xsTypes, oneGroupXS)}
        # (n,3n) is not present in the multigroup micros above, so it is zeroed
        oneGroupXSbyName["n3n"] = 0.0
        self.add(nucName, **oneGroupXSbyName)

    def hasValues(self):
        """Determines if there are non-zero values in this cross section table."""
        return any(any(nuclideCrossSectionSet.values()) for nuclideCrossSectionSet in self.values())

    def getXsecTable(
        self,
        headerFormat="$ xsecs for {}",
        # NOTE: bug fix -- this previously used "{{mcnpId}}", which .format() escapes
        # to the literal text "{mcnpId}" so the nuclide ID never appeared in the card.
        # Single braces match the equivalent default in makeXsecTable.
        tableFormat="\n{mcnpId} {nG:.5e} {nF:.5e} {n2n:.5e} {n3n:.5e} {nA:.5e} {nP:.5e}",
    ):
        """
        Make a cross section table for external depletion physics code input decks.

        .. impl:: Generate a formatted cross section table.
            :id: I_ARMI_DEPL_TABLES1
            :implements: R_ARMI_DEPL_TABLES

            Loops over the reaction rates stored as ``self`` to produce a string with the cross
            sections for each nuclide in the block. Cross sections may be populated by
            ``makeReactionRateTable``.

            The string will have a header with the table's name formatted according to
            ``headerFormat`` followed by rows for each unique nuclide/reaction combination,
            where each line is formatted according to ``tableFormat``.

        Parameters
        ----------
        headerFormat : string (optional)
            This is the format in which the elements of the header with be returned -- i.e. if you
            use a .format() call with the case name you'll return a formatted list of strings.
        tableFormat : string (optional)
            This is the format in which the elements of the table with be returned -- i.e. if you
            use a .format() call with mcnpId, nG, nF, n2n, n3n, nA, and nP you'll get the format
            you want.

        Returns
        -------
        output : list
            a list of string elements that together make a xsec card
        """
        output = [headerFormat.format(self.getName())]
        for mcnpNucName in sorted(self.keys()):
            rxRates = self[mcnpNucName]
            dataToWrite = {rateType: rxRates[rateType] for rateType in self.rateTypes}
            # skip nuclides whose reaction rates are all zero
            if any(dataToWrite[rateType] for rateType in self.rateTypes):
                dataToWrite["mcnpId"] = mcnpNucName
                output.append(tableFormat.format(**dataToWrite))
        return output
def makeReactionRateTable(obj, nuclides: List = None):
    """
    Generate a reaction rate table for given nuclides.

    Often useful in support of depletion.

    .. impl:: Generate a reaction rate table with entries for (nG), (nF), (n2n), (nA), and (nP) reactions.
        :id: I_ARMI_DEPL_TABLES0
        :implements: R_ARMI_DEPL_TABLES

        For a given composite object ``obj`` and a list of nuclides ``nuclides`` in that object, call
        ``obj.getReactionRates()`` for each nuclide with a ``nDensity`` parameter of 1.0. If ``nuclides`` is not
        specified, use a list of all nuclides in ``obj``. This will reach upwards through the parents of ``obj`` to the
        associated :py:class:`~armi.reactor.reactors.Core` object and pull the ISOTXS library that is stored there. If
        ``obj`` does not belong to a ``Core``, a warning is printed.

        For each child of ``obj``, use the ISOTXS library and the cross-section ID for the associated block to produce a
        reaction rate dictionary in units of inverse seconds for the nuclide specified in the original call to
        ``obj.getReactionRates()``. Because ``nDensity`` was originally specified as 1.0, this dictionary actually
        represents the reaction rates per unit volume. If the nuclide is not in the ISOTXS library a warning is printed.

        Combine the reaction rates for all nuclides into a combined dictionary by summing together reaction rates of the
        same type on the same isotope from each of the children of ``obj``.

        If ``obj`` has a non-zero multi-group flux, sum the group-wise flux into the total flux and normalize the
        reaction rates by the total flux, producing a one-group macroscopic cross section for each reaction type on each
        nuclide. Store these values in a ``CrossSectionTable``.

    Parameters
    ----------
    nuclides : list, optional
        list of nuclide names for which to generate the cross-section table.
        If absent, use all nuclides obtained by self.getNuclides().

    Notes
    -----
    This also used to do some caching on the block level but that has been removed and the calls to this may therefore
    need to be re-optimized.

    See Also
    --------
    armi.physics.neutronics.isotopicDepletion.isotopicDepletionInterface.CrossSectionTable
    armi.reactor.composites.Composite.getReactionRates
    """
    if nuclides is None:
        nuclides = obj.getNuclides()

    # accumulate per-nuclide, per-reaction rates summed over all children of obj
    combinedRxRates = {nucName: dict.fromkeys(CrossSectionTable.rateTypes, 0) for nucName in nuclides}
    for child in obj:
        for nucName in nuclides:
            for rxName, rxRate in child.getReactionRates(nucName, nDensity=1.0).items():
                combinedRxRates[nucName][rxName] += rxRate

    crossSectionTable = CrossSectionTable()
    crossSectionTable.setName(obj.getName())

    totalFlux = sum(obj.getIntegratedMgFlux())
    if totalFlux:
        # normalize by total flux to produce one-group cross sections
        for nucName, nucRxRates in combinedRxRates.items():
            crossSectionTable.add(nucName, **{rxName: rxRate / totalFlux for rxName, rxRate in nucRxRates.items()})
    return crossSectionTable
================================================
FILE: armi/physics/neutronics/isotopicDepletion/isotopicDepletionInterface.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An abstract class for interfaces between ARMI and programs that simulate transmutation and decay."""
import collections
from armi import interfaces
from armi.nucDirectory import nuclideBases
from armi.nuclearDataIO import xsLibraries
from armi.physics.neutronics.isotopicDepletion.crossSectionTable import (
CrossSectionTable,
)
from armi.reactor import composites
from armi.reactor.flags import Flags
def isDepletable(obj: composites.ArmiObject):
    """
    Return True if obj or any child is flagged as DEPLETABLE.

    The DEPLETABLE flag is automatically set to True if any composition contains nuclides that are
    in the active nuclides list, unless flags are specifically set and DEPLETABLE is left out.

    This is often interpreted by depletion plugins as indicating which parts of the problem to apply
    depletion to. Analysts may want to turn on and off depletion in certain problems.

    For example, sometimes they want the control rods to deplete to figure out how often to replace
    them.

    Warning
    -------
    The ``DEPLETABLE`` flag is automatically added to compositions that have active nuclides. If you
    explicitly define any flags at all, you must also manually include ``DEPLETABLE`` or else the
    objects will silently not deplete.

    Notes
    -----
    The auto-flagging of ``DEPLETABLE`` happens in the construction of blueprints
    rather than in a plugin hook because the reactor is not available at the time
    the plugin hook runs.

    See Also
    --------
    armi.reactor.blueprints.componentBlueprint.insertDepletableNuclideKeys
    """
    # the object itself may carry the flag...
    if obj.hasFlags(Flags.DEPLETABLE):
        return True
    # ...or any of its children may
    return obj.containsAtLeastOneChildWithFlags(Flags.DEPLETABLE)
class AbstractIsotopicDepleter:
    """
    Interact with a depletion code.

    This interface and subClasses deplete under a flux defined outside this interface

    The depletion in this analysis only depends on the flux, material vectors, nuclear data and continuous source and
    loss objects.

    The depleters derived from this abstract class use all the fission products ARMI can handle -- i.e. do not form
    lumped fission products.

    The class attribute _depleteByName contains a ARMI objects to deplete keyed by name.

    .. impl:: ARMI provides a base class to deplete isotopes.
        :id: I_ARMI_DEPL_ABC
        :implements: R_ARMI_DEPL_ABC

        This class provides some basic infrastructure typically needed in depletion calculations within the ARMI
        framework. It stores a reactor, operator, and case settings object, and also defines methods to store and
        retrieve the objects which should be depleted based on their names.
    """

    name = None
    purpose = "depletion"

    def __init__(self, r=None, cs=None, o=None):
        self.r = r
        self.cs = cs
        self.o = o

        # ARMI objects to deplete keyed by name; insertion order is preserved
        # for consistency when iterating through objects
        self._depleteByName = collections.OrderedDict()

        self.efpdToBurn = None
        self.allNuclidesInProblem = r.blueprints.allNuclidesInProblem if r else []

    def addToDeplete(self, armiObj):
        """Add the object to the group of objects to be depleted."""
        self._depleteByName[armiObj.getName()] = armiObj

    def setToDeplete(self, armiObjects):
        """Change the group of objects to deplete to the specified group."""
        self._depleteByName = collections.OrderedDict((obj.getName(), obj) for obj in armiObjects)

    def getToDeplete(self):
        """Return objects to be depleted."""
        return list(self._depleteByName.values())

    def run(self):
        """
        Submit depletion case with external solver to the cluster.

        In addition to running the physics kernel, this method calls the waitForJob method to wait for it job to finish.

        comm = MPI.COMM_SELF.Spawn(sys.executable,args=['cpi.py'],maxprocs=5)
        """
        raise NotImplementedError
def makeXsecTable(
    compositeName,
    xsType,
    mgFlux,
    isotxs,
    headerFormat="$ xsecs for {}",
    tableFormat="\n{mcnpId} {nG:.5e} {nF:.5e} {n2n:.5e} {n3n:.5e} {nA:.5e} {nP:.5e}",
):
    """
    Make a cross section table for depletion physics input decks.

    Parameters
    ----------
    compositeName : str
        name of the composite (e.g. a block) the table is being generated for;
        used to label the header via ``headerFormat``
    xsType : str
        cross section type suffix (e.g. from ``.p.xsType``); nuclides in the
        library whose label suffix does not match are skipped
    mgFlux : list-like
        multigroup flux used to collapse the micros to one group
    isotxs : isotxs object
        mapping of nuclide labels to nuclides carrying multigroup micros
    headerFormat : string (optional)
        this is the format in which the elements of the header with be returned -- i.e. if you use a .format() call
        with the case name you'll return a formatted list of string elements
    tableFormat : string (optional)
        This is the format in which the elements of the table with be returned -- i.e. if you use a .format() call with
        mcnpId, nG, nF, n2n, n3n, nA, and nP you'll get the format you want.

    Returns
    -------
    output : list
        a list of string elements that together make a xsec card

    See Also
    --------
    crossSectionTable.makeReactionRateTable
        Makes a table for arbitrary ArmiObjects
    """
    # nothing to tabulate without an XS type or a positive total flux
    if not xsType or not sum(mgFlux) > 0:
        return []

    xsTable = CrossSectionTable()
    xsTable.setName(compositeName)
    totalFlux = sum(mgFlux)
    for nucLabel, nuc in isotxs.items():
        # only include nuclides whose library label carries the requested XS type suffix
        if xsType != xsLibraries.getSuffixFromNuclideLabel(nucLabel):
            continue
        nucName = nuc.name
        nb = nuclideBases.byName[nucName]
        # lumped and dummy nuclides carry no physical one-group data
        if isinstance(nb, (nuclideBases.LumpNuclideBase, nuclideBases.DummyNuclideBase)):
            continue
        microMultiGroupXS = isotxs[nucLabel].micros
        if not isinstance(nb, nuclideBases.NaturalNuclideBase):
            xsTable.addMultiGroupXS(nucName, microMultiGroupXS, mgFlux, totalFlux)
    return xsTable.getXsecTable(headerFormat=headerFormat, tableFormat=tableFormat)
class AbstractIsotopicDepletionReader(interfaces.OutputReader):
    """Read number density output produced by the isotopic depletion."""

    def read(self):
        """Read a isotopic depletion Output File and applies results to armi objects in the
        ``ToDepletion`` attribute.
        """
        # concrete depletion plugins must implement the output-file parsing
        raise NotImplementedError
class Csrc:
    """
    Writes a continuous source term card in a depletion interface.

    Notes
    -----
    The chemical vector is a dictionary of chemicals and their removal rate constant. This works like a decay constant.

    The isotopic vector is used to make a source material in continuous source definitions.

    This is also the base class for continuous loss cards.
    """

    def __init__(self):
        self.defaultVector = {"0": 0}
        self._isotopicVector = {}
        self._chemicalVector = {}

    def setChemicalVector(self, chemicalVector):
        """Set the mapping of chemicals to removal rate constants."""
        self._chemicalVector = chemicalVector

    def getChemicalVector(self):
        """Return the mapping of chemicals to removal rate constants."""
        return self._chemicalVector

    def write(self):
        """Return a list of lines to write for a csrc card."""
        raise NotImplementedError
================================================
FILE: armi/physics/neutronics/latticePhysics/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Initialization of the interfaces for running lattice physics calculations."""
# ruff: noqa: F401
import os
from armi import interfaces, settings
from armi.physics import neutronics
from armi.utils import pathTools
# Lattice physics interfaces provide cross sections, so they run at the
# CROSS_SECTIONS position in the interface stack ordering.
ORDER = interfaces.STACK_ORDER.CROSS_SECTIONS
================================================
FILE: armi/physics/neutronics/latticePhysics/latticePhysicsInterface.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Lattice Physics Interface.
Parent classes for codes responsible for generating broad-group cross sections.
"""
import os
from armi import interfaces, nuclearDataIO, runLog
from armi.physics import neutronics
from armi.physics.neutronics import LatticePhysicsFrequency
from armi.physics.neutronics.const import CONF_CROSS_SECTION
from armi.physics.neutronics.settings import (
CONF_CLEAR_XS,
CONF_GEN_XS,
CONF_LATTICE_PHYSICS_FREQUENCY,
CONF_TOLERATE_BURNUP_CHANGE,
CONF_XS_KERNEL,
)
from armi.utils import safeCopy
# Purpose label used by LatticePhysicsInterface to identify itself in the interface stack.
LATTICE_PHYSICS = "latticePhysics"
def setBlockNeutronVelocities(r, neutronVelocities):
    """
    Set the ``mgNeutronVelocity`` parameter for each block using the ``neutronVelocities`` dictionary data.

    Parameters
    ----------
    r : Reactor
        A Reactor object, that we want to modify.
    neutronVelocities : dict
        Dictionary that is keyed with the ``representativeBlock`` XS IDs with values of multigroup neutron velocity data
        computed by MC2.

    Raises
    ------
    ValueError
        Multi-group neutron velocities was not computed during the cross section calculation.
    """
    for b in r.core.iterBlocks():
        xsID = b.getMicroSuffix()
        if xsID in neutronVelocities:
            b.p.mgNeutronVelocity = neutronVelocities[xsID]
            continue
        raise ValueError(
            f"Cannot assign multi-group neutron velocity to {b} because it does not exist in the neutron "
            f"velocities dictionary with keys: {neutronVelocities.keys()}. The XS library does not contain data "
            f"for the {xsID} xsid."
        )
class LatticePhysicsInterface(interfaces.Interface):
"""Class for interacting with lattice physics codes."""
purpose = LATTICE_PHYSICS
def __init__(self, r, cs):
    """Build the interface, resolving the executable and lattice physics settings."""
    interfaces.Interface.__init__(self, r, cs)

    # Set to True by default, but should be disabled when perturbed cross sections are generated.
    self._updateBlockNeutronVelocities = True

    # burnup-change tolerance from the CONF_TOLERATE_BURNUP_CHANGE setting;
    # presumably consulted when deciding whether XS must be regenerated -- confirm in subclass logic
    self._burnupTolerance = self.cs[CONF_TOLERATE_BURNUP_CHANGE]
    self._oldXsIdsAndBurnup = {}
    self.executablePath = self._getExecutablePath()
    self.executableRoot = os.path.dirname(self.executablePath)
    # gamma XS are needed when either gamma transport or gamma XS generation is requested
    self.includeGammaXS = neutronics.gammaTransportIsRequested(cs) or neutronics.gammaXsAreRequested(cs)
    self._latticePhysicsFrequency = LatticePhysicsFrequency[self.cs[CONF_LATTICE_PHYSICS_FREQUENCY]]
def _getExecutablePath(self):
raise NotImplementedError
def interactBOL(self, cycle=0):
"""
Run the lattice physics code if ``genXS`` is set and update burnup groups.
Generate new cross sections based off the case settings and the current state of the reactor if the lattice
physics frequency is BOL.
"""
if self._latticePhysicsFrequency == LatticePhysicsFrequency.BOL:
self.updateXSLibrary(cycle)
def interactBOC(self, cycle=0):
"""
Run the lattice physics code if ``genXS`` is set and update burnup groups.
Generate new cross sections based off the case settings and the current state of the reactor if the lattice
physics frequency is BOC.
Notes
-----
:py:meth:`armi.physics.fuelCycle.fuelHandlerInterface.FuelHandlerInterface.interactBOC` also calls this if the
``runLatticePhysicsBeforeShuffling`` setting is True. This happens because branch searches may need XS.
"""
if self._latticePhysicsFrequency == LatticePhysicsFrequency.BOC:
self.updateXSLibrary(cycle)
def updateXSLibrary(self, cycle, node=None):
"""
Update the current XS library, either by creating or reloading one.
Parameters
----------
cycle : int
The cycle that is being processed. Used to name the library.
node : int, optional
The node that is being processed. Used to name the library.
See Also
--------
computeCrossSections : run lattice physics on the current reactor state no matter weather needed or not.
"""
runLog.important(f"Preparing XS for cycle {cycle}")
representativeBlocks, xsIds = self._getBlocksAndXsIds()
if self._newLibraryShouldBeCreated(cycle, representativeBlocks, xsIds):
if self.cs[CONF_CLEAR_XS]:
self.clearXS()
self.computeCrossSections(blockList=representativeBlocks, xsLibrarySuffix=self._getSuffix(cycle))
self._renameExistingLibrariesForStatepoint(cycle, node)
else:
self.readExistingXSLibraries(cycle, node)
self._checkInputs()
def _renameExistingLibrariesForStatepoint(self, cycle, node):
"""Copy the existing neutron and/or gamma libraries into cycle-dependent files."""
safeCopy(neutronics.ISOTXS, nuclearDataIO.getExpectedISOTXSFileName(cycle, node))
if self.includeGammaXS:
safeCopy(
neutronics.GAMISO,
nuclearDataIO.getExpectedGAMISOFileName(cycle=cycle, node=node, suffix=self._getSuffix(cycle)),
)
safeCopy(
neutronics.PMATRX,
nuclearDataIO.getExpectedPMATRXFileName(cycle=cycle, node=node, suffix=self._getSuffix(cycle)),
)
def _checkInputs(self):
pass
def readExistingXSLibraries(self, cycle, node):
raise NotImplementedError
def makeCycleXSFilesAsBaseFiles(self, cycle, node):
raise NotImplementedError
@staticmethod
def _copyLibraryFilesForCycle(cycle, libFiles):
runLog.extra(f"Current library files: {libFiles}")
for baseName, cycleName in libFiles.items():
if not os.path.exists(cycleName):
if not os.path.exists(baseName):
raise ValueError(
f"Neither {cycleName} nor {baseName} libraries exist. Either the current cycle library for "
f"cycle {cycle} should exist or a base library is required to continue."
)
runLog.info(
f"Existing library {cycleName} for cycle {cycle} does not exist. The active library is {baseName}"
)
else:
runLog.info(f"Using {baseName} as an active library")
if cycleName != baseName:
safeCopy(cycleName, baseName)
def _readGammaBinaries(self, lib, gamisoFileName, pmatrxFileName):
raise NotImplementedError(f"Gamma cross sections not implemented in {self.cs[CONF_XS_KERNEL]}")
def _writeGammaBinaries(self, lib, gamisoFileName, pmatrxFileName):
raise NotImplementedError(f"Gamma cross sections not implemented in {self.cs[CONF_XS_KERNEL]}")
def _getSuffix(self, cycle):
return ""
def interactEveryNode(self, cycle=None, node=None):
"""
Run the lattice physics code if ``genXS`` is set and update burnup groups.
Generate new cross sections based off the case settings and the current state of the reactor if the lattice
physics frequency is at least everyNode.
If this is not a coupled calculation, or if cross sections are only being generated at everyNode, then we want
to regenerate all cross sections here. If it _is_ a coupled calculation, and we are generating cross sections at
coupled iterations, then keep the existing XS lib for now, adding any XS groups as necessary to ensure that all
XS groups are covered.
"""
if self._latticePhysicsFrequency >= LatticePhysicsFrequency.everyNode:
if not self.o.couplingIsActive() or self._latticePhysicsFrequency == LatticePhysicsFrequency.everyNode:
self.r.core.lib = None
self.updateXSLibrary(self.r.p.cycle, self.r.p.timeNode)
def interactCoupled(self, iteration):
"""
Runs on coupled iterations to generate cross sections that are updated with the temperature state.
Notes
-----
This accounts for changes in cross section data due to temperature changes, which are important for cross
section resonance effects and accurately characterizing Doppler constant and coefficient evaluations. For
Standard and Equilibrium run types, this coupling iteration is limited to when the time node is equal to zero.
The validity of this assumption lies in the expectation that these runs have consistent power, flow, and
temperature conditions at all time nodes. For Snapshot run types, this assumption, in general, is invalidated as
the requested reactor state may sufficiently differ from what exists on the database and where tight coupling is
needed to capture temperature effects.
.. warning::
For Standard and Equilibrium run types, if the reactor power, flow, and/or temperature state is expected to
vary over the lifetime of the simulation, as could be the case with
:ref:`detailed cycle histories `, a custom subclass should be considered.
Parameters
----------
iteration : int
This is unused since cross sections are generated on a per-cycle basis.
"""
# always run for snapshots to account for temp effect of different flow or power statepoint
targetFrequency = (
LatticePhysicsFrequency.firstCoupledIteration if iteration == 0 else LatticePhysicsFrequency.all
)
if self._latticePhysicsFrequency >= targetFrequency:
self.r.core.lib = None
self.updateXSLibrary(self.r.p.cycle, self.r.p.timeNode)
def clearXS(self):
raise NotImplementedError
def interactEOC(self, cycle=None):
"""
Interact at the end of a cycle.
Force updating cross sections at the start of the next cycle.
"""
self.r.core.lib = None
def computeCrossSections(self, baseList=None, forceSerial=False, xsLibrarySuffix="", blockList=None):
"""
Prepare a batch of inputs, execute them, and store results on reactor library.
Parameters
----------
baseList : list
a user-specified set of bases that will be run instead of calculating all of them
forceSerial : bool, optional
Will run on 1 processor in sequence instead of on many in parallel
Useful for optimization/batch runs where every processor is on a different branch
xsLibrarySuffix : str, optional
A book-keeping suffix used in Doppler calculations
blockList : list, optional
List of blocks for which to generate cross sections. If None, representative blocks will be determined.
"""
self.r.core.lib = self._generateXsLibrary(baseList, forceSerial, xsLibrarySuffix, blockList)
def _generateXsLibrary(
self,
baseList,
forceSerial,
xsLibrarySuffix,
blockList,
writers=None,
purgeFP=True,
):
raise NotImplementedError
def _executeLatticePhysicsCalculation(self, returnedFromWriters, forceSerial):
raise NotImplementedError
def generateLatticePhysicsInputs(self, baseList, xsLibrarySuffix, blockList, xsWriters=None):
"""
Write input files for the generation of cross section libraries.
Parameters
----------
baseList : list
A list of cross-section id strings (e.g. AA, BC) that will be generated. Default: all in reactor
xsLibrarySuffix : str
A suffix added to the end of the XS file names such as 'voided' for voided XS. Default: Empty
blockList : list
The blocks to write inputs for.
xsWriters : list, optional
The specified writers to write the input files
Returns
-------
returnedFromWriters: list
A list of what this specific writer instance returns for each representative block.
It is the responsibility of the subclassed interface to implement.
In many cases, it is the executing agent.
"""
returnedFromWriters = []
baseList = set(baseList or [])
representativeBlocks = blockList or self.getRepresentativeBlocks()
for repBlock in representativeBlocks:
xsId = repBlock.getMicroSuffix()
if not baseList or xsId in baseList:
# write the step number to the info log
runLog.info(
"Creating input writer(s) for {0} with {1:65s} BU (%FIMA): {2:10.2f}".format(
xsId, repBlock, repBlock.p.percentBu
)
)
writers = self.getWriters(repBlock, xsLibrarySuffix, xsWriters)
for writer in writers:
fromWriter = writer.write()
returnedFromWriters.append(fromWriter)
return returnedFromWriters
def getWriters(self, representativeBlock, xsLibrarySuffix, writers=None):
"""
Return valid lattice physics writer subclass(es).
Parameters
----------
representativeBlock : Block
A representative block object that can be created from a block collection.
xsLibrarySuffix : str
A suffix added to the end of the XS file names such as 'voided' for voided XS. Default: Empty
writers : list of lattice physics writer objects, optional
If the writers are known, they can be provided and constructed.
Returns
-------
writers : list
A list of writers for the provided representative block.
"""
xsID = representativeBlock.getMicroSuffix()
if writers:
# Construct the writers that are provided
writers = [
w(
representativeBlock,
r=self.r,
externalCodeInterface=self,
xsLibrarySuffix=xsLibrarySuffix,
)
for w in writers
]
else:
geom = self.cs[CONF_CROSS_SECTION][xsID].geometry
writers = self._getGeomDependentWriters(representativeBlock, xsID, geom, xsLibrarySuffix)
return writers
def _getGeomDependentWriters(self, representativeBlock, xsID, geom, xsLibrarySuffix):
raise NotImplementedError
def getReader(self):
raise NotImplementedError
def _newLibraryShouldBeCreated(self, cycle, representativeBlockList, xsIDs):
"""
Determines whether the cross section generator should be executed at this cycle.
Criteria include:
#. CONF_GEN_XS setting is turned on
#. We are beyond any requested skipCycles (restart cycles)
#. The blocks have changed burnup beyond the burnup threshold
#. Lattice physics kernel (e.g. MC2) hasn't already been executed for this cycle
(possible if it runs during fuel handling)
"""
executeXSGen = bool(self.cs[CONF_GEN_XS] and cycle >= self.cs["skipCycles"])
idsChangedBurnup = self._checkBurnupThresholds(representativeBlockList)
if executeXSGen and not idsChangedBurnup:
executeXSGen = False
if self.r.core.hasLib():
# justification=r.core.lib property can raise exception or load pre-generated ISOTXS, but the interface
# should have responsibility of loading XS's have already generated for this cycle (maybe during fuel
# management). Should we update due to changes that occurred during fuel management?
missing = set(xsIDs) - set(self.r.core.lib.xsIDs)
if missing and not executeXSGen:
runLog.info(
f"Although a XS library {self.r.core.lib} exists on {self.r.core}, there are missing XS IDs "
f"{missing} required. The XS generation on cycle {cycle} is not enabled, but will be run to "
"generate these missing cross sections."
)
executeXSGen = True
elif missing:
runLog.info(
f"Although a XS library {self.r.core.lib} exists on {self.r.core}, there are missing XS IDs "
f"{missing} required. These will be generated on cycle {cycle}."
)
executeXSGen = True
else:
runLog.info(
f"A XS library {self.r.core.lib} exists on {self.r.core} and contains the required XS data for XS "
f"IDs {self.r.core.lib.xsIDs}. The generation of XS will be skipped."
)
executeXSGen = False
if executeXSGen:
runLog.info(f"Cross sections will be generated on cycle {cycle} for the following XS IDs: {xsIDs}")
else:
runLog.info(
f"Cross sections will not be generated on cycle {cycle}. The setting `{CONF_GEN_XS}` is "
f"{self.cs[CONF_GEN_XS]} and `skipCycles` is {self.cs['skipCycles']}"
)
return executeXSGen
def _checkBurnupThresholds(self, blockList):
"""
Check to see if burnup has changed meaningfully.
If there are, then the xs sets should be regenerated. Otherwise then go ahead and skip xs generation.
This is motivated by the idea that during very long explicit equilibrium runs, it might save time to turn off xs
generation at a certain point.
Parameters
----------
blockList: iterable
List of all blocks to examine
Returns
-------
idsChangedBurnup: bool
flag regarding whether or not burnup changed substantially
"""
idsChangedBurnup = True
if self._burnupTolerance > 0:
idsChangedBurnup = False
for b in blockList:
xsID = b.getMicroSuffix()
if xsID not in self._oldXsIdsAndBurnup:
# Looks like a new ID was found that was not in the old ID's have to regenerate the cross-sections
# this time around
self._oldXsIdsAndBurnup[xsID] = b.p.percentBu
idsChangedBurnup = True
else:
# The id was found. Now it is time to compare the burnups to determine if there has been enough
# meaningful change between the runs
buOld = self._oldXsIdsAndBurnup[xsID]
buNow = b.p.percentBu
if abs(buOld - buNow) > self._burnupTolerance:
idsChangedBurnup = True
# update the oldXs burnup to be the about to be newly generated xsBurnup
self._oldXsIdsAndBurnup[xsID] = buNow
runLog.important(
f"Burnup has changed in xsID {xsID} from {buOld} to {buNow}. Recalculating Cross-sections"
)
return idsChangedBurnup
def _getProcessesPerNode(self):
raise NotImplementedError
def getRepresentativeBlocks(self):
"""Return a list of all blocks in the problem."""
xsGroupManager = self.getInterface("xsGroups")
return xsGroupManager.representativeBlocks.values() # OrderedDict
def _getBlocksAndXsIds(self):
"""Return blocks and their xsIds."""
blocks = self.getRepresentativeBlocks()
return blocks, [b.getMicroSuffix() for b in blocks]
def updatePhysicsCouplingControl(self):
"""
Disable XS update in equilibrium cases after a while.
Notes
-----
This is only relevant for equilibrium cases. We have to turn off XS updates after several cyclics or else the
number densities will never converge.
"""
if self.r.core.p.cyclics >= self.cs["numCyclicsBeforeStoppingXS"]:
self.enabled(False)
runLog.important(f"Disabling {self} because numCyclics={self.r.core.p.cyclics}")
================================================
FILE: armi/physics/neutronics/latticePhysics/latticePhysicsWriter.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Lattice Physics Writer.
Parent class for lattice physics writers.
Seeks to provide access to common methods used by general lattice physics codes.
"""
import collections
import math
import numpy as np
import ordered_set
from armi import interfaces, runLog
from armi.nucDirectory import nuclideBases
from armi.physics import neutronics
from armi.physics.neutronics.const import CONF_CROSS_SECTION
from armi.physics.neutronics.fissionProductModel.fissionProductModelSettings import (
CONF_FP_MODEL,
)
from armi.physics.neutronics.settings import (
CONF_GEN_XS,
CONF_MINIMUM_FISSILE_FRACTION,
CONF_MINIMUM_NUCLIDE_DENSITY,
)
from armi.reactor import components
from armi.reactor.flags import Flags
from armi.settings.fwSettings.globalSettings import CONF_DETAILED_AXIAL_EXPANSION
from armi.utils.customExceptions import warn_when_root
# number of decimal places to round temperatures to in _groupNuclidesByTemperature
_NUM_DIGITS_ROUND_TEMPERATURE = 3

# index of the temperature in the nuclide dictionary values: {nuc: (density, temp, category)}
_NUCLIDE_VALUES_TEMPERATURE_INDEX = 1
@warn_when_root
def nuclideNameFoundMultipleTimes(nuclideName):
    """Build the message reporting that a nuclide name appeared more than once."""
    return f"Nuclide `{nuclideName}' was found multiple times."
class LatticePhysicsWriter(interfaces.InputWriter):
    """
    Parent class for creating the inputs for lattice physics codes.

    Contains methods for extracting all nuclides for a given problem.
    """

    _SPACE = " "
    _SEPARATOR = " | "

    # Nuclide categories (padded with spaces to a common width for aligned output columns)
    UNUSED_CATEGORY = "Unused" + 3 * _SPACE
    FUEL_CATEGORY = "Fuel" + 5 * _SPACE
    STRUCTURE_CATEGORY = "Structure"
    COOLANT_CATEGORY = "Coolant" + 2 * _SPACE
    FISSION_PRODUCT_CATEGORY = "Fission Product"

    # Nuclide attributes
    DEPLETABLE = "Depletable" + 4 * _SPACE
    UNDEPLETABLE = "Non-Depletable"
    REPRESENTED = "Represented" + 2 * _SPACE
    INF_DILUTE = "Inf Dilute"

    def __init__(
        self,
        representativeBlock,
        r=None,
        externalCodeInterface=None,
        xsLibrarySuffix="",
        generateExclusiveGammaXS=False,
    ):
        interfaces.InputWriter.__init__(self, r=r, externalCodeInterface=externalCodeInterface)
        self.cs = self.eci.cs
        self.block = representativeBlock
        if not isinstance(xsLibrarySuffix, str):
            raise TypeError("xsLibrarySuffix should be a string; got {}".format(type(xsLibrarySuffix)))
        self.xsLibrarySuffix = xsLibrarySuffix
        # When True this writer produces a gamma-only XS case; only valid when gamma XS were requested.
        self.generateExclusiveGammaXS = generateExclusiveGammaXS
        if self.generateExclusiveGammaXS and not neutronics.gammaXsAreRequested(self.cs):
            raise ValueError("Invalid `{}` setting to generate gamma XS for {}.".format(CONF_GEN_XS, self.block))
        self.xsId = representativeBlock.getMicroSuffix()
        # Per-XS-ID settings pulled from the cross section settings block.
        self.xsSettings = self.cs[CONF_CROSS_SECTION][self.xsId]
        self.mergeIntoClad = self.xsSettings.mergeIntoClad
        self.mergeIntoFuel = self.xsSettings.mergeIntoFuel
        self.driverXsID = self.xsSettings.driverID
        self.numExternalRings = self.xsSettings.numExternalRings
        self.criticalBucklingSearchActive = self.xsSettings.criticalBuckling
        self.ductHeterogeneous = self.xsSettings.ductHeterogeneous
        self.traceIsotopeThreshold = self.xsSettings.traceIsotopeThreshold
        self.executeExclusive = self.xsSettings.xsExecuteExclusive
        self.priority = self.xsSettings.xsPriority
        # 999 serves as an effectively-unbounded default when no max atom number is configured.
        self.maxAtomNumberToModelInfDilute = (
            self.xsSettings.xsMaxAtomNumber if self.xsSettings.xsMaxAtomNumber is not None else 999
        )
        # would prefer this in 1D but its used in 0D in _writeSourceComposition
        self.minDriverDensity = self.xsSettings.minDriverDensity
        blockNeedsFPs = representativeBlock.getLumpedFissionProductCollection() is not None
        # Fission product modeling flags derived from the fission product model setting.
        self.modelFissionProducts = blockNeedsFPs and self.cs[CONF_FP_MODEL] != "noFissionProducts"
        self.explicitFissionProducts = self.cs[CONF_FP_MODEL] == "explicitFissionProducts"
        self.diluteFissionProducts = blockNeedsFPs and self.cs[CONF_FP_MODEL] == "infinitelyDilute"
        self.minimumNuclideDensity = self.cs[CONF_MINIMUM_NUCLIDE_DENSITY]
        self.infinitelyDiluteDensity = self.minimumNuclideDensity
        # Nuclides that were assigned the fallback fuel temperature because no XS-group temperature was found.
        self._unusedNuclides = set()
        self._allNuclideObjects = None

    def __repr__(self):
        suffix = " with Suffix:`{}`".format(self.xsLibrarySuffix) if self.xsLibrarySuffix else ""
        if self.generateExclusiveGammaXS:
            xsFlag = neutronics.GAMMA
        elif neutronics.gammaXsAreRequested(self.cs) and self._isGammaXSGenerationEnabled:
            xsFlag = neutronics.NEUTRONGAMMA
        else:
            xsFlag = neutronics.NEUTRON
        return "<{} - XS ID {} ({} XS){}>".format(self.__class__.__name__, self.xsId, xsFlag, suffix)

    def _writeTitle(self, fileObj):
        """Write a title comment identifying the case title and representative block."""
        self._writeComment(
            fileObj,
            "ARMI generated case for caseTitle {}, block {}\n".format(self.cs.caseTitle, self.block),
        )

    def write(self):
        """Write the lattice physics input file(s); subclasses must implement."""
        raise NotImplementedError

    @property
    def _isSourceDriven(self):
        # True when a driver XS ID is configured for this block.
        return bool(self.driverXsID)

    @property
    def _isGammaXSGenerationEnabled(self):
        """Gamma transport is not available generically across all lattice physic solvers."""
        return False

    def _getAllNuclidesByTemperatureInC(self, component=None):
        """
        Returns a dictionary where all nuclides in the block are grouped by temperature.

        Some lattice physics codes, like ``SERPENT`` create mixtures of nuclides
        at similar temperatures to construct a problem. The dictionary returned is of the form ::

            {temp1: {n1: (d1, temp1, category1),
                     n2: (d2, temp1, category2)}
             temp2: {n3: (d3, temp2, category3),
                     n4: (d4, temp2, category4)}
             ...
            }
        """
        nuclides = self._getAllNuclideObjects(component)
        return _groupNuclidesByTemperature(nuclides)

    def _getAllNuclideObjects(self, component=None):
        """
        Returns a single dictionary of all nuclides in the component.

        Calls :py:meth:`_getAllNuclidesByCategory`, which returns two dictionaries:
        one with just fission products and another with the remaining nuclides.
        This method just updates ``self._allNuclideObjects`` to contain the fission
        products as well.

        The dictionaries are structured with :py:class:`armi.nucDirectory.nuclideBases.NuclideBase`
        objects, with ``(density, temperatureInC, and category)`` tuples for that nuclide object.
        """
        nucs, fissProds = self._getAllNuclidesByCategory(component)
        nucs.update(fissProds)
        return nucs

    def _getAllNuclidesByCategory(self, component=None):
        """
        Determine number densities and temperatures for each nuclide.

        Temperatures are a bit complex due to some special cases:

        Nuclides that build up like Pu239 have zero density at BOL but need cross sections.
        Nuclides like Mo99 are sometimes in structure and sometimes in lumped fission products. What temp to use?
        Nuclides like B-10 are in control blocks but these aren't candidates for XS creation. What temperature?

        To deal with this, we compute (flux-weighted) average temperatures of each nuclide based on its current
        component temperatures.
        """
        dfpDensities = self._getDetailedFPDensities()
        (
            coolantNuclides,
            fuelNuclides,
            structureNuclides,
        ) = self.r.core.getNuclideCategories()
        nucDensities = {}
        # Operate either on a specific component or on the whole representative block.
        subjectObject = component or self.block
        depletableNuclides = nuclideBases.getDepletableNuclides(self.r.blueprints.activeNuclides, self.block)
        objNuclides = subjectObject.getNuclides()
        # If the explicit fission product model is enabled then the number densities
        # on the components will already contain all the nuclides required to be
        # modeled by the lattice physics writer. Otherwise, assume that `allNuclidesInProblem`
        # should be modeled.
        if self.explicitFissionProducts:
            # If detailed axial expansion is active, mapping between blocks occurs on uniform mesh
            # and this can cause blocks to have isotopes that they don't have cross sections for.
            # Fix this by adding all isotopes so they are present in lattice physics.
            if self.cs[CONF_DETAILED_AXIAL_EXPANSION]:
                nuclides = self.r.blueprints.allNuclidesInProblem
            else:
                nuclides = ordered_set.OrderedSet(sorted(objNuclides))
        else:
            nuclides = self.r.blueprints.allNuclidesInProblem
        nuclides = nuclides.union(self.r.blueprints.nucsToForceInXsGen)
        numDensities = subjectObject.getNuclideNumberDensities(nuclides)
        for nucName, dens in zip(nuclides, numDensities):
            nuc = self.r.nuclideBases.byName[nucName]
            if isinstance(nuc, nuclideBases.LumpNuclideBase):
                continue  # skip LFPs here but add individual FPs below.
            if isinstance(subjectObject, components.Component):
                if self.ductHeterogeneous and "Homogenized" in subjectObject.name:
                    # Nuclide temperatures representing heterogeneous model component temperatures
                    nucTemperatureInC = self._getAvgNuclideTemperatureInC(nucName)
                else:
                    # Heterogeneous number densities and temperatures
                    nucTemperatureInC = subjectObject.temperatureInC
            else:
                # Homogeneous number densities and temperatures
                nucTemperatureInC = self._getAvgNuclideTemperatureInC(nucName)
            # Enforce the configured floor so trace nuclides stay in the problem.
            density = max(dens, self.minimumNuclideDensity)
            if nuc in nucDensities:
                # Duplicate nuclide: warn and accumulate density, keeping the first temperature/category.
                nuclideNameFoundMultipleTimes(nucName)
                dens, nucTemperatureInC, nucCategory = nucDensities[nuc]
                density = dens + density
                nucDensities[nuc] = (density, nucTemperatureInC, nucCategory)
                continue
            nucCategory = ""
            # Remove nuclides from detailed fission product dictionary if they are a part of the core materials
            # (e.g., Zr in the U10Zr which is at fuel temperature and Mo in HT9 which is at structure temp)
            if nuc in dfpDensities:
                density += dfpDensities[nuc]
                nucCategory += self.FISSION_PRODUCT_CATEGORY + self._SEPARATOR
                del dfpDensities[nuc]
            elif nucName in self._unusedNuclides:
                nucCategory += self.UNUSED_CATEGORY + self._SEPARATOR
            elif nucName in fuelNuclides:
                nucCategory += self.FUEL_CATEGORY + self._SEPARATOR
            elif nucName in coolantNuclides:
                nucCategory += self.COOLANT_CATEGORY + self._SEPARATOR
            elif nucName in structureNuclides:
                nucCategory += self.STRUCTURE_CATEGORY + self._SEPARATOR
            # Add additional `attributes` to the nuclide categories
            if nucName in objNuclides:
                nucCategory += self.REPRESENTED + self._SEPARATOR
            else:
                nucCategory += self.INF_DILUTE + self._SEPARATOR
            if nucName in depletableNuclides:
                nucCategory += self.DEPLETABLE
            else:
                nucCategory += self.UNDEPLETABLE
            nucDensities[nuc] = (density, nucTemperatureInC, nucCategory)
        if not self._isSourceDriven:
            nucDensities = self._adjustPuFissileDensity(nucDensities)
        fissionProductDensities = self._getDetailedFissionProducts(dfpDensities)
        if self._unusedNuclides:
            runLog.debug(
                "The following unused nuclides (defined in the loading file) are being added to {} at {} C: {}".format(
                    subjectObject,
                    self._getFuelTemperature(),
                    list(self._unusedNuclides),
                )
            )
        # sortFunc orders the nuclide densities and fission product densities by nuclide name.
        sortFunc = lambda nb_data_tuple: nb_data_tuple[0].name
        nucDensities = collections.OrderedDict(sorted(nucDensities.items(), key=sortFunc))
        fissionProductDensities = collections.OrderedDict(sorted(fissionProductDensities.items(), key=sortFunc))
        return nucDensities, fissionProductDensities

    def _getAvgNuclideTemperatureInC(self, nucName):
        """Return the block fuel temperature and the nuclides average temperature in C."""
        # Get the temperature of the nuclide in the block
        xsgm = self.getInterface("xsGroups")
        nucTemperatureInC = xsgm.getNucTemperature(self.xsId, nucName)
        if not nucTemperatureInC or math.isnan(nucTemperatureInC):
            # Assign the fuel temperature to the nuclide if it is None or NaN.
            nucTemperatureInC = self._getFuelTemperature()  # NBD b/c the nuclide is not in problem.
            self._unusedNuclides.add(nucName)
        return nucTemperatureInC

    def _getFuelTemperature(self):
        """Return the average fuel component temperature, or the block average if no fuel exists."""
        fuelComponents = self.block.getComponents(Flags.FUEL)
        if not fuelComponents:
            fuelTemperatureInC = self.block.getAverageTempInC()
        else:
            fuelTemperatureInC = np.mean([fc.temperatureInC for fc in fuelComponents])
        if not fuelTemperatureInC or math.isnan(fuelTemperatureInC):
            raise ValueError(
                "The fuel temperature of block {0} is {1} and is not valid".format(self.block, fuelTemperatureInC)
            )
        return fuelTemperatureInC

    def _getDetailedFissionProducts(self, dfpDensities):
        """Return a dictionary of fission products not provided in the reactor blueprint nuclides.

        Notes
        -----
        Assumes that all fission products are at the same temperature of the lumped fission product of U238 within
        the block.
        """
        if self.cs[CONF_FP_MODEL] != "noFissionProducts":
            fissProductTemperatureInC = self._getAvgNuclideTemperatureInC("LFP38")
            return {
                fp: (dens, fissProductTemperatureInC, self.FISSION_PRODUCT_CATEGORY)
                for fp, dens in dfpDensities.items()
            }
        return {}

    def _getDetailedFPDensities(self):
        """
        Expands the nuclides in the LFP based on their yields.

        Returns
        -------
        dfpDensities : dict
            Detailed Fission Product Densities. keys are FP names, values are block number densities in atoms/bn-cm.

        Raises
        ------
        IndexError
            The lumped fission products were not initialized on the blocks.
        """
        dfpDensities = {}
        if not self.modelFissionProducts:
            return dfpDensities
        lfpCollection = self.block.getLumpedFissionProductCollection()
        if self.diluteFissionProducts:
            if lfpCollection is None:
                raise ValueError("Lumped fission products are not initialized. Did interactAll BOL run?")
            # Every fission product is present at the minimum (infinitely dilute) density.
            dfps = lfpCollection.getAllFissionProductNuclideBases()
            for individualFpBase in dfps:
                dfpDensities[individualFpBase] = self.minimumNuclideDensity
        else:
            # expand densities and sum
            dfpDensitiesByName = lfpCollection.getNumberDensities(self.block)
            # now, go through the list and make sure that there aren't any values less than the
            # minimumNuclideDensity; we need to keep trace amounts of nuclides in the problem
            for fpName, fpDens in dfpDensitiesByName.items():
                fp = self.r.nuclideBases.byName[fpName]
                dfpDensities[fp] = max(fpDens, self.minimumNuclideDensity)
        return dfpDensities

    def _writeNuclide(self, fileObj, nuclide, density, nucTemperatureInC, category, xsIdSpecified=None):
        """Write a single nuclide entry to the input file; subclasses must implement."""
        raise NotImplementedError

    @property
    def _isCriticalBucklingSearchActive(self):
        # Mirrors the per-XS-ID criticalBuckling setting captured in __init__.
        return self.criticalBucklingSearchActive

    def _writeComment(self, fileObj, msg):
        """Write a comment line in the target code's syntax; subclasses must implement."""
        raise NotImplementedError()

    def _writeGroupStructure(self, fileObj):
        """Write the energy group structure; subclasses must implement."""
        raise NotImplementedError()

    def _adjustPuFissileDensity(self, nucDensities):
        """
        Checks if the minimum fissile composition is lower than the allowed minimum fissile fraction and adds
        additional Pu-239.

        Notes
        -----
        We're going to increase the Pu-239 density to make the ratio of fissile mass to heavy metal mass equal to the
        target ``CONF_MINIMUM_FISSILE_FRACTION``::

            minFrac = (fiss - old + new) / (hm - old + new)
            minFrac * (hm - old + new) = fiss - old + new
            minFrac * (hm - old) + old - fiss = new * (1 - minFrac)
            new = (minFrac * (hm - old) + old - fiss) / (1 - minFrac)

        where::

            minFrac = ``CONF_MINIMUM_FISSILE_FRACTION`` setting
            fiss = fissile mass of block
            hm = heavy metal mass of block
            old = number density of Pu-239 before adjustment
            new = number density of Pu-239 after adjustment
        """
        minFrac = self.cs[CONF_MINIMUM_FISSILE_FRACTION]
        fiss = sum(dens[0] for nuc, dens in nucDensities.items() if nuc.isFissile())
        hm = sum(dens[0] for nuc, dens in nucDensities.items() if nuc.isHeavyMetal())
        if fiss / hm < minFrac:
            pu239 = self.r.nuclideBases.byName["PU239"]
            old, temp, msg = nucDensities[pu239]
            new = (minFrac * (hm - old) + old - fiss) / (1 - minFrac)
            nucDensities[pu239] = (new, temp, msg)
            runLog.warning(
                f"Adjusting Pu-239 number densities in {self.block} from {old} to {new} "
                f"to meet minimum fissile fraction of {minFrac}."
            )
        return nucDensities

    def _getDriverBlock(self):
        """Return the block that is driving the representative block for this writer."""
        xsgm = self.getInterface("xsGroups")
        driverBlock = xsgm.representativeBlocks.get(self.driverXsID, None)
        # A configured driver XS ID without a matching representative block is a hard error.
        if self.driverXsID != "" and driverBlock is None:
            msg = f"No representativeBlock found for driver XS ID {self.driverXsID} to use in {self}!"
            runLog.error(msg)
            raise ValueError(msg)
        return driverBlock
def _groupNuclidesByTemperature(nuclides):
    """
    Creates a dictionary of temperatures and nuclides at those temperatures.

    ``nuclides`` maps ``NuclideBase`` objects to ``(density, temperature, category)``
    value tuples; the result maps each rounded temperature to the sub-dictionary of
    entries whose temperature rounds to that value.

    Notes
    -----
    The temperature will be rounded to a number of digits according to ``_NUM_DIGITS_ROUND_TEMPERATURE``,
    because the average temperature for each nuclide can vary down to numerical precision,
    i.e. 873.15 and 873.15000000001
    """
    groupedByTemp = {}
    for nucBase, nucValues in nuclides.items():
        roundedTemp = round(nucValues[_NUCLIDE_VALUES_TEMPERATURE_INDEX], _NUM_DIGITS_ROUND_TEMPERATURE)
        # setdefault creates the per-temperature bucket on first sight of each temperature.
        groupedByTemp.setdefault(roundedTemp, {})[nucBase] = nucValues
    return groupedByTemp
================================================
FILE: armi/physics/neutronics/latticePhysics/tests/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
================================================
FILE: armi/physics/neutronics/latticePhysics/tests/test_latticeInterface.py
================================================
# Copyright 2021 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the Lattice Interface."""
import unittest
from collections import OrderedDict
from armi import settings
from armi.nuclearDataIO.cccc import isotxs
from armi.operators.operator import Operator
from armi.physics.neutronics import LatticePhysicsFrequency
from armi.physics.neutronics.crossSectionGroupManager import CrossSectionGroupManager
from armi.physics.neutronics.latticePhysics.latticePhysicsInterface import (
LatticePhysicsInterface,
)
from armi.physics.neutronics.settings import CONF_GEN_XS, CONF_GLOBAL_FLUX_ACTIVE
from armi.reactor.assemblies import (
HexAssembly,
grids,
)
from armi.reactor.reactors import Core, Reactor
from armi.reactor.tests.test_blocks import buildSimpleFuelBlock
from armi.tests import ISOAA_PATH, mockRunLogs
# As an interface, LatticePhysicsInterface must be subclassed to be used
class LatticeInterfaceTester(LatticePhysicsInterface):
    """Concrete stand-in for the abstract ``LatticePhysicsInterface``.

    Implements just enough of the abstract surface to be instantiated in tests.
    """

    def __init__(self, r, cs):
        # The name must be assigned before the base-class constructor runs.
        self.name = "LatticeInterfaceTester"
        super().__init__(r, cs)

    def _getExecutablePath(self):
        """Return a dummy path standing in for a real lattice-physics executable."""
        return "/tmp/fake_path"

    def readExistingXSLibraries(self, cycle, node):
        """No-op: this tester never reads XS libraries from disk."""
        pass
class LatticeInterfaceTesterLibFalse(LatticeInterfaceTester):
    """Subclass setting _newLibraryShouldBeCreated = False."""

    def _newLibraryShouldBeCreated(self, cycle, representativeBlockList, xsIDs):
        """Always report that no new library is needed.

        Sets ``testVerification`` so tests can confirm this hook was reached.
        """
        self.testVerification = True
        return False
class TestLatticePhysicsInterfaceBase(unittest.TestCase):
    """Shared fixture: a minimal operator/reactor/core with one hex assembly.

    Subclasses add their own lattice-physics interface in ``setUpClass``.
    Note the fixture is class-level, so state mutated by one test is visible
    to the others in the same class.
    """

    @classmethod
    def setUpClass(cls):
        # create empty reactor core
        cls.o = Operator(settings.Settings())
        cls.o.r = Reactor("testReactor", None)
        cls.o.r.core = Core("testCore")
        # add an assembly with a single block
        cls.assembly = HexAssembly("testAssembly")
        cls.assembly.spatialGrid = grids.AxialGrid.fromNCells(1)
        # back-reference so the grid knows which object it belongs to
        cls.assembly.spatialGrid.armiObject = cls.assembly
        cls.assembly.add(buildSimpleFuelBlock())
        # init and add interfaces
        cls.xsGroupInterface = CrossSectionGroupManager(cls.o.r, cls.o.cs)
        cls.o.addInterface(cls.xsGroupInterface)
class TestLatticePhysicsInterface(TestLatticePhysicsInterfaceBase):
    """Test Lattice Physics Interface."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.latticeInterface = LatticeInterfaceTesterLibFalse(cls.o.r, cls.o.cs)
        cls.o.addInterface(cls.latticeInterface)

    def setUp(self):
        # Reset per-test state on the class-level fixture.
        self.o.r.core.lib = "Nonsense"
        self.latticeInterface.testVerification = False

    def test_includeGammaXS(self):
        """Test that we can correctly flip the switch to calculate gamma XS."""
        # The default operator here turns off Gamma XS generation
        self.assertFalse(self.latticeInterface.includeGammaXS)
        self.assertEqual(self.o.cs[CONF_GLOBAL_FLUX_ACTIVE], "Neutron")
        # but we can create an operator that turns on Gamma XS generation
        cs = settings.Settings().modified(newSettings={CONF_GLOBAL_FLUX_ACTIVE: "Neutron and Gamma"})
        newOperator = Operator(cs)
        newLatticeInterface = LatticeInterfaceTesterLibFalse(newOperator.r, cs)
        self.assertTrue(newLatticeInterface.includeGammaXS)
        self.assertEqual(cs[CONF_GLOBAL_FLUX_ACTIVE], "Neutron and Gamma")

    def test_latticePhysicsInterface(self):
        """Super basic test of the LatticePhysicsInterface."""
        self.assertEqual(self.latticeInterface._updateBlockNeutronVelocities, True)
        self.assertEqual(self.latticeInterface.executablePath, "/tmp/fake_path")
        self.assertEqual(self.latticeInterface.executableRoot, "/tmp")
        self.latticeInterface.updateXSLibrary(0)
        self.assertEqual(len(self.latticeInterface._oldXsIdsAndBurnup), 0)

    def test_interactBOL(self):
        """
        Test interactBOL() with different update frequencies.

        Notes
        -----
        Unlike other interactions, self.o.r.core.lib is not set to None at BOC, so this test uses
        self.testVerification instead.
        """
        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.never
        self.latticeInterface.interactBOL()
        self.assertFalse(self.latticeInterface.testVerification)
        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.everyNode
        self.latticeInterface.interactBOL()
        self.assertFalse(self.latticeInterface.testVerification)
        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.BOL
        self.latticeInterface.interactBOL()
        self.assertTrue(self.latticeInterface.testVerification)

    def test_interactBOC(self):
        """
        Test interactBOC() with different update frequencies.

        Notes
        -----
        Unlike other interactions, self.o.r.core.lib is not set to None at BOC, so this test uses
        self.testVerification instead.
        """
        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.BOL
        self.latticeInterface.interactBOC()
        self.assertFalse(self.latticeInterface.testVerification)
        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.everyNode
        self.latticeInterface.interactBOC()
        self.assertFalse(self.latticeInterface.testVerification)
        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.BOC
        self.latticeInterface.interactBOC()
        self.assertTrue(self.latticeInterface.testVerification)

    def test_interactEveryNode(self):
        """Test interactEveryNode() with different update frequencies."""
        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.BOC
        self.latticeInterface.interactEveryNode()
        self.assertEqual(self.o.r.core.lib, "Nonsense")
        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.everyNode
        self.latticeInterface.interactEveryNode()
        self.assertIsNone(self.o.r.core.lib)

    def test_interactEveryNodeWhenCoupled(self):
        """
        Test that the XS lib is not cleared when coupled iterations are turned on
        and XS will be generated during the coupled iterations.
        """
        # The operator is class-level shared state: restore the real
        # couplingIsActive after this test so the monkey-patch cannot leak.
        self.addCleanup(setattr, self.o, "couplingIsActive", self.o.couplingIsActive)
        self.o.couplingIsActive = lambda: True
        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.firstCoupledIteration
        self.latticeInterface.interactEveryNode()
        self.assertEqual(self.o.r.core.lib, "Nonsense")
        self.o.couplingIsActive = lambda: False
        self.latticeInterface.interactEveryNode()
        self.assertIsNone(self.o.r.core.lib)

    def test_interactEveryNodeWhenCoupledButNot(self):
        """
        Test that the XS lib is cleared when coupled iterations are turned on
        but the lattice physics frequency is not high enough.
        """
        # Previously the lambda patch below was never undone, polluting the
        # shared operator for every test that ran after this one.
        self.addCleanup(setattr, self.o, "couplingIsActive", self.o.couplingIsActive)
        self.o.couplingIsActive = lambda: True
        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.firstCoupledIteration
        self.latticeInterface.interactEveryNode()
        self.assertEqual(self.o.r.core.lib, "Nonsense")
        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.everyNode
        self.latticeInterface.interactEveryNode()
        self.assertIsNone(self.o.r.core.lib)

    def test_interactEveryNodeFirstCoupled(self):
        """Test interactEveryNode() with LatticePhysicsFrequency.firstCoupledIteration."""
        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.firstCoupledIteration
        self.latticeInterface.interactEveryNode()
        self.assertIsNone(self.o.r.core.lib)

    def test_interactEveryNodeAll(self):
        """Test interactEveryNode() with LatticePhysicsFrequency.all."""
        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.all
        self.latticeInterface.interactEveryNode()
        self.assertIsNone(self.o.r.core.lib)

    def test_interactFirstCoupledIteration(self):
        """Test interactCoupled() with different update frequencies on first iteration."""
        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.everyNode
        self.latticeInterface.interactCoupled(iteration=0)
        self.assertEqual(self.o.r.core.lib, "Nonsense")
        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.firstCoupledIteration
        self.latticeInterface.interactCoupled(iteration=0)
        self.assertIsNone(self.o.r.core.lib)

    def test_interactAll(self):
        """Test interactCoupled() with different update frequencies on non-first iteration."""
        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.firstCoupledIteration
        self.latticeInterface.interactCoupled(iteration=1)
        self.assertEqual(self.o.r.core.lib, "Nonsense")
        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.all
        self.latticeInterface.interactCoupled(iteration=1)
        self.assertIsNone(self.o.r.core.lib)

    def test_getSuffix(self):
        """The base interface returns an empty XS-file suffix."""
        self.assertEqual(self.latticeInterface._getSuffix(7), "")
class TestLatticePhysicsLibraryCreation(TestLatticePhysicsInterfaceBase):
    """Test variations of _newLibraryShouldBeCreated."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.latticeInterface = LatticeInterfaceTester(cls.o.r, cls.o.cs)
        cls.o.addInterface(cls.latticeInterface)
        cls.xsGroupInterface.representativeBlocks = OrderedDict({"AA": cls.assembly[0]})
        cls.b, cls.xsIDs = cls.latticeInterface._getBlocksAndXsIds()

    def setUp(self):
        """Reset representativeBlocks and CONF_GEN_XS."""
        self.xsGroupInterface.representativeBlocks = OrderedDict({"AA": self.assembly[0]})
        self.assembly[0].p.xsType = "A"
        self.o.cs[CONF_GEN_XS] = ""
        self.o.r.core.lib = isotxs.readBinary(ISOAA_PATH)

    def test_libCreation_NoGenXS(self):
        """No ISOTXS and xs gen not requested."""
        self.o.r.core.lib = None
        with mockRunLogs.BufferLog() as mock:
            xsGen = self.latticeInterface._newLibraryShouldBeCreated(1, self.b, self.xsIDs)
            self.assertIn("Cross sections will not be generated on cycle 1.", mock.getStdout())
            self.assertFalse(xsGen)

    def test_libCreation_GenXS(self):
        """No ISOTXS and xs gen requested."""
        self.o.cs[CONF_GEN_XS] = "Neutron"
        self.o.r.core.lib = None
        with mockRunLogs.BufferLog() as mock:
            xsGen = self.latticeInterface._newLibraryShouldBeCreated(1, self.b, self.xsIDs)
            self.assertIn(
                "Cross sections will be generated on cycle 1 for the following XS IDs: ['AA']",
                mock.getStdout(),
            )
            self.assertTrue(xsGen)

    def test_libCreation_NoGenXS_2(self):
        """ISOTXS present and has all of the necessary information."""
        with mockRunLogs.BufferLog() as mock:
            xsGen = self.latticeInterface._newLibraryShouldBeCreated(1, self.b, self.xsIDs)
            self.assertIn(
                "The generation of XS will be skipped.",
                mock.getStdout(),
            )
            self.assertFalse(xsGen)

    def test_libCreation_GenXS_2(self):
        """ISOTXS present and does not have all of the necessary information."""
        # _modifyXSType() already swaps representativeBlocks over to the "BB"
        # key, so no separate assignment is needed here (it was redundant).
        b, xsIDs = self._modifyXSType()
        with mockRunLogs.BufferLog() as mock:
            xsGen = self.latticeInterface._newLibraryShouldBeCreated(1, b, xsIDs)
            self.assertIn(
                "is not enabled, but will be run to generate these missing cross sections.",
                mock.getStdout(),
            )
            self.assertTrue(xsGen)

    def test_libCreation_GenXS_3(self):
        """ISOTXS present and does not have all of the necessary information."""
        self.o.cs[CONF_GEN_XS] = "Neutron"
        b, xsIDs = self._modifyXSType()
        with mockRunLogs.BufferLog() as mock:
            xsGen = self.latticeInterface._newLibraryShouldBeCreated(1, b, xsIDs)
            self.assertIn("These will be generated on cycle ", mock.getStdout())
            self.assertTrue(xsGen)

    def _modifyXSType(self):
        # Switch the representative block and its XS type from "AA" to "BB" so
        # the existing library no longer covers the requested XS IDs.
        self.xsGroupInterface.representativeBlocks = OrderedDict({"BB": self.assembly[0]})
        self.assembly[0].p.xsType = "B"
        return self.latticeInterface._getBlocksAndXsIds()
================================================
FILE: armi/physics/neutronics/latticePhysics/tests/test_latticeWriter.py
================================================
# Copyright 2021 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the Lattice Physics Writer."""
import unittest
from collections import defaultdict
from armi.physics.neutronics.const import CONF_CROSS_SECTION
from armi.physics.neutronics.fissionProductModel.fissionProductModelSettings import (
CONF_FP_MODEL,
)
from armi.physics.neutronics.latticePhysics.latticePhysicsInterface import (
setBlockNeutronVelocities,
)
from armi.physics.neutronics.latticePhysics.latticePhysicsWriter import (
LatticePhysicsWriter,
)
from armi.physics.neutronics.settings import (
CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION,
CONF_XS_BLOCK_REPRESENTATION,
)
from armi.testing import loadTestReactor
from armi.tests import TEST_ROOT
class FakeLatticePhysicsWriter(LatticePhysicsWriter):
    """LatticePhysicsWriter is abstract, so it must be subclassed to be tested."""

    def __init__(self, block, r, eci):
        # Buffer capturing text passed to _writeComment, for test assertions.
        self.testOut = ""
        # Modern zero-argument super() (the old Py2-style explicit form was redundant).
        super().__init__(block, r, eci, "", False)

    def write(self):
        """No-op: nothing is written to disk during tests."""
        pass

    def _writeNuclide(self, fileObj, nuclide, density, nucTemperatureInC, category, xsIdSpecified=None):
        pass

    def _writeComment(self, fileObj, msg):
        # Capture comments in memory instead of writing them to a file.
        self.testOut += "\n" + str(msg)

    def _writeGroupStructure(self, fileObj):
        pass
class TestLatticePhysicsWriter(unittest.TestCase):
    """Test Lattice Physics Writer."""

    def setUp(self):
        # A full test reactor supplies realistic blocks/nuclides for the writer.
        self.o, self.r = loadTestReactor(TEST_ROOT)
        self.cs = self.o.cs
        self.cs[CONF_CROSS_SECTION].setDefaults(
            self.cs[CONF_XS_BLOCK_REPRESENTATION],
            self.cs[CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION],
        )
        self.block = self.r.core.getFirstBlock()
        self.w = FakeLatticePhysicsWriter(self.block, self.r, self.o)

    def test_setBlockNeutronVelocities(self):
        """Velocities assigned per XS ID propagate onto the core's blocks."""
        d = defaultdict(float)
        d["AA"] = 10.0
        setBlockNeutronVelocities(self.r, d)
        tot = sum([b.p.mgNeutronVelocity for b in self.r.core.iterBlocks()])
        self.assertGreater(tot, 3000.0)

    def test_latticePhysicsWriter(self):
        """Super basic test of the LatticePhysicsWriter."""
        self.assertEqual(self.w.xsId, "AA")
        self.assertFalse(self.w.modelFissionProducts)
        self.assertEqual(self.w.driverXsID, "")
        self.assertAlmostEqual(self.w.minimumNuclideDensity, 1e-15, delta=1e-16)
        self.assertEqual(self.w.testOut, "")
        self.assertEqual(str(self.w), "")
        self.w._writeTitle(None)
        self.assertIn("ARMI generated case for caseTitle armiRun", self.w.testOut)
        nucs = self.w._getAllNuclidesByTemperatureInC(None)
        self.assertEqual(len(nucs.keys()), 1)
        # all nuclides are binned at the single block temperature (~450 C)
        self.assertAlmostEqual(list(nucs.keys())[0], 450.0, delta=0.1)

    def test_writeTitle(self):
        """The title comment is routed through _writeComment into testOut."""
        self.w._writeTitle("test_writeTitle")
        self.assertIn("ARMI generated case for caseTitle", self.w.testOut)

    def test_isSourceDriven(self):
        # a truthy driver XS ID marks the case as source-driven
        self.assertFalse(self.w._isSourceDriven)
        self.w.driverXsID = True
        self.assertTrue(self.w._isSourceDriven)

    def test_isGammaXSGenerationEnabled(self):
        # the default test settings do not enable gamma XS generation
        self.assertFalse(self.w._isGammaXSGenerationEnabled)

    def test_getAllNuclidesByTemperatureInCNone(self):
        """With no component given, all nuclides land in one temperature bin."""
        nucsByTemp = self.w._getAllNuclidesByTemperatureInC(None)
        keys0 = list(nucsByTemp.keys())
        self.assertEqual(len(keys0), 1)
        self.assertEqual(keys0[0], 450.0)
        keys1 = nucsByTemp[keys0[0]]
        self.assertGreater(len(keys1), 1)
        names = [k.name for k in keys1]
        self.assertIn("AM241", names)
        self.assertIn("U238", names)

    def test_getAllNuclidesByTemperatureInC(self):
        """Same binning behavior when a specific component is passed."""
        self.w.explicitFissionProducts = False
        c = self.r.core[0][0]
        nucsByTemp = self.w._getAllNuclidesByTemperatureInC(c)
        keys0 = list(nucsByTemp.keys())
        self.assertEqual(len(keys0), 1)
        self.assertEqual(keys0[0], 450.0)
        keys1 = nucsByTemp[keys0[0]]
        self.assertGreater(len(keys1), 1)
        names = [k.name for k in keys1]
        self.assertIn("AM241", names)
        self.assertIn("U238", names)

    def test_getAllNuclidesByTempInCExplicitFisProd(self):
        """Binning also holds with explicit fission products enabled."""
        self.w.explicitFissionProducts = True
        c = self.r.core[0][0]
        nucsByTemp = self.w._getAllNuclidesByTemperatureInC(c)
        keys0 = list(nucsByTemp.keys())
        self.assertEqual(len(keys0), 1)
        self.assertEqual(keys0[0], 450.0)
        keys1 = nucsByTemp[keys0[0]]
        self.assertGreater(len(keys1), 1)
        names = [k.name for k in keys1]
        self.assertIn("AM241", names)
        self.assertIn("U238", names)

    def test_getAvgNuclideTemperatureInC(self):
        """Average nuclide temperatures match the uniform block temperature."""
        temp = self.w._getAvgNuclideTemperatureInC("U238")
        self.assertAlmostEqual(temp, 450, delta=0.001)
        temp = self.w._getAvgNuclideTemperatureInC("U235")
        self.assertAlmostEqual(temp, 450, delta=0.001)

    def test_getFuelTemperature(self):
        temp = self.w._getFuelTemperature()
        self.assertAlmostEqual(temp, 450, delta=0.001)

    def test_getDetailedFissionProducts(self):
        """All nuclides in the density map are returned as fission products."""
        dfpDen = defaultdict(int)
        dfpDen["U238"] = 1.2
        dfpDen["U235"] = 2.3
        dfpDen["AM241"] = 3.4
        prods = self.w._getDetailedFissionProducts(dfpDen)
        self.assertEqual(len(prods), 3)
        self.assertIn("U238", prods)
        self.assertIn("U235", prods)
        self.assertIn("AM241", prods)

    def test_getDetailedFissionProductsPass(self):
        """The 'noFissionProducts' model yields an empty product list."""
        self.cs[CONF_FP_MODEL] = "noFissionProducts"
        prods = self.w._getDetailedFissionProducts({})
        self.assertEqual(len(prods), 0)

    def test_getDetailedFPDensities(self):
        self.w.modelFissionProducts = False
        dens = self.w._getDetailedFPDensities()
        self.assertEqual(len(dens), 0)
        # with modeling enabled but no LFP collection configured, the lookup
        # fails with an AttributeError
        self.w.modelFissionProducts = True
        with self.assertRaises(AttributeError):
            dens = self.w._getDetailedFPDensities()

    def test_isCriticalBucklingSearchActive(self):
        isActive = self.w._isCriticalBucklingSearchActive
        self.assertTrue(isActive)

    def test_getDriverBlock(self):
        # no driver XS ID -> no driver block
        self.w.driverXsID = ""
        b = self.w._getDriverBlock()
        self.assertIsNone(b)
        # "AA" is the writer's own (non-driver) XS ID, so resolving it as a
        # driver fails
        self.w.driverXsID = "AA"
        with self.assertRaises(ValueError):
            b = self.w._getDriverBlock()
================================================
FILE: armi/physics/neutronics/macroXSGenerationInterface.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Converts microscopic cross sections to macroscopic cross sections by multiplying by number density.
.. math::
\Sigma_i = N_i \sigma_i
"""
from armi import context, interfaces, mpiActions, runLog
from armi.nuclearDataIO import xsCollections
from armi.physics.neutronics.settings import CONF_MINIMUM_NUCLIDE_DENSITY
from armi.utils import getBurnSteps, iterables
class MacroXSGenerator(mpiActions.MpiAction):
    """An action that can make macroscopic cross sections, even in parallel."""

    def __init__(
        self,
        blocks,
        lib,
        buildScatterMatrix,
        libType,
        minimumNuclideDensity=0.0,
    ):
        mpiActions.MpiAction.__init__(self)
        self.blocks = blocks
        self.lib = lib
        self.buildScatterMatrix = buildScatterMatrix
        self.libType = libType
        self.minimumNuclideDensity = minimumNuclideDensity

    def __reduce__(self):
        # Deliberately pickle blocks and lib as None: the library must be
        # broadcast explicitly in invokeHook so the default lib=None argument
        # of buildMacros() is handled correctly, and blocks are scattered
        # rather than serialized wholesale.
        return (
            MacroXSGenerator,
            (
                None,
                None,
                self.buildScatterMatrix,
                self.libType,
                self.minimumNuclideDensity,
            ),
        )

    def invokeHook(self):
        # Only the root rank resolves the default arguments; workers start
        # empty and receive their share via scatter/bcast below.
        if context.MPI_RANK == 0:
            targetBlocks = self.blocks if self.blocks is not None else self.r.core.getBlocks()
            xsLib = self.lib or self.r.core.lib
        else:
            targetBlocks = []
            xsLib = None
        creator = xsCollections.MacroscopicCrossSectionCreator(self.buildScatterMatrix, self.minimumNuclideDensity)
        if context.MPI_SIZE > 1:
            localBlocks = self.scatterList(targetBlocks)
            xsLib = context.MPI_COMM.bcast(xsLib, root=0)
            localMacros = [creator.createMacrosFromMicros(xsLib, b, libType=self.libType) for b in localBlocks]
            macros = self.gatherList(localMacros)
        else:
            macros = [creator.createMacrosFromMicros(xsLib, b, libType=self.libType) for b in targetBlocks]
        # Only root holds the full block list, so only root assigns results.
        if context.MPI_RANK == 0:
            for blk, blockMacros in zip(targetBlocks, macros):
                blk.macros = blockMacros

    @staticmethod
    def scatterList(lst):
        """Scatter a root-rank list so each MPI rank receives one chunk."""
        chunks = iterables.split(lst, context.MPI_SIZE) if context.MPI_RANK == 0 else None
        return context.MPI_COMM.scatter(chunks, root=0)

    @staticmethod
    def gatherList(localList):
        """Gather per-rank lists to the root rank and flatten them into one list."""
        gathered = context.MPI_COMM.gather(localList, root=0)
        if context.MPI_RANK == 0:
            gathered = iterables.flatten(gathered)
        return gathered
class MacroXSGenerationInterface(interfaces.Interface):
    """
    Builds macroscopic cross sections on all Blocks.

    Notes
    -----
    This probably should not be an interface since it has no interactXYZ methods. It should probably be converted to an
    MpiAction.
    """

    name = "macroXsGen"

    def __init__(self, r, cs):
        interfaces.Interface.__init__(self, r, cs)
        # Time-step index (cumulative node count) of the last macro XS build.
        self.macrosLastBuiltAt = None
        self.minimumNuclideDensity = cs[CONF_MINIMUM_NUCLIDE_DENSITY]

    def buildMacros(
        self,
        lib=None,
        bListSome=None,
        buildScatterMatrix=True,
        libType="micros",
    ):
        """
        Builds block-level macroscopic cross sections for making diffusion equation matrices.

        This will use MPI if armi.context.MPI_SIZE > 1

        Builds G-vectors of the basic XS ('nGamma','fission','nalph','np','n2n','nd','nt') Builds GxG matrices for
        scatter matrices

        .. impl:: Build macroscopic cross sections for blocks.
            :id: I_ARMI_MACRO_XS
            :implements: R_ARMI_MACRO_XS

            This method builds macroscopic cross sections for a user-specified set of blocks using a specified
            microscopic neutron or gamma cross section library. If no blocks are specified, cross sections are
            calculated for all blocks in the core. If no library is specified, the existing r.core.lib is used. The
            basic arithmetic involved in generating macroscopic cross sections consists of multiplying isotopic number
            densities by isotopic microscopic cross sections and summing over all isotopes in a composition. The
            calculation is implemented in:py:func:`computeMacroscopicGroupConstants
            `. This method uses an
            :py:class:`mpiAction ` to distribute the work of calculating macroscopic cross
            sections across the worker processes.

        Parameters
        ----------
        lib : library object , optional
            If lib is specified, then buildMacros will build macro XS using micro XS data from lib. If lib = None, then
            buildMacros will use the existing library self.r.core.lib. If that does not exist, then buildMacros will use
            a new nuclearDataIO.ISOTXS object.
        buildScatterMatrix : Boolean, optional
            If True, all macro XS will be built, including the time-consuming scatter matrix. If False, only the macro
            XS that are needed for fluxRecon.computePinMGFluxAndPower will be built. These include 'transport',
            'fission', and a few others. No ng x ng matrices (such as 'scatter' or 'chi') will be built. Essentially,
            this option saves huge runtime for the fluxRecon module.
        libType : str, optional
            The block attribute containing the desired microscopic XS for this block: either "micros" for neutron XS or
            "gammaXS" for gamma XS.
        """
        cycle = self.r.p.cycle
        burnSteps = getBurnSteps(self.cs)
        # Cumulative node index: each completed cycle contributes its burn
        # steps plus one, then add the current node within this cycle.
        self.macrosLastBuiltAt = sum(nSteps + 1 for nSteps in burnSteps[:cycle]) + self.r.p.timeNode
        runLog.important("Building macro XS")
        action = MacroXSGenerator(
            bListSome,
            lib,
            buildScatterMatrix,
            libType,
            self.minimumNuclideDensity,
        )
        # Ship the action to all workers, then run it everywhere.
        action.broadcast()
        action.invoke(self.o, self.r, self.cs)
================================================
FILE: armi/physics/neutronics/parameters.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Parameter definitions for the Neutronics Plugin.
We hope neutronics plugins that compute flux will use ``mgFlux``, etc., which will enable modular
construction of apps.
"""
from armi.reactor import parameters
from armi.reactor.blocks import Block
from armi.reactor.parameters import ParamLocation
from armi.reactor.parameters.parameterDefinitions import isNumpyArray
from armi.reactor.reactors import Core
from armi.utils import units
def getNeutronicsParameterDefinitions():
    """Return ParameterDefinitionCollections for each appropriate ArmiObject."""
    return {
        Block: _getNeutronicsBlockParams(),
        Core: _getNeutronicsCoreParams(),
    }
def _getNeutronicsBlockParams():
pDefs = parameters.ParameterDefinitionCollection()
with pDefs.createBuilder() as pb:
pb.defParam(
"axMesh",
units=units.UNITLESS,
description="number of neutronics axial mesh points in this block",
default=None,
categories=[parameters.Category.retainOnReplacement],
)
pb.defParam(
"mgFlux",
setter=isNumpyArray("mgFlux"),
units=f"n*{units.CM}/{units.SECONDS}",
description="multigroup volume-integrated flux",
location=ParamLocation.VOLUME_INTEGRATED,
saveToDB=True,
categories=[
parameters.Category.fluxQuantities,
parameters.Category.multiGroupQuantities,
],
default=None,
)
pb.defParam(
"adjMgFlux",
units=f"n*{units.CM}/{units.SECONDS}",
description="multigroup adjoint neutron flux",
location=ParamLocation.VOLUME_INTEGRATED,
saveToDB=True,
categories=[
parameters.Category.fluxQuantities,
parameters.Category.multiGroupQuantities,
],
default=None,
)
pb.defParam(
"lastMgFlux",
units=f"n*{units.CM}/{units.SECONDS}",
description="multigroup volume-integrated flux used for averaging the latest and previous depletion step",
location=ParamLocation.VOLUME_INTEGRATED,
saveToDB=False,
categories=[
parameters.Category.fluxQuantities,
parameters.Category.multiGroupQuantities,
],
default=None,
)
pb.defParam(
"mgFluxGamma",
units=f"#*{units.CM}/{units.SECONDS}",
description="multigroup gamma flux",
location=ParamLocation.VOLUME_INTEGRATED,
saveToDB=True,
categories=[
parameters.Category.fluxQuantities,
parameters.Category.multiGroupQuantities,
parameters.Category.gamma,
],
default=None,
)
pb.defParam(
"mgNeutronVelocity",
units=f"{units.CM}/{units.SECONDS}",
description="multigroup neutron velocity",
location=ParamLocation.AVERAGE,
saveToDB=True,
categories=[parameters.Category.multiGroupQuantities],
default=None,
)
pb.defParam(
"extSrc",
units=f"#/{units.CM}^3/{units.SECONDS}",
description="multigroup external source",
location=ParamLocation.AVERAGE,
saveToDB=False,
categories=[parameters.Category.multiGroupQuantities],
default=None,
)
pb.defParam(
"mgGammaSrc",
units=f"#/{units.CM}^3/{units.SECONDS}",
description="multigroup gamma source",
location=ParamLocation.AVERAGE,
saveToDB=True,
categories=[
parameters.Category.multiGroupQuantities,
parameters.Category.gamma,
],
default=None,
)
pb.defParam(
"gammaSrc",
units=f"#/{units.CM}^3/{units.SECONDS}",
description="gamma source",
location=ParamLocation.AVERAGE,
saveToDB=True,
categories=[parameters.Category.gamma],
default=0.0,
)
# Not anointing the pin fluxes as a MG quantity, since it has an extra dimension, which
# could lead to issues, depending on how the multiGroupQuantities category gets used
pb.defParam(
"pinMgFluxes",
units=f"n/{units.CM}^2/{units.SECONDS}",
description="""
The block-level pin multigroup fluxes. pinMgFluxes[i, g] represents the flux in group g
for pin i. Flux units are the standard n/cm^2/s. The "ARMI pin ordering" is used, which
is counter-clockwise from 3 o'clock.
""",
categories=[parameters.Category.pinQuantities],
saveToDB=True,
default=None,
)
pb.defParam(
"pinMgFluxesAdj",
units=units.UNITLESS,
description="should be a blank 3-D array, but re-defined later (nPins x ng x nAxialSegments)",
categories=[parameters.Category.pinQuantities],
saveToDB=False,
default=None,
)
pb.defParam(
"pinMgFluxesGamma",
units=f"#/{units.CM}^2/{units.SECONDS}",
description="should be a blank 3-D array, but re-defined later (nPins x ng x nAxialSegments)",
categories=[parameters.Category.pinQuantities, parameters.Category.gamma],
saveToDB=False,
default=None,
)
pb.defParam(
"chi",
units=units.UNITLESS,
description="Energy distribution of fission neutrons",
location=ParamLocation.AVERAGE,
saveToDB=True,
default=None,
)
pb.defParam(
"linPow",
units=f"{units.WATTS}/{units.METERS}",
description=(
"Pin-averaged linear heat rate, which is calculated by evaluating the block power "
"and dividing by the number of pins. If gamma transport is enabled, then this "
"represents the combined neutron and gamma heating. If gamma transport is disabled "
"then this represents the energy generation in the pin, where gammas are assumed to "
"deposit their energy locally. Note that this value does not implicitly account "
"for axial and radial peaking factors within the block. Use `linPowByPin` for "
"obtaining the pin linear heat rate with peaking factors included."
),
location=ParamLocation.AVERAGE,
default=0.0,
categories=[
parameters.Category.detailedAxialExpansion,
parameters.Category.neutronics,
],
)
pb.defParam(
"linPowByPin",
setter=isNumpyArray("linPowByPin"),
units=f"{units.WATTS}/{units.CM}",
description=(
"Pin linear linear heat rate, which is calculated through flux reconstruction and "
"accounts for axial and radial peaking factors. This differs from the `linPow` "
"parameter, which assumes no axial and radial peaking in the block as this "
"information is unavailable without detailed flux reconstruction. The same "
"application of neutron and gamma heating results applies."
),
location=ParamLocation.CHILDREN,
categories=[parameters.Category.pinQuantities],
default=None,
)
# gamma category because linPowByPin is only split by neutron/gamma when gamma is activated
pb.defParam(
"linPowByPinNeutron",
setter=isNumpyArray("linPowByPinNeutron"),
units=f"{units.WATTS}/{units.CM}",
description="Pin linear neutron heat rate. This is the neutron heating component of `linPowByPin`",
location=ParamLocation.CHILDREN,
categories=[parameters.Category.pinQuantities, parameters.Category.gamma],
default=None,
)
pb.defParam(
"linPowByPinGamma",
setter=isNumpyArray("linPowByPinGamma"),
units=f"{units.WATTS}/{units.CM}",
description="Pin linear gamma heat rate. This is the gamma heating component of `linPowByPin`",
location=ParamLocation.CHILDREN,
categories=[parameters.Category.pinQuantities, parameters.Category.gamma],
default=None,
)
pb.defParam(
"reactionRates",
units=f"#/{units.SECONDS}",
description='List of reaction rates in specified by setting "reactionsToDB"',
location=ParamLocation.VOLUME_INTEGRATED,
categories=[parameters.Category.fluxQuantities],
default=None,
)
with pDefs.createBuilder(
saveToDB=True,
default=None,
location=ParamLocation.EDGES,
categories=[parameters.Category.detailedAxialExpansion, "depletion"],
) as pb:
pb.defParam(
"pointsEdgeFastFluxFr",
units=units.UNITLESS,
description="Fraction of flux above 100keV at edges of the block",
)
pb.defParam(
"pointsEdgeDpa",
setter=isNumpyArray("pointsEdgeDpa"),
units=units.DPA,
description="displacements per atom at edges of the block",
location=ParamLocation.EDGES | ParamLocation.BOTTOM,
categories=["cumulative", "detailedAxialExpansion", "depletion"],
)
pb.defParam(
"pointsEdgeDpaRate",
setter=isNumpyArray("pointsEdgeDpaRate"),
units=f"{units.DPA}/{units.SECONDS}",
description="Current time derivative of the displacement per atoms at edges of the block",
location=ParamLocation.EDGES | ParamLocation.BOTTOM,
)
with pDefs.createBuilder(
saveToDB=True,
default=None,
location=ParamLocation.CORNERS,
categories=[
parameters.Category.detailedAxialExpansion,
parameters.Category.depletion,
],
) as pb:
pb.defParam(
"cornerFastFlux",
units=f"n/{units.CM}^2/{units.SECONDS}",
description="Neutron flux above 100keV at hexagon block corners",
)
pb.defParam(
"pointsCornerFastFluxFr",
units=units.UNITLESS,
description="Fraction of flux above 100keV at corners of the block",
)
pb.defParam(
"pointsCornerDpa",
setter=isNumpyArray("pointsCornerDpa"),
units=units.DPA,
description="displacements per atom at corners of the block",
location=ParamLocation.CORNERS | ParamLocation.BOTTOM,
categories=["cumulative", "detailedAxialExpansion", "depletion"],
)
pb.defParam(
"pointsCornerDpaRate",
setter=isNumpyArray("pointsCornerDpaRate"),
units=f"{units.DPA}/{units.SECONDS}",
description="Current time derivative of the displacement per atoms at corners of the block",
location=ParamLocation.CORNERS | ParamLocation.BOTTOM,
)
with pDefs.createBuilder(
default=0.0,
location=ParamLocation.AVERAGE,
categories=[parameters.Category.detailedAxialExpansion],
) as pb:
# Neutronics reaction rate params that are not re-derived in mesh conversion
pb.defParam(
"rateBalance",
units=f"1/{units.CM}^3/{units.SECONDS}",
description="Numerical balance between particle production and destruction (should be small)",
)
pb.defParam(
"rateProdNet",
units=f"1/{units.CM}^3/{units.SECONDS}",
description="The total neutron production including (n,2n) source and fission source.",
)
pb.defParam(
"capturePowerFrac",
units=units.UNITLESS,
description="Fraction of the power produced through capture in a block.",
saveToDB="True",
)
pb.defParam(
"fluence",
units=f"#/{units.CM}^2",
description="Fluence",
categories=["cumulative"],
)
pb.defParam(
"flux",
units=f"n/{units.CM}^2/{units.SECONDS}",
description="neutron flux",
categories=[
parameters.Category.retainOnReplacement,
parameters.Category.fluxQuantities,
],
)
pb.defParam("fluxAdj", units=units.UNITLESS, description="Adjoint flux")
pb.defParam(
"pdens",
units=f"{units.WATTS}/{units.CM}^3",
description="Average volumetric power density",
categories=[parameters.Category.neutronics],
)
pb.defParam(
"pdensDecay",
units=f"{units.WATTS}/{units.CM}^3",
description="Decay power density from decaying radionuclides",
)
pb.defParam(
"arealPd",
units=f"{units.MW}/{units.METERS}^2",
description="Power divided by XY area",
)
pb.defParam(
"fisDens",
units=f"fissions/{units.CM}^3/{units.SECONDS}",
description="Fission density in a pin (scaled up from homogeneous)",
)
pb.defParam(
"fisDensHom",
units=f"1/{units.CM}^3/{units.SECONDS}",
description="Homogenized fissile density",
)
pb.defParam(
"fluxGamma",
units=f"#/{units.CM}^2/{units.SECONDS}",
description="Gamma scalar flux",
categories=[
parameters.Category.retainOnReplacement,
parameters.Category.fluxQuantities,
],
)
pb.defParam(
"fluxPeak",
units=f"n/{units.CM}^2/{units.SECONDS}",
description="Peak neutron flux calculated within the mesh",
location=ParamLocation.MAX,
)
pb.defParam(
"kInf",
units=units.UNITLESS,
description=(
"Neutron production rate in this block/neutron absorption rate in this "
"block. Not truly kinf but a reasonable approximation of reactivity."
),
)
pb.defParam("medAbsE", units=units.EV, description="Median neutron absorption energy")
pb.defParam(
"medFisE",
units=units.EV,
description="Median energy of neutron causing fission",
)
pb.defParam("medFlxE", units=units.EV, description="Median neutron flux energy")
pb.defParam(
"pdensGamma",
units=f"{units.WATTS}/{units.CM}^3",
description="Average volumetric gamma power density",
categories=[parameters.Category.gamma],
)
# gamma category because pdens is only split by neutron/gamma when gamma is activated
pb.defParam(
"pdensNeutron",
units=f"{units.WATTS}/{units.CM}^3",
description="Average volumetric neutron power density",
categories=[parameters.Category.gamma],
)
pb.defParam(
"ppdens",
units=f"{units.WATTS}/{units.CM}^3",
description="Peak power density",
location=ParamLocation.MAX,
)
pb.defParam(
"ppdensGamma",
units=f"{units.WATTS}/{units.CM}^3",
description="Peak gamma density",
categories=[parameters.Category.gamma],
location=ParamLocation.MAX,
)
# rx rate params that are derived during mesh conversion.
# We'd like all things that can be derived from flux and XS to be
# in this category to minimize numerical diffusion but it is a WIP.
with pDefs.createBuilder(
default=0.0,
location=ParamLocation.AVERAGE,
) as pb:
pb.defParam(
"rateAbs",
units=f"1/{units.CM}^3/{units.SECONDS}",
description="Total absorption rate in this block (fisson + capture).",
)
pb.defParam(
"rateCap",
units=f"1/{units.CM}^3/{units.SECONDS}",
description="Parasitic capture rate in this block.",
)
pb.defParam(
"rateProdN2n",
units=f"1/{units.CM}^3/{units.SECONDS}",
description="Production rate of neutrons from n2n reactions.",
)
with pDefs.createBuilder(
default=0.0,
location=ParamLocation.AVERAGE,
categories=[parameters.Category.detailedAxialExpansion],
) as pb:
pb.defParam(
"rateFis",
units=f"1/{units.CM}^3/{units.SECONDS}",
description="Fission rate in this block.",
)
pb.defParam(
"rateProdFis",
units=f"1/{units.CM}^3/{units.SECONDS}",
description="Production rate of neutrons from fission reactions (nu * fission source / k-eff)",
)
with pDefs.createBuilder(
default=0.0,
location=ParamLocation.VOLUME_INTEGRATED,
categories=[parameters.Category.detailedAxialExpansion],
) as pb:
pb.defParam(
"powerGenerated",
units=units.WATTS,
description="Generated power. Different than b.p.power only when gamma transport is activated.",
categories=[parameters.Category.gamma],
)
pb.defParam(
"power",
units=units.WATTS,
description="Total power",
categories=[parameters.Category.neutronics],
)
pb.defParam(
"powerGamma",
units=units.WATTS,
description="Total gamma power",
categories=[parameters.Category.gamma],
)
# gamma category because power is only split by neutron/gamma when gamma is activated
pb.defParam(
"powerNeutron",
units=units.WATTS,
description="Total neutron power",
categories=[parameters.Category.gamma],
)
with pDefs.createBuilder(default=0.0) as pb:
pb.defParam(
"detailedDpaThisCycle",
units=units.DPA,
location=ParamLocation.AVERAGE,
description=(
"Displacement per atom accumulated during this cycle. This accumulates "
"over a cycle and resets to zero at BOC."
),
categories=[
parameters.Category.cumulativeOverCycle,
parameters.Category.detailedAxialExpansion,
],
)
pb.defParam(
"detailedDpaPeakRate",
units=f"{units.DPA}/{units.SECONDS}",
description="Peak DPA rate based on detailedDpaPeak",
location=ParamLocation.MAX,
categories=[parameters.Category.cumulative, parameters.Category.neutronics],
)
pb.defParam(
"enrichmentBOL",
units=units.UNITLESS,
description="Enrichment during fabrication (mass fraction)",
)
pb.defParam(
"fastFlux",
units=f"1/{units.CM}^2/{units.SECONDS}",
description="Neutron flux above 100keV",
location=ParamLocation.AVERAGE,
categories=["detailedAxialExpansion"],
)
pb.defParam(
"fastFluxFr",
units=units.UNITLESS,
description="Fraction of flux above 100keV",
location=ParamLocation.AVERAGE,
categories=["detailedAxialExpansion"],
)
pb.defParam(
"pdensGenerated",
units=f"{units.WATTS}/{units.CM}^3",
description=(
"Volume-averaged generated power density. Different than b.p.pdens only "
"when gamma transport is activated."
),
location=ParamLocation.AVERAGE,
categories=[parameters.Category.gamma],
)
return pDefs
def _getNeutronicsCoreParams():
    """Return the parameter definitions for core-level neutronics quantities."""
    coreParams = parameters.ParameterDefinitionCollection()
    with coreParams.createBuilder(categories=[parameters.Category.neutronics]) as builder:
        # The eigenvalues parameter holds a list at runtime; a mutable object
        # cannot be used as a parameter default, so it starts as None.
        builder.defParam(
            "eigenvalues",
            units=units.UNITLESS,
            description="All available lambda-eigenvalues of reactor.",
            default=None,
            location=ParamLocation.AVERAGE,
        )
        builder.defParam(
            "kInf",
            units=units.UNITLESS,
            description="k-infinity",
            default=0.0,
            location=ParamLocation.AVERAGE,
        )
    return coreParams
================================================
FILE: armi/physics/neutronics/plugin.py
================================================
# Copyright 2025 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A boilerplate entry for a neutronics physics plugin.
The ARMI Framework comes with a neutronics plugin that introduces two independent interfaces:
:py:mod:`~armi.physics.neutronics.fissionProductModel`
Handles fission product modeling
:py:mod:`~armi.physics.neutronics.crossSectionGroupManager`
Handles the management of different cross section "groups"
"""
import numpy as np
from armi import plugins, runLog
from armi.physics.neutronics.const import CONF_CROSS_SECTION
from armi.utils import tabulate
class NeutronicsPlugin(plugins.ArmiPlugin):
    """The built-in neutronics plugin with a few capabilities and a lot of state parameter definitions."""

    @staticmethod
    @plugins.HOOKIMPL
    def exposeInterfaces(cs):
        """Collect and expose all of the interfaces that live under the built-in neutronics package."""
        from armi.physics.neutronics import crossSectionGroupManager
        from armi.physics.neutronics.fissionProductModel import fissionProductModel

        # Gather the interface descriptions contributed by each sub-module.
        return [
            description
            for module in (crossSectionGroupManager, fissionProductModel)
            for description in plugins.collectInterfaceDescriptions(module, cs)
        ]

    @staticmethod
    @plugins.HOOKIMPL
    def defineParameters():
        """Define parameters for the plugin."""
        from armi.physics.neutronics import parameters as neutronicsParams

        return neutronicsParams.getNeutronicsParameterDefinitions()

    @staticmethod
    @plugins.HOOKIMPL
    def defineParameterRenames():
        """Map legacy parameter names to their current equivalents."""
        return dict(buGroup="envGroup", buGroupNum="envGroupNum")

    @staticmethod
    @plugins.HOOKIMPL
    def defineEntryPoints():
        """Define entry points for the plugin."""
        from armi.physics.neutronics import diffIsotxs

        return [diffIsotxs.CompareIsotxsLibraries]

    @staticmethod
    @plugins.HOOKIMPL
    def defineSettings():
        """Define settings for the plugin."""
        from armi.physics.neutronics import crossSectionSettings
        from armi.physics.neutronics import settings as neutronicsSettings
        from armi.physics.neutronics.fissionProductModel import (
            fissionProductModelSettings,
        )

        pluginSettings = [
            crossSectionSettings.XSSettingDef(
                CONF_CROSS_SECTION,
            )
        ]
        pluginSettings.extend(neutronicsSettings.defineSettings())
        pluginSettings.extend(fissionProductModelSettings.defineSettings())
        return pluginSettings

    @staticmethod
    @plugins.HOOKIMPL
    def defineSettingsValidators(inspector):
        """Implementation of settings inspections for neutronics settings."""
        from armi.physics.neutronics.fissionProductModel.fissionProductModelSettings import (
            getFissionProductModelSettingValidators,
        )
        from armi.physics.neutronics.settings import getNeutronicsSettingValidators

        # Neutronics validators run first, followed by fission product model validators.
        return getNeutronicsSettingValidators(inspector) + getFissionProductModelSettingValidators(inspector)

    @staticmethod
    @plugins.HOOKIMPL
    def onProcessCoreLoading(core, cs, dbLoad):
        """Called whenever a Core object is newly built."""
        applyEffectiveDelayedNeutronFractionToCore(core, cs)

    @staticmethod
    @plugins.HOOKIMPL
    def getReportContents(r, cs, report, stage, blueprint):
        """Generates the Report Content for the Neutronics Report."""
        from armi.physics.neutronics import reports

        return reports.insertNeutronicsReport(r, cs, report, stage)
def applyEffectiveDelayedNeutronFractionToCore(core, cs):
    """Process the settings for the delayed neutron fraction and precursor decay constants.

    If ``beta`` is a single float, only the total delayed neutron fraction is set.
    If ``beta`` and ``decayConstants`` are equal-length lists, the group-wise
    fractions and precursor decay constants are stored as well.

    Parameters
    ----------
    core : Core
        The newly-built core whose ``beta``/``betaComponents``/``betaDecayConstants``
        parameters are assigned.
    cs : case settings
        Provides the user-supplied ``beta`` and ``decayConstants`` values.

    Raises
    ------
    ValueError
        If ``beta`` and ``decayConstants`` are both lists but of different lengths.
    """
    # Verify and set the core beta parameters based on the user-supplied settings
    beta = cs["beta"]
    decayConstants = cs["decayConstants"]

    # If beta is interpreted as a float, then assign it to the total delayed neutron fraction
    # parameter. Otherwise, setup the group-wise delayed neutron fractions and precursor decay
    # constants.
    reportTableData = []
    if isinstance(beta, float):
        core.p.beta = beta
        reportTableData.append(("Total Delayed Neutron Fraction", core.p.beta))
    elif isinstance(beta, list) and isinstance(decayConstants, list):
        if len(beta) != len(decayConstants):
            raise ValueError(
                f"The values for `beta` ({beta}) and `decayConstants` ({decayConstants}) are not consistent lengths."
            )

        core.p.beta = sum(beta)
        core.p.betaComponents = np.array(beta)
        core.p.betaDecayConstants = np.array(decayConstants)

        reportTableData.append(("Total Delayed Neutron Fraction", core.p.beta))
        for i, betaComponent in enumerate(core.p.betaComponents):
            reportTableData.append((f"Group {i} Delayed Neutron Fractions", betaComponent))
        for i, decayConstant in enumerate(core.p.betaDecayConstants):
            # BUGFIX: this label was previously a plain string, so every row
            # literally read "Group {i} Precursor Decay Constants".
            reportTableData.append((f"Group {i} Precursor Decay Constants", decayConstant))

    # Report to the user the values were not applied.
    if not reportTableData and (beta is not None or decayConstants is not None):
        runLog.warning(
            f"Delayed neutron fraction(s) - {beta} and decay constants - {decayConstants} have not been applied."
        )
    else:
        runLog.extra(
            tabulate.tabulate(
                data=reportTableData,
                headers=["Component", "Value"],
                tableFmt="armi",
            )
        )
================================================
FILE: armi/physics/neutronics/settings.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Some generic neutronics-related settings."""
import os
from armi import runLog
from armi.physics.neutronics import LatticePhysicsFrequency
from armi.physics.neutronics.const import NEUTRON
from armi.physics.neutronics.energyGroups import GROUP_STRUCTURE
from armi.settings import setting, settingsValidation
from armi.settings.fwSettings.globalSettings import (
CONF_DETAILED_AXIAL_EXPANSION,
CONF_NON_UNIFORM_ASSEM_FLAGS,
CONF_RUN_TYPE,
)
from armi.utils import directoryChangers
# Setting-name constants for general neutronics options.
CONF_BOUNDARIES = "boundaries"
CONF_DPA_PER_FLUENCE = "dpaPerFluence"
CONF_EIGEN_PROB = "eigenProb"
CONF_EPS_EIG = "epsEig"
CONF_EPS_FSAVG = "epsFSAvg"
CONF_EPS_FSPOINT = "epsFSPoint"
CONF_GEN_XS = "genXS"  # gamma stuff and neutronics plugin/lattice physics
CONF_GLOBAL_FLUX_ACTIVE = "globalFluxActive"
CONF_GROUP_STRUCTURE = "groupStructure"
CONF_INNERS_ = "inners"
CONF_LOADING_FILE = "loadingFile"
CONF_MCNP_LIB_BASE = "mcnpLibraryVersion"
CONF_NEUTRONICS_KERNEL = "neutronicsKernel"
CONF_NEUTRONICS_TYPE = "neutronicsType"
CONF_OUTERS_ = "outers"
CONF_RESTART_NEUTRONICS = "restartNeutronics"

# used by global flux interface
CONF_ACLP_DOSE_LIMIT = "aclpDoseLimit"
CONF_DPA_XS_SET = "dpaXsSet"
CONF_GRID_PLATE_DPA_XS_SET = "gridPlateDpaXsSet"
CONF_LOAD_PAD_ELEVATION = "loadPadElevation"
CONF_LOAD_PAD_LENGTH = "loadPadLength"
# Recognized values for the DPA cross section set settings (empty string allowed).
CONF_OPT_DPA = [
    "",
    "dpa_EBRII_INC600",
    "dpa_EBRII_INCX750",
    "dpa_EBRII_HT9",
    "dpa_EBRII_PE16",
    "dpa_EBRII_INC625",
]

# moved from xsSettings
CONF_CLEAR_XS = "clearXS"
CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION = "disableBlockTypeExclusionInXsGeneration"
CONF_LATTICE_PHYSICS_FREQUENCY = "latticePhysicsFrequency"
CONF_MINIMUM_FISSILE_FRACTION = "minimumFissileFraction"
CONF_MINIMUM_NUCLIDE_DENSITY = "minimumNuclideDensity"
CONF_TOLERATE_BURNUP_CHANGE = "tolerateBurnupChange"
CONF_XS_BLOCK_REPRESENTATION = "xsBlockRepresentation"
CONF_XS_KERNEL = "xsKernel"
def defineSettings():
    """Standard function to define settings; for neutronics.

    Returns
    -------
    list of setting.Setting
        Settings covering the group structure, global flux and lattice physics
        options, convergence criteria, DPA cross sections, and XS generation
        behavior for the neutronics plugin.
    """
    settings = [
        setting.Setting(
            CONF_GROUP_STRUCTURE,
            default="ANL33",
            label="Number of Energy Groups",
            description="Energy group structure to use in neutronics simulations",
            options=[
                "ANL9",
                "ANL33",
                "ANL70",
                "ANL116",
                "ANL230",
                "ANL703",
                "ANL1041",
                "ANL2082",
                "ARMI33",
                "ARMI45",
                "CINDER63",
                "348",
            ],
        ),
        setting.Setting(
            CONF_GLOBAL_FLUX_ACTIVE,
            default="Neutron",
            label="Global Flux Calculation",
            description="Calculate the global flux at each timestep for the selected particle "
            "type(s) using the specified neutronics kernel.",
            options=["", "Neutron", "Neutron and Gamma"],
        ),
        setting.Setting(
            CONF_GEN_XS,
            default="",
            label="Multigroup Cross Sections Generation",
            description="Generate multigroup cross sections for the selected particle "
            "type(s) using the specified lattice physics kernel (see Lattice Physics "
            "tab). When not set, the XS library will be auto-loaded from an existing "
            "ISOTXS in the working directory, but fail if there is no ISOTXS.",
            options=["", "Neutron", "Neutron and Gamma"],
        ),
        setting.Setting(
            CONF_DPA_PER_FLUENCE,
            default=4.01568627451e-22,
            label="DPA Per Fluence",
            description="A quick and dirty conversion that is used to get dpaPeak",
        ),
        setting.Setting(
            CONF_BOUNDARIES,
            default="Extrapolated",
            label="Neutronic BCs",
            description="External Neutronic Boundary Conditions. Reflective does not include axial.",
            options=[
                "Extrapolated",
                "Reflective",
                "Infinite",
                "ZeroSurfaceFlux",
                "ZeroInwardCurrent",
                "Generalized",
            ],
            enforcedOptions=True,
        ),
        # No kernels are registered by the framework itself; plugins add options.
        setting.Setting(
            CONF_NEUTRONICS_KERNEL,
            default="",
            label="Neutronics Kernel",
            description="The neutronics / depletion solver for global flux solve.",
            options=[],
            enforcedOptions=True,
        ),
        setting.Setting(
            CONF_MCNP_LIB_BASE,
            default="ENDF/B-VII.1",
            label="ENDF data library version to use for MCNP Analysis",
            description=(
                "This setting controls the nuclides in the problem according to "
                "the available nuclides in the selected library. For instance, "
                "some MCNP libraries contain elemental nuclides while others do "
                f"not. Only used when MCNP is selected as {CONF_NEUTRONICS_KERNEL}."
            ),
            options=["ENDF/B-V.0", "ENDF/B-VII.0", "ENDF/B-VII.1", "ENDF/B-VIII.0"],
        ),
        setting.Setting(
            CONF_NEUTRONICS_TYPE,
            default="real",
            label="Neutronics Type",
            description="The type of neutronics solution that is desired.",
            options=["real", "adjoint", "both"],
        ),
        setting.Setting(
            CONF_EIGEN_PROB,
            default=True,
            label="Eigenvalue Problem",
            description="Is this a eigenvalue problem or a fixed source problem?",
        ),
        # Convergence criteria for the global flux solver.
        setting.Setting(
            CONF_EPS_EIG,
            default=1e-07,
            label="Eigenvalue Epsilon",
            description="Convergence criteria for calculating the eigenvalue in the global flux solver",
        ),
        setting.Setting(
            CONF_EPS_FSAVG,
            default=1e-05,
            label="FS Avg. epsilon",
            description="Convergence criteria for average fission source",
        ),
        setting.Setting(
            CONF_EPS_FSPOINT,
            default=1e-05,
            label="FS Point epsilon",
            description="Convergence criteria for point fission source",
        ),
        # Above-core load pad (ACLP) geometry and dose settings.
        setting.Setting(
            CONF_LOAD_PAD_ELEVATION,
            default=0.0,
            label="Load pad elevation (cm)",
            description=(
                "The elevation of the bottom of the above-core load pad (ACLP) in cm from the bottom of the upper grid "
                "plate. Used for calculating the load pad dose"
            ),
        ),
        setting.Setting(
            CONF_LOAD_PAD_LENGTH,
            default=0.0,
            label="Load pad length (cm)",
            description="The length of the load pad. Used to compute average and peak dose.",
        ),
        setting.Setting(
            CONF_ACLP_DOSE_LIMIT,
            default=80.0,
            label="ALCP dose limit",
            description="Dose limit in dpa used to position the above-core load pad(if one exists)",
        ),
        setting.Setting(
            CONF_RESTART_NEUTRONICS,
            default=False,
            label="Restart neutronics",
            description="Restart global flux case using outputs from last time as a guess",
        ),
        setting.Setting(
            CONF_OUTERS_,
            default=100,
            label="Max Outer Iterations",
            description="XY and Axial partial current sweep max outer iterations.",
        ),
        setting.Setting(
            CONF_INNERS_,
            default=0,
            label="Inner Iterations",
            description="XY and Axial partial current sweep inner iterations. 0 lets the neutronics code pick a "
            "default.",
        ),
        # DPA cross section selection; options shared via CONF_OPT_DPA.
        setting.Setting(
            CONF_GRID_PLATE_DPA_XS_SET,
            default="dpa_EBRII_HT9",
            label="Grid plate DPA XS",
            description=("The cross sections to use for grid plate blocks DPA when computing displacements per atom."),
            options=CONF_OPT_DPA,
        ),
        setting.Setting(
            CONF_DPA_XS_SET,
            default="dpa_EBRII_HT9",
            label="DPA Cross Sections",
            description="The cross sections to use when computing displacements per atom.",
            options=CONF_OPT_DPA,
        ),
        setting.Setting(
            CONF_CLEAR_XS,
            default=False,
            label="Clear XS",
            description="Delete all cross section libraries before regenerating them.",
        ),
        setting.Setting(
            CONF_MINIMUM_FISSILE_FRACTION,
            default=0.045,
            label="Minimum Fissile Fraction",
            description="Minimum fissile fraction (fissile number densities / heavy metal number densities).",
            oldNames=[("mc2.minimumFissileFraction", None)],
        ),
        setting.Setting(
            CONF_MINIMUM_NUCLIDE_DENSITY,
            default=1e-15,
            label="Minimum nuclide density",
            description="Density to use for nuclides and fission products at infinite dilution. This is also used as "
            "the minimum density considered for computing macroscopic cross sections.",
        ),
        setting.Setting(
            CONF_TOLERATE_BURNUP_CHANGE,
            default=0.0,
            label="Cross Section Burnup Group Tolerance",
            description="Burnup window for computing cross sections. If the prior "
            "cross sections were computed within the window, new cross sections will "
            "not be generated and the prior calculated cross sections will be used.",
        ),
        setting.Setting(
            CONF_XS_BLOCK_REPRESENTATION,
            default="Average",
            label="Cross Section Block Averaging Method",
            description="The type of averaging to perform when creating cross sections for a group of blocks",
            options=[
                "Median",
                "Average",
                "FluxWeightedAverage",
                "ComponentAverage1DSlab",
            ],
        ),
        setting.Setting(
            CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION,
            default=False,
            label="Which Block types to merge together in XS Generation",
            description="Control which blocks get merged together by the XSGM. If set to ``None`` or ``True`` then all "
            "block types in the XS ID will be considered. If set to ``False`` then a default of ['fuel'] will be used. "
            "Can also be set to an exact list of strings for types to consider.",
        ),
        setting.Setting(
            CONF_XS_KERNEL,
            default="MC2v3",
            label="Lattice Physics Kernel",
            description="Method to determine broad group cross sections for assemblies",
            options=["", "MC2v2", "MC2v3", "MC2v3-PARTISN", "SERPENT"],
        ),
        setting.Setting(
            CONF_LATTICE_PHYSICS_FREQUENCY,
            default="BOC",
            label="Frequency of lattice physics updates",
            description="Define the frequency at which cross sections are updated with new lattice physics "
            "interactions.",
            options=[opt.name for opt in list(LatticePhysicsFrequency)],
            enforcedOptions=True,
        ),
    ]
    return settings
def _blueprintsHasOldXSInput(inspector):
    """Return True if the blueprints file contains a line starting with ``cross sections:``."""
    blueprintsPath = inspector.cs[CONF_LOADING_FILE]
    # Resolve the path relative to the case's input directory.
    with directoryChangers.DirectoryChanger(inspector.cs.inputDirectory):
        with open(os.path.expandvars(blueprintsPath)) as blueprintsFile:
            return any(line.startswith("cross sections:") for line in blueprintsFile)
def getNeutronicsSettingValidators(inspector):
    """The standard helper method, to provide validators to neutronics settings.

    Parameters
    ----------
    inspector : Inspector
        Settings inspector whose case settings (``inspector.cs``) are checked
        and, with user consent, auto-corrected.

    Returns
    -------
    list of settingsValidation.Query
        Queries that migrate legacy setting values to their modern equivalents.
    """
    queries = []

    def migrateXSOption(name0):
        """
        The `genXS` and `globalFluxActive` settings used to take True/False as inputs,
        this helper method migrates those to the new values.
        """
        value = inspector.cs[name0]
        if value == "True":
            value = NEUTRON
        elif value == "False":
            value = ""
        inspector.cs = inspector.cs.modified(newSettings={name0: value})

    def migrateXSOptionGenXS():
        """pass-through to migrateXSOption(), because Query functions cannot take arguments."""
        migrateXSOption(CONF_GEN_XS)

    def migrateXSOptionGlobalFluxActive():
        """pass-through to migrateXSOption(), because Query functions cannot take arguments."""
        migrateXSOption(CONF_GLOBAL_FLUX_ACTIVE)

    queries.append(
        settingsValidation.Query(
            lambda: inspector.cs[CONF_GEN_XS] in ("True", "False"),
            # BUGFIX: the "{0}" placeholder was previously never formatted, so
            # the user saw a literal "{0}"; the text also read "cannot not".
            "The {0} setting cannot take `True` or `False` as an exact value any more.".format(CONF_GEN_XS),
            'Would you like to auto-correct {0} to the correct value? ("" or {1})'.format(CONF_GEN_XS, NEUTRON),
            migrateXSOptionGenXS,
        )
    )
    queries.append(
        settingsValidation.Query(
            lambda: inspector.cs[CONF_GLOBAL_FLUX_ACTIVE] in ("True", "False"),
            # BUGFIX: same unformatted-placeholder fix as the genXS query above.
            "The {0} setting cannot take `True` or `False` as an exact value any more.".format(
                CONF_GLOBAL_FLUX_ACTIVE
            ),
            'Would you like to auto-correct {0} to the correct value? ("" or {1})'.format(
                CONF_GLOBAL_FLUX_ACTIVE, NEUTRON
            ),
            migrateXSOptionGlobalFluxActive,
        )
    )

    def migrateNormalBCSetting():
        """The `boundary` setting is migrated from `Normal` to `Extrapolated`."""
        inspector.cs = inspector.cs.modified(newSettings={CONF_BOUNDARIES: "Extrapolated"})

    queries.append(
        settingsValidation.Query(
            lambda: inspector.cs[CONF_BOUNDARIES] == "Normal",
            "The {0} setting now takes `Extrapolated` instead of `Normal` as a value.".format(CONF_BOUNDARIES),
            "Would you like to auto-correct {0} from `Normal` to `Extrapolated`?".format(CONF_BOUNDARIES),
            migrateNormalBCSetting,
        )
    )

    def updateXSGroupStructure():
        """Trying to migrate to a valid XS group structure name."""
        value = inspector.cs[CONF_GROUP_STRUCTURE]
        newValue = value.upper()
        if newValue in GROUP_STRUCTURE:
            runLog.info("Updating the cross section group structure from {} to {}".format(value, newValue))
        else:
            # Upper-casing didn't produce a known structure; fall back to the setting default.
            newValue = inspector.cs.getSetting(CONF_GROUP_STRUCTURE).default
            runLog.info(
                "Unable to automatically convert the {} setting of {}. Defaulting to {}".format(
                    CONF_GROUP_STRUCTURE, value, newValue
                )
            )
        inspector.cs = inspector.cs.modified(newSettings={CONF_GROUP_STRUCTURE: newValue})

    queries.append(
        settingsValidation.Query(
            lambda: inspector.cs[CONF_GROUP_STRUCTURE] not in GROUP_STRUCTURE,
            # NOTE(review): this message captures the setting value at validator
            # construction time, not at check time — presumably acceptable since
            # validators are built per-inspection; confirm if inspections rerun.
            "The given group structure {0} was not recognized.".format(inspector.cs[CONF_GROUP_STRUCTURE]),
            "Would you like to auto-correct the group structure value?",
            updateXSGroupStructure,
        )
    )

    def migrateDpa(name0):
        """Migrating some common shortened names for dpa XS sets."""
        value = inspector.cs[name0]
        if value == "dpaHT9_33":
            value = "dpaHT9_ANL33_TwrBol"
        elif value == "dpa_SS316":
            value = "dpaSS316_ANL33_TwrBol"
        inspector.cs = inspector.cs.modified(newSettings={name0: value})

    def migrateDpaDpaXsSet():
        """Pass-through to migrateDpa(), because Query functions cannot take arguments."""
        migrateDpa(CONF_DPA_XS_SET)

    def migrateDpaGridPlate():
        """Pass-through to migrateDpa(), because Query functions cannot take arguments."""
        migrateDpa(CONF_GRID_PLATE_DPA_XS_SET)

    queries.append(
        settingsValidation.Query(
            lambda: inspector.cs[CONF_DPA_XS_SET] in ("dpaHT9_33", "dpa_SS316"),
            "It appears you are using a shortened version of the {0}.".format(CONF_DPA_XS_SET),
            "Would you like to auto-correct this to the full name?",
            migrateDpaDpaXsSet,
        )
    )
    queries.append(
        settingsValidation.Query(
            lambda: inspector.cs[CONF_GRID_PLATE_DPA_XS_SET] in ("dpaHT9_33", "dpa_SS316"),
            "It appears you are using a shortened version of the {0}.".format(CONF_GRID_PLATE_DPA_XS_SET),
            "Would you like to auto-correct this to the full name?",
            migrateDpaGridPlate,
        )
    )

    # Detailed axial expansion and non-uniform assembly treatment are mutually exclusive.
    queries.append(
        settingsValidation.Query(
            lambda: inspector.cs[CONF_DETAILED_AXIAL_EXPANSION] and inspector.cs[CONF_NON_UNIFORM_ASSEM_FLAGS],
            f"The use of {CONF_DETAILED_AXIAL_EXPANSION} and {CONF_NON_UNIFORM_ASSEM_FLAGS} is not supported.",
            "Automatically set non-uniform assembly treatment to its default?",
            lambda: inspector._assignCS(
                CONF_NON_UNIFORM_ASSEM_FLAGS,
                inspector.cs.getSetting(CONF_NON_UNIFORM_ASSEM_FLAGS).default,
            ),
        )
    )

    queryMsg = (
        "A Snapshots case is selected but the `latticePhysicsFrequency` "
        "{0} is less than `firstCoupledIteration`. `firstCoupledIteration`"
        " or `all` is recommended for Snapshots when they involve large changes "
        "in power or flow compared to the loaded state."
    ).format(inspector.cs[CONF_LATTICE_PHYSICS_FREQUENCY])
    queryPrompt = (
        "Would you like to update `latticePhysicsFrequency` from "
        f"{inspector.cs[CONF_LATTICE_PHYSICS_FREQUENCY]} to `firstCoupledIteration`?"
    )
    queries.append(
        settingsValidation.Query(
            lambda: inspector.cs[CONF_RUN_TYPE] == "Snapshots"
            and not LatticePhysicsFrequency[inspector.cs[CONF_LATTICE_PHYSICS_FREQUENCY]]
            >= LatticePhysicsFrequency.firstCoupledIteration,
            queryMsg,
            queryPrompt,
            lambda: inspector._assignCS(CONF_LATTICE_PHYSICS_FREQUENCY, "firstCoupledIteration"),
        )
    )

    return queries
================================================
FILE: armi/physics/neutronics/tests/ISOXA
================================================
Not a real cross section file; just a placeholder to unit test the file copying function.
================================================
FILE: armi/physics/neutronics/tests/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
================================================
FILE: armi/physics/neutronics/tests/rzmflxYA
================================================
Not a real flux spectrum file; just a placeholder to unit test the file copying function.
================================================
FILE: armi/physics/neutronics/tests/test_crossSectionManager.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test the cross section manager.
:py:mod:`armi.physics.neutronics.crossSectionGroupManager`
"""
import copy
import os
import pickle
import sys
import unittest
from io import BytesIO
from unittest.mock import MagicMock
from armi import settings
from armi.physics.neutronics import crossSectionGroupManager
from armi.physics.neutronics.const import CONF_CROSS_SECTION
from armi.physics.neutronics.crossSectionGroupManager import (
AverageBlockCollection,
BlockCollection,
CrossSectionGroupManager,
FluxWeightedAverageBlockCollection,
MedianBlockCollection,
)
from armi.physics.neutronics.crossSectionSettings import XSModelingOptions
from armi.physics.neutronics.fissionProductModel.tests import test_lumpedFissionProduct
from armi.physics.neutronics.settings import (
CONF_LATTICE_PHYSICS_FREQUENCY,
CONF_XS_BLOCK_REPRESENTATION,
)
from armi.reactor.blocks import HexBlock
from armi.reactor.flags import Flags
from armi.reactor.tests import test_blocks, test_reactors
from armi.tests import TEST_ROOT, mockRunLogs
from armi.utils import units
from armi.utils.directoryChangers import TemporaryDirectoryChanger
# Absolute path of the directory containing this test module.
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
class TestBlockColl(unittest.TestCase):
    """Exercise basic container behavior of a plain BlockCollection."""

    def setUp(self):
        self.blockList = makeBlocks()
        nuclides = self.blockList[0].core.r.blueprints.allNuclidesInProblem
        self.bc = BlockCollection(nuclides)
        self.bc.extend(self.blockList)

    def test_add(self):
        self.bc.append("DummyBlock1")
        self.bc.extend(["DB2", "DB3"])
        for entry in ("DummyBlock1", "DB2", "DB3"):
            self.assertIn(entry, self.bc)

    def test_getBlocksInGroup(self):
        for block in self.blockList:
            self.assertIn(block, self.bc)

    def test_is_pickleable(self):
        self.bc.weightingParam = "test"
        stream = BytesIO()
        pickle.dump(self.bc, stream)
        stream.seek(0)
        roundTripped = pickle.load(stream)
        self.assertEqual(self.bc.weightingParam, roundTripped.weightingParam)
class TestBlockCollMedian(unittest.TestCase):
    """Exercise the MedianBlockCollection representative-block selection."""

    def setUp(self):
        self.blockList = makeBlocks(5)
        for idx, block in enumerate(self.blockList):
            block.setType("fuel")
            block.p.percentBu = idx / 4.0 * 100
        # Swap two entries so the median block is not trivially in the middle.
        self.blockList[0], self.blockList[2] = self.blockList[2], self.blockList[0]
        self.bc = MedianBlockCollection(self.blockList[0].core.r.blueprints.allNuclidesInProblem)
        self.bc.extend(self.blockList)

    def test_createRepresentativeBlock(self):
        representative = self.bc.createRepresentativeBlock()
        self.assertAlmostEqual(representative.p.percentBu, 50.0)

    def test_getBlockNuclideTemperature(self):
        # doesn't have to be in median block tests, but this is a simpler test
        nuc = "U235"
        testBlock = self.blockList[0]
        totalAmount = 0
        amountWeightedTemp = 0
        for comp in testBlock:
            density = comp.getNumberDensity(nuc)
            if density > 0:
                amount = density * comp.getVolume()
                totalAmount += amount
                amountWeightedTemp += amount * comp.temperatureInC
        expectedTemp = amountWeightedTemp / totalAmount
        self.assertAlmostEqual(expectedTemp, crossSectionGroupManager.getBlockNuclideTemperature(testBlock, nuc))
class TestBlockCollAvg(unittest.TestCase):
    """Tests of AverageBlockCollection representative-block creation."""

    @classmethod
    def setUpClass(cls):
        # Five fuel blocks with index-proportional burnup, U-235 density, and
        # component temperatures so the expected averages are easy to predict.
        fpFactory = test_lumpedFissionProduct.getDummyLFPFile()
        cls.blockList = makeBlocks(5)
        for bi, b in enumerate(cls.blockList):
            b.setType("fuel")
            b.p.percentBu = bi / 4.0 * 100
            b.setLumpedFissionProducts(fpFactory.createLFPsFromFile())
            # put some trace Fe-56 and Na-23 into the fuel
            # zero out all fuel nuclides except U-235 (for mass-weighting of component temperature)
            fuelComp = b.getComponent(Flags.FUEL)
            for nuc in fuelComp.getNuclides():
                b.setNumberDensity(nuc, 0.0)
            b.setNumberDensity("U235", bi)
            fuelComp.setNumberDensity("FE56", 1e-15)
            fuelComp.setNumberDensity("NA23", 1e-15)
            b.p.gasReleaseFraction = bi * 2 / 8.0
            # offset component temperatures by block index so averaging is observable
            for c in b:
                if c.hasFlags(Flags.FUEL):
                    c.temperatureInC = 600.0 + bi
                elif c.hasFlags([Flags.CLAD, Flags.DUCT, Flags.WIRE]):
                    c.temperatureInC = 500.0 + bi
                elif c.hasFlags([Flags.BOND, Flags.COOLANT, Flags.INTERCOOLANT]):
                    c.temperatureInC = 400.0 + bi

    def setUp(self):
        self.bc = AverageBlockCollection(self.blockList[0].core.r.blueprints.allNuclidesInProblem)
        self.bc.extend(self.blockList)
        self.bc.averageByComponent = True

    def test_performAverageByComponent(self):
        """Check the averageByComponent attribute."""
        # component-wise averaging requires both the flag and block similarity
        self.bc._checkBlockSimilarity = MagicMock(return_value=True)
        self.assertTrue(self.bc._performAverageByComponent())
        self.bc.averageByComponent = False
        self.assertFalse(self.bc._performAverageByComponent())

    def test_checkBlockSimilarity(self):
        """Check the block similarity test."""
        self.assertTrue(self.bc._checkBlockSimilarity())
        # a structurally different block breaks similarity
        self.bc.append(test_blocks.loadTestBlock())
        self.assertFalse(self.bc._checkBlockSimilarity())

    def test_createRepresentativeBlock(self):
        """Test creation of a representative block.

        .. test:: Create representative blocks using a volume-weighted averaging.
            :id: T_ARMI_XSGM_CREATE_REPR_BLOCKS0
            :tests: R_ARMI_XSGM_CREATE_REPR_BLOCKS
        """
        avgB = self.bc.createRepresentativeBlock()
        self.assertNotIn(avgB, self.bc)
        # (0 + 1 + 2 + 3 + 4) / 5 = 10/5 = 2.0
        # adjust for thermal expansion between input temp (600 C) and average temp (603 C)
        fuelMat = avgB.getComponent(Flags.FUEL).material
        expansion = (1.0 + fuelMat.linearExpansionPercent(Tc=603.0) / 100.0) / (
            1.0 + fuelMat.linearExpansionPercent(Tc=600.0) / 100.0
        )
        self.assertAlmostEqual(avgB.getNumberDensity("U235") / expansion**2, 2.0)
        # (0 + 1/4 + 2/4 + 3/4 + 4/4) / 5 * 100.0 = 50.0
        self.assertEqual(avgB.p.percentBu, 50.0)
        # check that a new block collection of the representative block has right temperatures
        # this is required for Doppler coefficient calculations
        newBc = AverageBlockCollection(self.blockList[0].core.r.blueprints.allNuclidesInProblem)
        newBc.append(avgB)
        newBc.calcAvgNuclideTemperatures()
        self.assertAlmostEqual(newBc.avgNucTemperatures["U235"], 603.0)
        self.assertAlmostEqual(newBc.avgNucTemperatures["FE56"], 502.0)
        self.assertAlmostEqual(newBc.avgNucTemperatures["NA23"], 402.0)

    def test_createRepresentativeBlockDissimilar(self):
        """Test creation of a representative block from a collection with dissimilar blocks."""
        uniqueBlock = test_blocks.loadTestBlock()
        uniqueBlock.p.percentBu = 50.0
        fpFactory = test_lumpedFissionProduct.getDummyLFPFile()
        uniqueBlock.setLumpedFissionProducts(fpFactory.createLFPsFromFile())
        uniqueBlock.setNumberDensity("U235", 2.0)
        uniqueBlock.p.gasReleaseFraction = 1.0
        for c in uniqueBlock:
            if c.hasFlags(Flags.FUEL):
                c.temperatureInC = 600.0
            elif c.hasFlags([Flags.CLAD, Flags.DUCT, Flags.WIRE]):
                c.temperatureInC = 500.0
            elif c.hasFlags([Flags.BOND, Flags.COOLANT, Flags.INTERCOOLANT]):
                c.temperatureInC = 400.0
        self.bc.append(uniqueBlock)
        # the dissimilar block should be detected and logged during averaging
        with mockRunLogs.BufferLog() as mock:
            avgB = self.bc.createRepresentativeBlock()
            self.assertIn("Non-matching block in AverageBlockCollection", mock.getStdout())
        self.assertNotIn(avgB, self.bc)
        # (0 + 1 + 2 + 3 + 4 + 2) / 6.0 = 12/6 = 2.0
        self.assertAlmostEqual(avgB.getNumberDensity("U235"), 2.0)
        # (0 + 25 + 50 + 75 + 100 + 50) / 6 = 50.0
        self.assertAlmostEqual(avgB.p.percentBu, 50.0)
        # U235 has different average temperature because blocks have different U235 content
        newBc = AverageBlockCollection(self.blockList[0].core.r.blueprints.allNuclidesInProblem)
        newBc.append(avgB)
        newBc.calcAvgNuclideTemperatures()
        # temps expected to be proportional to volume-fraction weighted temperature
        # this is a non-physical result, but it demonstrates a problem that exists in the code
        # when dissimilar blocks are put together in a BlockCollection
        structureVolume = sum(c.getVolume() for c in avgB.getComponents([Flags.CLAD, Flags.DUCT, Flags.WIRE]))
        fuelVolume = avgB.getComponent(Flags.FUEL).getVolume()
        coolantVolume = sum(c.getVolume() for c in avgB.getComponents([Flags.BOND, Flags.COOLANT, Flags.INTERCOOLANT]))
        expectedIronTemp = (structureVolume * 500.0 + fuelVolume * 600.0) / (structureVolume + fuelVolume)
        expectedSodiumTemp = (coolantVolume * 400.0 + fuelVolume * 600.0) / (coolantVolume + fuelVolume)
        self.assertAlmostEqual(newBc.avgNucTemperatures["U235"], 600.0)
        self.assertAlmostEqual(newBc.avgNucTemperatures["FE56"], expectedIronTemp)
        self.assertAlmostEqual(newBc.avgNucTemperatures["NA23"], expectedSodiumTemp)
class TestComponentAveraging(unittest.TestCase):
    """Tests of per-component number-density and temperature averaging."""

    @classmethod
    def setUpClass(cls):
        # Three fuel blocks with index-proportional densities and temperatures,
        # so the middle block (index 1) equals the collection average.
        fpFactory = test_lumpedFissionProduct.getDummyLFPFile()
        cls.blockList = makeBlocks(3)
        for bi, b in enumerate(cls.blockList):
            b.setType("fuel")
            b.setLumpedFissionProducts(fpFactory.createLFPsFromFile())
            # put some trace Fe-56 and Na-23 into the fuel
            # zero out all fuel nuclides except U-235 (for mass-weighting of component temperature)
            for nuc in b.getNuclides():
                b.setNumberDensity(nuc, 0.0)
            b.setNumberDensity("U235", bi)
            b.setNumberDensity("FE56", bi / 2.0)
            b.setNumberDensity("NA23", bi / 3.0)
            for c in b:
                if c.hasFlags(Flags.FUEL):
                    c.temperatureInC = 600.0 + bi
                elif c.hasFlags([Flags.CLAD, Flags.DUCT, Flags.WIRE]):
                    c.temperatureInC = 500.0 + bi
                elif c.hasFlags([Flags.BOND, Flags.COOLANT, Flags.INTERCOOLANT]):
                    c.temperatureInC = 400.0 + bi

    def setUp(self):
        self.bc = AverageBlockCollection(self.blockList[0].core.r.blueprints.allNuclidesInProblem)
        # deep copies keep per-test mutations from leaking into the shared class fixture
        blockCopies = [copy.deepcopy(b) for b in self.blockList]
        self.bc.extend(blockCopies)

    def test_getAverageComponentNumberDensities(self):
        """Test component number density averaging."""
        # because of the way densities are set up, the middle block (index 1 of 0-2) component
        # densities are equivalent to the average
        b = self.bc[1]
        for compIndex, c in enumerate(b.getComponents()):
            avgDensities = self.bc._getAverageComponentNumberDensities(compIndex)
            compDensities = c.getNumberDensities()
            for nuc in c.getNuclides():
                self.assertAlmostEqual(
                    compDensities[nuc],
                    avgDensities[nuc],
                    msg=f"{nuc} density {compDensities[nuc]} not equal to {avgDensities[nuc]}!",
                )
            self.assertEqual(len(compDensities), len(avgDensities))

    def test_getAverageComponentTemperature(self):
        """Test mass-weighted component temperature averaging."""
        b = self.bc[0]
        # block masses scale with index (0, 1, 2), so the mass-weighted average
        # of the per-block temperature offsets is (1*1 + 2*2) / (1 + 2) = 5/3
        massWeightedIncrease = 5.0 / 3.0
        baseTemps = [600, 400, 500, 500, 400, 500, 400]
        expectedTemps = [t + massWeightedIncrease for t in baseTemps]
        for compIndex, c in enumerate(b.getComponents()):
            avgTemp = self.bc._getAverageComponentTemperature(compIndex)
            self.assertAlmostEqual(
                expectedTemps[compIndex],
                avgTemp,
                msg=f"{c} avg temperature {avgTemp} not equal to expected {expectedTemps[compIndex]}!",
            )

    def test_getAvgCompTempVariedWeights(self):
        """Test mass-weighted component temperature averaging with variable weights."""
        # make up a fake weighting with power param
        self.bc.weightingParam = "power"
        for i, b in enumerate(self.bc):
            b.p.power = i
        # with power weights (0, 1, 2) on top of mass weights (0, 1, 2), the
        # offset becomes (1*1*1 + 2*2*2) / (1*1 + 2*2) = 9/5 = 1.8
        weightedIncrease = 1.8
        baseTemps = [600, 400, 500, 500, 400, 500, 400]
        expectedTemps = [t + weightedIncrease for t in baseTemps]
        # NOTE: `b` here is the last block from the loop above; all blocks share
        # the same component layout, so it serves only to enumerate components
        for compIndex, c in enumerate(b.getComponents()):
            avgTemp = self.bc._getAverageComponentTemperature(compIndex)
            self.assertAlmostEqual(
                expectedTemps[compIndex],
                avgTemp,
                msg=f"{c} avg temperature {avgTemp} not equal to expected {expectedTemps[compIndex]}!",
            )

    def test_getAvgCompTempNoMass(self):
        """Test component temperature averaging when the components have no mass."""
        for b in self.bc:
            for nuc in b.getNuclides():
                b.setNumberDensity(nuc, 0.0)
        # with nothing to mass-weight by, the average falls back to the
        # unweighted mean of the offsets: (0 + 1 + 2) / 3 = 1.0
        unweightedIncrease = 1.0
        baseTemps = [600, 400, 500, 500, 400, 500, 400]
        expectedTemps = [t + unweightedIncrease for t in baseTemps]
        for compIndex, c in enumerate(b.getComponents()):
            avgTemp = self.bc._getAverageComponentTemperature(compIndex)
            self.assertAlmostEqual(
                expectedTemps[compIndex],
                avgTemp,
                msg=f"{c} avg temperature {avgTemp} not equal to expected {expectedTemps[compIndex]}!",
            )
class TestBlockCollCompAvg(unittest.TestCase):
    """Test Block collection component averages."""

    def setUp(self):
        """
        First part of setup same as test_Cartesian.

        Second part of setup builds lists/dictionaries of expected values to compare to.
        has expected values for component isotopic atom density and component area.
        """
        self.o, self.r = test_reactors.loadTestReactor(TEST_ROOT, inputFileName="zpprTest.yaml")
        # weights derived from drawer counts and fuel lengths:
        # ndrawer1 lenFuelTypeD1 ndrawer2 lenFuelTypeD2
        EuWeight = float(1 * 60 + 3 * 15)
        otherEUWeight = float(1 * 15 + 3 * 45)
        totalWeight = otherEUWeight + EuWeight
        # normalize to fractions of the total
        otherEUWeight /= totalWeight
        EuWeight /= totalWeight
        expectedRepBlanketBlock = [
            {"U238": 0.045},  # DU
            {"NA23": 0.02},  # Na
            {"U238": 0.045},  # DU
        ]
        expectedRepFuelBlock = [
            {"U238": 0.045 * EuWeight + 0.045 * otherEUWeight},  # DU
            {
                "U235": 0.025 * EuWeight + 0.0125 * otherEUWeight,
                "U238": 0.02 * EuWeight + 0.01 * otherEUWeight,
            },
            {"NA23": 0.02},  # Na}
            {
                "FE54": 0.07 * 0.05845,
                "FE56": 0.07 * 0.91754,
                "FE57": 0.07 * 0.02119,
                "FE58": 0.07 * 0.00282,
            },  # Steel
        ]
        # later sorted by density so less massive block first
        self.expectedBlockDensities = [
            expectedRepBlanketBlock,
            expectedRepFuelBlock,
            expectedRepFuelBlock,
        ]
        # expected component areas, per representative block
        self.expectedAreas = [[1, 6, 1], [1, 2, 1, 4]]

    def test_ComponentAverageRepBlock(self):
        """Tests that the XS group manager calculates the expected component atom density
        and component area correctly.

        Order of components is also checked since in 1D cases the order of the components matters.
        """
        xsgm = self.o.getInterface("xsGroups")
        for _xsID, xsOpt in self.o.cs[CONF_CROSS_SECTION].items():
            self.assertEqual(xsOpt.blockRepresentation, None)
        xsgm.interactBOL()
        # Check that the correct defaults are propagated after the interactBOL
        # from the cross section group manager is called.
        for _xsID, xsOpt in self.o.cs[CONF_CROSS_SECTION].items():
            self.assertEqual(xsOpt.blockRepresentation, self.o.cs[CONF_XS_BLOCK_REPRESENTATION])
        xsgm.createRepresentativeBlocks()
        representativeBlockList = list(xsgm.representativeBlocks.values())
        # sort by mass density so the ordering matches self.expectedBlockDensities
        representativeBlockList.sort(key=lambda repB: repB.getMass() / repB.getVolume())
        self.assertEqual(len(representativeBlockList), len(self.expectedBlockDensities))
        for b, componentDensities, areas in zip(
            representativeBlockList, self.expectedBlockDensities, self.expectedAreas
        ):
            self.assertEqual(len(b), len(componentDensities))
            self.assertEqual(len(b), len(areas))
            for c, compDensity, compArea in zip(b, componentDensities, areas):
                self.assertEqual(compArea, c.getArea())
                cNucs = c.getNuclides()
                self.assertEqual(len(cNucs), len(compDensity), (cNucs, compDensity))
                for nuc in cNucs:
                    self.assertAlmostEqual(c.getNumberDensity(nuc), compDensity[nuc])
        self.assertIn(
            "AC",
            xsgm.representativeBlocks,
            ("Assemblies not in the core should still have XS groups, see _getMissingBlueprintBlocks()"),
        )
class TestBlockCollCompAvg1DCyl(unittest.TestCase):
    """Test Block collection component averages for 1D cylinder."""

    def setUp(self):
        """First part of setup same as test_Cartesian.

        Second part of setup builds lists/dictionaries of expected values to compare to.
        has expected values for component isotopic atom density and component area.
        """
        self.o, self.r = test_reactors.loadTestReactor(TEST_ROOT)
        # reference number densities for each expected component of the
        # representative block
        sodiumDensity = {"NA23": 0.022166571826233578}
        steelDensity = {
            "C": 0.0007685664978992269,
            "V50": 6.795562118653462e-07,
            "V51": 0.0002711429285342731,
            "SI28": 0.0003789374369638149,
            "SI29": 1.924063709833714e-05,
            "SI30": 1.268328992580968e-05,
            "CR50": 0.0004532023742335746,
            "CR52": 0.008739556775111474,
            "CR53": 0.0009909955713678232,
            "CR54": 0.000246679773317009,
            "MN55": 0.0004200803669857142,
            "FE54": 0.004101496663229472,
            "FE56": 0.06438472483061823,
            "FE57": 0.0014869241111006412,
            "FE58": 0.00019788230265709334,
            "NI58": 0.0002944487657779742,
            "NI60": 0.00011342053328927859,
            "NI61": 4.930763373747379e-06,
            "NI62": 1.571788956157717e-05,
            "NI64": 4.005163933412346e-06,
            "MO92": 7.140180476114493e-05,
            "MO94": 4.4505841916481845e-05,
            "MO95": 7.659816252004227e-05,
            "MO96": 8.02548587207478e-05,
            "MO97": 4.594927462728666e-05,
            "MO98": 0.00011610009956095838,
            "MO100": 4.6334190016834624e-05,
            "W182": 3.663619370317025e-05,
            "W183": 1.9783544599711936e-05,
            "W184": 4.235973352562047e-05,
            "W186": 3.9304414603061506e-05,
        }
        # clad is the same steel composition scaled by a density adjustment
        # factor (presumably accounting for a liner — TODO confirm)
        linerAdjustment = 1.014188527784268
        cladDensity = {nuc: dens * linerAdjustment for nuc, dens in steelDensity.items()}
        fuelDensity = {
            "AM241": 2.3605999999999997e-05,
            "PU238": 3.7387e-06,
            "PU239": 0.0028603799999999996,
            "PU240": 0.000712945,
            "PU241": 9.823120000000004e-05,
            "PU242": 2.02221e-05,
            "U235": 0.00405533,
            "U238": 0.0134125,
        }
        # order matters: must match the component ordering of the 1D cylindrical
        # representative block
        self.expectedComponentDensities = [
            fuelDensity,
            sodiumDensity,
            cladDensity,
            steelDensity,
            sodiumDensity,
            steelDensity,
            sodiumDensity,
        ]
        # expected component areas, in the same order as the densities above
        self.expectedComponentAreas = [
            99.54797488948871,
            29.719913442616843,
            30.07759373476877,
            1.365897776727751,
            63.184097853691235,
            17.107013842808822,
            1.9717608091694139,
        ]

    def test_ComponentAverage1DCylinder(self):
        """Tests that the cross-section group manager calculates the expected component atom density
        and component area correctly.

        Order of components is also checked since in 1D cases the order of the components matters.

        .. test:: Create representative blocks using custom cylindrical averaging.
            :id: T_ARMI_XSGM_CREATE_REPR_BLOCKS1
            :tests: R_ARMI_XSGM_CREATE_REPR_BLOCKS
        """
        xsgm = self.o.getInterface("xsGroups")
        xsgm.interactBOL()
        # Check that the correct defaults are propagated after the interactBOL
        # from the cross section group manager is called.
        xsOpt = self.o.cs[CONF_CROSS_SECTION]["ZA"]
        self.assertEqual(xsOpt.blockRepresentation, "ComponentAverage1DCylinder")
        xsgm.createRepresentativeBlocks()
        xsgm.updateNuclideTemperatures()
        representativeBlockList = list(xsgm.representativeBlocks.values())
        representativeBlockList.sort(key=lambda repB: repB.getMass() / repB.getVolume())
        reprBlock = xsgm.representativeBlocks["ZA"]
        self.assertEqual(reprBlock.name, "1D_CYL_AVG_ZA")
        self.assertEqual(reprBlock.p.percentBu, 0.0)
        # expected average nuclide temperatures (C) by component family
        refTemps = {"fuel": 600.0, "coolant": 450.0, "structure": 462.4565}
        for c, compDensity, compArea in zip(reprBlock, self.expectedComponentDensities, self.expectedComponentAreas):
            self.assertEqual(compArea, c.getArea())
            cNucs = c.getNuclides()
            for nuc in cNucs:
                self.assertAlmostEqual(c.getNumberDensity(nuc), compDensity.get(nuc, 0.0))
                # pick the reference temperature by component type
                if "fuel" in c.getType():
                    compTemp = refTemps["fuel"]
                elif any(sodium in c.getType() for sodium in ["bond", "coolant"]):
                    compTemp = refTemps["coolant"]
                else:
                    compTemp = refTemps["structure"]
                self.assertAlmostEqual(
                    compTemp,
                    xsgm.avgNucTemperatures["ZA"][nuc],
                    2,
                    f"{nuc} temperature does not match expected value of {compTemp}",
                )

    def test_ComponentAverageDuctHet1DCylinder(self):
        """
        Tests that the cross-section group manager calculates the expected component atom density,
        component area, and average nuclide temperature correctly for a duct heterogeneous cylindrical
        block collection.
        """
        self.o.cs[CONF_CROSS_SECTION]["ZA"].ductHeterogeneous = True
        xsgm = self.o.getInterface("xsGroups")
        xsgm.interactBOL()
        # Check that the correct defaults are propagated after the interactBOL
        # from the cross section group manager is called.
        xsOpt = self.o.cs[CONF_CROSS_SECTION]["ZA"]
        self.assertEqual(xsOpt.blockRepresentation, "ComponentAverage1DCylinder")
        xsgm.createRepresentativeBlocks()
        xsgm.updateNuclideTemperatures()
        representativeBlockList = list(xsgm.representativeBlocks.values())
        representativeBlockList.sort(key=lambda repB: repB.getMass() / repB.getVolume())
        reprBlock = xsgm.representativeBlocks["ZA"]
        # duct-heterogeneous treatment produces a differently named block
        self.assertEqual(reprBlock.name, "1D_CYL_DUCT_HET_AVG_ZA")
        self.assertEqual(reprBlock.p.percentBu, 0.0)
        refTemps = {"fuel": 600.0, "coolant": 450.0, "structure": 462.4565}
        for c, compDensity, compArea in zip(reprBlock, self.expectedComponentDensities, self.expectedComponentAreas):
            self.assertEqual(compArea, c.getArea())
            cNucs = c.getNuclides()
            for nuc in cNucs:
                self.assertAlmostEqual(c.getNumberDensity(nuc), compDensity.get(nuc, 0.0))
                if "fuel" in c.getType():
                    compTemp = refTemps["fuel"]
                elif any(sodium in c.getType() for sodium in ["bond", "coolant"]):
                    compTemp = refTemps["coolant"]
                else:
                    compTemp = refTemps["structure"]
                if any(comp in c.getType() for comp in ["fuel", "bond", "coolant"]):
                    # only 1 fuel component, and bond and coolant are both at same temperature
                    # the component temp should match the avg nuc temp
                    self.assertAlmostEqual(
                        compTemp,
                        xsgm.avgNucTemperatures["ZA"][nuc],
                        2,
                        f"{nuc} temperature does not match expected value of {compTemp} for component {c}",
                    )
                else:
                    # steel components are at different temperatures
                    # the temperatures should be different
                    diff = abs(compTemp - xsgm.avgNucTemperatures["ZA"][nuc])
                    self.assertGreater(
                        diff,
                        1.0,
                        f"{nuc} temperature should be different from {compTemp} for component {c}",
                    )

    def test_checkComponentConsistency(self):
        """Exercise _checkComponentConsistency with matching, reordered, and mismatched blocks."""
        xsgm = self.o.getInterface("xsGroups")
        xsgm.interactBOL()
        blockCollectionsByXsGroup = xsgm.makeCrossSectionGroups()
        blockCollection = blockCollectionsByXsGroup["ZA"]
        # use number densities from a real control block's components
        baseComponents = self.r.core.getFirstBlock(Flags.CONTROL).getComponents()
        densities = {
            "control": baseComponents[0].getNumberDensities(),
            "clad": baseComponents[2].getNumberDensities(),
            "coolant": baseComponents[4].getNumberDensities(),
        }
        controlComponent, cladComponent, coolantComponent = self._makeComponents(7, densities)
        # reference block
        refBlock = HexBlock("refBlock")
        refBlock.add(controlComponent)
        refBlock.add(cladComponent)
        refBlock.add(coolantComponent)
        # matching block
        matchingBlock = HexBlock("matchBlock")
        matchingBlock.add(controlComponent)
        matchingBlock.add(cladComponent)
        matchingBlock.add(coolantComponent)
        # unsorted block
        unsortedBlock = HexBlock("unsortedBlock")
        unsortedBlock.add(cladComponent)
        unsortedBlock.add(coolantComponent)
        unsortedBlock.add(controlComponent)
        # non-matching block length
        nonMatchingLengthBlock = HexBlock("blockLengthDiff")
        nonMatchingLengthBlock.add(controlComponent)
        nonMatchingLengthBlock.add(coolantComponent)
        # non-matching component multiplicity
        nonMatchingMultBlock = HexBlock("blockComponentDiff")
        control, clad, coolant = self._makeComponents(19, densities)
        nonMatchingMultBlock.add(control)
        nonMatchingMultBlock.add(clad)
        nonMatchingMultBlock.add(coolant)
        # different nuclides
        nucDiffBlock = HexBlock("blockNucDiff")
        mixedDensities = {
            "clad": baseComponents[0].getNumberDensities(),
            "coolant": baseComponents[2].getNumberDensities(),
            "control": baseComponents[4].getNumberDensities(),
        }
        control, clad, coolant = self._makeComponents(7, mixedDensities)
        nucDiffBlock.add(control)
        nucDiffBlock.add(clad)
        nucDiffBlock.add(coolant)
        # additional non-important nuclides
        negligibleNucDiffBlock = HexBlock("blockNegligibleNucDiff")
        negligibleNuc = {"N14": 1.0e-5}
        modControl = baseComponents[0].getNumberDensities()
        modClad = baseComponents[2].getNumberDensities()
        modCoolant = baseComponents[4].getNumberDensities()
        modControl.update(negligibleNuc)
        modClad.update(negligibleNuc)
        modCoolant.update(negligibleNuc)
        mixedDensities = {
            "control": modControl,
            "clad": modClad,
            "coolant": modCoolant,
        }
        control, clad, coolant = self._makeComponents(7, mixedDensities)
        negligibleNucDiffBlock.add(control)
        negligibleNucDiffBlock.add(clad)
        negligibleNucDiffBlock.add(coolant)
        # nuclides at zero number density should be okay
        zeroNucBlock = HexBlock("blockNucZero")
        mixedDensities = {
            "control": baseComponents[0].getNumberDensities(),
            "clad": baseComponents[2].getNumberDensities(),
            "coolant": baseComponents[4].getNumberDensities(),
        }
        control, clad, coolant = self._makeComponents(7, mixedDensities)
        # set some nuclide number densities to zero
        control.setNumberDensity("U235", 0.0)
        control.setNumberDensity("O16", 0.0)
        clad.setNumberDensity("FE56", 0.0)
        coolant.setNumberDensity("NA23", 0.0)
        coolant.setNumberDensity("PU239", 0.0)
        zeroNucBlock.add(control)
        zeroNucBlock.add(clad)
        zeroNucBlock.add(coolant)
        # these four variants should all pass the consistency check
        blockCollection._checkComponentConsistency(refBlock, matchingBlock)
        blockCollection._checkComponentConsistency(refBlock, unsortedBlock)
        blockCollection._checkComponentConsistency(refBlock, negligibleNucDiffBlock)
        blockCollection._checkComponentConsistency(refBlock, zeroNucBlock)
        # these three should be rejected
        for b in (nonMatchingMultBlock, nonMatchingLengthBlock, nucDiffBlock):
            with self.assertRaises(ValueError):
                blockCollection._checkComponentConsistency(refBlock, b)

    def _makeComponents(self, multiplicity, densities):
        """Build control/clad/coolant Circle components with the given multiplicity and densities."""
        from armi.reactor import components

        baseComponents = self.r.core.getFirstBlock(Flags.CONTROL).getComponents()
        controlComponent = components.Circle(
            "control",
            baseComponents[0].material,
            100.0,
            100.0,
            id=0.0,
            od=0.6,
            mult=multiplicity,
        )
        cladComponent = components.Circle(
            "clad",
            baseComponents[2].material,
            100.0,
            100.0,
            id=0.6,
            od=0.7,
            mult=multiplicity,
        )
        coolantComponent = components.Circle(
            "coolant",
            baseComponents[4].material,
            100.0,
            100.0,
            id=0.7,
            od=0.8,
            mult=multiplicity,
        )
        controlComponent.setNumberDensities(densities["control"])
        cladComponent.setNumberDensities(densities["clad"])
        coolantComponent.setNumberDensities(densities["coolant"])
        return controlComponent, cladComponent, coolantComponent
class TestBlockCollFluxWeightAvg(unittest.TestCase):
    """Tests for FluxWeightedAverageBlockCollection."""

    @classmethod
    def setUpClass(cls):
        fpFactory = test_lumpedFissionProduct.getDummyLFPFile()
        cls.blockList = makeBlocks(5)
        for index, block in enumerate(cls.blockList):
            block.setType("fuel")
            block.p.percentBu = index / 4.0 * 100
            block.setLumpedFissionProducts(fpFactory.createLFPsFromFile())
            block.setNumberDensity("U235", index)
            block.p.gasReleaseFraction = index * 2 / 8.0
            block.p.flux = index + 1

    def setUp(self):
        nucNames = self.blockList[0].core.r.blueprints.allNuclidesInProblem
        self.bc = FluxWeightedAverageBlockCollection(nucNames)
        self.bc.extend(self.blockList)

    def test_createRepresentativeBlock(self):
        """A dominating flux weight makes the representative block match that block."""
        self.bc[1].p.flux = 1e99  # only the 2nd block values should show up
        repBlock = self.bc.createRepresentativeBlock()
        self.assertNotIn(repBlock, self.bc)
        self.assertAlmostEqual(repBlock.getNumberDensity("U235"), 1.0)
        self.assertEqual(repBlock.p.percentBu, 25.0)

    def test_invalidWeights(self):
        """A zero flux weight is rejected with a ValueError."""
        self.bc[0].p.flux = 0.0
        with self.assertRaises(ValueError):
            self.bc.createRepresentativeBlock()
class TestXSGM(unittest.TestCase):
    def setUp(self):
        """Build a 20-block list with evenly spaced burnups and a BOL'd group manager."""
        cs = settings.Settings()
        self.blockList = makeBlocks(20)
        self.csm = CrossSectionGroupManager(self.blockList[0].core.r, cs)
        # spread burnups linearly from 0 to 100 percent
        for bi, b in enumerate(self.blockList):
            b.p.percentBu = bi / 19.0 * 100
        # burnup group boundaries, in percent burnup
        self.csm._setBuGroupBounds([3, 10, 30, 100])
        self.csm.interactBOL()
def test_enableEnvGroupUpdates(self):
self.csm._envGroupUpdatesEnabled = False
self.csm.enableEnvGroupUpdates()
self.assertTrue(self.csm._envGroupUpdatesEnabled)
# test flipping again keeps true
self.csm.enableEnvGroupUpdates()
self.assertTrue(self.csm._envGroupUpdatesEnabled)
def test_disableEnvGroupUpdates(self):
self.csm._envGroupUpdatesEnabled = True
wasEnabled = self.csm.disableEnvGroupUpdates()
self.assertTrue(wasEnabled)
self.assertFalse(self.csm._envGroupUpdatesEnabled)
wasEnabled = self.csm.disableEnvGroupUpdates()
self.assertFalse(wasEnabled)
self.assertFalse(self.csm._envGroupUpdatesEnabled)
def test_updateBurnupGroups(self):
self.blockList[1].p.percentBu = 3.1
self.blockList[2].p.percentBu = 10.0
self.csm._updateEnvironmentGroups(self.blockList)
self.assertEqual(self.blockList[0].p.envGroup, "A")
self.assertEqual(self.blockList[1].p.envGroup, "B")
self.assertEqual(self.blockList[2].p.envGroup, "B")
self.assertEqual(self.blockList[-1].p.envGroup, "D")
def test_setBuGroupBounds(self):
self.assertAlmostEqual(self.csm._buGroupBounds[2], 30.0)
with self.assertRaises(ValueError):
self.csm._setBuGroupBounds([3, 10, 300])
with self.assertRaises(ValueError):
self.csm._setBuGroupBounds([-5, 3, 10, 30.0])
with self.assertRaises(ValueError):
self.csm._setBuGroupBounds([1, 5, 3])
def test_setTempGroupBounds(self):
# negative temps in C are allowed
self.csm._setTempGroupBounds([-5, 3, 10, 300])
self.assertAlmostEqual(self.csm._tempGroupBounds[2], 10.0)
with self.assertRaises(ValueError):
self.csm._setTempGroupBounds([1, 5, 3])
def test_addXsGroupsFromBlocks(self):
blockCollectionsByXsGroup = {}
blockCollectionsByXsGroup = self.csm._addXsGroupsFromBlocks(blockCollectionsByXsGroup, self.blockList)
self.assertEqual(len(blockCollectionsByXsGroup), 4)
self.assertIn("AB", blockCollectionsByXsGroup)
def test_getMissingBlueprintBlocks(self):
"""Test the function to get missing blueprints blocks."""
self.csm._setTempGroupBounds([0, 100, 200])
blockCollectionsByXsGroup = {}
blockCollectionsByXsGroup = self.csm._addXsGroupsFromBlocks(blockCollectionsByXsGroup, self.blockList)
missingBlueprintBlocks = self.csm._getMissingBlueprintBlocks(blockCollectionsByXsGroup)
envGroups = set(b.p.envGroup for b in missingBlueprintBlocks)
self.assertGreater(len(envGroups), 1, "Blueprint block environment groups were not updated!")
    def test_calcWeightedBurnup(self):
        """Verify weighted burnup of a collection and equal block weights within a group."""
        self.blockList[1].p.percentBu = 3.1
        self.blockList[2].p.percentBu = 10.0
        self.blockList[3].p.percentBu = 1.5
        # everything past index 3 carries zero burnup
        for b in self.blockList[4:]:
            b.p.percentBu = 0.0
        self.csm._updateEnvironmentGroups(self.blockList)
        blockCollectionsByXsGroup = {}
        blockCollectionsByXsGroup = self.csm._addXsGroupsFromBlocks(blockCollectionsByXsGroup, self.blockList)
        ABcollection = blockCollectionsByXsGroup["AB"]
        # low-burnup group AA comes out to a weighted burnup of 1/12
        self.assertEqual(blockCollectionsByXsGroup["AA"]._calcWeightedBurnup(), 1 / 12.0)
        self.assertEqual(
            ABcollection.getWeight(self.blockList[1]),
            ABcollection.getWeight(self.blockList[2]),
            "The two blocks in AB do not have the same weighting!",
        )
        # equal weights on the 3.1% and 10.0% blocks average to 6.55
        self.assertEqual(ABcollection._calcWeightedBurnup(), 6.55)
def test_getNextAvailableXsType(self):
blockCollectionsByXsGroup = {}
blockCollectionsByXsGroup = self.csm._addXsGroupsFromBlocks(blockCollectionsByXsGroup, self.blockList)
xsType1, xsType2, xsType3 = self.csm.getNextAvailableXsTypes(3)
self.assertEqual("B", xsType1)
self.assertEqual("C", xsType2)
self.assertEqual("D", xsType3)
# verify that we can get lowercase letters
xsTypes = self.csm.getNextAvailableXsTypes(26)
self.assertEqual("Y", xsTypes[-4])
self.assertEqual("a", xsTypes[-3])
self.assertEqual("b", xsTypes[-2])
self.assertEqual("c", xsTypes[-1])
# verify that we can get lowercase letters
if sys.platform.startswith("win"):
with mockRunLogs.BufferLog() as mock:
xsTypes = self.csm.getNextAvailableXsTypes(27)
self.assertIn("Mixing upper and lower-case XS", mock.getStdout())
    def test_getRepresentativeBlocks(self):
        """Test that we can create the representative blocks for a reactor.

        .. test:: Build representative blocks for a reactor.
            :id: T_ARMI_XSGM_CREATE_XS_GROUPS0
            :tests: R_ARMI_XSGM_CREATE_XS_GROUPS
        """
        _o, r = test_reactors.loadTestReactor(TEST_ROOT)
        self.csm.r = r
        # Assumption: All sodium in fuel blocks for this test is 450 C and this is the expected
        # sodium temperature. These lines of code take the first sodium block and decrease the
        # temperature of the block, but change the atom density to approximately zero. Checking
        # later on the nuclide temperature of sodium is asserted to be still 450. This perturbation
        # proves that altering the temperature of an component with near zero atom density does not
        # affect the average temperature of the block collection. This demonstrates that the
        # temperatures of a block collection are atom weighted rather than just the average
        # temperature.
        regularFuel = r.core.getFirstBlock(Flags.FUEL, exact=True)
        intercoolant = regularFuel.getComponent(Flags.INTERCOOLANT)
        intercoolant.setTemperature(100)  # just above melting
        intercoolant.setNumberDensity("NA23", units.TRACE_NUMBER_DENSITY)
        self.csm.createRepresentativeBlocks()
        blocks = list(self.csm.representativeBlocks.values())
        self.assertGreater(len(blocks), 0)
        # Test ability to get average nuclide temperature in block.
        u235 = self.csm.getNucTemperature("AA", "U235")
        fe = self.csm.getNucTemperature("AA", "FE56")
        na = self.csm.getNucTemperature("AA", "NA23")
        self.assertAlmostEqual(na, 450.0, msg="Na temp was {}, not 450".format(na))
        # expected temperature ordering: fuel > structure > coolant
        self.assertGreater(u235, fe)
        self.assertGreater(fe, na)
        # NOTE(review): redundant with the two asserts above; kept as a sanity check
        self.assertTrue(0.0 < na < fe)
        # trace nuclides should also be at fuel temp.
        self.assertAlmostEqual(self.csm.getNucTemperature("AA", "LFP35"), u235)
        # Test that retrieving temperatures fails if a representative block for a given XS ID does not exist
        self.assertEqual(self.csm.getNucTemperature("Z", "U235"), None)
        # Test dimensions
        self.assertEqual(blocks[0].getHeight(), 25.0)
        self.assertEqual(blocks[1].getHeight(), 25.0)
        self.assertAlmostEqual(blocks[0].getVolume(), 6074.356308731789)
        self.assertAlmostEqual(blocks[1].getVolume(), 6074.356308731789)
        # Number densities haven't been calculated yet
        self.assertIsNone(blocks[0].p.detailedNDens)
        self.assertIsNone(blocks[1].p.detailedNDens)
    def test_checkForUnrepresentedXSIDs(self):
        """Unrepresented XS IDs are detected and then remapped to a represented type."""
        blockCollectionsByXsGroup = self.csm.makeCrossSectionGroups()
        self.csm.createRepresentativeBlocks()
        # set valid flags to something the fuel block would not have to trigger unrepresented block
        fuelXStype = "AD"
        blocksWithType = [b for b in self.csm.r.core.iterBlocks(Flags.FUEL) if b.getMicroSuffix() == fuelXStype]
        fuelCollection = blockCollectionsByXsGroup[fuelXStype]
        fuelCollection._validRepresentativeBlockTypes = Flags.CLAD
        # check for unrepresented XS ID, assert that it is found
        self.csm._checkForUnrepresentedXSIDs(blockCollectionsByXsGroup)
        self.assertListEqual(self.csm._unrepresentedXSIDs, [fuelXStype])
        # modify unrepresented XS ID, assert that first character is the same
        self.csm._modifyUnrepresentedXSIDs(blockCollectionsByXsGroup)
        for b in blocksWithType:
            modifiedType = b.getMicroSuffix()
            # XS type letter (first char) is preserved; env group (second char) changes
            self.assertEqual(modifiedType[0], fuelXStype[0])
            self.assertNotEqual(modifiedType[1], fuelXStype[1])
    def _createRepresentativeBlocksUsingExistingBlocks(self, validBlockTypes):
        """Reusable code used in multiple unit tests.

        Creates a new "BA" representative block from an existing "AA" representative
        block and verifies the XS ID, compositions, and copied settings.
        """
        o, r = test_reactors.loadTestReactor(TEST_ROOT, inputFileName="smallestTestReactor/armiRunSmallest.yaml")
        # set a few random non-default settings on AA to be copied to the new BA group
        o.cs[CONF_CROSS_SECTION].update(
            {
                "AA": XSModelingOptions(
                    "AA",
                    geometry="0D",
                    averageByComponent=True,
                    xsMaxAtomNumber=60,
                    criticalBuckling=False,
                    xsPriority=2,
                )
            }
        )
        o.cs[CONF_CROSS_SECTION].setDefaults(crossSectionGroupManager.AVERAGE_BLOCK_COLLECTION, validBlockTypes)
        aaSettings = o.cs[CONF_CROSS_SECTION]["AA"]
        self.csm.cs = copy.deepcopy(o.cs)
        self.csm.createRepresentativeBlocks()
        unperturbedReprBlocks = copy.deepcopy(self.csm.representativeBlocks)
        self.assertNotIn("BA", unperturbedReprBlocks)
        block = r.core.getFirstBlock()
        blockXSID = block.getMicroSuffix()
        blockList = [block]
        (
            _bCollect,
            newRepresentativeBlocks,
            origXSIDsFromNew,
        ) = self.csm.createRepresentativeBlocksUsingExistingBlocks(blockList, unperturbedReprBlocks)
        # a new BA group should exist, derived from the block's original XS ID
        self.assertIn("BA", newRepresentativeBlocks)
        oldReprBlock = unperturbedReprBlocks[blockXSID]
        newReprBlock = newRepresentativeBlocks["BA"]
        self.assertEqual(newReprBlock.getMicroSuffix(), "BA")
        self.assertEqual(newReprBlock.getNumberDensities(), oldReprBlock.getNumberDensities())
        self.assertEqual(origXSIDsFromNew["BA"], "AA")
        # check that settings were copied correctly
        baSettings = self.csm.cs[CONF_CROSS_SECTION]["BA"]
        self.assertEqual(baSettings.xsID, "BA")
        for setting, baSettingValue in baSettings.__dict__.items():
            if setting == "xsID":
                continue
            self.assertEqual(baSettingValue, aaSettings.__dict__[setting])
    def test_createRepBlocksUsingExistingBlocks(self):
        """
        Demonstrates that a new representative block can be generated from an existing representative block.

        Notes
        -----
        This tests that the XS ID of the new representative block is correct and that the
        compositions are identical between the original and the new representative blocks.
        """
        # restrict valid representative block types to fuel
        self._createRepresentativeBlocksUsingExistingBlocks(["fuel"])
def test_createRepBlocksDisableValidBlockTypes(self):
    """Demonstrate deriving a representative block with block-type exclusion disabled.

    Notes
    -----
    This tests that the XS ID of the new representative block is correct and that the
    compositions are identical between the original and the new representative blocks.
    """
    disableExclusion = True
    self._createRepresentativeBlocksUsingExistingBlocks(disableExclusion)
def test_interactBOL(self):
    """Test `BOL` lattice physics update frequency.

    .. test:: The cross-section group manager frequency depends on the LPI frequency at BOL.
        :id: T_ARMI_XSGM_FREQ0
        :tests: R_ARMI_XSGM_FREQ
    """
    csm = self.csm
    # no representative blocks exist before the BOL interaction runs
    self.assertFalse(csm.representativeBlocks)
    self.blockList[0].core.r.p.timeNode = 0
    csm.cs[CONF_LATTICE_PHYSICS_FREQUENCY] = "BOL"
    csm.interactBOL()
    # with "BOL" frequency, the BOL hook builds the representative blocks
    self.assertTrue(csm.representativeBlocks)
def test_interactBOC(self):
    """Test `BOC` lattice physics update frequency.

    .. test:: The cross-section group manager frequency depends on the LPI frequency at BOC.
        :id: T_ARMI_XSGM_FREQ1
        :tests: R_ARMI_XSGM_FREQ
    """
    csm = self.csm
    # nothing has been generated before the interactions run
    self.assertFalse(csm.representativeBlocks)
    self.blockList[0].core.r.p.timeNode = 0
    csm.cs[CONF_LATTICE_PHYSICS_FREQUENCY] = "BOC"
    csm.interactBOL()
    csm.interactBOC()
    # with "BOC" frequency, blocks exist after the BOC interaction
    self.assertTrue(csm.representativeBlocks)
def test_interactEveryNode(self):
    """Test `everyNode` lattice physics update frequency.

    .. test:: The cross-section group manager frequency depends on the LPI frequency at every
        time node.
        :id: T_ARMI_XSGM_FREQ2
        :tests: R_ARMI_XSGM_FREQ
    """
    csm = self.csm
    # with the coarser "BOC" frequency, the every-node hook is a no-op
    csm.cs[CONF_LATTICE_PHYSICS_FREQUENCY] = "BOC"
    csm.interactBOL()
    csm.interactEveryNode()
    self.assertFalse(csm.representativeBlocks)
    # with "everyNode", the same hook builds the representative blocks
    csm.cs[CONF_LATTICE_PHYSICS_FREQUENCY] = "everyNode"
    csm.interactBOL()
    csm.interactEveryNode()
    self.assertTrue(csm.representativeBlocks)
def test_interactFirstCoupledIteration(self):
    """Test `firstCoupledIteration` lattice physics update frequency.

    .. test:: The cross-section group manager frequency depends on the LPI frequency during
        first coupled iteration.
        :id: T_ARMI_XSGM_FREQ3
        :tests: R_ARMI_XSGM_FREQ
    """
    csm = self.csm
    # with the coarser "everyNode" frequency, the coupled hook is a no-op
    csm.cs[CONF_LATTICE_PHYSICS_FREQUENCY] = "everyNode"
    csm.interactBOL()
    csm.interactCoupled(iteration=0)
    self.assertFalse(csm.representativeBlocks)
    # with "firstCoupledIteration", iteration 0 builds representative blocks
    csm.cs[CONF_LATTICE_PHYSICS_FREQUENCY] = "firstCoupledIteration"
    csm.interactBOL()
    csm.interactCoupled(iteration=0)
    self.assertTrue(csm.representativeBlocks)
def test_interactAllCoupled(self):
    """Test `all` lattice physics update frequency.

    .. test:: The cross-section group manager frequency depends on the LPI frequency during coupling.
        :id: T_ARMI_XSGM_FREQ4
        :tests: R_ARMI_XSGM_FREQ
    """
    csm = self.csm
    # "firstCoupledIteration" skips any iteration past the first
    csm.cs[CONF_LATTICE_PHYSICS_FREQUENCY] = "firstCoupledIteration"
    csm.interactBOL()
    csm.interactCoupled(iteration=1)
    self.assertFalse(csm.representativeBlocks)
    # "all" rebuilds on every coupled iteration
    csm.cs[CONF_LATTICE_PHYSICS_FREQUENCY] = "all"
    csm.interactBOL()
    csm.interactCoupled(iteration=1)
    self.assertTrue(csm.representativeBlocks)
def test_xsgmIsRunBeforeXS(self):
    """Test that the XSGM is run before the cross sections are calculated.

    .. test:: Test that the cross-section group manager is run before the cross sections are calculated.
        :id: T_ARMI_XSGM_FREQ5
        :tests: R_ARMI_XSGM_FREQ
    """
    from armi.interfaces import STACK_ORDER

    # the group manager's interface ORDER must sort ahead of the XS stack slot
    xsgmOrder = crossSectionGroupManager.ORDER
    self.assertLess(xsgmOrder, STACK_ORDER.CROSS_SECTIONS)
def test_copyPregeneratedFiles(self):
    """Test copying pre-generated cross section and flux files using a reactor
    built from a case settings file.
    """
    o, r = test_reactors.loadTestReactor(TEST_ROOT, inputFileName="smallestTestReactor/armiRunSmallest.yaml")
    # the settings file uses relative paths; replace them with absolute ones
    xsOptions = o.cs[CONF_CROSS_SECTION]
    xsOptions["XA"].xsFileLocation = [os.path.join(THIS_DIR, "ISOXA")]
    xsOptions["YA"].fluxFileLocation = os.path.join(THIS_DIR, "rzmflxYA")
    csm = CrossSectionGroupManager(r, o.cs)
    with TemporaryDirectoryChanger(root=THIS_DIR):
        csm._copyPregeneratedXSFile("XA")
        csm._copyPregeneratedFluxSolutionFile("YA")
        # both files should have been copied into the temporary working directory
        for copiedFile in ("ISOXA", "rzmflxYA"):
            self.assertTrue(os.path.exists(copiedFile))
class TestXSGMWithTempGrouping(unittest.TestCase):
    """Cross-section group manager tests with temperature-based grouping enabled."""

    def setUp(self):
        cs = settings.Settings()
        # three temperature boundaries -> four temperature bins
        cs["tempGroups"] = [300, 400, 500]
        self.blockList = makeBlocks(11)
        # (burnup, fuel temperature) pairs applied to the blocks below.
        # NOTE(review): 10 pairs vs 11 blocks -- zip() silently drops the last
        # block; presumably intentional, but confirm.
        buAndTemps = (
            (1, 340),
            (2, 150),
            (6, 410),
            (10.5, 290),
            (2.5, 360),
            (4, 460),
            (15, 370),
            (16, 340),
            (15, 700),
            (14, 720),
        )
        for b, env in zip(self.blockList, buAndTemps):
            bu, temp = env
            # each test block must carry exactly one fuel component to perturb
            comps = b.getComponents(Flags.FUEL)
            self.assertEqual(len(comps), 1)
            c = next(iter(comps))
            c.setTemperature(temp)
            b.p.percentBu = bu
        core = self.blockList[0].core

        def getBlocks(includeAll=True):
            return self.blockList

        # this sets XSGM to only analyze the blocks in the block list.
        core.getBlocks = getBlocks
        self.csm = CrossSectionGroupManager(self.blockList[0].core.r, cs)
        self.csm._setBuGroupBounds([3, 10, 30, 100])
        self.csm.interactBOL()

    def test_updateEnvironmentGroups(self):
        """Test creation of a cross section groups with temperature grouping.

        .. test:: Create representative blocks using temperature groups.
            :id: T_ARMI_XSGM_CREATE_XS_GROUPS1
            :tests: R_ARMI_XSGM_CREATE_XS_GROUPS, R_ARMI_XSGM_CREATE_REPR_BLOCKS
        """
        self.csm.createRepresentativeBlocks()
        BL = self.blockList
        # blocks whose (burnup, temperature) combination is unique get their own group
        loners = [BL[1], BL[3]]
        self.assertNotEqual(loners[0].getMicroSuffix(), loners[1].getMicroSuffix())
        sameGroups = [(BL[0], BL[4]), (BL[2], BL[5]), (BL[6], BL[7]), (BL[8], BL[9])]
        # check that likes have like and different are different
        for group in sameGroups:
            b1, b2 = group
            xsSuffix = b1.getMicroSuffix()
            self.assertEqual(xsSuffix, b2.getMicroSuffix())
        # NOTE(review): ``b1``/``xsSuffix`` below deliberately carry over from the
        # final iteration of the loop above (the last pair), so every other group
        # is checked for a suffix different from that last group's suffix.
        for group in sameGroups:
            newb1, newb2 = group
            if b1 is newb1:
                continue
            self.assertNotEqual(xsSuffix, newb1.getMicroSuffix())
            self.assertNotEqual(xsSuffix, newb2.getMicroSuffix())
        for lone in loners:
            self.assertNotEqual(xsSuffix, lone.getMicroSuffix())
        self.assertNotEqual(loners[0].getMicroSuffix(), loners[1].getMicroSuffix())
        # calculated based on the average of buAndTemps
        expectedIDs = ["AF", "AA", "AL", "AC", "AH", "AR"]
        expectedTemps = [
            (340 + 360) / 2,
            150,
            (410 + 460) / 2,
            290,
            (370 + 340) / 2,
            (700 + 720) / 2,
        ]
        expectedBurnups = (1.75, 2, 5, 10.5, 15.5, 14.5)
        for xsID, expectedTemp, expectedBurnup in zip(expectedIDs, expectedTemps, expectedBurnups):
            b = self.csm.representativeBlocks[xsID]
            # U238 average temperature tracks the group-mean fuel temperature
            thisTemp = self.csm.avgNucTemperatures[xsID]["U238"]
            self.assertAlmostEqual(thisTemp, expectedTemp)
            self.assertAlmostEqual(b.p.percentBu, expectedBurnup)
class TestXSNumberConverters(unittest.TestCase):
    """Round-trip conversions between XS type labels and their numeric encodings."""

    def test_conversion(self):
        """Single-character labels map to/from their ASCII code (65 == 'A')."""
        self.assertEqual(crossSectionGroupManager.getXSTypeLabelFromNumber(65), "A")
        self.assertEqual(crossSectionGroupManager.getXSTypeNumberFromLabel("A"), 65)

    def test_conversion_2digit(self):
        """Two-character labels concatenate the two characters' ASCII codes."""
        self.assertEqual(crossSectionGroupManager.getXSTypeLabelFromNumber(6570), "AF")
        self.assertEqual(crossSectionGroupManager.getXSTypeNumberFromLabel("ZZ"), 9090)
def makeBlocks(howMany=20):
    """Return ``howMany`` fuel blocks from the standard test reactor.

    The first three fuel blocks are skipped to avoid the 1/3-volume central
    assemblies.
    """
    _o, r = test_reactors.loadTestReactor(TEST_ROOT)
    fuelBlocks = r.core.getBlocks(Flags.FUEL)
    # shift by 3 to skip the central assemblies' 1/3-volume blocks
    return fuelBlocks[3 : 3 + howMany]
================================================
FILE: armi/physics/neutronics/tests/test_crossSectionSettings.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""XS Settings tests."""
import io
import unittest
import voluptuous as vol
from ruamel.yaml import YAML
from armi import settings
from armi.physics.neutronics.const import CONF_CROSS_SECTION
from armi.physics.neutronics.crossSectionSettings import (
CONF_BLOCK_REPRESENTATION,
CONF_GEOM,
XSModelingOptions,
XSSettingDef,
XSSettings,
xsSettingsValidator,
)
from armi.physics.neutronics.settings import (
CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION,
CONF_XS_BLOCK_REPRESENTATION,
)
from armi.physics.neutronics.tests.test_neutronicsPlugin import XS_EXAMPLE
from armi.settings import caseSettings
class TestCrossSectionSettings(unittest.TestCase):
    """Tests for ``XSModelingOptions``/``XSSettings`` construction, defaulting, and validation."""

    def test_crossSections(self):
        """Constructor arguments land on the expected attributes; pregenerated flags default off."""
        xsModel = XSModelingOptions(
            xsID="AA",
            geometry="0D",
            criticalBuckling=True,
            blockRepresentation="Median",
            requiredRAM=20.0,
        )
        self.assertEqual("AA", xsModel.xsID)
        self.assertEqual("0D", xsModel.geometry)
        self.assertEqual("Median", xsModel.blockRepresentation)
        self.assertFalse(xsModel.fluxIsPregenerated)
        self.assertFalse(xsModel.xsIsPregenerated)
        self.assertTrue(xsModel.criticalBuckling)
        self.assertEqual(20.0, xsModel.requiredRAM)

    def test_pregeneratedCrossSections(self):
        """Supplying ``xsFileLocation`` flags the options as pregenerated after defaulting."""
        cs = settings.Settings()
        xs = XSSettings()
        xa = XSModelingOptions("XA", xsFileLocation=["ISOXA"])
        xs["XA"] = xa
        self.assertEqual(["ISOXA"], xa.xsFileLocation)
        # "XB" was never registered; must not exist before setDefaults
        self.assertNotIn("XB", xs)
        xs.setDefaults(
            cs[CONF_XS_BLOCK_REPRESENTATION],
            cs[CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION],
        )
        # Check that the file location of 'XB' still points to the same file location as 'XA'.
        self.assertEqual(xa, xs["XB"])
        self.assertFalse(xa.fluxIsPregenerated)
        self.assertTrue(xa.xsIsPregenerated)
        self.assertFalse(xa.criticalBuckling)

    def test_pregeneratedFluxInputs(self):
        """Supplying ``fluxFileLocation`` sets ``fluxIsPregenerated`` while keeping other options."""
        xsModel = XSModelingOptions(
            xsID="AA",
            fluxFileLocation="ISOAA",
            geometry="0D",
            criticalBuckling=True,
            blockRepresentation="Median",
        )
        self.assertEqual("AA", xsModel.xsID)
        self.assertEqual("0D", xsModel.geometry)
        self.assertEqual("ISOAA", xsModel.fluxFileLocation)
        self.assertTrue(xsModel.fluxIsPregenerated)
        self.assertTrue(xsModel.criticalBuckling)
        self.assertEqual("Median", xsModel.blockRepresentation)

    def test_prioritization(self):
        """``xsPriority``/``xsExecuteExclusive`` honor explicit values and their defaults."""
        xsModel = XSModelingOptions(
            xsID="AA",
            geometry="0D",
            criticalBuckling=True,
            xsPriority=2,
            xsExecuteExclusive=True,
        )
        self.assertEqual("AA", xsModel.xsID)
        self.assertEqual(True, xsModel.xsExecuteExclusive)
        self.assertEqual(2, xsModel.xsPriority)
        xsModel = XSModelingOptions(
            xsID="AA",
            geometry="0D",
            criticalBuckling=True,
        )
        # defaults work
        xsModel.setDefaults("Average", False)
        self.assertEqual(False, xsModel.xsExecuteExclusive)
        self.assertEqual(5, xsModel.xsPriority)

    def test_homogeneousXsDefaultSettingAssignment(self):
        """
        Make sure the object can whip up an unspecified xsID by default.

        This is used when user hasn't specified anything.
        """
        cs = settings.Settings()
        xsModel = XSSettings()
        xsModel.setDefaults(
            cs[CONF_XS_BLOCK_REPRESENTATION],
            cs[CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION],
        )
        # looking up "YA" must not add it to the container, yet it returns usable defaults
        self.assertNotIn("YA", xsModel)
        self.assertEqual(xsModel["YA"].geometry, "0D")
        self.assertEqual(xsModel["YA"].criticalBuckling, True)
        self.assertEqual(xsModel["YA"].ductHeterogeneous, False)
        self.assertEqual(xsModel["YA"].traceIsotopeThreshold, 0.0)
        self.assertEqual(xsModel["YA"].requiredRAM, 0.0)

    def test_setDefSettingsByLowestEnvGroupHomog(self):
        """Unspecified IDs inherit settings from the lowest environment group with the same letter (0D case)."""
        # Initialize some micro suffix in the cross sections
        cs = settings.Settings()
        xs = XSSettings()
        jd = XSModelingOptions("JD", geometry="0D", criticalBuckling=False)
        xs["JD"] = jd
        xs.setDefaults(
            cs[CONF_XS_BLOCK_REPRESENTATION],
            cs[CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION],
        )
        self.assertIn("JD", xs)
        # Check that new micro suffix `JF` with higher burn-up group gets assigned the same settings as `JD`
        self.assertNotIn("JF", xs)
        self.assertEqual(xs["JD"], xs["JF"])
        # the lookup above must not have inserted "JF"
        self.assertNotIn("JF", xs)
        # Check that new micro suffix `JG` with higher burn-up group gets assigned the same settings as `JD`
        self.assertNotIn("JG", xs)
        self.assertEqual(xs["JG"], xs["JD"])
        # Check that new micro suffix `JB` with lower burn-up group does NOT get assigned the same settings as `JD`
        self.assertNotIn("JB", xs)
        self.assertNotEqual(xs["JD"], xs["JB"])

    def test_setDefSettingsByLowestEnvGroup1D(self):
        """Unspecified IDs inherit settings from the lowest environment group with the same letter (1D case)."""
        # Initialize some micro suffix in the cross sections
        cs = settings.Settings()
        xsModel = XSSettings()
        rq = XSModelingOptions(
            "RQ",
            geometry="1D cylinder",
            blockRepresentation="ComponentAverage1DCylinder",
            meshSubdivisionsPerCm=1.0,
        )
        xsModel["RQ"] = rq
        xsModel.setDefaults(
            cs[CONF_XS_BLOCK_REPRESENTATION],
            cs[CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION],
        )
        # Check that new micro suffix `RY` with higher burn-up group gets assigned the same settings as `RQ`
        self.assertNotIn("RY", xsModel)
        self.assertEqual(xsModel["RY"], xsModel["RQ"])
        # Check that new micro suffix `RZ` with higher burn-up group gets assigned the same settings as `RQ`
        self.assertNotIn("RZ", xsModel)
        self.assertEqual(xsModel["RZ"], xsModel["RQ"])
        # Check that new micro suffix `RA` with lower burn-up group does NOT get assigned the same settings as `RQ`
        self.assertNotIn("RA", xsModel)
        self.assertNotEqual(xsModel["RA"], xsModel["RQ"])

    def test_optionalKey(self):
        """Test that optional key shows up with default value."""
        cs = settings.Settings()
        xsModel = XSSettings()
        da = XSModelingOptions(
            "DA",
            geometry="1D cylinder",
            meshSubdivisionsPerCm=1.0,
            ductHeterogeneous=True,
            traceIsotopeThreshold=1.0e-5,
        )
        xsModel["DA"] = da
        xsModel.setDefaults(
            cs[CONF_XS_BLOCK_REPRESENTATION],
            cs[CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION],
        )
        # explicit options persist; unspecified ones pick up defaults
        self.assertEqual(xsModel["DA"].mergeIntoClad, ["gap"])
        self.assertEqual(xsModel["DA"].meshSubdivisionsPerCm, 1.0)
        self.assertEqual(xsModel["DA"].ductHeterogeneous, True)
        self.assertEqual(xsModel["DA"].traceIsotopeThreshold, 1.0e-5)
        self.assertEqual(xsModel["DA"].mergeIntoFuel, [])

    def test_badCrossSections(self):
        """Invalid inputs to the voluptuous-based schema validator raise appropriately."""
        with self.assertRaises(TypeError):
            # This will fail because it is not the required
            # Dict[str: Dict] structure
            xsSettingsValidator({CONF_GEOM: "4D"})
        with self.assertRaises(vol.error.MultipleInvalid):
            # This will fail because it has an invalid type for ``driverID``
            xsSettingsValidator({"AA": {"driverId": 0.0}})
        with self.assertRaises(vol.error.MultipleInvalid):
            # This will fail because it has an invalid value for
            # the ``blockRepresentation``
            xsSettingsValidator({"AA": {CONF_BLOCK_REPRESENTATION: "Invalid"}})
        with self.assertRaises(vol.error.MultipleInvalid):
            # This will fail because the ``xsID`` is not one or two
            # characters
            xsSettingsValidator({"AAA": {CONF_BLOCK_REPRESENTATION: "Average"}})
class TestXSSettings(unittest.TestCase):
    """Tests for the ``crossSections`` case setting: YAML I/O, assignment semantics, and defaulting."""

    def test_yamlIO(self):
        """Ensure we can read/write this custom setting object to yaml."""
        yaml = YAML()
        inp = yaml.load(io.StringIO(XS_EXAMPLE))
        xs = XSSettingDef("TestSetting")
        xs.setValue(inp)
        self.assertEqual(xs.value["BA"].geometry, "1D slab")
        # round-trip: dump back to YAML and reparse; the key set must survive
        outBuf = io.StringIO()
        output = xs.dump()
        yaml.dump(output, outBuf)
        outBuf.seek(0)
        inp2 = yaml.load(outBuf)
        self.assertEqual(inp.keys(), inp2.keys())

    def test_caseSettings(self):
        """
        Test the setting of the cross section setting using the case settings object.

        Notes
        -----
        The purpose of this test is to ensure that the cross sections sections can
        be removed from an existing case settings object once they have been set.
        """

        def _setInitialXSSettings():
            # helper: build a settings object pre-populated with "AA" and "BA" entries
            cs = caseSettings.Settings()
            cs[CONF_CROSS_SECTION] = XSSettings()
            cs[CONF_CROSS_SECTION]["AA"] = XSModelingOptions("AA", geometry="0D")
            cs[CONF_CROSS_SECTION]["BA"] = XSModelingOptions("BA", geometry="0D")
            self.assertIn("AA", cs[CONF_CROSS_SECTION])
            self.assertIn("BA", cs[CONF_CROSS_SECTION])
            self.assertNotIn("CA", cs[CONF_CROSS_SECTION])
            self.assertNotIn("DA", cs[CONF_CROSS_SECTION])
            return cs

        # assigning empty per-ID dicts clears the entries but keeps the XSSettings type
        cs = _setInitialXSSettings()
        cs[CONF_CROSS_SECTION] = {"AA": {}, "BA": {}}
        self.assertDictEqual(cs[CONF_CROSS_SECTION], {})
        self.assertTrue(isinstance(cs[CONF_CROSS_SECTION], XSSettings))
        # Produce an error if the setting is set to
        # a None value
        cs = _setInitialXSSettings()
        with self.assertRaises(TypeError):
            cs[CONF_CROSS_SECTION] = None
        # a None value for an individual ID is tolerated and treated as empty
        cs = _setInitialXSSettings()
        cs[CONF_CROSS_SECTION] = {"AA": None, "BA": {}}
        self.assertDictEqual(cs[CONF_CROSS_SECTION], {})
        # Test that a new XS setting can be added to an existing
        # caseSetting using the ``XSModelingOptions`` or using
        # a dictionary.
        cs = _setInitialXSSettings()
        cs[CONF_CROSS_SECTION].update({"CA": XSModelingOptions("CA", geometry="0D"), "DA": {CONF_GEOM: "0D"}})
        self.assertIn("AA", cs[CONF_CROSS_SECTION])
        self.assertIn("BA", cs[CONF_CROSS_SECTION])
        self.assertIn("CA", cs[CONF_CROSS_SECTION])
        self.assertIn("DA", cs[CONF_CROSS_SECTION])
        # Clear out the settings by setting the value to a None.
        # This will be interpreted as a empty dictionary.
        cs[CONF_CROSS_SECTION] = {}
        self.assertDictEqual(cs[CONF_CROSS_SECTION], {})
        self.assertTrue(isinstance(cs[CONF_CROSS_SECTION], XSSettings))
        # This will fail because the ``setDefaults`` method on the
        # ``XSSettings`` has not yet been called.
        with self.assertRaises(ValueError):
            cs[CONF_CROSS_SECTION]["AA"]
        cs[CONF_CROSS_SECTION].setDefaults(
            blockRepresentation=cs[CONF_XS_BLOCK_REPRESENTATION],
            validBlockTypes=cs[CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION],
        )
        # after setDefaults, the same lookup succeeds and yields default options
        cs[CONF_CROSS_SECTION]["AA"]
        self.assertEqual(cs[CONF_CROSS_SECTION]["AA"].geometry, "0D")

    def test_csBlockRepresentation(self):
        """
        Test that the XS block representation is applied globally,
        but only to XS modeling options where the blockRepresentation
        has not already been assigned.
        """
        cs = caseSettings.Settings()
        cs[CONF_XS_BLOCK_REPRESENTATION] = "FluxWeightedAverage"
        cs[CONF_CROSS_SECTION] = XSSettings()
        cs[CONF_CROSS_SECTION]["AA"] = XSModelingOptions("AA", geometry="0D")
        cs[CONF_CROSS_SECTION]["BA"] = XSModelingOptions("BA", geometry="0D", blockRepresentation="Average")
        # before setDefaults: "AA" has no representation; "BA" keeps its explicit one
        self.assertEqual(cs[CONF_CROSS_SECTION]["AA"].blockRepresentation, None)
        self.assertEqual(cs[CONF_CROSS_SECTION]["BA"].blockRepresentation, "Average")
        cs[CONF_CROSS_SECTION].setDefaults(
            cs[CONF_XS_BLOCK_REPRESENTATION],
            cs[CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION],
        )
        # after setDefaults: only the unassigned "AA" picks up the global value
        self.assertEqual(cs[CONF_CROSS_SECTION]["AA"].blockRepresentation, "FluxWeightedAverage")
        self.assertEqual(cs[CONF_CROSS_SECTION]["BA"].blockRepresentation, "Average")

    def test_csBlockRepresentationFileLocation(self):
        """
        Test that default blockRepresentation is applied correctly to a
        XSModelingOption that has the ``xsFileLocation`` attribute defined.
        """
        cs = caseSettings.Settings()
        cs[CONF_XS_BLOCK_REPRESENTATION] = "FluxWeightedAverage"
        cs[CONF_CROSS_SECTION] = XSSettings()
        cs[CONF_CROSS_SECTION]["AA"] = XSModelingOptions("AA", xsFileLocation=[])
        # Check FluxWeightedAverage
        cs[CONF_CROSS_SECTION].setDefaults(
            cs[CONF_XS_BLOCK_REPRESENTATION],
            cs[CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION],
        )
        self.assertEqual(cs[CONF_CROSS_SECTION]["AA"].blockRepresentation, "FluxWeightedAverage")
        # Check Average
        cs[CONF_XS_BLOCK_REPRESENTATION] = "Average"
        cs[CONF_CROSS_SECTION]["AA"] = XSModelingOptions("AA", xsFileLocation=[])
        cs[CONF_CROSS_SECTION].setDefaults(
            cs[CONF_XS_BLOCK_REPRESENTATION],
            cs[CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION],
        )
        self.assertEqual(cs[CONF_CROSS_SECTION]["AA"].blockRepresentation, "Average")
        # Check Median
        cs[CONF_XS_BLOCK_REPRESENTATION] = "Average"
        cs[CONF_CROSS_SECTION]["AA"] = XSModelingOptions("AA", xsFileLocation=[], blockRepresentation="Median")
        cs[CONF_CROSS_SECTION].setDefaults(
            cs[CONF_XS_BLOCK_REPRESENTATION],
            cs[CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION],
        )
        self.assertEqual(cs[CONF_CROSS_SECTION]["AA"].blockRepresentation, "Median")

    def test_xsSettingsSetDefault(self):
        """Test the configuration options of the ``setDefaults`` method."""
        cs = caseSettings.Settings()
        cs[CONF_XS_BLOCK_REPRESENTATION] = "FluxWeightedAverage"
        # None and True both map to "no block-type restriction"
        cs[CONF_CROSS_SECTION].setDefaults(blockRepresentation=cs[CONF_XS_BLOCK_REPRESENTATION], validBlockTypes=None)
        self.assertEqual(cs[CONF_CROSS_SECTION]["AA"].validBlockTypes, None)
        cs[CONF_CROSS_SECTION].setDefaults(blockRepresentation=cs[CONF_XS_BLOCK_REPRESENTATION], validBlockTypes=True)
        self.assertEqual(cs[CONF_CROSS_SECTION]["AA"].validBlockTypes, None)
        # False restricts to fuel blocks only
        cs[CONF_CROSS_SECTION].setDefaults(blockRepresentation=cs[CONF_XS_BLOCK_REPRESENTATION], validBlockTypes=False)
        self.assertEqual(cs[CONF_CROSS_SECTION]["AA"].validBlockTypes, ["fuel"])
        # an explicit list is used as-is
        cs[CONF_CROSS_SECTION].setDefaults(
            blockRepresentation=cs[CONF_XS_BLOCK_REPRESENTATION],
            validBlockTypes=["control", "fuel", "plenum"],
        )
        self.assertEqual(cs[CONF_CROSS_SECTION]["AA"].validBlockTypes, ["control", "fuel", "plenum"])
================================================
FILE: armi/physics/neutronics/tests/test_crossSectionTable.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for cross section table for depletion."""
import unittest
from armi.nuclearDataIO.cccc import isotxs
from armi.physics.neutronics.isotopicDepletion import (
crossSectionTable,
)
from armi.physics.neutronics.isotopicDepletion import (
isotopicDepletionInterface as idi,
)
from armi.physics.neutronics.latticePhysics import ORDER
from armi.reactor.tests.test_blocks import loadTestBlock
from armi.settings import Settings
from armi.testing import loadTestReactor
from armi.tests import ISOAA_PATH
class TestCrossSectionTable(unittest.TestCase):
    """Tests for the depletion cross section table."""

    def test_makeTable(self):
        """Test making a cross section table.

        .. test:: Generate cross section table.
            :id: T_ARMI_DEPL_TABLES
            :tests: R_ARMI_DEPL_TABLES
        """
        block = loadTestBlock()
        block.p.mgFlux = range(33)
        core = block.parent.parent
        core.lib = isotxs.readBinary(ISOAA_PATH)
        table = crossSectionTable.makeReactionRateTable(block)
        # one table entry per nuclide, and the table inherits the block's name
        self.assertEqual(len(table), len(block.getNuclides()))
        self.assertEqual(block.getName(), "B0001-000")
        self.assertEqual(table.getName(), "B0001-000")
        self.assertTrue(table.hasValues())
        xsTable = table.getXsecTable()
        self.assertEqual(len(xsTable), 11)
        self.assertIn("xsecs", xsTable[0])
        self.assertIn("mcnpId", xsTable[-1])

    def test_isotopicDepletionInterface(self):
        """
        Test isotopic depletion interface.

        .. test:: ARMI provides a base class to deplete isotopes.
            :id: T_ARMI_DEPL_ABC
            :tests: R_ARMI_DEPL_ABC
        """
        _o, r = loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml")
        depleter = idi.AbstractIsotopicDepleter(r, Settings())
        # a freshly constructed depleter has nothing queued
        self.assertIsNone(depleter.efpdToBurn)
        self.assertEqual(len(depleter._depleteByName), 0)
        self.assertEqual(len(depleter.getToDeplete()), 0)
        self.assertEqual(ORDER, 5.0)
================================================
FILE: armi/physics/neutronics/tests/test_energyGroups.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Energy group tests."""
import unittest
from armi.physics.neutronics import energyGroups
class TestEnergyGroups(unittest.TestCase):
    """Tests for neutron energy group structures and their reverse lookups."""

    def test_invalidGroupStructureType(self):
        """Test that the reverse lookup fails on non-existent energy group bounds.

        .. test:: Check the neutron energy group bounds logic fails correctly for the wrong structure.
            :id: T_ARMI_EG_NE0
            :tests: R_ARMI_EG_NE
        """
        for structureName in energyGroups.GROUP_STRUCTURE:
            bounds = energyGroups.getGroupStructure(structureName)
            # perturb the top bound so the structure no longer matches any known one
            bounds[0] = bounds[0] * 1e-5
            with self.assertRaises(ValueError):
                energyGroups.getGroupStructureType(bounds)

    def test_consistenciesBetweenGSAndGSType(self):
        """Test that the reverse lookup of the energy group structures work.

        .. test:: Check the neutron energy group bounds for a given group structure.
            :id: T_ARMI_EG_NE1
            :tests: R_ARMI_EG_NE
        """
        for structureName in energyGroups.GROUP_STRUCTURE:
            bounds = energyGroups.getGroupStructure(structureName)
            # forward lookup then reverse lookup must round-trip
            self.assertEqual(structureName, energyGroups.getGroupStructureType(bounds))

    def test_getFastFluxGroupCutoff(self):
        """Test ability to get the ARMI energy group index contained in energy threshold.

        .. test:: Return the energy group index which contains a given energy threshold.
            :id: T_ARMI_EG_FE
            :tests: R_ARMI_EG_FE
        """
        group, frac = energyGroups.getFastFluxGroupCutoff([100002, 100001, 100000, 99999, 0])
        self.assertEqual(group, 2)
        self.assertEqual(frac, 0)
================================================
FILE: armi/physics/neutronics/tests/test_macroXSGenerationInterface.py
================================================
# Copyright 2021 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MacroXSGenerationInterface tests."""
import unittest
from collections import defaultdict
from armi.nuclearDataIO import isotxs
from armi.nuclearDataIO.xsCollections import XSCollection
from armi.physics.neutronics.macroXSGenerationInterface import (
MacroXSGenerationInterface,
)
from armi.settings import Settings
from armi.testing import loadTestReactor
from armi.tests import ISOAA_PATH
class TestMacroXSGenerationInterface(unittest.TestCase):
    """Tests for the macroscopic cross-section generation interface."""

    def test_macroXSGenerationInterfaceBasics(self):
        """Test the macroscopic XS generating interfaces.

        .. test:: Build macroscopic cross sections for all blocks in the reactor.
            :id: T_ARMI_MACRO_XS
            :tests: R_ARMI_MACRO_XS
        """
        cs = Settings()
        _o, r = loadTestReactor(inputFileName="smallestTestReactor/armiRunSmallest.yaml")
        # Before: no block carries macroscopic cross sections yet
        for b in r.core.iterBlocks():
            self.assertIsNone(b.macros)
        # create the macro XS interface and check its configuration
        interface = MacroXSGenerationInterface(r, cs)
        self.assertEqual(interface.minimumNuclideDensity, 1e-15)
        self.assertEqual(interface.name, "macroXsGen")
        # Mock up a nuclide library where unknown nuclide lookups fall back to "CAA"
        mockLib = isotxs.readBinary(ISOAA_PATH)
        realNucs = mockLib.__dict__["_nuclides"]
        mockLib.__dict__["_nuclides"] = defaultdict(lambda: realNucs["CAA"], realNucs)
        # This is the meat of it: build the macro XS
        self.assertIsNone(interface.macrosLastBuiltAt)
        interface.buildMacros(mockLib, buildScatterMatrix=False)
        self.assertEqual(interface.macrosLastBuiltAt, 0)
        # After: every block now holds an XSCollection of macros
        for b in r.core.iterBlocks():
            self.assertIsNotNone(b.macros)
            self.assertTrue(isinstance(b.macros, XSCollection))
================================================
FILE: armi/physics/neutronics/tests/test_neutronicsPlugin.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the neutronics plugin."""
import io
import unittest
from ruamel.yaml import YAML
from armi import getPluginManagerOrFail, settings, tests
from armi.physics import neutronics
from armi.physics.neutronics.const import CONF_CROSS_SECTION
from armi.physics.neutronics.settings import (
CONF_BOUNDARIES,
CONF_DPA_XS_SET,
CONF_GEN_XS,
CONF_GLOBAL_FLUX_ACTIVE,
CONF_GRID_PLATE_DPA_XS_SET,
CONF_GROUP_STRUCTURE,
CONF_INNERS_,
CONF_LATTICE_PHYSICS_FREQUENCY,
CONF_NEUTRONICS_KERNEL,
CONF_OUTERS_,
getNeutronicsSettingValidators,
)
from armi.settings import caseSettings, settingsValidation
from armi.settings.fwSettings.globalSettings import CONF_RUN_TYPE
from armi.tests import TEST_ROOT
from armi.tests.test_plugins import TestPlugin
from armi.utils import directoryChangers
XS_EXAMPLE = """AA:
geometry: 0D
criticalBuckling: true
blockRepresentation: Median
BA:
geometry: 1D slab
blockRepresentation: Median
"""
class TestNeutronicsPlugin(TestPlugin):
    """Tests for the neutronics plugin's custom setting serialization."""

    plugin = neutronics.NeutronicsPlugin

    def setUp(self):
        # each test runs in its own temporary directory so output files don't collide
        self.td = directoryChangers.TemporaryDirectoryChanger()
        self.td.__enter__()

    def tearDown(self):
        self.td.__exit__(None, None, None)

    def test_customSettingObjectIO(self):
        """Check specialized settings can build objects as values and write."""
        cs = caseSettings.Settings()
        yaml = YAML()
        inp = yaml.load(io.StringIO(XS_EXAMPLE))
        cs[CONF_CROSS_SECTION] = inp
        self.assertEqual(cs[CONF_CROSS_SECTION]["AA"].geometry, "0D")
        fname = "test_setting_obj_io_.yaml"
        cs.writeToYamlFile(fname)
        # use a context manager so the handle is closed promptly
        # (the bare open().read() idiom leaks the file handle, which can block
        # cleanup of the temporary directory on Windows)
        with open(fname, "r") as f:
            outText = f.read()
        self.assertIn("geometry: 0D", outText)

    def test_customSettingRoundTrip(self):
        """Check specialized settings can go back and forth."""
        cs = caseSettings.Settings()
        yaml = YAML()
        inp = yaml.load(io.StringIO(XS_EXAMPLE))
        cs[CONF_CROSS_SECTION] = inp
        # re-assigning the parsed object to itself must be a no-op round trip
        cs[CONF_CROSS_SECTION] = cs[CONF_CROSS_SECTION]
        fname = "test_setting_obj_io_round.yaml"
        cs.writeToYamlFile(fname)
        with open(fname, "r") as f:
            outText = f.read()
        self.assertIn("geometry: 0D", outText)
        self.assertIn("geometry: 1D", outText)

    def test_neutronicsSettingsLoaded(self):
        """Check that various special neutronics-specifics settings are loaded."""
        cs = caseSettings.Settings()
        self.assertIn(CONF_INNERS_, cs)
        self.assertIn(CONF_OUTERS_, cs)
        self.assertIn(CONF_NEUTRONICS_KERNEL, cs)
class NeutronicsReactorTests(unittest.TestCase):
    """Tests of kinetics-parameter assignment and the neutronics settings validators."""

    @classmethod
    def setUpClass(cls):
        # prepare the input files. This is important so the unit tests run from wherever
        # they need to run from.
        cls.directoryChanger = directoryChangers.DirectoryChanger(TEST_ROOT)
        cls.directoryChanger.open()

    @classmethod
    def tearDownClass(cls):
        cls.directoryChanger.close()

    @staticmethod
    def __getModifiedSettings(customSettings):
        """Return default Settings overlaid with ``customSettings``."""
        cs = settings.Settings()
        # dict() copies the mapping directly; no need for a manual key-by-key loop.
        return cs.modified(newSettings=dict(customSettings))

    def test_kineticsParameterAssignment(self):
        """Test that the delayed neutron fraction and precursor decay constants are applied from settings."""
        r = tests.getEmptyHexReactor()
        self.assertIsNone(r.core.p.beta)
        self.assertIsNone(r.core.p.betaComponents)
        self.assertIsNone(r.core.p.betaDecayConstants)

        # Test that the group-wise beta and decay constants are assigned
        # together given that they are the same length.
        r = tests.getEmptyHexReactor()
        cs = self.__getModifiedSettings(
            customSettings={
                "beta": [0.0] * 6,
                "decayConstants": [1.0] * 6,
            }
        )
        dbLoad = False
        getPluginManagerOrFail().hook.onProcessCoreLoading(core=r.core, cs=cs, dbLoad=dbLoad)
        r.core.setOptionsFromCs(cs)
        self.assertEqual(r.core.p.beta, sum(cs["beta"]))
        self.assertListEqual(list(r.core.p.betaComponents), cs["beta"])
        self.assertListEqual(list(r.core.p.betaDecayConstants), cs["decayConstants"])

        # Test the assignment of total beta as a float
        r = tests.getEmptyHexReactor()
        cs = self.__getModifiedSettings(
            customSettings={"beta": 0.00670},
        )
        getPluginManagerOrFail().hook.onProcessCoreLoading(core=r.core, cs=cs, dbLoad=dbLoad)
        self.assertEqual(r.core.p.beta, cs["beta"])
        self.assertIsNone(r.core.p.betaComponents)
        self.assertIsNone(r.core.p.betaDecayConstants)

        # Test that nothing is assigned if the beta is specified as a list
        # without a corresponding decay constants list.
        r = tests.getEmptyHexReactor()
        cs = self.__getModifiedSettings(
            customSettings={
                "beta": [0.0] * 6,
            },
        )
        getPluginManagerOrFail().hook.onProcessCoreLoading(core=r.core, cs=cs, dbLoad=dbLoad)
        self.assertIsNone(r.core.p.beta)
        self.assertIsNone(r.core.p.betaComponents)
        self.assertIsNone(r.core.p.betaDecayConstants)

        # Test that 1 group beta components and decay constants can be assigned.
        # Since beta is a list, ensure that it's assigned to the `betaComponents`
        # parameter.
        r = tests.getEmptyHexReactor()
        cs = self.__getModifiedSettings(
            customSettings={"beta": [0.0], "decayConstants": [1.0]},
        )
        getPluginManagerOrFail().hook.onProcessCoreLoading(core=r.core, cs=cs, dbLoad=dbLoad)
        self.assertEqual(r.core.p.beta, sum(cs["beta"]))
        self.assertListEqual(list(r.core.p.betaComponents), cs["beta"])
        self.assertListEqual(list(r.core.p.betaDecayConstants), cs["decayConstants"])

        # Test that decay constants are not assigned without a corresponding
        # group-wise beta input.
        r = tests.getEmptyHexReactor()
        cs = self.__getModifiedSettings(
            customSettings={"decayConstants": [1.0] * 6},
        )
        getPluginManagerOrFail().hook.onProcessCoreLoading(core=r.core, cs=cs, dbLoad=dbLoad)
        self.assertIsNone(r.core.p.beta)
        self.assertIsNone(r.core.p.betaComponents)
        self.assertIsNone(r.core.p.betaDecayConstants)

        # Test that decay constants are not assigned without a corresponding
        # group-wise beta input. This also demonstrates that the total beta
        # is still assigned.
        r = tests.getEmptyHexReactor()
        cs = self.__getModifiedSettings(
            customSettings={"decayConstants": [1.0] * 6, "beta": 0.0},
        )
        getPluginManagerOrFail().hook.onProcessCoreLoading(core=r.core, cs=cs, dbLoad=dbLoad)
        self.assertEqual(r.core.p.beta, cs["beta"])
        self.assertIsNone(r.core.p.betaComponents)
        self.assertIsNone(r.core.p.betaDecayConstants)

        # Test the demonstrates that None values are acceptable
        # and that nothing is assigned.
        r = tests.getEmptyHexReactor()
        cs = self.__getModifiedSettings(
            customSettings={"decayConstants": None, "beta": None},
        )
        getPluginManagerOrFail().hook.onProcessCoreLoading(core=r.core, cs=cs, dbLoad=dbLoad)
        self.assertEqual(r.core.p.beta, cs["beta"])
        self.assertIsNone(r.core.p.betaComponents)
        self.assertIsNone(r.core.p.betaDecayConstants)

        # Test that an error is raised if the decay constants
        # and group-wise beta are inconsistent sizes
        with self.assertRaises(ValueError):
            r = tests.getEmptyHexReactor()
            cs = self.__getModifiedSettings(
                customSettings={"decayConstants": [1.0] * 6, "beta": [0.0]},
            )
            getPluginManagerOrFail().hook.onProcessCoreLoading(core=r.core, cs=cs, dbLoad=dbLoad)

        # Test that an error is raised if the decay constants
        # and group-wise beta are inconsistent sizes
        with self.assertRaises(ValueError):
            r = tests.getEmptyHexReactor()
            cs = self.__getModifiedSettings(
                customSettings={"decayConstants": [1.0] * 6, "beta": [0.0] * 5},
            )
            getPluginManagerOrFail().hook.onProcessCoreLoading(core=r.core, cs=cs, dbLoad=dbLoad)

    @staticmethod
    def __autoCorrectAllQueries(settingsValidator):
        """Force-Correct (resolve() to "YES") all queries in a Settings Validator."""
        for query in settingsValidator:
            try:
                query.correction()
            except FileNotFoundError:
                # to make testing easier, let's ignore settings that require input files
                pass

    def test_neutronicsSettingsValidators(self):
        """Exercise each neutronics settings validator and its auto-correction."""
        # grab the neutronics validators
        cs = settings.Settings()
        inspector = settingsValidation.Inspector(cs)
        sv = getNeutronicsSettingValidators(inspector)
        self.assertEqual(len(sv), 8)

        # Test the Query: boundaries are now "Extrapolated", not "Generalized"
        cs = cs.modified(newSettings={CONF_BOUNDARIES: "Generalized"})
        inspector = settingsValidation.Inspector(cs)
        sv = getNeutronicsSettingValidators(inspector)
        self.__autoCorrectAllQueries(sv)
        self.assertEqual(inspector.cs[CONF_BOUNDARIES], "Extrapolated")

        # Test the Query: genXS are no longer True/False
        cs = cs.modified(newSettings={CONF_GEN_XS: "True"})
        inspector = settingsValidation.Inspector(cs)
        sv = getNeutronicsSettingValidators(inspector)
        self.__autoCorrectAllQueries(sv)
        self.assertEqual(inspector.cs[CONF_GEN_XS], "Neutron")

        cs = cs.modified(newSettings={CONF_GEN_XS: "False"})
        inspector = settingsValidation.Inspector(cs)
        sv = getNeutronicsSettingValidators(inspector)
        self.__autoCorrectAllQueries(sv)
        self.assertEqual(inspector.cs[CONF_GEN_XS], "")

        # Test the Query: CONF_GLOBAL_FLUX_ACTIVE are no longer True/False
        cs = cs.modified(newSettings={CONF_GLOBAL_FLUX_ACTIVE: "True"})
        inspector = settingsValidation.Inspector(cs)
        sv = getNeutronicsSettingValidators(inspector)
        self.__autoCorrectAllQueries(sv)
        self.assertEqual(inspector.cs[CONF_GLOBAL_FLUX_ACTIVE], "Neutron")

        cs = cs.modified(newSettings={CONF_GLOBAL_FLUX_ACTIVE: "False"})
        inspector = settingsValidation.Inspector(cs)
        sv = getNeutronicsSettingValidators(inspector)
        self.__autoCorrectAllQueries(sv)
        self.assertEqual(inspector.cs[CONF_GLOBAL_FLUX_ACTIVE], "")

        # Test the Query: try to migrate the Group Structure name
        cs = cs.modified(newSettings={CONF_GROUP_STRUCTURE: "armi45"})
        inspector = settingsValidation.Inspector(cs)
        sv = getNeutronicsSettingValidators(inspector)
        self.__autoCorrectAllQueries(sv)
        self.assertEqual(inspector.cs[CONF_GROUP_STRUCTURE], "ARMI45")

        cs = cs.modified(newSettings={CONF_GROUP_STRUCTURE: "bad_value"})
        inspector = settingsValidation.Inspector(cs)
        sv = getNeutronicsSettingValidators(inspector)
        self.__autoCorrectAllQueries(sv)
        self.assertEqual(inspector.cs[CONF_GROUP_STRUCTURE], "ANL33")

        # Test the Query: migrating some common shortened names for dpa XS sets
        cs = cs.modified(newSettings={CONF_DPA_XS_SET: "dpaHT9_33"})
        inspector = settingsValidation.Inspector(cs)
        sv = getNeutronicsSettingValidators(inspector)
        self.__autoCorrectAllQueries(sv)
        self.assertEqual(inspector.cs[CONF_DPA_XS_SET], "dpaHT9_ANL33_TwrBol")

        cs = cs.modified(newSettings={CONF_GRID_PLATE_DPA_XS_SET: "dpa_SS316"})
        inspector = settingsValidation.Inspector(cs)
        sv = getNeutronicsSettingValidators(inspector)
        self.__autoCorrectAllQueries(sv)
        self.assertEqual(inspector.cs[CONF_GRID_PLATE_DPA_XS_SET], "dpaSS316_ANL33_TwrBol")

        cs = cs.modified(
            newSettings={
                CONF_RUN_TYPE: "Snapshots",
                CONF_LATTICE_PHYSICS_FREQUENCY: "BOC",
            }
        )
        inspector = settingsValidation.Inspector(cs)
        sv = getNeutronicsSettingValidators(inspector)
        self.__autoCorrectAllQueries(sv)
        self.assertEqual(inspector.cs[CONF_LATTICE_PHYSICS_FREQUENCY], "firstCoupledIteration")
================================================
FILE: armi/physics/neutronics/tests/test_neutronicsSymmetry.py
================================================
# Copyright 2025 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Audit symmetry-aware parameters in neutronics.
See Also
--------
armi.testing.symmetryTesting
"""
from armi.physics.neutronics.parameters import getNeutronicsParameterDefinitions
from armi.reactor.blocks import Block
from armi.reactor.cores import Core
from armi.testing import symmetryTesting
class TestNeutronicsParamSym(symmetryTesting.BasicArmiSymmetryTestHelper):
    """Audit the symmetry awareness of parameters defined by the neutronics plugin."""

    def setUp(self):
        # Pull the core- and block-level parameter collections out of the
        # plugin's definitions and hand them to the symmetry test helper.
        paramDefs = getNeutronicsParameterDefinitions()
        self.coreParamsToTest = paramDefs[Core]
        self.blockParamsToTest = paramDefs[Block]
        # Block parameters expected to be flagged as symmetry-aware.
        self.expectedSymmetricBlockParams = [
            "mgFlux",
            "adjMgFlux",
            "lastMgFlux",
            "mgFluxGamma",
            "reactionRates",
            "power",
            "powerGamma",
            "powerNeutron",
            "powerGenerated",
        ]
        super().setUp()
================================================
FILE: armi/physics/safety/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Safety package for generic safety-related code."""
from armi import plugins
class SafetyPlugin(plugins.ArmiPlugin):
    """ARMI plugin stub for generic safety-related functionality."""

    @staticmethod
    @plugins.HOOKIMPL
    def defineSettings():
        """Define settings for the plugin."""
        # No safety-specific settings exist yet; contribute an empty list.
        return []
================================================
FILE: armi/physics/tests/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
================================================
FILE: armi/physics/tests/test_executers.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module provides tests for the generic Executers."""
import os
import subprocess
import unittest
from armi.physics import executers
from armi.reactor import geometry
from armi.utils import directoryChangers
class MockParams:
    """Tiny stand-in for a parameter collection carrying time-point state."""

    def __init__(self):
        # Fixed, arbitrary time coordinates used by the executer tests.
        self.cycle, self.timeNode = 1, 2
class MockCore:
    """Tiny stand-in for a Core exposing only geometry, symmetry, and params."""

    def __init__(self):
        self.p = MockParams()
        self.symmetry = "full"
        # just pick a random geomType
        self.geomType = geometry.GeomType.CARTESIAN
class MockReactor:
    """Tiny stand-in for a Reactor holding a mock core and parameters."""

    def __init__(self):
        self.o = None
        self.p = MockParams()
        self.core = MockCore()
class TestExecutionOptions(unittest.TestCase):
    def test_runningDirectoryPath(self):
        """
        Test that the running directory path is set up correctly
        based on the case title and label provided.
        """
        # Each label deterministically hashes to a distinct directory name.
        expectations = (
            (None, "508bc04f-0"),
            ("label", "b07da087-0"),
            ("label2", "9c1c83cb-0"),
        )
        for label, expectedBasename in expectations:
            opts = executers.ExecutionOptions(label=label)
            opts.setRunDirFromCaseTitle(caseTitle="test")
            self.assertEqual(os.path.basename(opts.runDir), expectedBasename)
class TestExecuters(unittest.TestCase):
    """Tests for the DefaultExecuter and generic Executer behavior."""

    def setUp(self):
        e = executers.ExecutionOptions(label=None)
        self.executer = executers.DefaultExecuter(e, MockReactor())

    def test_collectInputsAndOutputs(self):
        """Verify that the executer can select to not copy back output."""
        self.executer.options.inputFile = "test.inp"
        self.executer.options.outputFile = "test.out"
        self.executer.options.copyOutput = False
        inputs, outputs = self.executer._collectInputsAndOutputs()
        self.assertEqual("test.inp", inputs[0], "Input file was not successfully identified.")
        self.assertTrue(outputs == [], "Outputs were returned erroneously!")

        self.executer.options.copyOutput = True
        inputs, outputs = self.executer._collectInputsAndOutputs()
        self.assertEqual("test.inp", inputs[0], "Input file was not successfully identified.")
        self.assertEqual("test.out", outputs[0], "Output file was not successfully identified.")

    def test_updateRunDir(self):
        """
        Verify that runDir is updated when TemporaryDirectoryChanger is used and
        not updated when ForcedCreationDirectoryChanger is used.
        """
        self.assertEqual(self.executer.dcType, directoryChangers.TemporaryDirectoryChanger)
        self.executer._updateRunDir("updatedRunDir")
        self.assertEqual(self.executer.options.runDir, "updatedRunDir")

        # change directoryChanger type, runDir not updated
        self.executer.options.runDir = "runDir"
        self.executer.dcType = directoryChangers.ForcedCreationDirectoryChanger
        self.executer._updateRunDir("notThisString")
        self.assertEqual(self.executer.options.runDir, "runDir")

    def test_runExternalExecutable(self):
        """Run an external executable with an Executer.

        .. test:: Run an external executable with an Executer.
            :id: T_ARMI_EX
            :tests: R_ARMI_EX
        """
        filePath = "test_runExternalExecutable.py"
        outFile = "tmp.txt"
        label = "printExtraStuff"

        class MockExecutionOptions(executers.ExecutionOptions):
            pass

        class MockExecuter(executers.Executer):
            def run(self, args):
                if self.options.label == label:
                    subprocess.run(["python", filePath, "extra stuff"])
                else:
                    subprocess.run(["python", filePath, args])

        with directoryChangers.TemporaryDirectoryChanger():
            # build a mock external program (a little Python script)
            self.__makeALittleTestProgram(filePath, outFile)

            # make sure the output file doesn't exist yet
            self.assertFalse(os.path.exists(outFile))

            # set up an executer for our little test program
            opts = MockExecutionOptions()
            exe = MockExecuter(opts, None)
            exe.run("")

            # make sure the output file exists now
            self.assertTrue(os.path.exists(outFile))

            # run the executer with options
            testString = "some options"
            exe.run(testString)

            # make sure the output file exists now
            self.assertTrue(os.path.exists(outFile))
            # Read via context managers: the original open(...).read() calls
            # leaked file handles (ResourceWarning; file locks on Windows).
            with open(outFile, "r") as f:
                newTxt = f.read()
            self.assertIn(testString, newTxt)

            # now prove the options object can affect the execution
            exe.options.label = label
            exe.run("")
            with open(outFile, "r") as f:
                newerTxt = f.read()
            self.assertIn("extra stuff", newerTxt)

    @staticmethod
    def __makeALittleTestProgram(filePath, outFile):
        """Helper method to write a tiny Python script.

        We need "an external program" for testing.
        """
        txt = f"""import sys

def main():
    with open("{outFile}", "w") as f:
        f.write(str(sys.argv))

if __name__ == "__main__":
    main()
"""
        with open(filePath, "w") as f:
            f.write(txt)
================================================
FILE: armi/physics/thermalHydraulics/__init__.py
================================================
# Copyright 2020 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Thermal Hydraulics package."""
from armi.physics.thermalHydraulics.plugin import ThermalHydraulicsPlugin # noqa: F401
================================================
FILE: armi/physics/thermalHydraulics/const.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This couldn't be packaged with the thermalHydraulics plugin because it
# ends up getting imported by the ARMI framework before plugins get imported.
# String constant naming the "zone map" orifice-setting option; kept here (rather
# than in the plugin) solely because of the import-order issue noted above.
ORIFICE_SETTING_ZONE_MAP = "zone map"
================================================
FILE: armi/physics/thermalHydraulics/parameters.py
================================================
# Copyright 2020 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parameter definitions for thermal hydraulic plugins."""
from armi.reactor import parameters
from armi.reactor.assemblies import Assembly
from armi.reactor.blocks import Block
from armi.reactor.parameters import ParamLocation
from armi.utils import units
def getParameterDefinitions():
    """Return ParameterDefinitionCollections for each appropriate ArmiObject."""
    # Map each composite type to its thermal-hydraulics parameter collection.
    defs = {}
    defs[Assembly] = _getAssemblyParams()
    defs[Block] = _getBlockParams()
    return defs
def _getAssemblyParams():
    """Build the assembly-level thermal-hydraulics parameter definitions."""
    pDefs = parameters.ParameterDefinitionCollection()
    # Flow/inlet parameters: zero-defaulted floats in the "thermal hydraulics" category.
    with pDefs.createBuilder(default=0.0, categories=["thermal hydraulics"]) as pb:
        pb.defParam(
            "THmassFlowRate",
            units=f"{units.KG}/{units.SECONDS}",
            description="The nominal assembly flow rate",
            categories=["broadcast"],
            location=ParamLocation.VOLUME_INTEGRATED,
        )
        pb.defParam(
            "THcoolantInletT",
            units=units.DEGC,
            description="Assembly inlet temperature in C (cold temperature)",
        )
    # Pressure-drop parameter: averaged over the assembly and saved to the DB.
    with pDefs.createBuilder(
        default=0.0,
        location=ParamLocation.AVERAGE,
        saveToDB=True,
        categories=["thermal hydraulics"],
    ) as pb:
        pb.defParam(
            "THdeltaPTotal",
            units=units.PASCALS,
            description="Total pressure difference across the assembly",
            categories=["broadcast"],
        )
    return pDefs
def _getBlockParams():
    """Build the block-level thermal-hydraulics parameter definitions."""
    pDefs = parameters.ParameterDefinitionCollection()
    # Bulk coolant and pressure parameters: zero-defaulted floats saved to the DB.
    with pDefs.createBuilder(default=0.0, categories=["thInterface"], saveToDB=True) as pb:
        pb.defParam(
            "THcoolantOutletT",
            units=units.DEGC,
            description="Coolant temperature at the outlet of this block",
            location=ParamLocation.TOP,
        )
        pb.defParam(
            "THmassFlowRate",
            units=f"{units.KG}/{units.SECONDS}",
            description="Mass flow rate",
            location=ParamLocation.VOLUME_INTEGRATED,
        )
        pb.defParam(
            "THcoolantInletT",
            units=units.DEGC,
            description="The nominal average bulk coolant inlet temperature into the block.",
            location=ParamLocation.BOTTOM,
        )
        pb.defParam(
            "THdeltaPTotal",
            units=units.PASCALS,
            description="Total pressure difference in a block",
            location=ParamLocation.AVERAGE,
        )
    # Duct wall temperatures: default None; locations combine positional flags
    # (e.g. BOTTOM | CORNERS) to mark multi-position values.
    with pDefs.createBuilder(default=None, categories=["thermal hydraulics", "mongoose"], saveToDB=True) as pb:
        pb.defParam(
            "THcornTemp",
            units=units.DEGC,
            description="Mid-wall duct temperature for assembly corners",
            location=ParamLocation.BOTTOM | ParamLocation.CORNERS,
        )
        pb.defParam(
            "THedgeTemp",
            units=units.DEGC,
            description="Mid-wall duct temperature for assembly edges",
            location=ParamLocation.BOTTOM | ParamLocation.EDGES,
        )
    return pDefs
================================================
FILE: armi/physics/thermalHydraulics/plugin.py
================================================
# Copyright 2020 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Generic Thermal/Hydraulics Plugin.
Thermal/hydraulics is concerned with temperatures, flows, pressures, and heat transfer.
"""
from armi import interfaces, plugins
# Position of this plugin's interfaces in the standard interface stack.
ORDER = interfaces.STACK_ORDER.THERMAL_HYDRAULICS
class ThermalHydraulicsPlugin(plugins.ArmiPlugin):
    """Plugin for thermal/hydraulics."""

    @staticmethod
    @plugins.HOOKIMPL
    def defineParameters():
        """Define additional parameters for the reactor data model."""
        # Imported lazily so the parameter module is only loaded when the
        # hook actually fires.
        from armi.physics.thermalHydraulics import parameters as thParameters

        return thParameters.getParameterDefinitions()
================================================
FILE: armi/physics/thermalHydraulics/tests/__init__.py
================================================
# Copyright 2024 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
================================================
FILE: armi/physics/thermalHydraulics/tests/test_thermalHydraulicsSymmetry.py
================================================
# Copyright 2025 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Audit symmetry-aware parameters in thermal hydraulics.
See Also
--------
armi.testing.symmetryTesting
"""
from armi.physics.thermalHydraulics.parameters import getParameterDefinitions
from armi.reactor.blocks import Block
from armi.testing import symmetryTesting
class TestTHParamSymmetry(symmetryTesting.BasicArmiSymmetryTestHelper):
    """Audit the symmetry awareness of block parameters from the TH plugin."""

    def setUp(self):
        # Only block-level parameters are defined as symmetry-relevant here.
        thParamDefs = getParameterDefinitions()
        self.blockParamsToTest = thParamDefs[Block]
        self.expectedSymmetricBlockParams = ["THmassFlowRate"]
        super().setUp()
================================================
FILE: armi/pluginManager.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Slightly customized version of the stock pluggy ``PluginManager``."""
import pluggy
class ArmiPluginManager(pluggy.PluginManager):
    """
    PluginManager implementation with ARMI-specific features.

    The main point of this subclass is to make it possible to detect when the plugin
    manager has been mutated, allowing for safe caching of expensive results derived
    from the set of registered plugins. This is done by exposing a counter that is
    incremented any time the set of registered plugins is modified. If a client caches
    any results derived from calling plugin hooks, caching this counter along with that
    data allows for cheaply testing that the cached results are still valid.
    """

    def __init__(self, *args, **kwargs):
        pluggy.PluginManager.__init__(self, *args, **kwargs)
        # Mutation counter; bumped on every successful register/unregister.
        self._counter = 0

    @property
    def counter(self):
        """Number of times the registered-plugin set has been modified."""
        return self._counter

    def register(self, *args, **kwargs):
        result = pluggy.PluginManager.register(self, *args, **kwargs)
        # Bump only after a successful registration so a raised exception does
        # not spuriously invalidate caches (the original bumped beforehand).
        self._counter += 1
        return result

    def unregister(self, *args, **kwargs):
        result = pluggy.PluginManager.unregister(self, *args, **kwargs)
        # Bug fix: unregistering also mutates the plugin set, so the counter
        # must be incremented here too — otherwise caches keyed on `counter`
        # would wrongly be treated as still valid after an unregister.
        self._counter += 1
        return result
================================================
FILE: armi/plugins.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Plugins allow various built-in or external functionality to be brought into the ARMI ecosystem.
This module defines the hooks that may be defined within plugins. Plugins are ultimately incorporated into a
:py:class:`armi.pluginManager.ArmiPluginManager`, which live inside of a :py:class:`armi.apps.App` object.
The ``ArmiPluginManager`` is derived from the ``PluginManager`` class provided by the ``pluggy`` package, which provides
a registry of known plugins. Rather than create one directly, we use the :py:func:`armi.plugins.getNewPluginManager()`
function, which handles some of the setup for us.
From a high-altitude perspective, the plugins provide numerous "hooks", which allow for ARMI to be extended in various
ways. Some of these extensions are subtle and play a part in how certain ARMI components are initialized or defined. As
such, it is necessary to register most plugins before some parts of ARMI are imported or exercised in a meaningful way.
These requirements are in flux, and will ultimately constitute part of the specification of the ARMI plugin
architecture. For now, to be safe, plugins should be registered as soon as possible.
After forming the ``PluginManager``, the plugin hooks can be accessed through the ``hook`` attribute. E.g.::
>>> armi.getPluginManagerOrFail().hook.exposeInterfaces(cs=cs)
Don't forget to use the keyword argument form for all arguments to hooks; ``pluggy`` requires them to enforce hook
specifications.
The :py:class:`armi.apps.App` class serves as the primary storage location of the PluginManager, and also provides some
methods to get data out of the plugins more ergonomically than through the hooks themselves.
Some things you may want to bring in via a plugin includes:
- :py:mod:`armi.settings` and their validators
- :py:mod:`armi.reactor.components` for custom geometry
- :py:mod:`armi.reactor.flags` for custom reactor components
- :py:mod:`armi.interfaces` to define new calculation sequences and interactions with new codes
- :py:mod:`armi.reactor.parameters` to represent new physical state on the reactor
- :py:mod:`armi.materials` for custom materials
- Elements of the :py:mod:`armi.gui`
- :py:mod:`armi.operators` for adding new operations on reactor models
- :py:mod:`armi.cli` for adding new operations on input files
Warning
-------
The plugin system was developed to support improved collaboration. It is new and should be considered under
development. The API is subject to change as the version of the ARMI framework approaches 1.0.
Notes
-----
Due to the nature of some of these components, there are a couple of restrictions on the order in which things can be
imported (lest we endeavor to redesign them considerably). Examples:
- Parameters: All parameter definitions must be present before any ``ArmiObject`` objects are instantiated. This is
mostly by choice, but also makes the most sense, because the ``ParameterCollection`` s are instance attributes of an
``ArmiObject``, which in turn use ``Parameter`` objects as *class* attributes. We should know what class attributes
we have before making instances.
- Blueprints: Since blueprints should be extendable with new sections, we must also be able to provide new *class*
attributes to extend their behavior. This is because blueprints use the yamlize package, which uses class attributes
to define much of the class's behavior through metaclassing. Therefore, we need to be able to import all plugins
*before* importing blueprints.
Plugins are currently stateless. They do not have ``__init__()`` methods, and when they are registered with the
PluginManager, the PluginManager gets the Plugin's class object rather than an instance of that class. Also notice that
all of the hooks are ``@staticmethod``\ s. As a result, they can be called directly off of the class object, and only
have access to the state passed into them to perform their function. This is a deliberate design choice to keep the
plugin system simple and to preclude a large class of potential bugs. At some point it may make sense to revisit this.
**Other customization points**
While the Plugin API is the main place for ARMI framework customization, there are several other areas where ARMI may be
extended or customized. These typically pre-dated the Plugin-based architecture, and as the need arises may be migrated
to here.
- Component types: Component types are registered dynamically through some metaclass magic, found in
:py:class:`armi.reactor.components.component.ComponentType` and
:py:class:`armi.reactor.composites.CompositeModelType`. Simply defining a new Component subclass should register
it with the appropriate ARMI systems. While this is convenient, it does lead to potential issues, as the behavior
of ARMI becomes sensitive to module import order and the like; the containing module needs to be imported before
the registration occurs, which can be surprising.
- Interface input files: Interfaces used to be discovered dynamically, rather than explicitly as they are now in the
:py:meth:`armi.plugins.ArmiPlugin.exposeInterfaces` plugin hook. Essentially they functioned as ersatz plugins.
One of the ways that they would customize ARMI behavior is through the
:py:meth:`armi.physics.interface.Interface.specifyInputs` static method, which is still used to determine inter-
Case dependencies and support cloning and hashing Case inputs. Going forward, this approach will likely be
deprecated in favor of a plugin hook.
- Fuel handler logic: The :py:class:`armi.physics.fuelCycle.fuelHandlers.FuelHandlerInterface` supports
customization through the dynamic loading of fuel handler logic modules, based on user settings. This also
predated the plugin infrastructure, and may one day be replaced with plugin-based fuel handler logic.
"""
from typing import TYPE_CHECKING, Callable, Dict, List, Union
import pluggy
from armi import pluginManager
from armi.utils import flags
if TYPE_CHECKING:
from armi.reactor.composites import Composite
from armi.reactor.converters.axialExpansionChanger import AxialExpansionChanger
HOOKSPEC = pluggy.HookspecMarker("armi")
HOOKIMPL = pluggy.HookimplMarker("armi")
class ArmiPlugin:
    """
    An ArmiPlugin exposes a collection of hooks that allow users to add a variety of things to their ARMI application:
    Interfaces, parameters, settings, flags, and much more.

    .. impl:: Plugins add code to the application through interfaces.
        :id: I_ARMI_PLUGIN
        :implements: R_ARMI_PLUGIN

        Each plugin has the option of implementing the ``exposeInterfaces`` method, and this will be used as a plugin
        hook to add one or more Interfaces to the ARMI Application. Interfaces can wrap external executables with
        nuclear modeling codes in them, or directly implement their logic in Python. But because Interfaces are Python
        code, they have direct access to read and write from ARMI's reactor data model. This connection from Plugin,
        to Interfaces, to the reactor data model is the primary way that developers add code to an ARMI application
        and simulation.
    """

    @staticmethod
    @HOOKSPEC
    def exposeInterfaces(cs) -> List:
        """
        Function for exposing interface(s) to other code.

        .. impl:: Plugins can add interfaces to the operator.
            :id: I_ARMI_PLUGIN_INTERFACES
            :implements: R_ARMI_PLUGIN_INTERFACES

            This method takes in a Settings object and returns a list of Interfaces, the position of each Interface in
            the Interface stack, and a list of arguments to pass to the Interface when initializing it later. These
            Interfaces can then be used to add code to a simulation.

        Returns
        -------
        list
            Tuples containing:

            - The insertion order to use when building an interface stack,
            - an implementation of the Interface class
            - a dictionary of kwargs to pass to an Operator when adding an instance of the interface class

            If no Interfaces should be active given the passed case settings, this should return an empty list.
        """

    @staticmethod
    @HOOKSPEC
    def defineParameters() -> Dict:
        """
        Define additional parameters for the reactor data model.

        .. impl:: Plugins can add parameters to the reactor data model.
            :id: I_ARMI_PLUGIN_PARAMS
            :implements: R_ARMI_PLUGIN_PARAMS

            Through this method, plugin developers can create new Parameters. A parameter can represent any physical
            property an analyst might want to track. And they can be added at any level of the reactor data model.
            Through this, the developers can extend ARMI and what physical properties of the reactor they want to
            calculate, track, and store to the database.

        .. impl:: Define an arbitrary physical parameter.
            :id: I_ARMI_PARAM0
            :implements: R_ARMI_PARAM

            Through this method, plugin developers can create new Parameters. A parameter can represent any physical
            property an analyst might want to track. For example, through this method, a plugin developer can add a new
            thermodynamic property that adds a thermodynamic parameter to every block in the reactor. Or they could add
            a neutronics parameter to every fuel assembly. A parameter is quite generic. But these parameters will be
            tracked in the reactor data model, extend what developers can do with ARMI, and will be saved to the output
            database.

        Returns
        -------
        dict
            Keys should be subclasses of ArmiObject, with each value being a ParameterDefinitionCollection that should
            be added to the key's parameter definitions.

        Example
        -------
        >>> pluginBlockParams = parameters.ParameterDefinitionCollection()
        >>> with pluginBlockParams.createBuilder() as pb:
        ...     pb.defParam("plugBlkP1", ...)
        ...     # ...
        >>> pluginAssemParams = parameters.ParameterDefinitionCollection()
        >>> with pluginAssemParams.createBuilder() as pb:
        ...     pb.defParam("plugAsmP1", ...)
        ...     # ...
        >>> return {blocks.Block: pluginBlockParams, assemblies.Assembly: pluginAssemParams}
        """

    @staticmethod
    @HOOKSPEC
    def afterConstructionOfAssemblies(assemblies, cs) -> None:
        """
        Function to call after a set of assemblies are constructed.

        This hook can be used to:

        - Verify that all assemblies satisfy constraints imposed by active interfaces and plugins
        - Apply modifications to Assemblies based on modeling options and active interfaces

        Implementers may alter the state of the passed Assembly objects.

        Returns
        -------
        None
        """

    @staticmethod
    @HOOKSPEC
    def onProcessCoreLoading(core, cs, dbLoad) -> None:
        """
        Function to call whenever a Core object is newly built.

        This is usually used to set initial parameter values from inputs, either after constructing a Core from
        Blueprints, or after loading it from a database.
        """

    @staticmethod
    @HOOKSPEC
    def beforeReactorConstruction(cs) -> None:
        """Function to call before the reactor is constructed."""

    @staticmethod
    @HOOKSPEC
    def defineFlags() -> Dict[str, Union[int, flags.auto]]:
        """
        Add new flags to the reactor data model, and the simulation.

        .. impl:: Plugins can define new, unique flags to the system.
            :id: I_ARMI_FLAG_EXTEND1
            :implements: R_ARMI_FLAG_EXTEND

            This method allows a plugin developers to provide novel values for the Flags system. This method returns a
            dictionary mapping flag names to their desired numerical values. In most cases, no specific value is
            needed, one can be automatically generated using :py:class:`armi.utils.flags.auto`. (For more information,
            see :py:mod:`armi.reactor.flags`.)

        See Also
        --------
        armi.reactor.flags

        Example
        -------
        >>> def defineFlags():
        ...     return {"FANCY": armi.utils.flags.auto()}
        """

    @staticmethod
    @HOOKSPEC
    def defineBlockTypes() -> List:
        """
        Function for providing novel Block types from a plugin.

        This should return a list of tuples containing ``(compType, blockType)``, where ``blockType`` is a new
        ``Block`` subclass to register, and ``compType`` is the corresponding ``Component`` type that should activate
        it. For instance a ``HexBlock`` would be created when the largest component is a ``Hexagon``::

            [(Hexagon, HexBlock)]

        Returns
        -------
        list
            ``[(compType, BlockType), ...]``
        """

    @staticmethod
    @HOOKSPEC
    def defineAssemblyTypes() -> List:
        """
        Function for providing novel Assembly types from a plugin.

        This should return a list of tuples containing ``(blockType, assemType)``, where ``assemType`` is a new
        ``Assembly`` subclass to register, and ``blockType`` is the corresponding ``Block`` subclass that, if present
        in the assembly, should trigger it to be of the corresponding ``assemType``.

        Warning
        -------
        There is no guarantee that you will find subclassing ``Assembly`` useful.

        Example
        -------
        .. code::

            [
                (HexBlock, HexAssembly),
                (CartesianBlock, CartesianAssembly),
                (ThRZBlock, ThRZAssembly),
            ]

        Returns
        -------
        list
            List of new Block&Assembly types
        """

    @staticmethod
    @HOOKSPEC
    def defineBlueprintsSections() -> List:
        """
        Return new sections for the blueprints input method.

        This hook allows plugins to extend the blueprints functionality with their own sections.

        Returns
        -------
        list
            (name, section, resolutionMethod) tuples, where:

            - name : The name of the attribute to add to the Blueprints class; this should be a valid Python
              identifier.
            - section : An instance of ``yaml.Attribute`` defining the data that is described by the Blueprints
              section.
            - resolutionMethod : A callable that takes a Blueprints object and case settings as arguments. This will
              be called like an unbound instance method on the passed Blueprints object to initialize the state of the
              new Blueprints section.

        Notes
        -----
        Most of the sections that a plugin would want to add may be better served as settings, rather than blueprints
        sections. These sections were added to the blueprints mainly because the schema is more flexible, allowing
        namespaces and hierarchical collections of settings. Perhaps in the near future it would make sense to enhance
        the settings system to support these features, moving the blueprints extensions out into settings. This is
        discussed in more detail in T1671.
        """

    @staticmethod
    @HOOKSPEC
    def defineEntryPoints() -> List:
        """
        Return new entry points for the ARMI CLI.

        This hook allows plugins to provide their own ARMI entry points, which each serve as a command in the command-
        line interface.

        Returns
        -------
        list
            class objects which derive from the base EntryPoint class.
        """

    @staticmethod
    @HOOKSPEC
    def defineSettings() -> List:
        """
        Define configuration settings for this plugin.

        .. impl:: Plugins can add settings to the run.
            :id: I_ARMI_PLUGIN_SETTINGS
            :implements: R_ARMI_PLUGIN_SETTINGS

            This hook allows plugin developers to provide their own configuration settings, which can participate in
            the :py:class:`armi.settings.caseSettings.Settings`. Plugins may provide entirely new settings to what are
            already provided by ARMI, as well as new options or default values for existing settings. For instance,
            the framework provides a ``neutronicsKernel`` setting for selecting which global physics solver to use.
            Since we wish to enforce that the user specify a valid kernel, the settings validator will check to make
            sure that the user's requested kernel is among the available options. If a plugin were to provide a new
            neutronics kernel (let's say MCNP), it should also define a new option to tell the settings system that
            ``"MCNP"`` is a valid option.

        Returns
        -------
        list
            A list of Settings, Options, or Defaults to be registered.

        See Also
        --------
        armi.physics.neutronics.NeutronicsPlugin.defineSettings
        armi.settings.setting.Setting
        armi.settings.setting.Option
        armi.settings.setting.Default
        """
        return []

    @staticmethod
    @HOOKSPEC
    def defineSettingsValidators(inspector) -> List:
        """
        Define the high-level settings input validators by adding them to an inspector.

        Parameters
        ----------
        inspector : :py:class:`armi.settings.settingsValidation.Inspector` instance
            The inspector to add queries to. See note below, this is not ideal.

        Notes
        -----
        These are higher-level than the input-level SCHEMA defined in :py:meth:`defineSettings` and are intended to be
        used for more complex cross-plugin info.

        We would prefer to not manipulate objects passed in directly, but rather have the inspection happen in a
        measurable hook. This would help find misbehaving plugins.

        See Also
        --------
        armi.settings.settingsValidation.Inspector : Runs the queries

        Returns
        -------
        list
            Query objects to attach
        """
        return []

    @staticmethod
    @HOOKSPEC
    def defineCaseDependencies(case, suite):
        r"""
        Function for defining case dependencies.

        Some Cases depend on the results of other ``Case``\ s in the same ``CaseSuite``. Which dependencies exist, and
        how they are discovered depends entirely on the type of analysis and active interfaces, etc. This function
        allows a plugin to inspect settings and declare dependencies between the passed ``case`` and any other cases
        in the passed ``suite``.

        Parameters
        ----------
        case : Case
            The specific case for which we want to find dependencies.
        suite : CaseSuite
            A CaseSuite object to which the Case and other potential dependencies belong.

        Returns
        -------
        dependencies : set of Cases
            This should return a set containing ``Case`` objects that are considered dependencies of the passed
            ``case``. They should be members of the passed ``suite``.
        """

    @staticmethod
    @HOOKSPEC
    def defineGuiWidgets() -> List:
        """
        Define which settings should go in the GUI.

        Rather than making widgets here, this simply returns metadata as a nested dictionary saying which tab to put
        which settings on, and a little bit about how to group them.

        Returns
        -------
        widgetData : list of dict
            Each dict is nested. First level contains the tab name (e.g. 'Global Flux'). Second level contains a box
            name. Third level contains help and a list of setting names

        See Also
        --------
        armi.gui.submitter.layout.abstractTab.AbstractTab.addSectionsFromPlugin : uses data structure

        Example
        -------
        >>> widgets = {
        ...     'Global Flux': {
        ...         'MCNP Solver Settings': {
        ...             'help': "Help message",
        ...             'settings': [
        ...                 "mcnpAddTallies",
        ...                 "useSrctp",
        ...             ]
        ...         }
        ...     }
        ... }
        """

    @staticmethod
    @HOOKSPEC
    def getOperatorClassFromRunType(runType: str):
        """Return an Operator subclass if the runType is recognized by this plugin."""

    @staticmethod
    @HOOKSPEC
    def defineParameterRenames() -> Dict:
        """
        Return a mapping from old parameter names to new parameter names.

        Occasionally, it may become necessary to alter the name of an existing parameter. This can lead to frustration
        when attempting to load from old database files that use the previous name. This hook allows a plugin to
        define mappings from the old name to the new name, allowing the old database to be read in and translated to
        the new parameter name.

        The following rules are followed when applying these renames:

        * When state is loaded from a database, if the parameter name in the database file is found in the rename
          dictionary, it will be mapped to that renamed parameter.
        * If the renamed parameter is found in the renames, then it will be mapped again to new parameter name. This
          process is repeated until there are no more renames left. This allows for parameters to be renamed multiple
          times, and for a database from several generations prior to still be readable, so long as the history of
          renames is intact.
        * If at the end of the above process, the parameter name is not a defined parameter for the appropriate
          ``ArmiObject`` type, an exception is raised.
        * If any of the ``renames`` keys match any currently-defined parameters, an exception is raised.
        * If any of the ``renames`` collide with another plugin's ``renames``, an exception is raised.

        Returns
        -------
        renames : dict
            Keys should be an old parameter name, where the corresponding values are the new parameter name.

        Example
        -------
        The following would allow databases with values for either ``superOldParam`` or ``oldParam`` to be read into
        ``currentParam``::

            return {"superOldParam": "oldParam", "oldParam": "currentParam"}
        """

    @staticmethod
    @HOOKSPEC
    def mpiActionRequiresReset(cmd) -> bool:
        """
        Flag indicating when a reactor reset is required.

        Commands are sent through operators either as strings (old) or as MpiActions (newer). After some are sent, the
        reactor must be reset. This hook says when to reset. The reset operation is a (arguably suboptimal) response
        to some memory issues in very large and long-running cases.

        Parameters
        ----------
        cmd : str or MpiAction
            The ARMI mpi command being sent

        Returns
        -------
        bool

        See Also
        --------
        armi.operators.operatorMPI.OperatorMPI.workerOperate : Handles these flags
        """

    @staticmethod
    @HOOKSPEC
    def getReportContents(r, cs, report, stage, blueprint) -> None:
        """
        To generate a report.

        Parameters
        ----------
        r : Reactor
        cs : Settings
        report : ReportContent
            Report object to add contents to
        stage : ReportStage
            begin/standard/or end (stage of the report for when inserting BOL vs. EOL content)
        blueprint : Blueprint, optional
            for a reactor (if None, only partial contents created)
        """

    @staticmethod
    @HOOKSPEC
    def defineSystemBuilders() -> Dict[str, Callable[[str], "Composite"]]:
        """
        Convert a user-string from the systems section into a valid composite builder.

        Parameters
        ----------
        name : str
            Name of the system type defined by the user, e.g., ``"core"``

        Returns
        -------
        dict
            Dictionary that maps a grid type from the input file (e.g., ``"core"``)
            to a function responsible for building a grid of that type, e.g.,

            .. code::

                {
                    "core": armi.reactor.reactors.Core,
                    "excore": armi.reactor.excoreStructure.ExcoreStructure,
                    "sfp": armi.reactor.spentFuelPool.SpentFuelPool,
                }

        Notes
        -----
        The default :class:`~armi.reactor.ReactorPlugin` defines a ``"core"`` lookup and a ``"sfp"`` lookup, triggered
        to run after all other hooks have been run.
        """

    @staticmethod
    @HOOKSPEC(firstresult=True)
    def getAxialExpansionChanger() -> type["AxialExpansionChanger"]:
        """Produce the class responsible for performing axial expansion.

        Plugins can provide this hook to override or negate axial expansion. Will be used during initial construction
        of the core and assemblies, and can be a class to perform custom axial expansion routines.

        The first object returned that is not ``None`` will be used. Plugins are encouraged to add the
        ``tryfirst=True`` arguments to their ``HOOKIMPL`` invocations to make sure their specific implementation is
        earlier in the hook call sequence.

        Returns
        -------
        type of :class:`armi.reactor.converters.axialExpansionChanger.AxialExpansionChanger`

        Notes
        -----
        This hook **should not** provide an instance of the class. The construction of the changer will be handled by
        applications and plugins that need it.

        This hook should only be provided by one additional plugin in your application. Otherwise the
        `order of hook execution <https://pluggy.readthedocs.io/en/stable/index.html#call-time-order>`_ may not
        provide the behavior you expect.

        Examples
        --------
        >>> class MyPlugin(ArmiPlugin):
        ...     @staticmethod
        ...     @HOOKIMPL(tryfirst=True)
        ...     def getAxialExpansionChanger():
        ...         from myproject.physics import BespokeAxialExpansion
        ...
        ...         return BespokeAxialExpansion
        """
class UserPlugin(ArmiPlugin):
    """
    A runtime-defined variant of ``ArmiPlugin``, built from the ``userPlugins`` setting.

    Unlike a regular ``ArmiPlugin``, which is registered at import time, a ``UserPlugin`` is
    discovered while a run is already underway. To keep its side effects controllable, this class
    deliberately restricts what such a plugin may do.

    Notes
    -----
    The usual ``ArmiPlugin`` is much more flexible; if ``UserPlugin`` does not support what you
    want to do, just use an ``ArmiPlugin``.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.__enforceLimitations()

    def __enforceLimitations(self):
        """
        Restrict what a UserPlugin may define, relative to a regular ArmiPlugin.

        UserPlugins can be registered mid-run, so anything that must be known before the settings
        file is read (parameters, renames, new settings) is forbidden here.
        """
        if isinstance(self, UserPlugin):
            cls = type(self)
            assert len(cls.defineParameters()) == 0, (
                "UserPlugins cannot define parameters, consider using an ArmiPlugin."
            )
            assert len(cls.defineParameterRenames()) == 0, (
                "UserPlugins cannot define parameter renames, consider using an ArmiPlugin."
            )
            assert len(cls.defineSettings()) == 0, (
                "UserPlugins cannot define new Settings, consider using an ArmiPlugin."
            )
            # NOTE: These hooks are staunchly _not_ customizable in this class; the instance
            # attributes below shadow any subclass implementations. If you need these, please
            # use a regular ArmiPlugin.
            self.defineParameterRenames = lambda: {}
            self.defineSettings = lambda: []
            self.defineSettingsValidators = lambda: []

    @staticmethod
    @HOOKSPEC
    def defineParameters():
        """
        Prevent defining additional parameters; always returns an empty dict.

        .. warning:: This is not overridable.

        Notes
        -----
        It is a designed limitation of user plugins that they not define parameters. Parameters
        are defined when the App() is read in, which is LONG before the settings file has been
        read, so parameters exist before any user plugin is discovered. If you need this feature,
        just use an ArmiPlugin.
        """
        return {}

    @staticmethod
    @HOOKSPEC
    def defineParameterRenames():
        """
        Prevent parameter renames; always returns an empty dict.

        Warning
        -------
        This is not overridable.

        Notes
        -----
        It is a designed limitation of user plugins that they not generate parameter renames.
        Parameters are defined when the App() is read in, which is LONG before the settings file
        has been read, so parameters exist before any user plugin is discovered. If you need this
        feature, just use a normal Plugin.
        """
        return {}

    @staticmethod
    @HOOKSPEC
    def defineSettings():
        """
        Prevent new settings; always returns an empty list.

        Warning
        -------
        This is not overridable.

        Notes
        -----
        It is a designed limitation of user plugins that they not define new settings, so that
        they are able to be added to the plugin stack during run time.
        """
        return []

    @staticmethod
    @HOOKSPEC
    def defineSettingsValidators(inspector):
        """
        Prevent new settings validators; always returns an empty list.

        .. warning:: This is not overridable.

        Notes
        -----
        It is a designed limitation of user plugins that they not define new settings validators,
        so that they are able to be added to the plugin stack during run time.
        """
        return []
def getNewPluginManager() -> pluginManager.ArmiPluginManager:
    """Build and return a fresh plugin manager with the framework hookspecs pre-registered."""
    manager = pluginManager.ArmiPluginManager("armi")
    manager.add_hookspecs(ArmiPlugin)
    return manager
def collectInterfaceDescriptions(mod, cs):
    """
    Adapt old-style ``describeInterfaces`` to the new plugin interface.

    Old ``describeInterfaces`` implementations returned either ``None``, a single
    ``(interfaceClass, kwargs)`` tuple, or a list of such tuples. This normalizes all three
    shapes into a list of ``InterfaceInfo`` objects, stapling the module's ``ORDER`` attribute
    onto each entry.
    """
    from armi import interfaces

    described = mod.describeInterfaces(cs)
    if described is None:
        return []
    if not isinstance(described, list):
        # a bare (klass, kwargs) tuple; normalize to a one-element list
        described = [described]
    return [interfaces.InterfaceInfo(mod.ORDER, klass, kwargs) for klass, kwargs in described]
class PluginError(RuntimeError):
    """
    Special exception class for use when a plugin appears to be non-conformant.

    These should always come from some form of programmer error, and indicate conditions such as:

    - A plugin improperly implementing a hook, when possible to detect.
    - A collision between components provided by plugins (e.g. two plugins providing the same Blueprints section)
    """
================================================
FILE: armi/reactor/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
The reactor package houses the data model used in ARMI to represent the reactor during its
simulation. It contains definitions of the reactor, assemblies, blocks, components, etc.
See :doc:`/developer/index`.
"""
from typing import TYPE_CHECKING, Callable, Dict, Union
from armi import materials, plugins
if TYPE_CHECKING:
from armi.reactor.excoreStructure import ExcoreStructure
from armi.reactor.reactors import Core
from armi.reactor.spentFuelPool import SpentFuelPool
class ReactorPlugin(plugins.ArmiPlugin):
    """Plugin exposing built-in reactor components, blocks, assemblies, etc."""

    @staticmethod
    @plugins.HOOKIMPL
    def beforeReactorConstruction(cs) -> None:
        """
        Apply the user's material namespace ordering, if one was set, before building the reactor.

        Often this setting is set by the application.
        """
        from armi.settings.fwSettings.globalSettings import (
            CONF_MATERIAL_NAMESPACE_ORDER,
        )

        namespaceOrder = cs[CONF_MATERIAL_NAMESPACE_ORDER]
        if namespaceOrder:
            materials.setMaterialNamespaceOrder(namespaceOrder)

    @staticmethod
    @plugins.HOOKIMPL
    def defineBlockTypes():
        """Map built-in Component shapes to the Block subclasses they should activate."""
        from armi.reactor import blocks
        from armi.reactor.components.basicShapes import Hexagon, Rectangle
        from armi.reactor.components.volumetricShapes import RadialSegment

        registrations = [
            (Rectangle, blocks.CartesianBlock),
            (RadialSegment, blocks.ThRZBlock),
            (Hexagon, blocks.HexBlock),
        ]
        return registrations

    @staticmethod
    @plugins.HOOKIMPL
    def defineAssemblyTypes():
        """Map built-in Block subclasses to the Assembly subclasses they should trigger."""
        from armi.reactor.assemblies import CartesianAssembly, HexAssembly, ThRZAssembly
        from armi.reactor.blocks import CartesianBlock, HexBlock, ThRZBlock

        registrations = [
            (HexBlock, HexAssembly),
            (CartesianBlock, CartesianAssembly),
            (ThRZBlock, ThRZAssembly),
        ]
        return registrations

    @staticmethod
    @plugins.HOOKIMPL(trylast=True)
    def defineSystemBuilders() -> Dict[str, Callable[[str], Union["Core", "ExcoreStructure", "SpentFuelPool"]]]:
        """Provide the default ``core``/``excore``/``sfp`` system builders (runs after other plugins)."""
        from armi.reactor.excoreStructure import ExcoreStructure
        from armi.reactor.reactors import Core
        from armi.reactor.spentFuelPool import SpentFuelPool

        builders = {
            "core": Core,
            "excore": ExcoreStructure,
            "sfp": SpentFuelPool,
        }
        return builders

    @staticmethod
    @plugins.HOOKIMPL(trylast=True)
    def getAxialExpansionChanger():
        """Provide the framework's default axial expansion changer class (runs after other plugins)."""
        from armi.reactor.converters.axialExpansionChanger import AxialExpansionChanger

        return AxialExpansionChanger
================================================
FILE: armi/reactor/assemblies.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Assemblies are collections of Blocks.
Generally, Blocks are stacked from bottom to top.
"""
import copy
import math
import pickle
from collections.abc import Iterable
from random import randint
from typing import ClassVar, Optional, Type
import numpy as np
from scipy import interpolate
from armi import runLog
from armi.materials.material import Fluid
from armi.reactor import assemblyParameters, blocks, composites, grids
from armi.reactor.flags import Flags, TypeSpec
from armi.reactor.parameters import ParamLocation
from armi.reactor.spentFuelPool import SpentFuelPool
class Assembly(composites.Composite):
"""
A single assembly in a reactor made up of blocks built from the bottom up.
Append blocks to add them up. Index blocks with 0 being the bottom.
"""
_BLOCK_TYPE: ClassVar[Optional[Type[blocks.Block]]] = None
pDefs = assemblyParameters.getAssemblyParameterDefinitions()
# For assemblies coming in from the database, waiting to be loaded to their old
# position. This is a necessary distinction, since we need to make sure that a bunch
# of fuel management stuff doesn't treat its re-placement into the core as a new move
DATABASE = "database"
LOAD_QUEUE = "LoadQueue"
SPENT_FUEL_POOL = "SFP"
DELETE = "Delete"
NOT_CREATED_YET = "NotCreatedYet" # used in assembly location history tracking
NOT_IN_CORE = [LOAD_QUEUE, SPENT_FUEL_POOL, DELETE, NOT_CREATED_YET]
def __init__(self, typ, assemNum=None):
"""
Parameters
----------
typ : str
Name of assembly design (e.g. the name from the blueprints input file).
assemNum : int, optional
The unique ID number of this assembly. If None is provided, we generate a
random int. This makes it clear that it is a placeholder. When an assembly with
a negative ID is placed into a Reactor, it will be given a new, positive ID.
"""
# If no assembly number is provided, generate a random number as a placeholder.
if assemNum is None:
assemNum = randint(-9000000000000, -1)
name = self.makeNameFromAssemNum(assemNum)
composites.Composite.__init__(self, name)
self.p.assemNum = assemNum
self.setType(typ)
self._current = 0 # for iterating
self.p.buLimit = self.getMaxParam("buLimit")
self.lastLocationLabel = self.LOAD_QUEUE
self.p.orientation = np.array((0.0, 0.0, 0.0))
self.p.ringPosHist = []
def __repr__(self):
msg = "<{typeName} Assembly {name} at {loc}>".format(
name=self.getName(), loc=self.getLocation(), typeName=self.getType()
)
return msg
    def __lt__(self, other):
        """
        Compare two assemblies by location.

        Notes
        -----
        As with other ArmiObjects, Assemblies are sorted based on location. Assemblies are more
        permissive in the grid consistency checks to accommodate situations where assemblies might be
        children of the same Core, but not in the same grid as each other (like in the spent fuel
        pool). In these situations, the operator returns ``False``. This behavior may lead to some
        strange sorting behavior when two or more Assemblies are being compared that do not live in
        the same grid.

        See Also
        --------
        armi.reactor.composites.ArmiObject.__lt__
        """
        try:
            return composites.ArmiObject.__lt__(self, other)
        except ValueError:
            # grids are inconsistent (e.g. different grids under one Core); treat as not-less-than
            return False
def renameBlocksAccordingToAssemblyNum(self):
"""
Updates the names of all blocks to comply with the assembly number.
Useful after an assembly number/name has been loaded from a snapshot and you
want to update all block names to be consistent.
It may be better to store block numbers on each block as params. A database that
can hold strings would be even better.
Notes
-----
You must run armi.reactor.reactors.Reactor.regenAssemblyLists after calling this.
"""
assemNum = self.getNum()
for bi, b in enumerate(self):
b.setName(b.makeName(assemNum, bi))
@staticmethod
def makeNameFromAssemNum(assemNum):
"""
Set the name of this assembly (and the containing blocks) based on an assemNum.
AssemNums are like serial numbers for assemblies.
"""
return "A{0:04d}".format(int(assemNum))
def renumber(self, newNum):
"""
Change the assembly number of this assembly.
And handle the downstream impacts of changing the name of this Assembly and all
of the Blocks within this Assembly.
Parameters
----------
newNum : int
The new Assembly number.
"""
self.p.assemNum = int(newNum)
self.name = self.makeNameFromAssemNum(self.p.assemNum)
self.renameBlocksAccordingToAssemblyNum()
def makeUnique(self):
"""
Function to make an assembly unique by getting a new assembly number.
This also adjusts the assembly's blocks IDs. This is necessary when using
``deepcopy`` to get a unique ``assemNum`` since a deepcopy implies it would
otherwise have been the same object.
"""
# Default to a random negative assembly number (unique enough)
self.p.assemNum = randint(-9000000000000, -1)
self.renumber(self.p.assemNum)
def _checkPotentialChild(self, obj: blocks.Block, action: str = "add"):
"""An internal helper method to ensure the Block type is valid for this Assembly."""
if self._BLOCK_TYPE is None or isinstance(obj, self._BLOCK_TYPE):
# this is the right Block, pass on
return
# if we got here, this Block is not the right type for this Assembly
msg = f"Cannot {action} {obj} to this Assembly, it is not a {self._BLOCK_TYPE}."
runLog.error(msg)
raise TypeError(msg)
    def add(self, obj: blocks.Block):
        """
        Add an object to this assembly.

        The simple act of adding a block to an assembly fully defines the location of
        the block in 3-D.

        .. impl:: Assemblies are made up of type Block.
            :id: I_ARMI_ASSEM_BLOCKS
            :implements: R_ARMI_ASSEM_BLOCKS

            Adds a unique Block to the top of the Assembly. If the Block already
            exists in the Assembly, an error is raised in
            :py:meth:`armi.reactor.composites.Composite.add`.
            The spatialLocator of the Assembly is updated to account for
            the new Block. In ``reestablishBlockOrder``, the Assembly spatialGrid
            is reinitialized and Block-wise spatialLocator and name objects
            are updated. The axial mesh and other Block geometry parameters are
            updated in ``calculateZCoords``.
        """
        self._checkPotentialChild(obj, "add")
        composites.Composite.add(self, obj)
        # new block goes on top of the stack: axial index is len(self) - 1
        obj.spatialLocator = self.spatialGrid[0, 0, len(self) - 1]
        # more work is needed, make a new mesh
        self.reestablishBlockOrder()
        self.calculateZCoords()
    def insert(self, index, obj):
        """Insert an object at a given index position with the assembly.

        The Block's type is validated first, and its spatialLocator is set to the
        axial position ``index`` in this Assembly's grid.
        """
        self._checkPotentialChild(obj, "insert")
        composites.Composite.insert(self, index, obj)
        obj.spatialLocator = self.spatialGrid[0, 0, index]
    def moveTo(self, locator):
        """Move an assembly somewhere else.

        Updates fuel-management bookkeeping (``numMoves``, ``daysSinceLastMove``) unless the
        assembly is being re-placed from the database, and rescales volume-integrated
        parameters if the symmetry factor changed.
        """
        oldSymmetryFactor = self.getSymmetryFactor()
        composites.Composite.moveTo(self, locator)
        # re-placement from the database must not be counted as a fuel-management move
        if self.lastLocationLabel != self.DATABASE:
            self.p.numMoves += 1
            self.p.daysSinceLastMove = 0.0
        self.parent.childrenByLocator[locator] = self
        # symmetry may have changed (either moving on or off of symmetry line)
        self.clearCache()
        self.scaleParamsToNewSymmetryFactor(oldSymmetryFactor)
def scaleParamsToNewSymmetryFactor(self, oldSymmetryFactor):
scalingFactor = oldSymmetryFactor / self.getSymmetryFactor()
if scalingFactor == 1:
return
blockVolIntegratedParamsToScale = self[0].p.paramDefs.atLocation(ParamLocation.VOLUME_INTEGRATED)
for b in self:
self._scaleParams(b, blockVolIntegratedParamsToScale, scalingFactor)
assemblyVolIntegratedParamsToScale = self.p.paramDefs.atLocation(ParamLocation.VOLUME_INTEGRATED)
self._scaleParams(self, assemblyVolIntegratedParamsToScale, scalingFactor)
@staticmethod
def _scaleParams(obj, params, scalingFactor):
for param in params:
name = param.name
if obj.p[name] is None or isinstance(obj.p[name], str):
continue
elif isinstance(obj.p[name], Iterable):
obj.p[name] = [value * scalingFactor for value in obj.p[name]]
else:
# numpy array or other
obj.p[name] = obj.p[name] * scalingFactor
def getNum(self):
"""Return unique integer for this assembly."""
return int(self.p.assemNum)
def getLocation(self):
"""
Get string label representing this object's location.
.. impl:: Assembly location is retrievable.
:id: I_ARMI_ASSEM_POSI0
:implements: R_ARMI_ASSEM_POSI
This method returns a string label indicating the location
of an Assembly. There are three options: 1) the Assembly
is not within a Core object and is interpreted as in the
"load queue"; 2) the Assembly is within the spent fuel pool;
3) the Assembly is within a Core object, so it has a physical
location within the Core.
"""
# just use ring and position, not axial (which is 0)
if not self.parent:
return self.LOAD_QUEUE
elif isinstance(self.parent, SpentFuelPool):
return self.SPENT_FUEL_POOL
return self.parent.spatialGrid.getLabel(self.spatialLocator.getCompleteIndices()[:2])
def coords(self):
"""Return the location of the assembly in the plane using cartesian global coordinates.
.. impl:: Assembly coordinates are retrievable.
:id: I_ARMI_ASSEM_POSI1
:implements: R_ARMI_ASSEM_POSI
In this method, the spatialLocator of an Assembly is leveraged to return its physical
(x,y) coordinates in cm.
"""
x, y, _z = self.spatialLocator.getGlobalCoordinates()
return (x, y)
def getArea(self):
"""
Return the area of the assembly by looking at its first block.
The assumption is that all blocks in an assembly have the same area. Calculate the total
assembly volume in cm^3.
"""
try:
return self[0].getArea()
except IndexError:
runLog.warning(f"{self} has no blocks and therefore no area.")
return None
def getVolume(self):
"""Calculate the total assembly volume in cm^3."""
return self.getArea() * self.getTotalHeight()
def getPinPlenumVolumeInCubicMeters(self) -> float:
"""
Return the total volume of the plenum for an assembly in m^3.
Notes
-----
If there is no plenum blocks in the assembly, a plenum volume of 0.0 is returned.
Warning
-------
This is a bit design-specific for pinned assemblies.
Returns
-------
float: Total plenum volume for an assembly.
"""
plenumVolume = 0.0
for b in self.iterChildrenWithFlags(Flags.PLENUM):
length = b.getHeight()
for c in b.iterChildrenWithFlags(Flags.CLAD):
cladId = c.getDimension("id")
plenumVolume += math.pi * (cladId / 2.0) ** 2.0 * length
# convert vol from cm^3 to m^3
plenumVolume *= 1e-6
return plenumVolume
def getAveragePlenumTemperature(self):
"""Return the average of the plenum block outlet temperatures."""
plenumBlocks = self.iterChildrenWithFlags(Flags.PLENUM)
plenumTemps = [b.p.THcoolantOutletT for b in plenumBlocks]
# no plenum blocks, use the top block of the assembly for plenum temperature
if not plenumTemps:
runLog.warning("No plenum blocks exist. Using outlet coolant temperature.")
plenumTemps = [self[-1].p.THcoolantOutletT]
return sum(plenumTemps) / len(plenumTemps)
    def adjustResolution(self, refA):
        """Split the blocks in this assembly to have the same mesh structure as refA.

        Blocks whose heights already match the corresponding reference block
        are kept as-is. A block taller than its reference counterpart is
        replaced by several deep copies, each resized to match successive
        reference block heights, until the original height is consumed (to a
        1e-5 cm tolerance). A block *shorter* than its reference counterpart
        cannot be handled and raises ``RuntimeError``. Afterwards the assembly
        is rebuilt from the new block stack on a fresh axial grid.
        """
        newBlockStack = []
        newBlocks = 0  # number of new blocks we've added so far.
        for i, b in enumerate(self):
            refB = refA[i + newBlocks]  # pick the block that is "supposed to" line up with refB.
            if refB.getHeight() == b.getHeight():
                # these blocks line up
                newBlockStack.append(b)
                continue
            elif refB.getHeight() > b.getHeight():
                raise RuntimeError(
                    "can't split {0} ({1}cm) into larger blocks to match ref block {2} ({3}cm)".format(
                        b, b.getHeight(), refB, refB.getHeight()
                    )
                )
            else:
                # b is larger than refB. Split b up by splitting it into several smaller blocks of
                # refBs
                heightToChop = b.getHeight()
                heightChopped = 0.0
                while abs(heightChopped - heightToChop) > 1e-5:  # stop when they are equal. floating point.
                    # update which ref block we're on (does nothing on the first pass)
                    refB = refA[i + newBlocks]
                    newB = copy.deepcopy(b)
                    newB.setHeight(refB.getHeight())  # make block match ref mesh
                    newBlockStack.append(newB)
                    heightChopped += refB.getHeight()
                    newBlocks += 1
                    runLog.important(f"Added a new block {newB} of height {newB.getHeight()}")
                    runLog.important(f"Chopped {heightChopped} of {heightToChop}")
                # subtract one because we eliminated the original b completely.
                newBlocks -= 1
        # rebuild this assembly from the new stack on a fresh axial grid
        self.removeAll()
        self.spatialGrid = grids.AxialGrid.fromNCells(len(newBlockStack))
        for b in newBlockStack:
            self.add(b)
        self.reestablishBlockOrder()
def getAxialMesh(self, centers=False, zeroAtFuel=False):
"""
Make a list of the block z-mesh tops from bottom to top in cm.
Parameters
----------
centers : bool, optional
Return centers instead of tops. If centers and zeroesAtFuel the zero point will be
center of first fuel.
zeroAtFuel : bool, optional
If true will make the (bottom or center depending on centers) of the first fuel block be
the zero point instead of the bottom of the first block.
See Also
--------
armi.reactor.assemblies.Assembly.makeAxialSnapList : makes index-based lookup of axial mesh
armi.reactor.reactors.Reactor.findAllAxialMeshPoints : gets a global list of all of these,
plus finer res.
"""
bottom = 0.0
meshVals = []
fuelIndex = None
for bi, b in enumerate(self):
top = bottom + b.getHeight()
if centers:
center = bottom + (top - bottom) / 2.0
meshVals.append(center)
else:
meshVals.append(top)
bottom = top
if fuelIndex is None and b.isFuel():
fuelIndex = bi
if zeroAtFuel:
# adjust the mesh to put zero at the first fuel block.
zeroVal = meshVals[fuelIndex]
meshVals = [mv - zeroVal for mv in meshVals]
return meshVals
def calculateZCoords(self):
"""
Set the center z-coords of each block and the params for axial expansion.
See Also
--------
reestablishBlockOrder
"""
bottom = 0.0
mesh = [bottom]
for bi, b in enumerate(self):
b.p.z = bottom + (b.getHeight() / 2.0)
b.p.zbottom = bottom
top = bottom + b.getHeight()
b.p.ztop = top
mesh.append(top)
bottom = top
b.spatialLocator = self.spatialGrid[0, 0, bi]
# also update the 1-D axial assembly level grid (this is intended to replace z,
# ztop, zbottom, etc.)
# length of this is numBlocks + 1
bounds = list(self.spatialGrid._bounds)
bounds[2] = np.array(mesh)
self.spatialGrid._bounds = tuple(bounds)
def getTotalHeight(self, typeSpec=None):
"""
Determine the height of this assembly in cm.
Parameters
----------
typeSpec : See :py:meth:`armi.composites.Composite.hasFlags`
Returns
-------
height : float
the height in cm
"""
h = 0.0
for b in self:
if b.hasFlags(typeSpec):
h += b.getHeight()
return h
    def getHeight(self, typeSpec=None):
        """Return the height of this assembly in cm; alias for :py:meth:`getTotalHeight`."""
        return self.getTotalHeight(typeSpec)
def getReactiveHeight(self, enrichThresh=0.02):
"""
Returns the zBottom and total height in cm that has fissile enrichment over
enrichThresh.
"""
reactiveH = 0.0
zBot = None
z = 0.0
for b in self:
h = b.getHeight()
if b.getFissileMass() > 0.01 and b.getFissileMassEnrich() > enrichThresh:
if zBot is None:
zBot = z
reactiveH += h
z += h
return zBot, reactiveH
def getElevationBoundariesByBlockType(self, blockType=None):
"""
Gets of list of elevations, ordered from bottom to top of all boundaries of the block of specified type.
Useful for determining location of the top of the upper grid plate or active
fuel, etc by using [0] to get the lowest boundary and [-1] to get highest
Notes
-----
The list will have duplicates when blocks of the same type share a boundary.
this is intentional. It makes it easy to grab pairs off the list and know that
the first item in a pair is the bottom boundary and the second is the top.
Parameters
----------
blockType : str
Block type to find. empty accepts all
Returns
-------
elevation : list of floats
Every float in the list is an elevation of a block boundary for the block
type specified (has duplicates)
"""
elevation, elevationsWithBlockBoundaries = 0.0, []
# loop from bottom to top, stopping at the first instance of blockType
for b in self:
if b.hasFlags(blockType):
elevationsWithBlockBoundaries.append(elevation) # bottom Boundary
elevationsWithBlockBoundaries.append(elevation + b.getHeight()) # top Boundary
elevation += b.getHeight()
return elevationsWithBlockBoundaries
def getElevationsMatchingParamValue(self, param, value):
"""
Return the elevations (z-coordinates) where the specified param takes the
specified value.
Uses linear interpolation, assuming params correspond to block centers
Parameters
----------
param : str
Name of param to try and match
value: float
Returns
-------
heights : list
z-coordinates where the specified param takes the specified value
"""
heights = []
# loop from bottom to top
for i in range(0, len(self) - 1):
diff1 = self[i].p[param] - value
diff2 = self[i + 1].p[param] - value
z1 = (self[i].p.zbottom + self[i].p.ztop) / 2
z2 = (self[i + 1].p.zbottom + self[i + 1].p.ztop) / 2
if diff1 == diff2: # params are flat
if diff1 != 0: # no match
continue
else:
if z1 not in heights:
heights.append(z1)
if z2 not in heights:
heights.append(z2)
# check if param is bounded by two adjacent blocks
elif diff1 * diff2 <= 0:
tie = diff1 / (diff1 - diff2)
z = z1 + tie * (z2 - z1)
if z not in heights: # avoid duplicates
heights.append(z)
return heights
def getAge(self):
"""Gets a height-averaged residence time of this assembly in days."""
at = 0.0
for b in self:
at += b.p.residence * b.getHeight()
return at / self.getTotalHeight()
def makeAxialSnapList(self, refAssem=None, refMesh=None, force=False):
"""
Creates a list of block indices that should track axially with refAssem's.
When axially expanding, the control rods, shields etc. need to maintain mesh
lines with the rest of the core. To do this, we'll just keep track of which
indices of a reference assembly we should stick with. This method writes the
indices of the top of a block to settings as topIndex.
Keep in mind that assemblies can have different number of blocks. This is why
this function is useful. So this makes a list of reference indices that
correspond to different axial mesh points on this assembly.
This is the depletion mesh we're returning, useful for snapping after axial
extension. Note that the neutronics mesh on rebusOutputs might be different.
See Also
--------
setBlockMesh : applies a snap.
"""
if not force and self[-1].p.topIndex > 0:
return
refMesh = refAssem.getAxialMesh() if refMesh is None else refMesh
selfMesh = self.getAxialMesh()
# make a list relating this assemblies axial mesh points to indices of the
# reference assembly
z = 0.0
for b in self:
top = z + b.getHeight()
try:
b.p.topIndex = np.where(np.isclose(refMesh, top))[0].tolist()[0]
except IndexError:
runLog.error(
"Height {0} in this assembly ({1} in {4}) is not in the reactor mesh "
"list from {2}\nThis has: {3}\nIf you want to run "
"a case with non-uniform axial mesh, activate the `detailedAxialExpansion` "
"setting".format(top, self, refMesh, selfMesh, self.parent)
)
raise
z = top
def _shouldMassBeConserved(self, belowFuelColumn, b):
"""
Determine from a rule set if the mass of a block component should be conserved during axial expansion.
Parameters
----------
belowFuelColumn : boolean
Determines whether a block is below the fuel column or not in fuel
assemblies
b : armi block
The block that is being examined for modification
Returns
-------
conserveMass : boolean
Should the mass be conserved in this block
conserveComponents : list of components
What components should have their mass conserved (if any)
belowFuelColumn : boolean
Update whether the block is above or below a fuel column
See Also
--------
armi.assemblies.Assembly.setBlockMesh
"""
if b.hasFlags(Flags.FUEL):
# fuel block
conserveMass = True
conserveComponents = b.getComponents(Flags.FUEL)
elif self.hasFlags(Flags.FUEL):
# non-fuel block of a fuel assembly.
if belowFuelColumn:
# conserve mass of everything below the fuel so as to not invalidate
# grid-plate dose calcs.
conserveMass = True
# conserve mass of everything except fluids.
conserveComponents = [comp for comp in b.getComponents() if not isinstance(comp.material, Fluid)]
else:
# plenum or above block in fuel assembly. don't conserve mass.
conserveMass = False
conserveComponents = []
else:
# non fuel block in non-fuel assem. Don't conserve mass.
conserveMass = False
conserveComponents = []
return conserveMass, conserveComponents
    def setBlockMesh(self, blockMesh, conserveMassFlag=False):
        """
        Snaps the axial mesh points of this assembly to correspond with the reference mesh.

        Notes
        -----
        This function only conserves mass on certain conditions:
            1) Fuel Assembly
                a) Structural material below the assembly conserves mass to accurate
                   depict grid plate shielding Sodium is not conserved.
                b) Fuel blocks only conserve mass of the fuel, not the structure since
                   the fuel slides up through the cladding (thus fuel/cladding should be
                   reduced).
                c) Structure above the assemblies (expected to be plenum) do not
                   conserve mass since plenum regions have their height reduced to
                   conserve the total structure mass when the fuel grows in the
                   cladding.  See b)
            2) Reflectors, shields, and control rods
                a) These assemblies do not conserve mass since they should remain
                   uniform to keep radial shielding accurate. This approach should be
                   conservative.
                b) Control rods do not have their mass conserved and the control rod
                   interface is required to be run after this function is called to
                   correctly place mass of poison axially.

        Parameters
        ----------
        blockMesh : iterable
            A list of floats describing the upper mesh points of each block in cm.

        conserveMassFlag : bool or str
            Option for how to treat mass conservation when the block mesh changes.
            Conservation of mass for fuel components is enabled by
            conserveMassFlag="auto". If not auto, a boolean value should be
            passed. The default is False, which does not conserve any masses.
            True conserves mass for all components.

        See Also
        --------
        makeAxialSnapList : Builds the lookup table used by this method
        getAxialMesh : builds a mesh compatible with this
        """
        # Just adjust the heights and everything else will fall into place
        zBottom = 0.0
        belowFuelColumn = True
        # topIndex == 0 on the last block means makeAxialSnapList never ran for
        # this assembly, so there is nothing sensible to snap to
        if self[-1].p.topIndex == 0:
            runLog.warning(
                "Reference uniform mesh not being applied to {}. It was likely "
                "excluded through the setting `nonUniformAssemFlags`.".format(self.p.type)
            )
            return
        for b in self:
            # once a fuel block is seen, all subsequent blocks are above the fuel column
            if b.isFuel():
                belowFuelColumn = False
            topIndex = b.p.topIndex
            if not 0 <= topIndex < len(blockMesh):
                runLog.warning(
                    "index {0} does not exist in topvals (len:{1}). 0D case? Skipping snap".format(
                        topIndex, len(blockMesh)
                    )
                )
                return
            newTop = blockMesh[topIndex]
            if newTop is None:
                runLog.warning("Skipping axial snapping on {0}".format(self), 1)
                return
            # choose which components (if any) keep their mass through the resize
            if conserveMassFlag == "auto":
                conserveMass, conserveComponents = self._shouldMassBeConserved(belowFuelColumn, b)
            else:
                conserveMass = conserveMassFlag
                conserveComponents = b.getComponents()
            oldBlockHeight = b.getHeight()
            b.setHeight(newTop - zBottom)
            if conserveMass:
                # scale number densities inversely with the height change
                heightRatio = oldBlockHeight / b.getHeight()
                for c in conserveComponents:
                    c.changeNDensByFactor(heightRatio)
            zBottom = newTop
        self.calculateZCoords()
def setBlockHeights(self, blockHeights):
"""Set the block heights of all blocks in the assembly."""
mesh = np.cumsum(blockHeights)
self.setBlockMesh(mesh)
def dump(self, fName=None):
"""Pickle the assembly and write it to a file."""
if not fName:
fName = self.getName() + ".dump.pkl"
with open(fName, "w") as pkl:
pickle.dump(self, pkl)
def iterBlocks(self, typeSpec=None, exact=False):
"""Produce an iterator over all blocks in this assembly from bottom to top.
Parameters
----------
typeSpec : Flags or list of Flags, optional
Restrict returned blocks to have these flags.
exact : bool, optional
If true, only produce blocks that have those exact flags.
Returns
-------
iterable of Block
See Also
--------
* :meth:`__iter__` - if no type spec provided, assemblies can be
naturally iterated upon.
* :meth:`iterChildrenWithFlags` - alternative if you know you have
a type spec that isn't ``None``.
"""
if typeSpec is None:
return iter(self)
return self.iterChildrenWithFlags(typeSpec, exact)
def getBlocks(self, typeSpec=None, exact=False):
"""
Get blocks in an assembly from bottom to top.
Parameters
----------
typeSpec : Flags or list of Flags, optional
Restrict returned blocks to those of this type.
exact : bool, optional
If true, will only return if there's an exact match in typeSpec
Returns
-------
blocks : list
List of blocks.
"""
return list(self.iterBlocks(typeSpec, exact))
def getBlocksAndZ(self, typeSpec=None, returnBottomZ=False, returnTopZ=False):
"""
Get blocks and their z-coordinates from bottom to top.
This method is useful when you need to know the z-coord of a block.
Parameters
----------
typeSpec : Flags or list of Flags, optional
Block type specification to restrict to
returnBottomZ : bool, optional
If true, will return bottom coordinates instead of centers.
Returns
-------
blocksAndCoords, list
(block, zCoord) tuples
Examples
--------
for block, bottomZ in a.getBlocksAndZ(returnBottomZ=True):
print({0}'s bottom mesh point is {1}'.format(block, bottomZ))
"""
if returnBottomZ and returnTopZ:
raise ValueError("Both returnTopZ and returnBottomZ are set to `True`")
blocks, zCoords = [], []
bottom = 0.0
for b in self:
top = bottom + b.getHeight()
mid = (bottom + top) / 2.0
if b.hasFlags(typeSpec):
blocks.append(b)
if returnBottomZ:
val = bottom
elif returnTopZ:
val = top
else:
val = mid
zCoords.append(val)
bottom = top
return zip(blocks, zCoords)
def hasContinuousCoolantChannel(self):
return all(b.containsAtLeastOneChildWithFlags(Flags.COOLANT) for b in self)
def getFirstBlock(self, typeSpec: TypeSpec = None, exact: bool = False) -> Optional[blocks.Block]:
"""Find the first block that matches the spec.
Parameters
----------
typeSpec
Specification to require on the returned block.
exact
Require block to exactly match ``typeSpec``
Returns
-------
Block or None
First block that matches if such a block could be found.
"""
if typeSpec is None:
items = iter(self)
else:
items = self.iterChildrenWithFlags(typeSpec, exact)
try:
# Create an iterator and attempt to advance it to the first value.
return next(items)
except StopIteration:
# No items found in the iteration -> no blocks match the request
return None
def getFirstBlockByType(self, typeName: str) -> Optional[blocks.Block]:
blocks = filter(lambda b: b.getType() == typeName, self)
try:
return next(blocks)
except StopIteration:
return None
def getBlockAtElevation(self, elevation: float) -> Optional[blocks.Block]:
"""
Returns the block at a specified axial dimension elevation (given in cm).
If height matches the exact top of the block, the block is considered at that
height.
Parameters
----------
elevation : float
The elevation of interest to grab a block (cm)
Returns
-------
targetBlock : block or None
The block that exists at the specified height in the reactor. ``None``
if a block was not found.
"""
bottomOfBlock = 0.0
for b in self:
topOfBlock = bottomOfBlock + b.getHeight()
if (
topOfBlock > elevation or abs(topOfBlock - elevation) / elevation < 1e-10
) and bottomOfBlock < elevation:
return b
bottomOfBlock = topOfBlock
return None
def getBIndexFromZIndex(self, zIndex):
"""
Returns the ARMI block axial index corresponding to a DIF3D node axial index.
Parameters
----------
zIndex : float
The axial index (beginning with 0) of a DIF3D node.
Returns
-------
bIndex : int
The axial index (beginning with 0) of the ARMI block containing the
DIF3D node corresponding to zIndex.
"""
zIndexTot = -1
for bIndex, b in enumerate(self):
zIndexTot += b.p.axMesh
if zIndexTot >= zIndex:
return bIndex
return -1 # no block index found
def getBlocksBetweenElevations(self, zLower, zUpper, eps=1e-10):
"""
Return block(s) between two axial elevations and their corresponding heights.
Parameters
----------
zLower, zUpper : float
Elevations in cm where blocks should be found.
eps : float, optional
Lower bound for relative block height fraction that we care about.
Below this bound, small slivers of overlapping block are ignored.
Returns
-------
blockInfo : list
list of (blockObj, overlapHeightInCm) tuples
Examples
--------
If the block structure looks like:
50.0 to 100.0 Block3
25.0 to 50.0 Block2
0.0 to 25.0 Block1
Then,
>>> a.getBlocksBetweenElevations(0, 50)
[(Block1, 25.0), (Block2, 25.0)]
>>> a.getBlocksBetweenElevations(0, 30)
[(Block1, 25.0), (Block2, 5.0)]
"""
blocksHere = []
for b in self:
if b.p.ztop >= zLower and b.p.zbottom <= zUpper:
# at least some of this block overlaps the window of interest
top = min(b.p.ztop, zUpper)
bottom = max(b.p.zbottom, zLower)
heightHere = top - bottom
# Filter out blocks that have an extremely small height fraction
if heightHere / b.getHeight() > eps:
blocksHere.append((b, heightHere))
return blocksHere
def getParamValuesAtZ(self, param, elevations, interpType="linear", fillValue=np.nan):
"""
Interpolates a param axially to find it at any value of elevation z.
By default, assumes that all parameters are for the center of a block. So for
parameters such as THoutletTemperature that are defined on the top, this may be
off. See the paramDefinedAt parameters.
Defaults to linear interpolations.
Notes
-----
This caches interpolators for each param and must be cleared if new params are
set or new heights are set.
Warning
-------
Fails when requested to extrapolate. With higher order splines it is possible to interpolate
non-physical values, for example, a negative flux or dpa. Please use caution when going off
default in interpType and be certain that interpolated values are physical.
Parameters
----------
param : str
the parameter to interpolate
elevations : array of float
the elevations from the bottom of the assembly in cm at which you want the point.
interpType: str or int
used in interp1d. interp1d documentation: Specifies the kind of interpolation
as a string ('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'
where 'slinear', 'quadratic' and 'cubic' refer to a spline interpolation of
first, second or third order) or as an integer specifying the order of the
spline interpolator to use. Default is 'linear'.
fillValue: str
Rough pass through to scipy.interpolate.interp1d. If 'extend', then the
lower and upper bounds are used as the extended value. If 'extrapolate',
then extrapolation is permitted.
Returns
-------
valAtZ : np.ndarray
This will be of the shape (z,data-shape)
"""
interpolator = self.getParamOfZFunction(param, interpType=interpType, fillValue=fillValue)
return interpolator(elevations)
def getParamOfZFunction(self, param, interpType="linear", fillValue=np.nan):
"""
Interpolates a param axially to find it at any value of elevation z.
By default, assumes that all parameters are for the center of a block. So for
parameters such as THoutletTemperature that are defined on the top, this may be
off. See the paramDefinedAt parameters.
Defaults to linear interpolations.
Notes
-----
This caches interpolators for each param and must be cleared if new params are
set or new heights are set.
Warning
-------
Fails when requested to extrapololate. With higher order splines it is possible to
interpolate nonphysical values, for example, a negative flux or dpa. Please use caution when
going off default in interpType and be certain that interpolated values are physical.
Parameters
----------
param : str
the parameter to interpolate
interpType: str or int
used in interp1d. interp1d documentation: Specifies the kind of interpolation
as a string ('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'
where 'slinear', 'quadratic' and 'cubic' refer to a spline interpolation of
first, second or third order) or as an integer specifying the order of the
spline interpolator to use. Default is 'linear'.
fillValue: float
Rough pass through to scipy.interpolate.interp1d. If 'extend', then the
lower and upper bounds are used as the extended value. If 'extrapolate',
then extrapolation is permitted.
Returns
-------
valAtZ : np.ndarray
This will be of the shape (z,data-shape)
"""
paramDef = self[0].p.paramDefs[param]
if not isinstance(paramDef.location, ParamLocation):
raise Exception(
"Cannot interpolate on `{}`. The ParamDefinition does not define a "
"valid location `{}`.\nValid locations are {}".format(
param,
paramDef.location,
", ".join([str(pl) for pl in ParamLocation]),
)
)
atCenter = bool(paramDef.location & (ParamLocation.CENTROID | ParamLocation.VOLUME_INTEGRATED))
z = self.getAxialMesh(atCenter)
if paramDef.location & ParamLocation.BOTTOM:
z.insert(0, 0.0)
z.pop(-1)
z = np.asarray(z)
values = self.getChildParamValues(param).transpose()
boundsError = None
if fillValue == "extend":
boundsError = False
if values.ndim == 1:
fillValue = values[0], values[-1]
elif values.ndim == 2:
fillValue = values[:, 0], values[:, 1]
else:
raise Exception(
'Unsupported shape ({}) returned from getChildParamValues("{}").'
"Shape must be 1 or 2 dimensions".format(values.shape, param)
)
interpolater = interpolate.interp1d(
z,
values,
kind=interpType,
fill_value=fillValue,
assume_sorted=True,
bounds_error=boundsError,
)
return interpolater
def reestablishBlockOrder(self):
"""
The block ordering has changed, so the spatialGrid and Block-wise spatialLocator and name objects need updating.
See Also
--------
calculateZCoords : updates the ztop/zbottom params on each block after
reordering.
"""
# replace grid with one that has the right number of locations
self.spatialGrid = grids.AxialGrid.fromNCells(len(self))
self.spatialGrid.armiObject = self
for zi, b in enumerate(self):
b.spatialLocator = self.spatialGrid[0, 0, zi]
# update the name too. NOTE: You must update the history tracker.
b.setName(b.makeName(self.p.assemNum, zi))
def countBlocksWithFlags(self, blockTypeSpec=None):
"""
Returns the number of blocks of a specified type.
blockTypeSpec : Flags or list
Restrict to only these types of blocks. typeSpec is None, return all of the
blocks
Returns
-------
blockCounter : int
number of blocks of this type
"""
return sum(1 for _ in self.iterBlocks(blockTypeSpec))
def getDim(self, typeSpec, dimName):
"""
With a preference for fuel blocks, find the first component in the Assembly with
flags that match ``typeSpec`` and return dimension as specified by ``dimName``.
Example: getDim(Flags.WIRE, 'od') will return a wire's OD in cm.
"""
# prefer fuel blocks.
bList = self.getBlocks(Flags.FUEL)
if not bList:
# no fuel blocks. take first block.
bList = self
for b in bList:
dim = b.getDim(typeSpec, dimName)
if dim:
return dim
# return none if there is nothing to return
return None
def getSymmetryFactor(self):
"""Return the symmetry factor of this assembly."""
return self[0].getSymmetryFactor()
def rotate(self, rad):
"""Rotates the spatial variables on an assembly by the specified angle.
Each Block on the Assembly is rotated in turn.
Parameters
----------
rad : float
number (in radians) specifying the angle of counter clockwise rotation
"""
self.p.orientation[2] += math.degrees(rad)
for b in self:
b.rotate(rad)
def isOnWhichSymmetryLine(self):
grid = self.parent.spatialGrid
return grid.overlapsWhichSymmetryLine(self.spatialLocator.getCompleteIndices())
def orientBlocks(self, parentSpatialGrid):
"""Add special grids to the blocks inside this Assembly, respecting their orientation.
Parameters
----------
parentSpatialGrid : Grid
Spatial Grid of the parent of this Assembly (probably a system-level grid).
"""
for b in self:
if b.spatialGrid is None:
try:
b.autoCreateSpatialGrids(parentSpatialGrid)
except (ValueError, NotImplementedError) as e:
runLog.extra(str(e), single=True)
# Do more grid initializations from a manual or auto created grid
if b.spatialGrid is not None:
b.assignPinIndices()
class HexAssembly(Assembly):
    """An assembly that is hexagonal in cross-section."""

    _BLOCK_TYPE = blocks.HexBlock

    def rotate(self, rad: float):
        """Rotate an assembly and its children.

        .. impl:: A hexagonal assembly shall support rotating around the z-axis in 60 degree increments.
            :id: I_ARMI_ROTATE_HEX_ASSEM
            :implements: R_ARMI_ROTATE_HEX

            This method loops through every ``Block`` in this ``HexAssembly`` and rotates it by a
            given angle (in radians). The rotation angle is positive in the counter-clockwise
            direction. To perform the ``Block`` rotation, the
            :meth:`armi.reactor.blocks.HexBlock.rotate` method is called.

        Parameters
        ----------
        rad : float
            Counter clockwise rotation in radians. **MUST** be in increments of 60 degrees (PI / 3)

        Raises
        ------
        ValueError
            If rotation is not divisible by pi / 3.
        """
        # ``rad % (pi / 3)`` maps a valid rotation either to ~0 or, when
        # floating point error puts ``rad`` just below a multiple of pi / 3,
        # to ~pi / 3. Both ends of the interval must be accepted or valid
        # rotations such as ``6 * math.radians(60)`` can be spuriously rejected.
        remainder = rad % (math.pi / 3)
        if math.isclose(remainder, 0, abs_tol=1e-12) or math.isclose(remainder, math.pi / 3, abs_tol=1e-12):
            return super().rotate(rad)
        msg = f"Rotation must be in 60 degree increments, got {math.degrees(rad)} degrees ({rad} radians)."
        runLog.error(msg)
        raise ValueError(msg)
class CartesianAssembly(Assembly):
    """An assembly that is rectangular in cross-section."""

    # expected concrete Block type for children of this assembly
    _BLOCK_TYPE = blocks.CartesianBlock
class RZAssembly(Assembly):
    """
    RZAssembly are assemblies in RZ geometry; they need to be different objects than HexAssembly
    because they use different locations and need to have Radial Meshes in their setting.

    Notes
    -----
    ThRZAssemblies should be a subclass of Assemblies because they should have a common place to put
    information about subdividing the global mesh for transport. This is similar to how blocks have
    'AxialMesh' in their blocks.
    """

    def __init__(self, name, assemNum=None):
        Assembly.__init__(self, name, assemNum)
        # number of radial mesh subdivisions for transport (default: 1)
        self.p.RadMesh = 1

    def radialOuter(self):
        """Returns the outer radial boundary of this assembly (from its first block)."""
        return self[0].radialOuter()

    def radialInner(self):
        """Returns the inner radial boundary of this assembly (from its first block)."""
        return self[0].radialInner()

    def thetaOuter(self):
        """Returns the outer azimuthal boundary of this assembly (from its first block)."""
        return self[0].thetaOuter()

    def thetaInner(self):
        """Returns the inner azimuthal boundary of this assembly (from its first block)."""
        return self[0].thetaInner()
class ThRZAssembly(RZAssembly):
    """
    ThRZAssembly are assemblies in ThetaRZ geometry, they need to be different objects
    than HexAssembly because they use different locations and need to have Radial Meshes
    in their setting.
    """

    def __init__(self, assemType, assemNum=None):
        RZAssembly.__init__(self, assemType, assemNum)
        # number of azimuthal mesh subdivisions for transport (default: 1)
        self.p.AziMesh = 1
================================================
FILE: armi/reactor/assemblyParameters.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Assembly Parameter Definitions."""
from armi import runLog
from armi.reactor import parameters
from armi.reactor.parameters import ParamLocation
from armi.reactor.parameters.parameterDefinitions import isNumpyArray
from armi.utils import units
def getAssemblyParameterDefinitions():
    """
    Return the ARMI-default parameter definitions for Assembly objects.

    Returns
    -------
    parameters.ParameterDefinitionCollection
        The collection of all assembly-level parameter definitions.
    """
    pDefs = parameters.ParameterDefinitionCollection()

    with pDefs.createBuilder(location=ParamLocation.CENTROID) as pb:
        pb.defParam(
            "orientation",
            units=units.DEGREES,
            description=(
                "Triple representing rotations counterclockwise around each spatial axis. "
                "For example, a hex assembly rotated by 1/6th has orientation (0, 0, 60.0)"
            ),
            default=None,
        )

    with pDefs.createBuilder(location=ParamLocation.AVERAGE, default=0.0) as pb:
        pb.defParam(
            "arealPd",
            units=f"{units.MW}/{units.METERS}^2",
            description="Power in assembly divided by its XY cross-sectional area. Related to PCT.",
        )

        pb.defParam(
            "buLimit",
            units=units.UNITLESS,
            description="buLimit",
            default=parameters.NoDefault,
        )

        pb.defParam(
            "chargeBu",
            units=units.PERCENT_FIMA,
            description="Max block-average burnup in this assembly when it most recently entered "
            "the core. If the assembly was discharged and then re-charged, this value will only "
            "reflect the most recent charge.",
        )

        pb.defParam(
            "chargeCycle",
            units=units.UNITLESS,
            description="Cycle number that this assembly most recently entered the core. If the "
            "assembly was discharged and then re-charged, this value will only reflect the most "
            "recent charge.",
        )

        pb.defParam(
            "chargeFis",
            units=units.KG,
            description="Fissile mass in assembly when it most recently entered the core. If the "
            "assembly was discharged and then re-charged, this value will only reflect the most "
            "recent charge.",
        )

        pb.defParam(
            "chargeTime",
            units=units.YEARS,
            description="Time at which this assembly most recently entered the core. If the "
            "assembly was discharged and then re-charged, this value will only reflect the most "
            "recent charge.",
            default=parameters.NoDefault,
        )

        pb.defParam(
            "multiplicity",
            units=units.UNITLESS,
            description="The number of physical assemblies that the associated object represents. "
            "This is typically 1, but may need to change when the assembly is moved between "
            "containers with different types of symmetry. For instance, if an assembly moves from "
            "a Core with 1/3rd symmetry into a spent-fuel pool with full symmetry, rather than "
            "splitting the assembly into 3, the multiplicity can be set to 3. For now, this is a "
            "bit of a hack to make fuel handling work; multiplicity in the 1/3 core should be 3 to "
            "begin with, in which case this parameter could be used as the primary means of "
            "handling symmetry and fractional domains throughout ARMI. We will probably roll that "
            "out once the dust settles on some of this SFP work. For now, the Core stores "
            "multiplicity as 1 always, since the powerMultiplier to adjust to full-core "
            "quantities.",
            default=1,
        )

        pb.defParam("daysSinceLastMove", units=units.UNITLESS, description="daysSinceLastMove")
        pb.defParam("kInf", units=units.UNITLESS, description="kInf")
        pb.defParam("maxDpaPeak", units=units.DPA, description="maxDpaPeak")
        pb.defParam("maxPercentBu", units=units.PERCENT, description="maxPercentBu")
        pb.defParam("numMoves", units=units.UNITLESS, description="numMoves")
        pb.defParam("timeToLimit", units=units.DAYS, description="timeToLimit", default=1e6)

        pb.defParam(
            "guideTubeTopElevation",
            units=units.CM,
            description=("Elevation of the top of the guide tube relative to the bottom of the duct."),
            categories=[parameters.Category.assignInBlueprints],
            saveToDB=True,
        )

    with pDefs.createBuilder(location=ParamLocation.AVERAGE) as pb:
        pb.defParam(
            "detailedNDens",
            setter=isNumpyArray("detailedNDens"),
            units=f"atoms/(bn*{units.CM})",
            description=(
                "High-fidelity number density vector with up to thousands of nuclides. "
                "Used in high-fi depletion runs where low-fi depletion may also be occurring. "
                "This param keeps the hi-fi and low-fi depletion values from interfering."
            ),
            saveToDB=True,
            default=None,
        )

        def _enforceNotesRestrictions(self, value):
            """Setter for ``notes``: only accept strings, truncating them to 1000 characters."""
            if not isinstance(value, str):
                # Non-string values are rejected outright. Previously this raised a bare
                # ``ValueError`` with no message; include the message so callers see why.
                msg = "Values stored in the `notes` parameter must be strings of less than 1000 characters!"
                runLog.error(msg)
                raise ValueError(msg)
            elif len(value) > 1000:
                runLog.warning(
                    "Strings stored in the `notes` parameter must be less than 1000 characters. "
                    f"Truncating the note starting with {value[0:15]}... at 1000 characters!"
                )
                self._p_notes = value[0:1000]
            else:
                self._p_notes = value

        pb.defParam(
            "notes",
            units=units.UNITLESS,
            description="A string with notes about the assembly, limited to 1000 characters. This "
            "parameter is not meant to store data. Needlessly storing large strings on this "
            "parameter for every assembly is potentially unwise from a memory perspective.",
            saveToDB=True,
            default="",
            setter=_enforceNotesRestrictions,
        )

    with pDefs.createBuilder(location=ParamLocation.NA, default=0.0, categories=["control rods"]) as pb:
        pb.defParam(
            "crCriticalFraction",
            units=units.UNITLESS,
            description=(
                "The insertion fraction when the control rod assembly is in its critical "
                "configuration. Note that the default of -1.0 is a trigger for this value not "
                "being set yet."
            ),
            saveToDB=True,
            default=-1.0,
        )

        pb.defParam(
            "crCurrentElevation",
            units=units.CM,
            description="The current elevation of the bottom of the moveable section of a control rod assembly.",
            categories=[parameters.Category.assignInBlueprints],
            saveToDB=True,
        )

        pb.defParam(
            "crInsertedElevation",
            units=units.CM,
            description=(
                "The elevation of the furthest-most insertion point of a control rod assembly. For "
                "a control rod assembly inserted from the top, this will be the lower tip of the "
                "bottom-most moveable section in the assembly when fully inserted."
            ),
            categories=[parameters.Category.assignInBlueprints],
            saveToDB=True,
        )

        pb.defParam(
            "crRodLength",
            units=units.CM,
            description="length of the control material within the control rod",
            saveToDB=True,
        )

        pb.defParam(
            "crWithdrawnElevation",
            units=units.CM,
            description=(
                "The elevation of the tip of a control rod assembly when it is fully withdrawn. "
                "For a control rod assembly inserted from the top, this will be the lower tip of "
                "the bottom-most moveable section in the assembly when fully withdrawn."
            ),
            categories=[parameters.Category.assignInBlueprints],
            saveToDB=True,
        )

    with pDefs.createBuilder(location=ParamLocation.AVERAGE, default=0.0, categories=["thermal hydraulics"]) as pb:
        pb.defParam(
            "THcoolantOutletT",
            units=units.DEGC,
            description="The nominal average bulk coolant outlet temperature out of the block.",
            categories=["broadcast"],
        )

    with pDefs.createBuilder() as pb:
        pb.defParam(
            "type",
            units=units.UNITLESS,
            description="The name of the assembly input on the blueprints input",
            default="defaultAssemType",
            saveToDB=True,
        )

        pb.defParam(
            "ringPosHist",
            units=units.UNITLESS,
            description=(
                "Ring and position history for this assembly written at BOC. Index 1 corresponds to position at BOC1."
            ),
            default=None,
            saveToDB=True,
        )

        pb.defParam(
            "nozzleType",
            units=units.UNITLESS,
            description="nozzle type for assembly",
            default="Default",
            saveToDB=True,
            categories=[parameters.Category.assignInBlueprints],
        )

    with pDefs.createBuilder(default=0.0) as pb:
        pb.defParam("assemNum", units=units.UNITLESS, description="Assembly number")

        pb.defParam(
            "dischargeTime",
            units=units.YEARS,
            description="Time the Assembly was removed from the Reactor.",
        )

        pb.defParam(
            "hotChannelFactors",
            units=units.UNITLESS,
            description="Definition of set of HCFs to be applied to assembly.",
            default="Default",
            saveToDB=True,
            categories=[parameters.Category.assignInBlueprints],
        )

    with pDefs.createBuilder(categories=["radialGeometry"]) as pb:
        pb.defParam(
            "AziMesh",
            units=units.UNITLESS,
            description="Number of points in the Azimuthal mesh.",
            saveToDB=False,
            default=1,
        )

        pb.defParam(
            "RadMesh",
            units=units.UNITLESS,
            description="Number of points in the Radial mesh.",
            saveToDB=False,
            default=1,
        )

    return pDefs
================================================
FILE: armi/reactor/blockParameters.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parameter definitions for Blocks."""
from armi import runLog
from armi.physics.neutronics import crossSectionGroupManager
from armi.reactor import parameters
from armi.reactor.parameters import ParamLocation
from armi.reactor.parameters.parameterDefinitions import isNumpyArray
from armi.utils import units
from armi.utils.units import ASCII_LETTER_A, ASCII_LETTER_Z, ASCII_LETTER_a
def getBlockParameterDefinitions():
    """
    Return the ARMI-default parameter definitions for Block objects.

    Returns
    -------
    parameters.ParameterDefinitionCollection
        The collection of all block-level parameter definitions.
    """
    pDefs = parameters.ParameterDefinitionCollection()

    with pDefs.createBuilder(location=ParamLocation.CENTROID) as pb:
        pb.defParam(
            "orientation",
            units=units.DEGREES,
            description=(
                "Triple representing rotations counterclockwise around each spatial axis. For "
                "example, a hex assembly rotated by 1/6th has orientation (0, 0, 60.0)"
            ),
            default=None,
        )

        pb.defParam(
            "detailedNDens",
            setter=isNumpyArray("detailedNDens"),
            units=f"atoms/(bn*{units.CM})",
            description=(
                "High-fidelity number density vector with up to thousands of nuclides. "
                "Used in high-fi depletion runs where low-fi depletion may also be occurring. "
                "This param keeps the hi-fi and low-fi depletion values from interfering."
            ),
            location=ParamLocation.AVERAGE,
            saveToDB=False,
            default=None,
        )

    with pDefs.createBuilder(default=0.0, location=ParamLocation.AVERAGE, categories=["depletion"]) as pb:
        pb.defParam(
            "newDPA",
            units=units.DPA,
            description="Dose in DPA accrued during the current time step",
        )

        pb.defParam(
            "percentBu",
            units=units.PERCENT_FIMA,
            description="Percentage of the initial heavy metal atoms that have been fissioned",
            categories=["cumulative"],
        )

        pb.defParam(
            "percentBuByPin",
            units=units.PERCENT_FIMA,
            description="Percent burnup of the initial heavy metal atoms that have been fissioned for each pin",
            default=None,
            saveToDB=False,
            location=ParamLocation.CHILDREN,
        )

        pb.defParam(
            "residence",
            units=units.DAYS,
            description=(
                "Duration that a block has been in the core multiplied by the fraction "
                "of full power generated in that time."
            ),
            categories=["cumulative"],
        )

    with pDefs.createBuilder(default=0.0, location=ParamLocation.VOLUME_INTEGRATED, categories=["depletion"]) as pb:
        pb.defParam(
            "molesHmNow",
            units=f"{units.MOLES}",
            description="Total number of atoms of heavy metal",
        )

        pb.defParam(
            "molesHmBOL",
            units=f"{units.MOLES}",
            description="Total number of atoms of heavy metal at BOL.",
        )

        pb.defParam(
            "massHmBOL",
            units=units.GRAMS,
            description="Mass of heavy metal at BOL",
        )

        pb.defParam(
            "initialB10ComponentVol",
            units=f"{units.CM}^3",
            description=(
                "cc's of un-irradiated, cold B10 containing component (includes full volume of any components with B10)"
            ),
        )

    with pDefs.createBuilder(default=0.0, location=ParamLocation.AVERAGE) as pb:

        def envGroup(self, envGroupChar):
            """Setter for ``envGroup``; also syncs the linked ``envGroupNum`` parameter."""
            if isinstance(envGroupChar, (int, float)):
                # Tolerate integer values (e.g. from old databases) by routing through the
                # numeric setter, which keeps both parameters in sync.
                intValue = int(envGroupChar)
                runLog.warning(
                    f"Attempting to set `b.p.envGroup` to int value ({envGroupChar})."
                    "Possibly loading from old database",
                    single=True,
                    label="env group as int " + str(intValue),
                )
                self.envGroupNum = intValue
                return
            elif not isinstance(envGroupChar, str):
                # TypeError (was a bare Exception); still caught by `except Exception` callers.
                raise TypeError(f"Wrong type for envGroupChar {envGroupChar}: {type(envGroupChar)}")

            if envGroupChar.islower():
                # if lower case find the distance from lowercase a and add the span of A to Z
                lowerCaseOffset = ASCII_LETTER_Z - ASCII_LETTER_A + 1  # 26
                envGroupNum = ord(envGroupChar) - ASCII_LETTER_a + lowerCaseOffset
            else:
                envGroupNum = ord(envGroupChar) - ASCII_LETTER_A
            self._p_envGroup = envGroupChar
            self._p_envGroupNum = envGroupNum
            envGroupNumDef = parameters.ALL_DEFINITIONS["envGroupNum"]
            envGroupNumDef.assigned = parameters.SINCE_ANYTHING

        pb.defParam(
            "envGroup",
            units=units.UNITLESS,
            description="The environment group letter of this block",
            default="A",
            setter=envGroup,
        )

        def envGroupNum(self, envGroupNum):
            """Setter for ``envGroupNum``; also syncs the linked ``envGroup`` parameter."""
            # Support capital and lowercase alpha chars (52 = 26*2), so valid numbers are
            # 0-51 ('A'-'Z' then 'a'-'z'). NOTE: the check was previously `> 52`, which
            # wrongly admitted 52 and mapped it past 'z' to chr(123) == '{'.
            if envGroupNum >= 52:
                raise RuntimeError("Invalid env group number ({}): too many groups. 52 is the max.".format(envGroupNum))
            self._p_envGroupNum = envGroupNum
            lowerCaseOffset = ASCII_LETTER_Z - ASCII_LETTER_A
            if envGroupNum > lowerCaseOffset:
                envGroupNum = envGroupNum - (lowerCaseOffset + 1)
                self._p_envGroup = chr(envGroupNum + ASCII_LETTER_a)
            else:
                self._p_envGroup = chr(envGroupNum + ASCII_LETTER_A)
            envGroupDef = parameters.ALL_DEFINITIONS["envGroup"]
            envGroupDef.assigned = parameters.SINCE_ANYTHING

        pb.defParam(
            "envGroupNum",
            units=units.UNITLESS,
            description="An integer representation of the environment group "
            "(burnup/temperature/etc.). linked to envGroup.",
            default=0,
            setter=envGroupNum,
        )

        pb.defParam(
            "buRate",
            units=f"{units.PERCENT_FIMA}/{units.DAYS}",
            # This is very related to power, but normalized to %FIMA.
            description=(
                "Current rate of burnup accumulation. Useful for estimating times when burnup limits may be exceeded."
            ),
        )

        pb.defParam(
            "buRatePeak",
            units=f"{units.PERCENT_FIMA}/{units.DAYS}",
            description="Current rate of burnup accumulation at peak location",
            location=ParamLocation.MAX,
        )

        pb.defParam(
            "detailedDpa",
            units=units.DPA,
            description="displacements per atom",
            categories=["cumulative", "detailedAxialExpansion", "depletion"],
        )

        pb.defParam(
            "detailedDpaPeak",
            units=units.DPA,
            description="displacements per atom with peaking factor",
            categories=["cumulative", "detailedAxialExpansion", "depletion"],
            location=ParamLocation.MAX,
        )

        pb.defParam(
            "detailedDpaRate",
            units=f"{units.DPA}/{units.SECONDS}",
            description="Current time derivative of average detailed DPA",
            categories=["detailedAxialExpansion", "depletion"],
        )

        pb.defParam(
            "displacementX",
            units=units.METERS,
            description="Assembly displacement in the x direction",
        )

        pb.defParam(
            "displacementY",
            units=units.METERS,
            description="Assembly displacement in the y direction",
        )

        pb.defParam(
            "heliumInB4C",
            units=f"He/{units.SECONDS}/{units.CM}^3",
            description="Alpha particle production rate in B4C control and shield material.",
            location=ParamLocation.AVERAGE,
        )

        pb.defParam(
            "powerRx",
            units=f"{units.WATTS}/{units.CM}^3",
            description="Power density of the reactor",
            location=ParamLocation.AVERAGE,
        )

        pb.defParam(
            "timeToLimit",
            units=units.DAYS,
            description="Time unit block violates its burnup limit.",
        )

        pb.defParam(
            "zbottom",
            units=units.CM,
            description="Axial position of the bottom of this block",
            categories=[parameters.Category.retainOnReplacement],
        )

        pb.defParam(
            "ztop",
            units=units.CM,
            description="Axial position of the top of this block",
            categories=[parameters.Category.retainOnReplacement],
        )

        pb.defParam(
            "nHMAtBOL",
            units=f"atoms/(bn*{units.CM})",
            description="Ndens of heavy metal at BOL",
            saveToDB=False,
        )

        pb.defParam(
            "z",
            units=units.CM,
            description="Center axial dimension of this block",
            categories=[parameters.Category.retainOnReplacement],
        )

    with pDefs.createBuilder() as pb:
        pb.defParam(
            "axialExpTargetComponent",
            units=units.UNITLESS,
            description=(
                "The name of the target component used for axial expansion and contraction of solid components."
            ),
            default="",
            saveToDB=True,
        )

        pb.defParam(
            "topIndex",
            units=units.UNITLESS,
            # NOTE: a stray trailing comma previously made this description a 1-tuple
            # instead of a string; it has been removed.
            description=(
                "the axial block index within its parent assembly (0 is bottom block). This index with regard to the "
                "mesh of the reference assembly so it does not increase by 1 for each block. It is used to keep the "
                "mesh of the assemblies uniform with axial expansion. See setBlockMesh, makeAxialSnapList"
            ),
            default=0,
            saveToDB=True,
            categories=[parameters.Category.retainOnReplacement],
        )

        pb.defParam(
            "eqRegion",
            units=units.UNITLESS,
            description="Equilibrium shuffling region. Corresponds to how many full cycles fuel here has gone through.",
            default=0.0,
        )

        pb.defParam(
            "id",
            units=units.UNITLESS,
            description="Inner diameter of the Block.",
            default=None,
        )

        pb.defParam(
            "height",
            units=units.CM,
            description="the block height",
            default=None,
            categories=[parameters.Category.retainOnReplacement],
        )

        def xsType(self, value):
            """Setter for ``xsType``; also syncs the linked ``xsTypeNum`` parameter."""
            self._p_xsType = value
            self._p_xsTypeNum = crossSectionGroupManager.getXSTypeNumberFromLabel(value)
            xsTypeNumDef = parameters.ALL_DEFINITIONS["xsTypeNum"]
            xsTypeNumDef.assigned = parameters.SINCE_ANYTHING

        pb.defParam(
            "xsType",
            units=units.UNITLESS,
            description="The xs group letter of this block",
            default="A",
            setter=xsType,
        )

        def xsTypeNum(self, value):
            """Setter for ``xsTypeNum``; also syncs the linked ``xsType`` parameter."""
            self._p_xsTypeNum = value
            self._p_xsType = crossSectionGroupManager.getXSTypeLabelFromNumber(value)
            xsTypeDef = parameters.ALL_DEFINITIONS["xsType"]
            xsTypeDef.assigned = parameters.SINCE_ANYTHING

        pb.defParam(
            "xsTypeNum",
            units=units.UNITLESS,
            description="An integer representation of the cross section type, linked to xsType.",
            default=65,  # NOTE: buGroupNum actually starts at 0
            setter=xsTypeNum,
        )

        pb.defParam(
            "type",
            units=units.UNITLESS,
            description="string name of the input block",
            default="defaultType",
            saveToDB=True,
        )

    with pDefs.createBuilder(default=0.0) as pb:
        pb.defParam(
            "assemNum",
            units=units.UNITLESS,
            description="Index that refers, nominally, to the assemNum parameter of the containing "
            "Assembly object. This is stored on the Block to aid in visualizing shuffle patterns "
            "and the like, and should not be used within the code. These are not guaranteed to be "
            "consistent with the containing Assembly, so they should not be used as a reliable "
            "means to reconstruct the model.",
            categories=[parameters.Category.retainOnReplacement],
        )

        pb.defParam(
            "breedRatio",
            units=units.UNITLESS,
            description="Breeding ratio",
            categories=["detailedAxialExpansion"],
            location=ParamLocation.AVERAGE,
        )

        pb.defParam("buLimit", units=units.PERCENT_FIMA, description="Burnup limit")

        pb.defParam(
            "heightBOL",
            units=units.CM,
            description="As-fabricated height of this block (as input). Used in fuel performance. Should be constant.",
            location=ParamLocation.AVERAGE,
            categories=[parameters.Category.retainOnReplacement],
        )

        pb.defParam(
            "intrinsicSource",
            units=units.UNITLESS,
            description="Intrinsic neutron source from spontaneous fissions before a decay period",
            location=ParamLocation.AVERAGE,
        )

        pb.defParam(
            "kgFis",
            units=units.KG,
            description="Mass of fissile material in block",
            location=ParamLocation.VOLUME_INTEGRATED,
        )

        pb.defParam(
            "kgHM",
            units=units.KG,
            description="Mass of heavy metal in block",
            location=ParamLocation.VOLUME_INTEGRATED,
        )

        pb.defParam("nPins", units=units.UNITLESS, description="Number of pins")

        pb.defParam(
            "percentBuPeak",
            units=units.PERCENT_FIMA,
            description="Peak percentage of the initial heavy metal atoms that have been fissioned",
            location=ParamLocation.MAX,
            categories=["cumulative", "eq cumulative shift"],
        )

        pb.defParam(
            "puFrac",
            units=units.UNITLESS,
            description="Current Pu number density relative to HM at BOL",
            location=ParamLocation.AVERAGE,
        )

        pb.defParam(
            "smearDensity",
            units=units.UNITLESS,
            description=(
                "Smear density of fuel pins in this block. Defined as the ratio of fuel "
                "area to total space inside cladding."
            ),
            location=ParamLocation.AVERAGE,
        )

    return pDefs
================================================
FILE: armi/reactor/blocks/__init__.py
================================================
# Copyright 2026 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
ARMI provides several different Block types for downstream users.
The generic Block is meant to be a base class. ARMI also provides block subclasses for specific
geometries that may be useful, such as hexagonal or Cartesian blocks.
ARMI encourages you to build your own subclass of an ARMI Block type, to simplify your reactor blueprints.
"""
# ruff: noqa: F401
from armi.reactor.blocks.block import PIN_COMPONENTS, Block
from armi.reactor.blocks.cartesianBlock import CartesianBlock
from armi.reactor.blocks.hexBlock import HexBlock
from armi.reactor.blocks.thRZBlock import ThRZBlock
================================================
FILE: armi/reactor/blocks/block.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The generic Block base class. This is meant to be the basis of all Blocks you use in your modeling. ARMI encourages you
to build your own subclass of an ARMI Block type, to simplify your reactor blueprints.
Blocks are axial chunks of assemblies. They contain most of the state variables, including power, flux, and homogenized
number densities. Blocks are further divided into components.
"""
import collections
import copy
import math
from typing import ClassVar, Optional, Tuple, Type
import numpy as np
from armi import runLog
from armi.bookkeeping import report
from armi.nuclearDataIO import xsCollections
from armi.reactor import (
blockParameters,
components,
composites,
grids,
parameters,
)
from armi.reactor.components import basicShapes
from armi.reactor.flags import Flags
from armi.utils import densityTools, units
from armi.utils.plotting import plotBlockFlux
from armi.utils.units import TRACE_NUMBER_DENSITY
PIN_COMPONENTS = [
Flags.CONTROL,
Flags.PLENUM,
Flags.SHIELD,
Flags.FUEL,
Flags.CLAD,
Flags.PIN,
Flags.WIRE,
]
_PitchDefiningComponent = Optional[Tuple[Type[components.Component], ...]]
class Block(composites.Composite):
    """
    An axial slice of an assembly.

    Blocks are Composite objects with extra parameter bindings, and utility methods that let them
    play nicely with their containing Assembly.
    """

    # NOTE(review): appears unused within this portion of the file; presumably a
    # class-level unique-ID counter — confirm before relying on it.
    uniqID = 0

    # dimension used to determine which component defines the block's pitch
    PITCH_DIMENSION = "op"

    # component type that can be considered a candidate for providing pitch
    PITCH_COMPONENT_TYPE: ClassVar[_PitchDefiningComponent] = None

    # parameter definitions shared by all Block instances (see blockParameters)
    pDefs = blockParameters.getBlockParameterDefinitions()
def __init__(self, name: str, height: float = 1.0):
    """
    Builds a new ARMI block.

    Parameters
    ----------
    name : str
        The name of this block
    height : float, optional
        The height of the block in cm. Defaults to 1.0 so that ``getVolume`` assumes unit height.
    """
    composites.Composite.__init__(self, name)
    self.p.height = height
    self.p.heightBOL = height
    self.p.orientation = np.array((0.0, 0.0, 0.0))

    # geometry points and macroscopic-XS cache; populated elsewhere (macros is
    # shared, not copied, by __deepcopy__)
    self.points = []
    self.macros = None

    # flag to indicate when DerivedShape children must be updated.
    self.derivedMustUpdate = False

    # which component to use to determine block pitch, along with its 'op'
    self._pitchDefiningComponent = (None, 0.0)

    # Manually set some parameters at BOL
    for problemParam in ["THcornTemp", "THedgeTemp"]:
        self.p[problemParam] = []
def __repr__(self):
    """Short human-readable summary of this block."""
    # be warned, changing this might break unit tests on input file generations
    return (
        f"<{self.getType()} {self.getName()} at {self.getLocation()} "
        f"XS: {self.p.xsType} ENV GP: {self.p.envGroup}>"
    )
def __deepcopy__(self, memo):
    """
    Custom deepcopy behavior to prevent duplication of macros and _lumpedFissionProducts.

    We detach the recursive links to the parent and the reactor to prevent blocks carrying large
    independent copies of stale reactors in memory. If you make a new block, you must add it to
    an assembly and a reactor.

    Parameters
    ----------
    memo : dict
        Standard ``copy.deepcopy`` memo dict mapping ``id(obj)`` to already-copied objects.
    """
    # add self to memo to prevent child objects from duplicating the parent block
    memo[id(self)] = b = self.__class__.__new__(self.__class__)
    # use __getstate__ and __setstate__ pickle-methods to initialize
    state = self.__getstate__()  # __getstate__ removes parent
    # drop these from the copied state; they are reattached by reference below
    del state["macros"]
    del state["_lumpedFissionProducts"]
    b.__setstate__(copy.deepcopy(state, memo))
    # assign macros and LFP (shared with the original, not deep-copied)
    b.macros = self.macros
    b._lumpedFissionProducts = self._lumpedFissionProducts
    return b
def createHomogenizedCopy(self, pinSpatialLocators=False):
    """
    Create a copy of this block.

    Notes
    -----
    Subclasses may override this to build a much faster, homogenized copy that glosses over
    details unnecessary in certain contexts. This base implementation is simply a full-detail
    deepcopy of the block (not homogenized).
    """
    blockCopy = copy.deepcopy(self)
    return blockCopy
@property
def core(self):
    """The ancestor Core object containing this block, if any."""
    from armi.reactor.reactors import Core

    return self.getAncestor(lambda obj: isinstance(obj, Core))
def makeName(self, assemNum, axialIndex):
    """
    Generate a standard block name from an assembly number and axial index.

    This also sets the block-level assembly-num param.

    Once, we used an axial-character suffix to represent the axial index, but this is inherently
    limited so we switched to a numerical name. The axial suffix needs can be brought in to
    plugins that require them.

    Examples
    --------
    >>> makeName(120, 5)
    'B0120-005'
    """
    self.p.assemNum = assemNum
    return f"B{assemNum:04d}-{axialIndex:03d}"
def getSmearDensity(self, cold=True):
    """
    Compute the smear density of pins in this block.

    Smear density is the area of the fuel divided by the area of the space available for fuel
    inside the cladding. Other space filled with solid materials is not considered available. If
    all the area is fuel, it has 100% smear density. Lower smear density allows more room for
    swelling.

    Warning
    -------
    This requires circular fuel and circular cladding. Designs that vary from this will be
    wrong. It may make sense in the future to put this somewhere a bit more design specific.

    Notes
    -----
    This only considers circular objects. If you have a cladding that is not a circle, it will
    be ignored.

    Negative areas can exist for void gaps in the fuel pin. A negative area in a gap represents
    overlap area between two solid components. To account for this additional space within the
    pin cladding the abs(negativeArea) is added to the inner cladding area.

    Parameters
    ----------
    cold : bool, optional
        If false, returns the smear density at hot temperatures

    Returns
    -------
    float
        The smear density as a fraction.
    """
    # smear density is only defined for fueled, pinned blocks
    if not self.getComponents(Flags.FUEL):
        return 0.0
    if not self.getNumPins():
        return 0.0

    circularComps = self.getComponentsOfShape(components.Circle)
    if not circularComps:
        raise ValueError(f"Cannot get smear density of {self}. There are no circular components.")
    cladComps = set(self.getComponents(Flags.CLAD)) & set(circularComps)
    if not cladComps:
        raise ValueError(f"Cannot get smear density of {self}. There are no clad components.")

    # total circular area inside all cladding components
    innerCladdingArea = 0.0
    for clad in cladComps:
        innerDiam = clad.getDimension("id", cold=cold)
        innerCladdingArea += math.pi * innerDiam**2 / 4.0 * clad.getDimension("mult")

    lastClad = sorted(cladComps)[-1]
    compsInsideClad = self.getSortedComponentsInsideOfComponent(lastClad)
    return self.computeSmearDensity(innerCladdingArea, compsInsideClad, cold)
@staticmethod
def computeSmearDensity(innerCladdingArea: float, sortedCompsInsideClad: list[components.Component], cold: bool):
    """Compute the smear density for a sorted list of components.

    Parameters
    ----------
    innerCladdingArea : float
        Circular area inside the cladding.
    sortedCompsInsideClad : list
        A sorted list of Components inside the cladding.
    cold : bool
        If false, returns the smear density at hot temperatures

    Returns
    -------
    float
        The smear density as a fraction.
    """
    fuelArea = 0.0
    fixedArea = 0.0
    overlapArea = 0.0
    for comp in sortedCompsInsideClad:
        compArea = comp.getArea(cold=cold)
        if comp.isFuel():
            fuelArea += compArea
            continue
        if comp.hasFlags(Flags.CLAD) or comp.hasFlags([Flags.SLUG, Flags.DUMMY]):
            # another component's clad, or a clad/slug combination that isn't fuel;
            # neither counts toward unmoveable area
            continue
        if comp.containsSolidMaterial():
            fixedArea += compArea
        elif comp.containsVoidMaterial() and compArea < 0.0:
            if cold:  # will error out soon
                runLog.error(
                    f"{comp} with id {comp.getDimension('id', cold=True)} and od "
                    f"{comp.getDimension('od', cold=True)} has negative area at cold dimensions"
                )
            overlapArea += abs(compArea)

    if cold and overlapArea:
        raise ValueError(
            "Negative component areas found. Check the cold dimensions are properly aligned and no components "
            "overlap."
        )

    # overlap space is still available to the fuel; see note 2 of getSmearDensity
    availableArea = innerCladdingArea + overlapArea
    movableArea = availableArea - fixedArea
    if movableArea <= 0.0:
        return 0.0
    return fuelArea / movableArea
def autoCreateSpatialGrids(self, systemSpatialGrid=None):
    """
    Create a spatialGrid for a Block.

    Blocks do not always have a spatialGrid from Blueprints, but some Blocks can have their
    spatialGrids inferred based on the multiplicity of their components. This would add the
    ability to create a spatialGrid for a Block and give its children the corresponding
    spatialLocators if certain conditions are met.

    Parameters
    ----------
    systemSpatialGrid : Grid, optional
        Spatial Grid of the system-level parent of this Assembly that contains this Block.

    Raises
    ------
    ValueError
        If the multiplicities of the block are not only 1 or N or if generated ringNumber leads
        to more positions than necessary.
    """
    # an already-set grid is never overwritten
    if self.spatialGrid is not None:
        return
    self.spatialGrid = systemSpatialGrid
def assignPinIndices(self):
    """No-op hook; subclasses with pinned geometry may override to assign pin indices."""
    pass
def getMgFlux(self, adjoint=False, average=False, gamma=False):
    """
    Returns the multigroup neutron flux in [n/cm^2/s].

    The first entry is the first energy group (fastest neutrons). Each additional group is the
    next energy group, as set in the ISOTXS library.

    It is stored integrated over volume on self.p.mgFlux

    Parameters
    ----------
    adjoint : bool, optional
        Return adjoint flux instead of real
    average : bool, optional
        If true, will return average flux between latest and previous. Doesn't work for pin
        detailed yet.
    gamma : bool, optional
        Whether to return the neutron flux or the gamma flux.

    Returns
    -------
    flux : multigroup neutron flux in [n/cm^2/s]
    """
    currentFlux = composites.ArmiObject.getMgFlux(self, adjoint=adjoint, average=False, gamma=gamma)
    if not (average and np.any(self.p.lastMgFlux)):
        return currentFlux
    # average the current flux with the (volume-normalized) previous flux
    previousFlux = self.p.lastMgFlux / self.getVolume()
    return (currentFlux + previousFlux) / 2.0
def setPinMgFluxes(self, fluxes, adjoint=False, gamma=False):
    """
    Store the pin-detailed multi-group neutron flux.

    Parameters
    ----------
    fluxes : np.ndarray
        The block-level pin multigroup fluxes. ``fluxes[i, g]`` represents the flux in group g
        for pin ``i`` located at ``self.getPinLocations()[i]``. Flux units are the standard
        n/cm^2/s.
    adjoint : bool, optional
        Whether to set real or adjoint data.
    gamma : bool, optional
        Whether to set gamma or neutron data.
    """
    if gamma and adjoint:
        raise ValueError("Adjoint gamma flux is currently unsupported.")
    if gamma:
        self.p.pinMgFluxesGamma = fluxes
    elif adjoint:
        self.p.pinMgFluxesAdj = fluxes
    else:
        self.p.pinMgFluxes = fluxes
def getMicroSuffix(self):
    """
    Returns the microscopic library suffix (e.g. 'AB') for this block.

    DIF3D and MC2 are limited to 6 character nuclide labels. ARMI by convention uses the first 4
    for nuclide name (e.g. U235, PU39, etc.) and then uses the 5th character for cross-section
    type and the 6th for burnup group. This allows a variety of XS sets to be built modeling
    substantially different blocks.

    Notes
    -----
    The single-letter use for xsType and envGroup limit users to 52 groups of each. ARMI will
    allow 2-letter xsType designations if and only if the `envGroup` setting has length 1 (i.e.
    no burnup/temp groups are defined). This is useful for high-fidelity XS modeling.
    """
    envChar = self.p.envGroup
    if not envChar:
        raise RuntimeError(
            "Cannot get MicroXS suffix because {0} in {1} does not have a environment(env) group".format(
                self, self.parent
            )
        )

    xsLabel = self.p.xsType
    if len(xsLabel) == 1:
        return xsLabel + envChar
    if len(xsLabel) == 2 and ord(envChar) != ord("A"):
        # default is "A" so if we got an off default 2 char, there is no way to resolve.
        raise ValueError("Use of non-default env groups is not allowed with multi-character xs groups!")
    # ignore env group, multi Char XS type to support assigning 2 chars in blueprints
    return xsLabel
def getHeight(self):
    """Return the block height in cm, as stored on the ``height`` parameter."""
    return self.p.height
def setHeight(self, modifiedHeight, conserveMass=False, adjustList=None):
    """
    Set a new height of the block.

    Parameters
    ----------
    modifiedHeight : float
        The height of the block in cm
    conserveMass : bool, optional
        Conserve mass of nuclides in ``adjustList``.
    adjustList : list, optional
        Nuclides that will be conserved in conserving mass in the block. It is recommended to
        pass a list of all nuclides in the block.

    Notes
    -----
    There is a coupling between block heights, the parent assembly axial mesh, and the
    ztop/zbottom/z params of the sibling blocks. When you set a height, all those things are
    invalidated. Thus, this method has to go through and update them via
    ``parent.calculateZCoords``.

    See Also
    --------
    armi.reactor.reactors.Core.updateAxialMesh
        May need to be called after this.
    armi.reactor.assemblies.Assembly.calculateZCoords
        Recalculates z-coords, automatically called by this.
    """
    oldHeight = self.getHeight()  # capture before overwriting
    if modifiedHeight < 0.0:
        raise ValueError(f"Cannot set height of block {self} to height of {modifiedHeight} cm")
    self.p.height = modifiedHeight
    self.clearCache()
    if conserveMass and oldHeight != modifiedHeight:
        if not adjustList:
            raise ValueError("Nuclides in ``adjustList`` must be provided to conserve mass.")
        # scale densities by the inverse of the height change
        self.adjustDensity(oldHeight / modifiedHeight, adjustList)
    if self.parent:
        self.parent.calculateZCoords()
def getWettedPerimeter(self):
    """Return the wetted perimeter of the block; geometry-specific subclasses must implement this."""
    raise NotImplementedError
def getFlowAreaPerPin(self):
    """
    Return the flowing coolant area of the block in cm^2, normalized to the number of pins in the block.

    NumPins looks for max number of fuel, clad, control, etc.

    See Also
    --------
    armi.reactor.blocks.Block.getNumPins
        figures out numPins
    """
    pinCount = self.getNumPins()
    coolantArea = self.getComponent(Flags.COOLANT, exact=True).getArea()
    try:
        return coolantArea / pinCount
    except ZeroDivisionError:
        raise ZeroDivisionError(
            f"Block {self} has 0 pins (fuel, clad, control, shield, etc.). Thus, its flow area "
            "per pin is undefined."
        )
def getHydraulicDiameter(self):
    """Return the hydraulic diameter of the block; geometry-specific subclasses must implement this."""
    raise NotImplementedError
def adjustUEnrich(self, newEnrich):
    """
    Adjust U-235/U-238 mass ratio to a mass enrichment.

    Parameters
    ----------
    newEnrich : float
        New U-235 enrichment in mass fraction

    Notes
    -----
    completeInitialLoading must be run because adjusting the enrichment actually changes the
    mass slightly and you can get negative burnups, which you do not want.
    """
    fuelComps = self.getChildrenWithFlags(Flags.FUEL)
    if fuelComps:
        for fuelComp in fuelComps:
            fuelComp.adjustMassEnrichment(newEnrich)
    else:
        # no fuel components in this block; adjust homogenized densities directly
        totalU = self.getNumberDensity("U235") + self.getNumberDensity("U238")
        if totalU:
            self.setNumberDensity("U235", totalU * newEnrich)
            self.setNumberDensity("U238", totalU * (1.0 - newEnrich))
    # re-baseline BOL bookkeeping so burnup doesn't go negative
    self.completeInitialLoading()
def getLocation(self):
    """Return a string representation of the location.

    .. impl:: Location of a block is retrievable.
        :id: I_ARMI_BLOCK_POSI0
        :implements: R_ARMI_BLOCK_POSI

        If the block does not have its ``core`` attribute set, if the block's parent does not
        have a ``spatialGrid`` attribute, or if the block does not have its location defined by
        its ``spatialLocator`` attribute, return a string indicating that it is outside of the
        core.

        Otherwise, use the :py:class:`~armi.reactor.grids.Grid.getLabel` static method to
        convert the block's indices into a string like "XXX-YYY-ZZZ". For hexagonal geometry,
        "XXX" is the zero-padded hexagonal core ring, "YYY" is the zero-padded position in that
        ring, and "ZZZ" is the zero-padded block axial index from the bottom of the core.
    """
    # Note the guard checks the parent's grid but the label comes from the core's grid.
    if self.core and self.parent.spatialGrid and self.spatialLocator:
        return self.core.spatialGrid.getLabel(self.spatialLocator.getCompleteIndices())
    else:
        return "ExCore"
def coords(self):
    """
    Returns the coordinates of the block.

    .. impl:: Coordinates of a block are queryable.
        :id: I_ARMI_BLOCK_POSI1
        :implements: R_ARMI_BLOCK_POSI

        Calls to the :py:meth:`~armi.reactor.grids.locations.IndexLocation.getGlobalCoordinates`
        method of the block's ``spatialLocator`` attribute, which recursively calls itself on
        all parents of the block to get the coordinates of the block's centroid in 3D cartesian
        space.
    """
    return self.spatialLocator.getGlobalCoordinates()
def setBuLimitInfo(self):
    """Set the time-to-limit burnup parameter based on igniter, feed, etc."""
    burnupRate = self.p.buRate
    if burnupRate == 0:
        # might be cycle 1 or a non-burning block
        self.p.timeToLimit = 0.0
        return
    timeLimit = (self.p.buLimit - self.p.percentBu) / burnupRate + self.p.residence
    self.p.timeToLimit = (timeLimit - self.p.residence) / units.DAYS_PER_YEAR
def getMaxArea(self):
    """Return the full area of the physical assembly disregarding model symmetry; subclasses must implement."""
    raise NotImplementedError
def getArea(self, cold=False):
    """
    Return the area of a block for a full core or a 1/3 core model.

    Area is consistent with the area in the model, so if you have a central assembly in a 1/3
    symmetric model, this will return 1/3 of the total area of the physical assembly. This way,
    if you take the sum of the areas in the core (or count the atoms in the core, etc.), you
    will have the proper number after multiplying by the model symmetry.

    Parameters
    ----------
    cold : bool
        flag to indicate that cold (as input) dimensions are required

    Notes
    -----
    This might not work for a 1/6 core model (due to symmetry line issues).

    Returns
    -------
    area : float (cm^2)

    See Also
    --------
    armi.reactor.blocks.Block.getMaxArea
        return the full area of the physical assembly disregarding model symmetry
    """
    # This caching requires that the cache is cleared whenever anything is
    # adjusted, including temperature and dimensions.
    cachedArea = self._getCached("area")
    if cachedArea:
        return cachedArea
    totalArea = sum(child.getArea(cold=cold) for child in self)
    # the symmetry factor accounts for hexes clipped by symmetry lines
    scaledArea = totalArea / self.getSymmetryFactor()
    self._setCache("area", scaledArea)
    return scaledArea
def getVolume(self):
    """
    Return the volume of a block in cm^3.

    The symmetry factor accounts for an assembly sitting on a symmetry
    boundary that needs to be cut in half, thirds, etc.
    """
    totalVolume = sum(child.getVolume() for child in self)
    return totalVolume / self.getSymmetryFactor()
def getSymmetryFactor(self):
    """
    Return a scaling factor due to symmetry on the area of the block or its components.

    Takes into account assemblies that are bisected or trisected by symmetry lines.
    In 1/3 symmetric cases, the central assembly is 1/3 a full area. If edge assemblies are
    included in a model, the symmetry factor along both edges for overhanging assemblies should
    be 2.0. However, ARMI runs in most scenarios with those assemblies on the 120-edge removed,
    so the symmetry factor should generally be just 1.0.

    See Also
    --------
    armi.reactor.converters.geometryConverter.EdgeAssemblyChanger.scaleParamsRelatedToSymmetry
    """
    # base implementation: the block is not clipped by any symmetry line
    return 1.0
def adjustDensity(self, frac, adjustList, returnMass=False):
    """
    Adjusts the total density of each nuclide in adjustList by frac.

    Parameters
    ----------
    frac : float
        The fraction of the current density that will remain after this operation
    adjustList : list
        List of nuclide names that will be adjusted.
    returnMass : bool
        If true, will return mass difference.

    Returns
    -------
    mass : float
        Mass difference in grams. If you subtract mass, mass will be negative.
        If returnMass is False (default), this will always be zero.
    """
    self._updateDetailedNdens(frac, adjustList)
    massDiff = 0.0
    # volume is only needed (and only computed) when a mass difference is requested
    volume = self.getVolume() if returnMass else None
    densities = self.getNuclideNumberDensities(adjustList)
    for nucName, density in zip(adjustList, densities):
        if not density:
            # don't modify zeros.
            continue
        scaled = density * frac
        # add a little so components remember
        self.setNumberDensity(nucName, scaled + TRACE_NUMBER_DENSITY)
        if returnMass:
            massDiff += densityTools.getMassInGrams(nucName, volume, scaled - density)
    return massDiff
def _updateDetailedNdens(self, frac, adjustList):
    """
    Update detailed number density which is used by hi-fi depleters such as ORIGEN.

    Parameters
    ----------
    frac : float
        Multiplier applied uniformly to the detailed number densities.
    adjustList : list
        Nuclide names being adjusted; triggers the scaling if any are active nuclides.

    Notes
    -----
    This will perturb all number densities so it is assumed that if one of the active densities
    is perturbed, all of them are perturbed.
    """
    if self.p.detailedNDens is None:
        # BOL assems get expanded to a reference so the first check is needed so it won't call
        # .blueprints on None since BOL assems don't have a core/r
        return
    if any(nuc in self.core.r.blueprints.activeNuclides for nuc in adjustList):
        self.p.detailedNDens *= frac
        # Other power densities do not need to be updated as they are calculated in the global
        # flux interface, which occurs after axial expansion on the interface stack.
        self.p.pdensDecay *= frac
def completeInitialLoading(self, bolBlock=None):
    """
    Does some BOL bookkeeping to track things like BOL HM density for burnup tracking.

    This should run after this block is loaded up at BOC (called from Reactor.initialLoading).
    The original purpose of this was to get the moles HM at BOC for the moles Pu/moles HM at BOL
    calculation.

    This also must be called after modifying something like the smear density or zr fraction in
    an optimization case. In ECPT cases, a BOL block must be passed or else the burnup will try
    to get based on a pre-burned value.

    Parameters
    ----------
    bolBlock : Block, optional
        A BOL-state block of this block type, required for perturbed equilibrium cases.
        Must have the same enrichment as this block!

    Returns
    -------
    hmDens : float
        The heavy metal number density of this block.

    See Also
    --------
    Reactor.importGeom
    depletion._updateBlockParametersAfterDepletion
    """
    if bolBlock is None:
        bolBlock = self
    hmDens = bolBlock.getHMDens()  # total homogenized heavy metal number density
    self.p.nHMAtBOL = hmDens
    self.p.molesHmBOL = self.getHMMoles()
    # guard against division by zero for blocks with no heavy metal
    self.p.puFrac = self.getPuMoles() / self.p.molesHmBOL if self.p.molesHmBOL > 0.0 else 0.0
    try:
        # non-pinned reactors (or ones without cladding) will not use smear density
        self.p.smearDensity = self.getSmearDensity()
    except ValueError:
        pass
    self.p.enrichmentBOL = self.getFissileMassEnrich()
    massHmBOL = 0.0
    for child in self:
        hmMass = child.getHMMass()
        massHmBOL += hmMass
        # Components have the following parameters but not every composite will massHmBOL,
        # molesHmBOL, puFrac, enrichmentBOL
        if isinstance(child, components.Component):
            child.p.massHmBOL = hmMass
            child.p.molesHmBOL = child.getHMMoles()
            if child.p.molesHmBOL:
                child.p.enrichmentBOL = child.getFissileMassEnrich()
    self.p.massHmBOL = massHmBOL
    return hmDens
def setB10VolParam(self, heightHot):
    """
    Set the b.p.initialB10ComponentVol param according to the volume of boron-10 containing components.

    Parameters
    ----------
    heightHot : bool
        True if the block's current height is the hot (thermally expanded) height; in that
        case the height is divided by the component's thermal expansion factor to recover
        the cold height before computing the volume.
    """
    # exclude fuel components since they could have slight B10 impurity and
    # this metric is not relevant for fuel.
    b10Comps = [c for c in self if c.getNumberDensity("B10") and not c.isFuel()]
    if not b10Comps:
        return
    # get the highest density comp dont want to sum all because some comps might have very small
    # impurities of boron and adding this volume won't be conservative for captures per cc.
    b10Comp = sorted(b10Comps, key=lambda x: x.getNumberDensity("B10"))[-1]
    if len(b10Comps) > 1:
        runLog.warning(
            f"More than one boron10-containing component found in {self.name}. Only {b10Comp} "
            f"will be considered for calculation of initialB10ComponentVol Since adding "
            f"multiple volumes is not conservative for captures. All compos found {b10Comps}",
            single=True,
        )
    if self.isFuel():
        runLog.warning(
            f"{self.name} has both fuel and initial b10. b10 volume may not be conserved with axial expansion.",
            single=True,
        )
    # calc volume of boron components
    coldArea = b10Comp.getArea(cold=True)
    coldFactor = b10Comp.getThermalExpansionFactor() if heightHot else 1
    coldHeight = self.getHeight() / coldFactor
    self.p.initialB10ComponentVol = coldArea * coldHeight
def replaceBlockWithBlock(self, bReplacement):
    """
    Replace the current block with the replacementBlock.

    Typically used in the insertion of control rods.

    Parameters
    ----------
    bReplacement : Block
        The block whose (deep-copied) parameters and components will replace this block's.
    """
    # parameters flagged retainOnReplacement keep their current values
    paramsToSkip = set(self.p.paramDefs.inCategory(parameters.Category.retainOnReplacement).names)
    # deep copy so the replacement block itself is never mutated
    tempBlock = copy.deepcopy(bReplacement)
    oldParams = self.p
    newParams = self.p = tempBlock.p
    for paramName in paramsToSkip:
        newParams[paramName] = oldParams[paramName]
    # update synchronization information
    self.p.assigned = parameters.SINCE_ANYTHING
    paramDefs = self.p.paramDefs
    for paramName in set(newParams.keys()) - paramsToSkip:
        paramDefs[paramName].assigned = parameters.SINCE_ANYTHING
    # adopt the replacement block's components and invalidate cached values
    newComponents = tempBlock.getChildren()
    self.setChildren(newComponents)
    self.clearCache()
@staticmethod
def plotFlux(core, fName=None, bList=None, peak=False, adjoint=False, bList2=None):
    """A simple pass-through method to a utils plotting function. This is here to preserve the API.

    Notes
    -----
    ``bList2`` previously defaulted to a mutable ``[]``, which is shared across all calls of
    the function; it now defaults to ``None`` and a fresh list is created per call.
    """
    plotBlockFlux(core, fName, bList, peak, adjoint, bList2 if bList2 is not None else [])
def _updatePitchComponent(self, c):
    """
    Update the component that defines the pitch.

    Given a Component, compare it to the current component that defines the pitch of the Block.
    If bigger, replace it. We need different implementations of this to support different logic
    for determining the form of pitch and the concept of "larger".

    See Also
    --------
    CartesianBlock._updatePitchComponent
    """
    # Some block types don't have a clearly defined pitch (e.g. ThRZ)
    if self.PITCH_COMPONENT_TYPE is None:
        return
    if not isinstance(c, self.PITCH_COMPONENT_TYPE):
        return
    try:
        candidatePitch = c.getDimension(self.PITCH_DIMENSION)
    except parameters.UnknownParameterError:
        # some components dont have the appropriate parameter
        return
    if candidatePitch and candidatePitch > self._pitchDefiningComponent[1]:
        self._pitchDefiningComponent = (c, candidatePitch)
def add(self, c):
    """Add a child to this block, updating caches, burnup bookkeeping, and the pitch-defining component."""
    composites.Composite.add(self, c)
    self.derivedMustUpdate = True
    self.clearCache()
    try:
        # grow the per-pin burnup list if this component has more pins than seen so far
        mult = int(c.getDimension("mult"))
        if self.p.percentBuByPin is None or len(self.p.percentBuByPin) < mult:
            # this may be a little wasteful, but we can fix it later...
            self.p.percentBuByPin = [0.0] * mult
    except AttributeError:
        # maybe adding a Composite of components rather than a single
        pass
    self._updatePitchComponent(c)
def removeAll(self, recomputeAreaFractions=True):
    """Remove every child component, recomputing area fractions at most once at the end."""
    for child in list(self):
        self.remove(child, recomputeAreaFractions=False)
    if recomputeAreaFractions:
        # only do this once, after everything is gone
        self.getVolumeFractions()
def remove(self, c, recomputeAreaFractions=True):
    """Remove a child component, refreshing the pitch-defining component if the removed one defined it."""
    composites.Composite.remove(self, c)
    self.clearCache()
    if c is self._pitchDefiningComponent[0]:
        # removed component defined the pitch; promote the next-largest one
        self._pitchDefiningComponent = (None, 0.0)
        replacement = self.getLargestComponent(self.PITCH_DIMENSION)
        if replacement is not None:
            self._updatePitchComponent(replacement)
    if recomputeAreaFractions:
        self.getVolumeFractions()
def getComponentsThatAreLinkedTo(self, comp, dim):
    """
    Determine which dimensions of which components are linked to a specific dimension of a
    particular component.

    Useful for breaking fuel components up into individuals and making sure anything that was
    linked to the fuel mult (like the cladding mult) stays correct.

    Parameters
    ----------
    comp : Component
        The component that the results are linked to
    dim : str
        The name of the dimension that the results are linked to

    Returns
    -------
    linkedComps : list
        A list of (components,dimName) that are linked to this component, dim.
    """
    linked = []
    for candidate in self.iterComponents():
        for dimName, val in candidate.p.items():
            if not candidate.dimensionIsLinked(dimName):
                continue
            # in linked dimensions, val = (component, dimName)
            if val[0] is comp and val[1] == dim:
                linked.append((candidate, dimName))
    return linked
def getComponentsInLinkedOrder(self, componentList=None):
    """
    Return a list of the components in order of their linked-dimension dependencies.

    Parameters
    ----------
    componentList : list, optional
        A list of components to consider. If None, this block's components will be used.

    Returns
    -------
    list
        Components ordered so that any component another one links to appears first.

    Raises
    ------
    RuntimeError
        If a dependency cannot be resolved (e.g. a linked component is missing from the block).

    Notes
    -----
    This means that components other components are linked to come first.
    """
    if componentList is None:
        componentList = self.getComponents()
    cList = collections.deque(componentList)
    orderedComponents = []
    # Loop through the components until there are none left.
    counter = 0
    while cList:
        candidate = cList.popleft()  # take first item in list
        cleared = True  # innocent until proven guilty
        # loop through all dimensions in this component to determine its dependencies
        for dimName, val in candidate.p.items():
            if candidate.dimensionIsLinked(dimName):
                # In linked dimensions, val = (component, dimName)
                requiredComponent = val[0]
                if requiredComponent not in orderedComponents:
                    # this component depends on one that is not in the ordered list yet.
                    # do not add it.
                    cleared = False
                    break  # short circuit. One failed lookup is enough to flag this component as dirty.
        if cleared:
            # this candidate is free of dependencies and is ready to be added.
            orderedComponents.append(candidate)
        else:
            cList.append(candidate)
        counter += 1
        if counter > 1000:
            # Bug fix: report the candidate that actually failed to resolve rather than
            # whatever component happens to be at the front of the queue (cList[0]).
            runLog.error(
                "The component {0} in {1} contains a dimension that is linked to another component, "
                "but the required component is not present in the block. There may also be other dependency "
                "failures. The component dims are {2}".format(candidate, self, candidate.p)
            )
            raise RuntimeError("Cannot locate linked component.")
    return orderedComponents
def getSortedComponentsInsideOfComponent(self, component):
    """
    Returns a list of components inside of the given component sorted from innermost to outermost.

    Parameters
    ----------
    component : object
        Component to look inside of.

    Notes
    -----
    If you just want sorted components in this block, use ``sorted(self)``. This will never
    include any ``DerivedShape`` objects. Since they have a derived area they don't have a well-
    defined dimension. For now we just ignore them. If they are desired in the future some
    knowledge of their dimension will be required while they are being derived.
    """
    ordered = sorted(self)
    # everything strictly before the given component in sort order is "inside" it
    return ordered[: ordered.index(component)]
def getNumPins(self):
    """Return the number of pins in this block.

    .. impl:: Get the number of pins in a block.
        :id: I_ARMI_BLOCK_NPINS
        :implements: R_ARMI_BLOCK_NPINS

        Uses some simple criteria to infer the number of pins in the block.

        For every flag in the module list :py:data:`~armi.reactor.blocks.PIN_COMPONENTS`, loop
        over all components of that type in the block. If the component is an instance of
        :py:class:`~armi.reactor.components.basicShapes.Circle`, add its multiplicity to a list,
        and sum that list over all components with each given flag.

        After looping over all possibilities, return the maximum value returned from the process
        above, or if no compatible components were found, return zero.
    """
    countsPerFlag = []
    for compType in PIN_COMPONENTS:
        total = 0
        for comp in self.iterComponents(compType):
            if isinstance(comp, basicShapes.Circle):
                total += int(comp.getDimension("mult"))
        countsPerFlag.append(total)
    return max(countsPerFlag) if countsPerFlag else 0
def mergeWithBlock(self, otherBlock, fraction):
    """
    Turns this block into a mixture of this block and some other block.

    Parameters
    ----------
    otherBlock : Block
        The block to mix this block with. The other block will not be modified.
    fraction : float
        Fraction of the other block to mix in with this block. If 0.1 is passed in, this block
        will become 90% what it originally was and 10% what the other block is.

    Notes
    -----
    This merges on a high level (using number densities). Components will not be merged.

    This is used e.g. for inserting a control block partially to get a very tight criticality
    control. In this case, a control block would be merged with a duct block. It is also used
    when a control rod is specified as a certain length but that length does not fit exactly
    into a full block.
    """
    mine = self.getNumberDensities()
    theirs = otherBlock.getNumberDensities()
    keepFrac = 1.0 - fraction
    # union of nuclides so nothing present in either block is dropped
    merged = {
        nucName: keepFrac * mine.get(nucName, 0.0) + fraction * theirs.get(nucName, 0.0)
        for nucName in set(mine) | set(theirs)
    }
    self.setNumberDensities(merged)
def getComponentAreaFrac(self, typeSpec):
    """
    Returns the area fraction of the specified component(s) among all components in the block.

    Parameters
    ----------
    typeSpec : Flags or list of Flags
        Component types to look up

    Examples
    --------
    >>> b.getComponentAreaFrac(Flags.CLAD)
    0.15

    Returns
    -------
    float
        The area fraction of the component.
    """
    matchingFrac = sum(frac for comp, frac in self.getVolumeFractions() if comp.hasFlags(typeSpec))
    if not matchingFrac:
        runLog.warning(
            f"No component {typeSpec} exists on {self}, so area fraction is zero.",
            single=True,
            label=f"{typeSpec} areaFrac is zero",
        )
        return 0.0
    return matchingFrac
def verifyBlockDims(self):
    """Optional dimension checking; the base implementation is a no-op that subclasses may override."""
    return None
def getDim(self, typeSpec, dimName):
    """
    Search through blocks in this assembly and find the first component of compName.
    Then, look on that component for dimName.

    Parameters
    ----------
    typeSpec : Flags or list of Flags
        Component name, e.g. Flags.FUEL, Flags.CLAD, Flags.COOLANT, ...
    dimName : str
        Dimension name, e.g. 'od', ...

    Returns
    -------
    dimVal : float
        The dimension in cm.

    Examples
    --------
    >>> getDim(Flags.WIRE, "od")
    0.01
    """
    dimKey = dimName.lower()  # dimensions are stored lowercase
    for comp in self:
        if comp.hasFlags(typeSpec):
            return comp.getDimension(dimKey)
    raise ValueError(f"Cannot get Dimension because Flag not found: {typeSpec}")
def getPinCenterFlatToFlat(self, cold=False):
    """Return the flat-to-flat distance between the centers of opposing pins in the outermost ring."""
    raise NotImplementedError  # no geometry can be assumed
def getWireWrapCladGap(self, cold=False):
    """Return the radial gap between the wire wrap and the clad outer surface."""
    wire = self.getComponent(Flags.WIRE)
    clad = self.getComponent(Flags.CLAD)
    wireOuterRadius = wire.getBoundingCircleOuterDiameter(cold=cold) / 2.0
    # the wire's inner surface sits one wire diameter inside its bounding circle
    wireInnerRadius = wireOuterRadius - wire.getDimension("od", cold=cold)
    cladOuterRadius = clad.getDimension("od", cold=cold) / 2.0
    return wireInnerRadius - cladOuterRadius
def getPlenumPin(self):
    """Return the plenum pin if it exists, else None."""
    return next((c for c in self.iterComponents(Flags.GAP) if self.isPlenumPin(c)), None)
def isPlenumPin(self, c):
    """Return True if the specified component is a plenum pin."""
    # This assumes that anything with the GAP flag will have a valid 'id' dimension.
    isCenterGap = isinstance(c, components.Component) and c.hasFlags(Flags.GAP) and c.getDimension("id") == 0
    return self.hasFlags([Flags.PLENUM, Flags.ACLP]) and isCenterGap
def getPitch(self, returnComp=False):
    """
    Return the center-to-center hex pitch of this block.

    Parameters
    ----------
    returnComp : bool, optional
        If true, will return the component that has the maximum pitch as well

    Returns
    -------
    pitch : float or None
        Hex pitch in cm, if well-defined. If there is no clear component for determining pitch, returns None
    component : Component or None
        Component that has the max pitch, if returnComp == True. If no component is found to define the pitch,
        returns None.

    Notes
    -----
    The block stores a reference to the component that defines the pitch, making the assumption that while the
    dimensions can change, the component containing the largest dimension will not. This lets us skip the search for
    largest component. We still need to ask the largest component for its current dimension in case its temperature
    changed, or was otherwise modified.

    See Also
    --------
    setPitch : sets pitch
    """
    pitchComp, _cachedPitch = self._pitchDefiningComponent
    if pitchComp is None:
        raise ValueError("{} has no valid pitch defining component".format(self))
    # ask component for dimensions, since they could have changed due to temperature
    pitch = pitchComp.getPitchData()
    if returnComp:
        return pitch, pitchComp
    return pitch
def hasPinPitch(self):
    """Return True if the block has enough information (a spatial grid) to calculate pin pitch."""
    return self.spatialGrid is not None
def getPinPitch(self, cold=False):
    """
    Return the pin pitch of this block's sub-grid.

    This assumes the spatial grid is defined by unit steps.
    """
    return self.spatialGrid.pitch
def getDimensions(self, dimension):
    """Return the set of values of the named dimension across this block's components."""
    values = set()
    for comp in self:
        try:
            val = comp.getDimension(dimension)
        except parameters.ParameterError:
            # component doesn't carry this dimension; skip it
            continue
        if val is not None:
            values.add(val)
    return values
def getLargestComponent(self, dimension):
    """
    Find the component with the largest dimension of the specified type.

    Parameters
    ----------
    dimension: str
        The name of the dimension to find the largest component of.

    Returns
    -------
    largestComponent: armi.reactor.components.Component
        The component with the largest dimension of the specified type.
    """
    best = None
    bestVal = -float("inf")
    for comp in self:
        try:
            val = comp.getDimension(dimension)
        except parameters.ParameterError:
            # component doesn't carry this dimension; skip it
            continue
        if val is not None and val > bestVal:
            bestVal = val
            best = comp
    return best
def setPitch(self, val, updateBolParams=False):
    """
    Sets outer pitch to some new value.

    This sets the settingPitch and actually sets the dimension of the outer hexagon.

    During a load (importGeom), the setDimension doesn't usually do anything except set the
    setting See Issue 034

    But during a actual case modification (e.g. in an optimization sweep, then the dimension has
    to be set as well.

    See Also
    --------
    getPitch : gets the pitch
    """
    pitchComp, _oldPitch = self._pitchDefiningComponent
    if not pitchComp:
        raise RuntimeError("No pitch-defining component on block {}".format(self))
    pitchComp.setDimension("op", val)
    self._pitchDefiningComponent = (pitchComp, val)
    if updateBolParams:
        self.completeInitialLoading()
def getMfp(self, gamma=False):
    r"""
    Calculate the mean free path for neutron or gammas in this block.

    .. math::
        <\Sigma> = \frac{\sum_E(\phi_e \Sigma_e dE)}{\sum_E (\phi_e dE)} =
        \frac{\sum_E(\phi_e N \sum_{\text{type}}(\sigma_e) dE}{\sum_E (\phi_e dE))}

    Block macro is the sum of macros of all nuclides.

    phi_g = flux*dE already in multigroup method.

    Returns
    -------
    mfp, mfpAbs, diffusionLength : tuple(float, float float)
    """
    lib = self.core.lib
    flux = self.getMgFlux(gamma=gamma)
    # Performance fix: the old comprehension recomputed max(flux) on every
    # iteration, making normalization O(G^2) in the number of groups.
    peakFlux = max(flux)
    flux = [fi / peakFlux for fi in flux]
    mfpNumerator = np.zeros(len(flux))
    absMfpNumerator = np.zeros(len(flux))
    transportNumerator = np.zeros(len(flux))
    numDensities = self.getNumberDensities()
    for nucName, nDen in numDensities.items():
        # NOTE(review): self.nuclideBases presumably refers to the nuclideBases module — confirm
        nucMc = self.nuclideBases.byName[nucName].label + self.getMicroSuffix()
        if gamma:
            micros = lib[nucMc].gammaXS
        else:
            micros = lib[nucMc].micros
        total = micros.total[:, 0]  # 0th order
        transport = micros.transport[:, 0]  # 0th order, [bn]
        absorb = sum(micros.getAbsorptionXS())
        mfpNumerator += nDen * total  # [cm]
        absMfpNumerator += nDen * absorb
        transportNumerator += nDen * transport
    denom = sum(flux)
    mfp = 1.0 / (sum(mfpNumerator * flux) / denom)
    sigmaA = sum(absMfpNumerator * flux) / denom
    sigmaTr = sum(transportNumerator * flux) / denom
    diffusionCoeff = 1 / (3.0 * sigmaTr)
    mfpAbs = 1 / sigmaA
    diffusionLength = math.sqrt(diffusionCoeff / sigmaA)
    return mfp, mfpAbs, diffusionLength
def setAreaFractionsReport(self):
    """Record each component's area and area fraction in the report; return the report group used."""
    for comp, frac in self.getVolumeFractions():
        report.setData(
            comp.getName(),
            ["{0:10f}".format(comp.getArea()), "{0:10f}".format(frac)],
            report.BLOCK_AREA_FRACS,
        )
    # return the group the information went to
    return report.ALL[report.BLOCK_AREA_FRACS]
def getBlocks(self):
    """
    Return this block wrapped in a single-element list.

    Lets callers iterate uniformly over reactors, assemblies, or single blocks
    without checking which reactor-family object they hold.
    """
    return [self]
def updateComponentDims(self):
    """
    This method updates all the dimensions of the components.

    Notes
    -----
    This is VERY useful for defining a ThRZ core out of differentialRadialSegements whose
    dimensions are connected together some of these dimensions are derivative and can be updated
    by changing dimensions in a Parameter Component or other linked components

    See Also
    --------
    armi.reactor.components.DifferentialRadialSegment.updateDims
    armi.reactor.components.Parameters
    armi.physics.optimize.OptimizationInterface.modifyCase (look up 'ThRZReflectorThickness')
    """
    for c in self.getComponentsInLinkedOrder():
        try:
            c.updateDims()
        except NotImplementedError:
            # Bug fix: the method called is updateDims; the warning used to say "updatedDims".
            runLog.warning("{0} has no updateDims method -- skipping".format(c))
def getIntegratedMgFlux(self, adjoint=False, gamma=False):
    """
    Return the volume integrated multigroup neutron tracklength in [n-cm/s].

    The first entry is the first energy group (fastest neutrons). Each additional group is the
    next energy group, as set in the ISOTXS library.

    Parameters
    ----------
    adjoint : bool, optional
        Return adjoint flux instead of real
    gamma : bool, optional
        Whether to return the neutron flux or the gamma flux.

    Returns
    -------
    integratedFlux : np.ndarray
        multigroup neutron tracklength in [n-cm/s]
    """
    if adjoint and gamma:
        raise ValueError("Adjoint gamma flux is currently unsupported.")
    if adjoint:
        raw = self.p.adjMgFlux
    elif gamma:
        raw = self.p.mgFluxGamma
    else:
        raw = self.p.mgFlux
    return np.array(raw)
def getLumpedFissionProductCollection(self):
    """
    Get collection of LFP objects. Will work for global or block-level LFP models.

    Returns
    -------
    lfps : LumpedFissionProduct
        lfpName keys , lfp object values

    See Also
    --------
    armi.physics.neutronics.fissionProductModel.lumpedFissionProduct.LumpedFissionProduct : LFP object
    """
    # delegate straight to the base class implementation
    return composites.ArmiObject.getLumpedFissionProductCollection(self)
def rotate(self, rad):
    """Function for rotating a block's spatially varying variables by a specified angle (radians).

    Parameters
    ----------
    rad: float
        Number (in radians) specifying the angle of counter clockwise rotation.
    """
    # geometry-specific subclasses must implement rotation
    raise NotImplementedError
def setAxialExpTargetComp(self, targetComponent):
    """Sets the targetComponent for the axial expansion changer.

    .. impl:: Set the target axial expansion components on a given block.
        :id: I_ARMI_MANUAL_TARG_COMP
        :implements: R_ARMI_MANUAL_TARG_COMP

        Sets the ``axialExpTargetComponent`` parameter on the block to the name of the Component
        which is passed in. This is then used by the
        :py:class:`~armi.reactor.converters.axialExpansionChanger.AxialExpansionChanger`
        class during axial expansion.

        This method is typically called from within
        :py:meth:`~armi.reactor.blueprints.blockBlueprint.BlockBlueprint.construct` during the
        process of building a Block from the blueprints.

    Parameters
    ----------
    targetComponent: :py:class:`Component ` object
        Component specified to be target component for axial expansion changer
    """
    # only the component's name is stored; the object itself is looked up later
    self.p.axialExpTargetComponent = targetComponent.name
def getPinLocations(self) -> list[grids.IndexLocation]:
    """Produce all the index locations for pins in the block.

    Returns
    -------
    list[grids.IndexLocation]
        Integer locations where pins can be found in the block.

    Notes
    -----
    Only components with ``Flags.CLAD`` are considered to define a pin's location.

    See Also
    --------
    :meth:`getPinCoordinates` - companion for this method.
    """
    locations = []
    for clad in self.iterChildrenWithFlags(Flags.CLAD):
        locator = clad.spatialLocator
        # a MultiIndexLocation is itself iterable over individual locations
        if isinstance(locator, grids.MultiIndexLocation):
            locations.extend(locator)
        else:
            locations.append(locator)
    return locations
def getPinCoordinates(self) -> np.ndarray:
"""
Compute the local centroid coordinates of any pins in this block.
The pins must have a CLAD-flagged component for this to work.
Returns
-------
localCoords : numpy.ndarray
``(N, 3)`` array of coordinates for pins locations. ``localCoords[i]`` contains a triplet of
the x, y, z location for pin ``i``. Ordered according to how they are listed as children
See Also
--------
:meth:`getPinLocations` - companion for this method
"""
indices = self.getPinLocations()
coords = [location.getLocalCoordinates() for location in indices]
return np.array(coords)
def getTotalEnergyGenerationConstants(self):
"""
Get the total energy generation group constants for a block.
Gives the total energy generation rates when multiplied by the multigroup flux.
Returns
-------
totalEnergyGenConstant: np.ndarray
Total (fission + capture) energy generation group constants (Joules/cm)
"""
return self.getFissionEnergyGenerationConstants() + self.getCaptureEnergyGenerationConstants()
def getFissionEnergyGenerationConstants(self):
"""
Get the fission energy generation group constants for a block.
Gives the fission energy generation rates when multiplied by the multigroup flux.
Returns
-------
fissionEnergyGenConstant: np.ndarray
Energy generation group constants (Joules/cm)
Raises
------
RuntimeError:
Reports if a cross section library is not assigned to a reactor.
"""
if not self.core.lib:
raise RuntimeError(
"Cannot compute energy generation group constants without a library. Please ensure a library exists."
)
return xsCollections.computeFissionEnergyGenerationConstants(
self.getNumberDensities(), self.core.lib, self.getMicroSuffix()
)
def getCaptureEnergyGenerationConstants(self):
"""
Get the capture energy generation group constants for a block.
Gives the capture energy generation rates when multiplied by the multigroup flux.
Returns
-------
fissionEnergyGenConstant: np.ndarray
Energy generation group constants (Joules/cm)
Raises
------
RuntimeError:
Reports if a cross section library is not assigned to a reactor.
"""
if not self.core.lib:
raise RuntimeError(
"Cannot compute energy generation group constants without a library. Please ensure a library exists."
)
return xsCollections.computeCaptureEnergyGenerationConstants(
self.getNumberDensities(), self.core.lib, self.getMicroSuffix()
)
def getNeutronEnergyDepositionConstants(self):
"""
Get the neutron energy deposition group constants for a block.
Returns
-------
energyDepConstants: np.ndarray
Neutron energy generation group constants (in Joules/cm)
Raises
------
RuntimeError:
Reports if a cross section library is not assigned to a reactor.
"""
if not self.core.lib:
raise RuntimeError(
"Cannot get neutron energy deposition group constants without "
"a library. Please ensure a library exists."
)
return xsCollections.computeNeutronEnergyDepositionConstants(
self.getNumberDensities(), self.core.lib, self.getMicroSuffix()
)
def getGammaEnergyDepositionConstants(self):
"""
Get the gamma energy deposition group constants for a block.
Returns
-------
energyDepConstants: np.ndarray
Energy generation group constants (in Joules/cm)
Raises
------
RuntimeError:
Reports if a cross section library is not assigned to a reactor.
"""
if not self.core.lib:
raise RuntimeError(
"Cannot get gamma energy deposition group constants without a library. Please ensure a library exists."
)
return xsCollections.computeGammaEnergyDepositionConstants(
self.getNumberDensities(), self.core.lib, self.getMicroSuffix()
)
def getBoronMassEnrich(self):
"""Return B-10 mass fraction."""
b10 = self.getMass("B10")
b11 = self.getMass("B11")
total = b11 + b10
if total == 0.0:
return 0.0
return b10 / total
def getUraniumMassEnrich(self):
"""Returns fissile mass fraction of uranium."""
totalU = self.getMass("U")
if totalU < 1e-10:
return 0.0
fissileU = self.getMass(["U233", "U235"])
return fissileU / totalU
def getInputHeight(self) -> float:
"""Determine the input height from blueprints.
Returns
-------
float
Height for this block pulled from the blueprints.
Raises
------
AttributeError
If no ancestor of this block contains the input blueprints. Blueprints are usually
stored on the reactor object, which is typically an ancestor of the block
(block -> assembly -> core -> reactor). However, this may be the case when creating
blocks from scratch in testing where the entire composite tree may not exist.
"""
ancestorWithBp = self.getAncestor(lambda o: getattr(o, "blueprints", None) is not None)
if ancestorWithBp is not None:
bp = ancestorWithBp.blueprints
assemDesign = bp.assemDesigns[self.parent.getType()]
heights = assemDesign.height
myIndex = self.parent.index(self)
return heights[myIndex]
raise AttributeError(f"No ancestor of {self} has blueprints")
    def sort(self):
        """Sort the children on this block.

        If there is a spatial grid, the previous pin indices on the components
        is now invalid because the ordering of :meth:`getPinLocations` has maybe
        changed since the ordering of components has changed. Reassign the pin
        indices via :meth:`assignPinIndices` accordingly.
        """
        super().sort()
        # Child order drives pin ordering, and it just changed: re-derive pin indices.
        if self.spatialGrid is not None:
            self.assignPinIndices()
================================================
FILE: armi/reactor/blocks/cartesianBlock.py
================================================
# Copyright 2026 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cartesian blocks can be square or more generically rectangular in cross section."""
import math
from armi.reactor import components
from armi.reactor.blocks.block import Block
from armi.reactor.flags import Flags
class CartesianBlock(Block):
    """
    A Cartesian Block is a vertical slice of an Assembly which is laid out on a Cartesian grid. That is, a grid that is
    square or rectangular.

    A Cartesian grid can have an origin that is in the middle of a grid cell:

    +---------+--------+--------+
    |         |        |        |
    | (-1,1)  | (0,1)  | (1,1)  |
    |         |        |        |
    +---------+--------+--------+
    |         |        |        |
    | (-1,0)  | (0,0)  | (1,0)  |
    |         |        |        |
    +---------+--------+--------+
    |         |        |        |
    | (-1,-1) | (0,-1) | (1,-1) |
    |         |        |        |
    +---------+--------+--------+

    Or the grid cells can be aligned so the origin is between the grid cells:

    +---------+---------+--------+--------+
    |         |         |        |        |
    | (-2,1)  | (-1,1)  | (0,1)  | (1,1)  |
    |         |         |        |        |
    +---------+---------+--------+--------+
    |         |         |        |        |
    | (-2,0)  | (-1,0)  | (0,0)  | (1,0)  |
    |         |         |        |        |
    +---------+---------+--------+--------+
    |         |         |        |        |
    | (-2,-1) | (-1,-1) | (0,-1) | (1,-1) |
    |         |         |        |        |
    +---------+---------+--------+--------+
    |         |         |        |        |
    | (-2,-2) | (-1,-2) | (0,-2) | (1,-2) |
    |         |         |        |        |
    +---------+---------+--------+--------+
    """

    PITCH_DIMENSION = "widthOuter"
    PITCH_COMPONENT_TYPE = components.Rectangle

    def getMaxArea(self):
        """Get area of this block if it were totally full."""
        xPitch, yPitch = self.getPitch()
        return xPitch * yPitch

    def setPitch(self, val, updateBolParams=False):
        raise NotImplementedError("Directly setting the pitch of a cartesian block is currently not supported.")

    def getSymmetryFactor(self):
        """Return a factor between 1 and N where 1/N is how much cut-off by symmetry lines this mesh cell is."""
        if self.core is not None:
            i, j = self.spatialLocator.getCompleteIndices()[:2]
            if self.core.symmetry.isThroughCenterAssembly:
                if i == 0 and j == 0:
                    # central cell is quartered by the two symmetry lines
                    return 4.0
                if i == 0 or j == 0:
                    # cells on either symmetry line are halved
                    return 2.0
        return 1.0

    def getPinCenterFlatToFlat(self, cold=False):
        """Return the flat-to-flat distance between the centers of opposing pins (corner-2-corner) in the outer ring."""
        clad = self.getComponent(Flags.CLAD)
        nRings = self.numRingsToHoldNumCells(clad.getDimension("mult"))
        xPitch, yPitch = self.getPinPitch(cold=cold)
        diagonalPitch = math.sqrt(xPitch**2 + yPitch**2)
        if self.core.symmetry.isThroughCenterAssembly:
            return 2 * (nRings - 1) * diagonalPitch
        return ((2 * nRings) - 1) * diagonalPitch

    def getNumCellsGivenRings(self, nRings: int):
        """Calculate the number of cells in a Cartesian grid with a given number of rings.

        Origin-in-cell grids have an odd edge length (``2 * nRings - 1``); grids whose
        origin sits between cells have an even edge length (``2 * nRings``).
        """
        edge = (2 * nRings - 1) if self.core.symmetry.isThroughCenterAssembly else (2 * nRings)
        return edge**2

    def numRingsToHoldNumCells(self, nCells: int):
        """Calculate the number of rings needed in a Cartesian grid to hold a given number of cells.

        Inverse of :meth:`getNumCellsGivenRings`, rounded up to a whole ring; the two
        branches mirror the origin-in-cell vs origin-between-cells layouts.
        """
        edge = math.sqrt(nCells)
        if self.core.symmetry.isThroughCenterAssembly:
            return math.ceil((edge + 1) / 2.0)
        return math.ceil(edge / 2.0)
================================================
FILE: armi/reactor/blocks/hexBlock.py
================================================
# Copyright 2026 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The HexBlock is a vertical slice of a hexagon-shaped assembly. This is a common geometry in reactor design."""
import copy
import functools
import math
import operator
from typing import Callable, ClassVar, Optional, Tuple, Type
import numpy as np
from armi import runLog
from armi.physics.neutronics import GAMMA, NEUTRON
from armi.reactor import components, geometry, grids
from armi.reactor.blocks.block import Block
from armi.reactor.components.basicShapes import Circle, Hexagon
from armi.reactor.components.complexShapes import Helix
from armi.reactor.flags import Flags
from armi.reactor.parameters import ParamLocation
from armi.utils import hexagon, iterables, units
# Type alias: an optional tuple of Component classes that can define a block's pitch.
_PitchDefiningComponent = Optional[Tuple[Type[components.Component], ...]]
class HexBlock(Block):
"""
Defines a Block shaped like a hexagon.
.. impl:: ARMI has the ability to create hex shaped blocks.
:id: I_ARMI_BLOCK_HEX
:implements: R_ARMI_BLOCK_HEX
This class defines hexagonal-shaped Blocks. It inherits functionality from the parent class,
Block, and defines hexagonal-specific methods including, but not limited to, querying pin
pitch, pin linear power densities, hydraulic diameter, and retrieving inner and outer pitch.
"""
PITCH_COMPONENT_TYPE: ClassVar[_PitchDefiningComponent] = (components.Hexagon,)
    def __init__(self, name, height=1.0):
        """Construct a hexagonal block; defers entirely to ``Block.__init__``."""
        Block.__init__(self, name, height)
def coords(self):
"""
Returns the coordinates of the block.
.. impl:: Coordinates of a block are queryable.
:id: I_ARMI_BLOCK_POSI2
:implements: R_ARMI_BLOCK_POSI
Calls to the :py:meth:`~armi.reactor.grids.locations.IndexLocation.getGlobalCoordinates`
method of the block's ``spatialLocator`` attribute, which recursively calls itself on
all parents of the block to get the coordinates of the block's centroid in 3D cartesian
space.
Will additionally adjust the x and y coordinates based on the block parameters
``displacementX`` and ``displacementY``.
"""
x, y, _z = self.spatialLocator.getGlobalCoordinates()
x += self.p.displacementX * 100.0
y += self.p.displacementY * 100.0
return (
round(x, units.FLOAT_DIMENSION_DECIMALS),
round(y, units.FLOAT_DIMENSION_DECIMALS),
)
    def createHomogenizedCopy(self, pinSpatialLocators=False):
        """
        Create a new homogenized copy of a block that is less expensive than a full deepcopy.

        .. impl:: Block compositions can be homogenized.
            :id: I_ARMI_BLOCK_HOMOG
            :implements: R_ARMI_BLOCK_HOMOG

            This method creates and returns a homogenized representation of itself in the form of a
            new Block. The homogenization occurs in the following manner. A single Hexagon Component
            is created and added to the new Block. This Hexagon Component is given the
            :py:class:`armi.materials.mixture._Mixture` material and a volume averaged temperature
            (``getAverageTempInC``). The number densities of the original Block are also stored on
            this new Component (:need:`I_ARMI_CMP_GET_NDENS`). Several parameters from the original
            block are copied onto the homogenized block (e.g., macros, lumped fission products,
            burnup group, number of pins, and spatial grid).

        Notes
        -----
        This can be used to improve performance when a new copy of a reactor needs to be built, but
        the full detail of the block (including component geometry, material, number density, etc.)
        is not required for the targeted physics solver being applied to the new reactor model.

        The main use case is for the uniform mesh converter (UMC). Frequently, a deterministic
        neutronics solver will require a uniform mesh reactor, which is produced by the UMC. Many
        deterministic solvers for fast spectrum reactors will also treat the individual blocks as
        homogenized mixtures. Since the neutronics solver does not need to know about the geometric
        and material details of the individual child components within a block, we can save
        significant effort while building the uniform mesh reactor with the UMC by omitting this
        detailed data and only providing the necessary level of detail for the uniform mesh reactor:
        number densities on each block.

        Individual components within a block can have different temperatures, and this can affect
        cross sections. This temperature variation is captured by the lattice physics module. As
        long as temperature distribution is correctly captured during cross section generation, it
        does not need to be transferred to the neutronics solver directly through this copy
        operation.

        If you make a new block, you must add it to an assembly and a reactor.

        Returns
        -------
        b : A homogenized block containing a single Hexagon Component that contains an average
            temperature and the number densities from the original block.

        See Also
        --------
        armi.reactor.converters.uniformMesh.UniformMeshGeometryConverter.makeAssemWithUniformMesh
        """
        b = self.__class__(self.getName(), height=self.getHeight())
        b.setType(self.getType(), self.p.flags)
        # assign macros and LFP
        b.macros = self.macros
        b._lumpedFissionProducts = self._lumpedFissionProducts
        b.p.envGroup = self.p.envGroup
        # One hexagon carries all the mass; both temperatures are the volume-averaged
        # block temperature. The last argument sizes the hexagon -- presumably the
        # pitch-defining dimension; confirm against _pitchDefiningComponent's layout.
        hexComponent = Hexagon(
            "homogenizedHex",
            "_Mixture",
            self.getAverageTempInC(),
            self.getAverageTempInC(),
            self._pitchDefiningComponent[1],
        )
        hexComponent.setNumberDensities(self.getNumberDensities())
        b.add(hexComponent)
        b.p.nPins = self.p.nPins
        if pinSpatialLocators:
            # create a null component with cladding flags and spatialLocator from source block's
            # clad components in case pin locations need to be known for physics solver
            if self.hasComponents(Flags.CLAD):
                cladComponents = self.getComponents(Flags.CLAD)
                for i, clad in enumerate(cladComponents):
                    # zero-radius void circle: geometry-free placeholder that only
                    # carries the pin locations and multiplicity
                    pinComponent = Circle(
                        f"voidPin{i}",
                        "Void",
                        self.getAverageTempInC(),
                        self.getAverageTempInC(),
                        0.0,
                    )
                    pinComponent.setType("pin", Flags.CLAD)
                    pinComponent.spatialLocator = copy.deepcopy(clad.spatialLocator)
                    if isinstance(pinComponent.spatialLocator, grids.MultiIndexLocation):
                        # re-associate the deep-copied locators with the original grids
                        for i1, i2 in zip(list(pinComponent.spatialLocator), list(clad.spatialLocator)):
                            i1.associate(i2.grid)
                    pinComponent.setDimension("mult", clad.getDimension("mult"))
                    b.add(pinComponent)
        if self.spatialGrid is not None:
            b.spatialGrid = self.spatialGrid
        return b
def getMaxArea(self):
"""Compute the max area of this block if it was totally full."""
pitch = self.getPitch()
if not pitch:
return 0.0
return hexagon.area(pitch)
def getDuctIP(self):
"""Returns the duct IP dimension."""
duct = self.getComponent(Flags.DUCT, exact=True)
return duct.getDimension("ip")
def getDuctOP(self):
"""Returns the duct OP dimension."""
duct = self.getComponent(Flags.DUCT, exact=True)
return duct.getDimension("op")
def setPinPowers(self, powers, powerKeySuffix=""):
"""
Updates the pin linear power densities of this block.
The linear densities are represented by the ``linPowByPin`` parameter.
It is expected that the ordering of ``powers`` is consistent with :meth:`getPinLocations`. That helps ensure
alignment with component-level look ups like :meth:`~armi.reactor.components.Circle.getPinIndices`.
The ``linPowByPin`` parameter can be directly assigned to instead of using this method if the multiplicity of
the pins in the block is equal to the number of pins in the block.
Parameters
----------
powers : list of floats, required
The block-level pin linear power densities. ``powers[i]`` represents the average linear power density of pin
``i`` location at ``self.getPinLocations()[i]``. The units of linear power density is watts/cm (i.e., watts
produced per cm of pin length).
powerKeySuffix: str, optional
Must be either an empty string, :py:const:`NEUTRON `, or
:py:const:`GAMMA `. Defaults to empty string.
"""
numPins = self.getNumPins()
if not numPins or numPins != len(powers):
raise ValueError(
f"Invalid power data for {self} with {numPins} pins. Got {len(powers)} entries in powers: {powers}"
)
powerKey = f"linPowByPin{powerKeySuffix}"
self.p[powerKey] = powers
# If using the *powerKeySuffix* parameter, we also need to set total power, which is sum of neutron and gamma
# powers. We assume that a solo gamma calculation to set total power does not make sense.
if powerKeySuffix:
if powerKeySuffix == GAMMA:
if self.p[f"linPowByPin{NEUTRON}"] is None:
msg = f"Neutron power has not been set yet. Cannot set total power for {self}."
raise UnboundLocalError(msg)
self.p.linPowByPin = self.p[f"linPowByPin{NEUTRON}"] + self.p[powerKey]
else:
self.p.linPowByPin = self.p[powerKey]
def rotate(self, rad: float):
"""
Rotates a block's spatially varying parameters by a specified angle in the counter-clockwise direction.
The parameters must have a ParamLocation of either CORNERS or EDGES and must be a Python list of length 6 in
order to be eligible for rotation; all parameters that do not meet these two criteria are not rotated.
.. impl:: Rotating a hex block updates parameters on the boundary, the orientation
parameter, and the spatial coordinates on contained objects.
:id: I_ARMI_ROTATE_HEX_BLOCK
:implements: R_ARMI_ROTATE_HEX
This method rotates a block on a hexagonal grid, conserving the 60-degree symmetry of the grid. It first
determines how many rotations the block will undergo based on the 60-degree hex grid. Then it uses that
"rotation number" to do a few things: reset the orientation parameter, rotate the children, and rotate the
boundary parameters. It also sets the "displacement in X" and "displacement in Y" parameters.
Parameters
----------
rad: float, required
Angle of counter-clockwise rotation in units of radians. Rotations must be in 60-degree increments
(i.e., PI/3, 2 * PI/3, PI, 4 * PI/3, 5 * PI/3, and 2 * PI).
"""
rotNum = round((rad % (2 * math.pi)) / math.radians(60))
self._rotateChildLocations(rad, rotNum)
if self.p.orientation is None:
self.p.orientation = np.array([0.0, 0.0, 0.0])
self.p.orientation[2] += rotNum * 60.0
self._rotateBoundaryParameters(rotNum)
self._rotateDisplacement(rad)
    def _rotateChildLocations(self, radians: float, rotNum: int):
        """Update spatial locators for children.

        Parameters
        ----------
        radians : float
            Rotation angle, used for coordinate-based locators.
        rotNum : int
            Number of 60-degree steps, used for index-based locators.

        Raises
        ------
        TypeError
            If a child has a non-None locator of an unrecognized type.
        """
        # Without a grid there are no locators to transform.
        if self.spatialGrid is None:
            return
        locationRotator = functools.partial(self.spatialGrid.rotateIndex, rotations=rotNum)
        # standard 2D rotation matrix for coordinate-based locators
        rotationMatrix = np.array([[math.cos(radians), -math.sin(radians)], [math.sin(radians), math.cos(radians)]])
        for c in self:
            # Multi-index first: it must be handled before the plain IndexLocation
            # branch would otherwise match.
            if isinstance(c.spatialLocator, grids.MultiIndexLocation):
                newLocations = list(map(locationRotator, c.spatialLocator))
                c.spatialLocator = grids.MultiIndexLocation(self.spatialGrid)
                c.spatialLocator.extend(newLocations)
            elif isinstance(c.spatialLocator, grids.CoordinateLocation):
                # rotate only x and y; z is unchanged
                oldCoords = c.spatialLocator.getLocalCoordinates()
                newXY = rotationMatrix.dot(oldCoords[:2])
                newLocation = grids.CoordinateLocation(newXY[0], newXY[1], oldCoords[2], self.spatialGrid)
                c.spatialLocator = newLocation
            elif isinstance(c.spatialLocator, grids.IndexLocation):
                c.spatialLocator = locationRotator(c.spatialLocator)
            elif c.spatialLocator is not None:
                msg = f"{c} on {self} has an invalid spatial locator for rotation: {c.spatialLocator}"
                runLog.error(msg)
                raise TypeError(msg)
    def _rotateBoundaryParameters(self, rotNum: int):
        """Rotate any parameters defined on the corners or edges of the bounding hexagon.

        Only 6-entry lists/arrays (one value per hex edge or corner) are rotated, by
        pivoting the sequence. Scalars, unset (None) values, and empty sequences are
        skipped; other lengths produce a warning.

        Parameters
        ----------
        rotNum : int
            Rotation number between zero and five, inclusive, specifying how many rotations have taken place.

        Raises
        ------
        TypeError
            If a corner/edge parameter holds an unexpected data type.
        """
        names = self.p.paramDefs.atLocation(ParamLocation.CORNERS).names
        names += self.p.paramDefs.atLocation(ParamLocation.EDGES).names
        for name in names:
            original = self.p[name]
            if isinstance(original, (list, np.ndarray)):
                if len(original) == 6:
                    # Rotate by making the -rotNum item be first
                    self.p[name] = iterables.pivot(original, -rotNum)
                elif len(original) == 0:
                    # Hasn't been defined yet, no warning needed.
                    pass
                else:
                    msg = (
                        "No rotation method defined for spatial parameters that aren't defined "
                        f"once per hex edge/corner. No rotation performed on {name}"
                    )
                    runLog.warning(msg)
            elif isinstance(original, (int, float)):
                # this is a scalar and there shouldn't be any rotation.
                pass
            elif original is None:
                # param is not set yet. no rotations as well.
                pass
            else:
                raise TypeError(
                    f"b.rotate() method received unexpected data type for {name} on block {self}\n"
                    + f"expected list, np.ndarray, int, or float. received {original}"
                )
def _rotateDisplacement(self, rad: float):
# This specifically uses the .get() functionality to avoid an error if this parameter does not exist.
dispx = self.p.get("displacementX")
dispy = self.p.get("displacementY")
if (dispx is not None) and (dispy is not None):
self.p.displacementX = dispx * math.cos(rad) - dispy * math.sin(rad)
self.p.displacementY = dispx * math.sin(rad) + dispy * math.cos(rad)
    def verifyBlockDims(self):
        """Perform some checks on this type of block before it is assembled.

        Warns (does not raise) when the wire wrap does not touch the clad or when
        components needed for the pin-to-duct gap are missing; raises ``ValueError``
        only when pins clearly overlap the duct.
        """
        try:
            wireComp = self.getComponent(Flags.WIRE, quiet=True)  # Quiet because None case is checked for below
            ductComps = self.getComponents(Flags.DUCT)
            cladComp = self.getComponent(Flags.CLAD, quiet=True)  # Quiet because None case is checked for below
        except ValueError:
            # there is probably more than one clad/wire, so we really dont know what this block looks like
            runLog.info(f"Block design {self} is too complicated to verify dimensions. Make sure they are correct!")
            return
        # check wire wrap in contact with clad
        if cladComp is not None and wireComp is not None:
            wwCladGap = self.getWireWrapCladGap(cold=True)
            # rounded to 6 decimals: tiny numerical residue is not worth warning about
            if round(wwCladGap, 6) != 0.0:
                runLog.warning(
                    "The gap between wire wrap and clad in block {} was {} cm. Expected 0.0.".format(self, wwCladGap),
                    single=True,
                )
        # check clad duct overlap
        pinToDuctGap = self.getPinToDuctGap(cold=True)
        # Allow for some tolerance; user input precision may lead to slight negative gaps
        if pinToDuctGap is not None and pinToDuctGap < -0.005:
            raise ValueError(
                "Gap between pins and duct is {0:.4f} cm in {1}. Make more room.".format(pinToDuctGap, self)
            )
        elif pinToDuctGap is None:
            # only produce a warning if pin or clad are found, but not all of pin, clad and duct. We may need to tune
            # this logic a bit
            ductComp = next(iter(ductComps), None)
            if (cladComp is not None or wireComp is not None) and any(
                [c is None for c in (wireComp, cladComp, ductComp)]
            ):
                runLog.warning("Some component was missing in {} so pin-to-duct gap not calculated".format(self))
    def getPinToDuctGap(self, cold=False):
        """
        Returns the distance in cm between the outer most pin and the duct in a block.

        Parameters
        ----------
        cold : boolean
            Determines whether the results should be cold or hot dimensions.

        Returns
        -------
        pinToDuctGap : float or None
            Returns the diameteral gap between the outer most pins in a hex pack to the duct inner
            face to face in cm. ``None`` when a required component (hexagonal duct with an "ip",
            wire, or clad) is missing, in which case the gap is not well-defined.
        """
        wire = self.getComponent(Flags.WIRE, quiet=True)  # Quiet because None case is checked for below
        ducts = sorted(self.getChildrenWithFlags(Flags.DUCT))
        duct = None
        if any(ducts):
            # innermost duct (sorted order) is the one facing the pins
            duct = ducts[0]
            if not isinstance(duct, components.Hexagon):
                # getPinCenterFlatToFlat only works for hexes
                # inner most duct might be circle or some other shape
                duct = None
            elif isinstance(duct, components.HoledHexagon):
                # has no ip and is circular on inside so following
                # code will not work
                duct = None
        clad = self.getComponent(Flags.CLAD, quiet=True)  # Quiet because None case is checked for below
        if any(c is None for c in (duct, wire, clad)):
            return None
        # NOTE: If nRings was a None, this could be for a non-hex packed fuel assembly see thermal hydraulic design
        # basis for description of equation
        pinCenterFlatToFlat = self.getPinCenterFlatToFlat(cold=cold)
        # outermost pin envelope: pin centers plus one clad OD plus wire on both sides
        pinOuterFlatToFlat = (
            pinCenterFlatToFlat + clad.getDimension("od", cold=cold) + 2.0 * wire.getDimension("od", cold=cold)
        )
        ductMarginToContact = duct.getDimension("ip", cold=cold) - pinOuterFlatToFlat
        # halved: the diametral margin is split between the two sides
        pinToDuctGap = ductMarginToContact / 2.0
        return pinToDuctGap
def getRotationNum(self) -> int:
"""Get index 0 through 5 indicating number of rotations counterclockwise around the z-axis."""
# assume rotation only in Z
return np.rint(self.p.orientation[2] / 360.0 * 6) % 6
def setRotationNum(self, rotNum: int):
"""
Set orientation based on a number 0 through 5 indicating number of rotations
counterclockwise around the z-axis.
"""
self.p.orientation[2] = 60.0 * rotNum
    def getSymmetryFactor(self):
        """
        Return a factor between 1 and N where 1/N is how much cut-off by symmetry lines this mesh cell is.

        Reactor-level meshes have symmetry information so we have a reactor for this to work. That is why it is not
        implemented on the grid/locator level.

        When edge-assemblies are included on both edges (i.e. MCNP or DIF3D-FD 1/3-symmetric cases), the edge assemblies
        have symmetry factors of 2.0. Otherwise (DIF3D-nodal) there's a full assembly on the bottom edge (overhanging)
        and no assembly at the top edge so the ones at the bottom are considered full (symmetryFactor=1).

        If this block is not in any grid at all, then there can be no symmetry so return 1.
        """
        try:
            symmetry = self.parent.spatialLocator.grid.symmetry
        except Exception:
            # deliberately broad: any missing parent/locator/grid means "not in a grid"
            return 1.0
        if symmetry.domain == geometry.DomainType.THIRD_CORE and symmetry.boundary == geometry.BoundaryType.PERIODIC:
            indices = self.spatialLocator.getCompleteIndices()
            if indices[0] == 0 and indices[1] == 0:
                # central location
                return 3.0
            else:
                symmetryLine = self.core.spatialGrid.overlapsWhichSymmetryLine(indices)
                # Detect if upper edge assemblies are included. Doing this is the only way to know definitively whether
                # or not the edge assemblies are half-assems or full. Seeing the first one is the easiest way to detect
                # them. Check it last in the and statement so we don't waste time doing it.
                upperEdgeLoc = self.core.spatialGrid[-1, 2, 0]
                if symmetryLine in [
                    grids.BOUNDARY_0_DEGREES,
                    grids.BOUNDARY_120_DEGREES,
                ] and bool(self.core.childrenByLocator.get(upperEdgeLoc)):
                    return 2.0
        return 1.0
    def autoCreateSpatialGrids(self, systemSpatialGrid=None):
        """
        Given a block without a spatialGrid, create a spatialGrid and give its children the corresponding
        spatialLocators (if it is a simple block).

        In this case, a simple block would be one that has either multiplicity of components equal to 1 or N but no
        other multiplicities. Also, this should only happen when N fits exactly into a given number of hex rings.
        Otherwise, do not create a grid for this block.

        Parameters
        ----------
        systemSpatialGrid : Grid, optional
            Spatial Grid of the system-level parent of this Assembly that contains this Block.

        Notes
        -----
        When a hex grid has another hex grid nested inside it, the nested grid has the opposite orientation (corners vs
        flats up). This method takes care of that.

        If components inside this block are multiplicity 1, they get a single locator at the center of the grid cell.
        If the multiplicity is greater than 1, all the components are added to a multiIndexLocation on the hex grid.

        Raises
        ------
        ValueError
            If the multiplicities of the block are not only 1 or N or if generated ringNumber leads to more positions
            than necessary.
        """
        # not necessary
        if self.spatialGrid is not None:
            return
        # Check multiplicities
        mults = {c.getDimension("mult") for c in self.iterComponents()}
        # Do some validation: Should we try to create a spatial grid?
        multz = {float(m) for m in mults}
        if len(multz) == 1 and 1.0 in multz:
            runLog.extra(
                f"Block {self.p.type} does not need a spatial grid: multiplicities are all 1.",
                single=True,
            )
            return
        elif len(multz) != 2 or 1.0 not in multz:
            runLog.extra(
                f"Could not create a spatialGrid for block {self.p.type}, multiplicities are not {{1, N}} "
                f"they are {mults}",
                single=True,
            )
            return
        # build the grid, from pitch and orientation
        if isinstance(systemSpatialGrid, grids.HexGrid):
            # nested hex grids flip orientation relative to their parent
            cornersUp = not systemSpatialGrid.cornersUp
        else:
            cornersUp = False
        grid = grids.HexGrid.fromPitch(
            self.getPinPitch(cold=True),
            numRings=0,
            armiObject=self,
            cornersUp=cornersUp,
        )
        ringNumber = hexagon.numRingsToHoldNumCells(self.getNumPins())
        # verify that the pin count fills the rings exactly; otherwise a grid is ambiguous
        numLocations = 0
        for ring in range(ringNumber):
            numLocations = numLocations + hexagon.numPositionsInRing(ring + 1)
        if numLocations != self.getNumPins():
            raise ValueError(
                "Cannot create spatialGrid, number of locations in rings {} not equal to pin number {}".format(
                    numLocations, self.getNumPins()
                )
            )
        # set the spatial position of the sub-block components
        spatialLocators = grids.MultiIndexLocation(grid=grid)
        for ring in range(ringNumber):
            for pos in range(grid.getPositionsInRing(ring + 1)):
                i, j = grid.getIndicesFromRingAndPos(ring + 1, pos + 1)
                spatialLocators.append(grid[i, j, 0])
        # finally, fill the spatial grid, and put the sub-block components on it
        if self.spatialGrid is None:
            self.spatialGrid = grid
        for c in self:
            if c.getDimension("mult") > 1:
                c.spatialLocator = spatialLocators
            elif c.getDimension("mult") == 1:
                c.spatialLocator = grids.CoordinateLocation(0.0, 0.0, 0.0, grid)
    def assignPinIndices(self):
        """Assign pin indices for pin components on the block.

        First wipes all previous ``pinIndices``, then assigns them on "primary"
        components (fuel/control/shield), then fills in any remaining gridded
        components that do not share a lattice site with an already-indexed one.
        No-op when the block has no spatial grid or no pin locations.
        """
        if self.spatialGrid is None:
            return
        locations = self.getPinLocations()
        if not locations:
            return
        # Clear out any previous values. If your block is built with one ordering
        # and then sorted, things that used to have pin indices may now have invalid
        # pin indices. Wipe them out just to be safe
        for c in self:
            c.p.pinIndices = None
        ijGetter = operator.attrgetter("i", "j")
        allIJ: tuple[tuple[int, int]] = tuple(map(ijGetter, locations))
        # Flags for components that we want to set this parameter
        # Usually things are linked to one of these "important" flags, like
        # a cladding component having linked dimensions to a fuel component
        primaryFlags = (Flags.FUEL, Flags.CONTROL, Flags.SHIELD)
        withPinIndices: list[components.Component] = []
        for c in self.iterChildrenWithFlags(primaryFlags):
            if self._setPinIndices(c, ijGetter, allIJ):
                withPinIndices.append(c)
        # Iterate over every other thing on the grid and make sure
        # 1) it share a lattice site with something that has pin indices, or
        # 2) it itself declares the pin indices
        for c in self:
            if c.p.pinIndices is not None:
                continue
            # Does anything with pin indices share this lattice site?
            if any(other.spatialLocator == c.spatialLocator for other in withPinIndices):
                continue
            if self._setPinIndices(c, ijGetter, allIJ):
                withPinIndices.append(c)
    @staticmethod
    def _setPinIndices(
        c: components.Component, ijGetter: Callable[[grids.IndexLocation], tuple[int, int]], allIJ: tuple[tuple[int, int], ...]
    ) -> bool:
        """Set ``c.p.pinIndices`` from the component's lattice sites.

        Returns True if indices were assigned; False when the component does not
        live on the grid (a ``CoordinateLocation``) or has an unrecognized
        locator type. May raise ``ValueError`` (from ``tuple.index``) if a local
        site is not present in ``allIJ``.
        """
        localLocations = c.spatialLocator
        if isinstance(localLocations, grids.MultiIndexLocation):
            # multiplicity > 1: one (i, j) pair per occupied lattice site
            localIJ = list(map(ijGetter, localLocations))
        # CoordinateLocations do not live on the grid, by definition
        elif isinstance(localLocations, grids.CoordinateLocation):
            return False
        elif isinstance(localLocations, grids.IndexLocation):
            localIJ = [ijGetter(localLocations)]
        else:
            return False
        # Map each local (i, j) site to its position in the block-wide ordering.
        localIndices = list(map(allIJ.index, localIJ))
        c.p.pinIndices = localIndices
        return True
def getPinCenterFlatToFlat(self, cold=False):
"""Return the flat-to-flat distance between the centers of opposing pins in the outermost ring."""
clad = self.getComponent(Flags.CLAD)
nRings = hexagon.numRingsToHoldNumCells(clad.getDimension("mult"))
pinPitch = self.getPinPitch(cold=cold)
pinCenterCornerToCorner = 2 * (nRings - 1) * pinPitch
pinCenterFlatToFlat = math.sqrt(3.0) / 2.0 * pinCenterCornerToCorner
return pinCenterFlatToFlat
def hasPinPitch(self):
"""Return True if the block has enough information to calculate pin pitch."""
try:
return (self.getComponent(Flags.CLAD, quiet=True) is not None) and (
self.getComponent(Flags.WIRE, quiet=True) is not None
)
except ValueError:
# not well defined pitch due to multiple pin and/or wire components
return False
def getPinPitch(self, cold=False):
"""
Get the pin pitch in cm.
Assumes that the pin pitch is defined entirely by contacting cladding tubes and wire wraps.
Grid spacers not yet supported.
Parameters
----------
cold : boolean
Determines whether the dimensions should be cold or hot
Returns
-------
pinPitch : float
pin pitch in cm
"""
try:
clad = self.getComponent(Flags.CLAD, quiet=True) # Quiet because None case is checked for below
wire = self.getComponent(Flags.WIRE, quiet=True) # Quiet because None case is checked for below
except ValueError:
raise ValueError(f"Block {self} has multiple clad and wire components, so pin pitch is not well-defined.")
if wire and clad:
return clad.getDimension("od", cold=cold) + wire.getDimension("od", cold=cold)
else:
raise ValueError(f"Cannot get pin pitch in {self} because it does not have a wire and a clad")
def getWettedPerimeter(self):
"""
Return the total wetted perimeter of the block in cm.
Notes
-----
Please be aware that this method is specific to Fast Reactors, and probably even Sodium Fast Reactors. This is
obviously an awkward design choice, and we hope to improve upon it soon.
"""
# flags pertaining to hexagon components where the interior of the hexagon is wetted
wettedHollowHexagonComponentFlags = (
Flags.DUCT,
Flags.GRID_PLATE,
Flags.INLET_NOZZLE,
Flags.HANDLING_SOCKET,
Flags.DUCT | Flags.DEPLETABLE,
Flags.GRID_PLATE | Flags.DEPLETABLE,
Flags.INLET_NOZZLE | Flags.DEPLETABLE,
Flags.HANDLING_SOCKET | Flags.DEPLETABLE,
)
# flags pertaining to circular pin components where the exterior of the circle is wetted
wettedPinComponentFlags = (
Flags.CLAD,
Flags.WIRE,
)
# flags pertaining to components where both the interior and exterior are wetted
wettedHollowComponentFlags = (
Flags.DUCT | Flags.INNER,
Flags.DUCT | Flags.INNER | Flags.DEPLETABLE,
)
# obtain all wetted components based on type
wettedHollowHexagonComponents = []
for flag in wettedHollowHexagonComponentFlags:
c = self.getComponent(flag, exact=True)
wettedHollowHexagonComponents.append(c) if c else None
wettedPinComponents = []
for flag in wettedPinComponentFlags:
comps = self.getComponents(flag)
wettedPinComponents.extend(comps)
wettedHollowCircleComponents = []
wettedHollowHexComponents = []
for flag in wettedHollowComponentFlags:
c = self.getComponent(flag, exact=True)
if isinstance(c, Hexagon):
wettedHollowHexComponents.append(c) if c else None
else:
wettedHollowCircleComponents.append(c) if c else None
# calculate wetted perimeters according to their geometries
# hollow hexagon = 6 * ip / sqrt(3)
wettedHollowHexagonPerimeter = 0.0
for c in wettedHollowHexagonComponents:
wettedHollowHexagonPerimeter += 6 * c.getDimension("ip") / math.sqrt(3) if c else 0.0
# solid circle = NumPins * pi * (Comp Diam + Wire Diam)
wettedPinPerimeter = 0.0
for c in wettedPinComponents:
correctionFactor = 1.0
if isinstance(c, Helix):
# account for the helical wire wrap
correctionFactor = np.hypot(
1.0,
math.pi * c.getDimension("helixDiameter") / c.getDimension("axialPitch"),
)
compWettedPerim = c.getDimension("od") * correctionFactor * c.getDimension("mult") * math.pi
wettedPinPerimeter += compWettedPerim
# hollow circle = (id + od) * pi
wettedHollowCirclePerimeter = 0.0
for c in wettedHollowCircleComponents:
wettedHollowCirclePerimeter += c.getDimension("id") + c.getDimension("od") if c else 0.0
wettedHollowCirclePerimeter *= math.pi
# hollow hexagon = 6 * (ip + op) / sqrt(3)
wettedHollowHexPerimeter = 0.0
for c in wettedHollowHexComponents:
wettedHollowHexPerimeter += c.getDimension("ip") + c.getDimension("op") if c else 0.0
wettedHollowHexPerimeter *= 6 / math.sqrt(3)
return (
wettedHollowHexagonPerimeter + wettedPinPerimeter + wettedHollowCirclePerimeter + wettedHollowHexPerimeter
)
def getFlowArea(self):
"""Return the total flowing coolant area of the block in cm^2."""
area = self.getComponent(Flags.COOLANT, exact=True).getArea()
for c in self.getComponents(Flags.INTERDUCTCOOLANT, exact=True):
area += c.getArea()
return area
def getHydraulicDiameter(self):
"""
Return the hydraulic diameter in this block in cm.
Hydraulic diameter is 4A/P where A is the flow area and P is the wetted perimeter. In a hex assembly, the wetted
perimeter includes the cladding, the wire wrap, and the inside of the duct. The flow area is the inner area of
the duct minus the area of the pins and the wire.
"""
return 4.0 * self.getFlowArea() / self.getWettedPerimeter()
================================================
FILE: armi/reactor/blocks/thRZBlock.py
================================================
# Copyright 2026 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple base class to help define blocks in a Theta-R-Z geometry."""
from armi.reactor.blocks.block import Block
class ThRZBlock(Block):
    """Block specialized for a Theta-R-Z geometry.

    The radial/theta accessors report the extreme of the corresponding
    component dimension, or ``None`` when no component defines it.
    """

    def _extremeDimension(self, dimensionName, selector):
        # Shared lookup: apply ``selector`` (min or max) over the named
        # dimension's values, or return None when no component defines it.
        values = self.getDimensions(dimensionName)
        return selector(values) if values else None

    def getMaxArea(self):
        """Return the area of the Theta-R-Z block if it was totally full."""
        raise NotImplementedError("Cannot get max area of a TRZ block. Fully specify your geometry.")

    def radialInner(self):
        """Return a smallest radius of all the components."""
        return self._extremeDimension("inner_radius", min)

    def radialOuter(self):
        """Return a largest radius of all the components."""
        return self._extremeDimension("outer_radius", max)

    def thetaInner(self):
        """Return a smallest theta of all the components."""
        return self._extremeDimension("inner_theta", min)

    def thetaOuter(self):
        """Return a largest theta of all the components."""
        return self._extremeDimension("outer_theta", max)

    def axialInner(self):
        """Return the lower z-coordinate."""
        return self.getDimensions("inner_axial")

    def axialOuter(self):
        """Return the upper z-coordinate."""
        return self.getDimensions("outer_axial")

    def verifyBlockDims(self):
        """Perform dimension checks related to ThetaRZ blocks."""
        return
================================================
FILE: armi/reactor/blueprints/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Blueprints describe the geometric and composition details of the objects in the reactor
(e.g. fuel assemblies, control rods, etc.).
Inputs captured within this blueprints module pertain to major design criteria like
custom material properties or basic structures like the assemblies in use.
This is essentially a wrapper for a yaml loader.
The given yaml file is expected to rigidly adhere to given key:value pairings.
See the :ref:`blueprints documentation` for more details.
The expected file structure is::
nuclide flags:
AM241: {burn: true, xs: true}
...
custom isotopics: {} # optional
blocks:
name:
component name:
component dimensions
...
assemblies:
name:
specifier: ABC
blocks: [...]
height: [...]
axial mesh points: [...]
xs types: [...]
# optional
myMaterialModification1: [...]
myMaterialModification2: [...]
# optionally extra settings (note this is probably going to be a removed feature)
# hotChannelFactors: TWRPclad
Examples
--------
>>> design = blueprints.Blueprints.load(self.yamlString)
>>> print(design.gridDesigns)
Notes
-----
The blueprints system was built to enable round trip translations between
text representations of input and objects in the code.
"""
import copy
import io
import math
import pathlib
import traceback
import typing
import h5py
import ordered_set
import yamlize
import yamlize.objects
from ruamel.yaml import RoundTripLoader
from armi import (
context,
getPluginManager,
getPluginManagerOrFail,
migration,
plugins,
runLog,
)
from armi.nucDirectory import nuclideBases
from armi.physics.neutronics.settings import CONF_LOADING_FILE
from armi.reactor import assemblies
from armi.reactor.blueprints import isotopicOptions
from armi.reactor.blueprints.assemblyBlueprint import AssemblyKeyedList
from armi.reactor.blueprints.blockBlueprint import BlockKeyedList
from armi.reactor.blueprints.componentBlueprint import (
ComponentGroups,
ComponentKeyedList,
)
from armi.reactor.blueprints.gridBlueprint import Grids, Triplet
from armi.reactor.blueprints.reactorBlueprint import SystemBlueprint, Systems
from armi.reactor.converters import axialExpansionChanger
from armi.reactor.flags import Flags
from armi.settings.fwSettings.globalSettings import (
CONF_ACCEPTABLE_BLOCK_AREA_ERROR,
CONF_ASSEM_FLAGS_SKIP_AXIAL_EXP,
CONF_DETAILED_AXIAL_EXPANSION,
CONF_INPUT_HEIGHTS_HOT,
CONF_NON_UNIFORM_ASSEM_FLAGS,
)
from armi.utils import tabulate, textProcessors
from armi.utils.customExceptions import InputError
context.BLUEPRINTS_IMPORTED = True
context.BLUEPRINTS_IMPORT_CONTEXT = "".join(traceback.format_stack())
def loadFromCs(cs, roundTrip=False):
    """Function to load Blueprints based on supplied ``Settings``.

    Parameters
    ----------
    cs : Settings
        Settings object pointing at the blueprints ("loading") file, which may
        be a YAML file or an ARMI database (.h5/.hdf5) with embedded blueprints.
    roundTrip : bool
        If True, load the YAML so its formatting/comments can be round-tripped.

    Returns
    -------
    Blueprints or None
        The loaded blueprints, or None when a database has no stored blueprints.
    """
    from armi.utils import directoryChangers

    with directoryChangers.DirectoryChanger(cs.inputDirectory, dumpOnException=False):
        bpPath = pathlib.Path(cs[CONF_LOADING_FILE])
        if bpPath.suffix.lower() in (".h5", ".hdf5"):
            # This is a case settings from a database so the blueprints are also in the database.
            # Use a context manager so the HDF5 file handle is always released (the
            # previous implementation leaked the open file).
            with h5py.File(bpPath, "r") as db:
                try:
                    # KeyError is only expected from this lookup; not all reactors need
                    # blueprints, so the dataset may be absent.
                    bpString = db["inputs/blueprints"].asstr()[()]
                except KeyError:
                    bp = None
                else:
                    stream = io.StringIO(bpString)
                    stream = Blueprints.migrate(stream)
                    bp = Blueprints.load(stream)
        else:
            with open(cs[CONF_LOADING_FILE], "r") as bpYaml:
                root = bpPath.parent.absolute()
                bpYaml = textProcessors.resolveMarkupInclusions(bpYaml, root)
                try:
                    bp = Blueprints.load(bpYaml, roundTrip=roundTrip)
                except yamlize.yamlizing_error.YamlizingError as err:
                    if "cross sections" in err.args[0]:
                        runLog.error(
                            "The loading file {} contains invalid `cross sections` input. "
                            "Please run the `modify` entry point on this case to automatically convert."
                            "".format(cs[CONF_LOADING_FILE])
                        )
                    raise
    return bp
class _BlueprintsPluginCollector(yamlize.objects.ObjectType):
    """
    Simple metaclass for adding yamlize.Attributes from plugins to Blueprints.

    This calls the defineBlueprintsSections() plugin hook to discover new class
    attributes to add before the yamlize code fires off to make the root yamlize.Object.
    Since yamlize.Object itself uses a metaclass to define the attributes to turn into
    yamlize.Attributes, these need to be folded in early.
    """
    def __new__(mcs, name, bases, attrs):
        # If the plugin manager is not configured yet, no plugin sections can be
        # collected; the Blueprints class is still created, just without them.
        pm = getPluginManager()
        if pm is None:
            runLog.warning(
                "Blueprints were instantiated before the framework was "
                "configured with plugins. Blueprints cannot be imported before "
                "ARMI has been configured."
            )
        else:
            pluginSections = pm.hook.defineBlueprintsSections()
            for plug in pluginSections:
                for attrName, section, resolver in plug:
                    assert isinstance(section, yamlize.Attribute)
                    if attrName in attrs:
                        raise plugins.PluginError(
                            "There is already a section called '{}' in the reactor blueprints".format(attrName)
                        )
                    attrs[attrName] = section
                    # resolver is called later during Blueprints._prepConstruction
                    attrs["_resolveFunctions"].append(resolver)
        newType = yamlize.objects.ObjectType.__new__(mcs, name, bases, attrs)
        return newType
class Blueprints(yamlize.Object, metaclass=_BlueprintsPluginCollector):
    """Base blueprints object representing all the subsections in the input file.

    Each yamlize Attribute below maps a top-level blueprints YAML section to a
    typed design container. Plugins may contribute additional sections via the
    metaclass (see ``_BlueprintsPluginCollector``).
    """
    nuclideFlags = yamlize.Attribute(key="nuclide flags", type=isotopicOptions.NuclideFlags, default=None)
    customIsotopics = yamlize.Attribute(key="custom isotopics", type=isotopicOptions.CustomIsotopics, default=None)
    blockDesigns = yamlize.Attribute(key="blocks", type=BlockKeyedList, default=None)
    assemDesigns = yamlize.Attribute(key="assemblies", type=AssemblyKeyedList, default=None)
    systemDesigns = yamlize.Attribute(key="systems", type=Systems, default=None)
    gridDesigns = yamlize.Attribute(key="grids", type=Grids, default=None)
    componentDesigns = yamlize.Attribute(key="components", type=ComponentKeyedList, default=None)
    componentGroups = yamlize.Attribute(key="component groups", type=ComponentGroups, default=None)
    # These are used to set up new attributes that come from plugins.
    # NOTE: this list is appended to at class-creation time by the metaclass.
    _resolveFunctions = []
    def __new__(cls):
        # yamlizable does not call __init__, so attributes that are not defined above need to be
        # initialized here
        self = yamlize.Object.__new__(cls)
        self.assemblies = {}
        self._prepped = False
        self._assembliesBySpecifier = {}
        # Better for performance since these are used for lookups
        self.allNuclidesInProblem = ordered_set.OrderedSet()
        self.activeNuclides = ordered_set.OrderedSet()
        self.inertNuclides = ordered_set.OrderedSet()
        self.nucsToForceInXsGen = ordered_set.OrderedSet()
        self.elementsToExpand = []
        return self
    def __init__(self):
        # Yamlize does not call __init__; instead we use Blueprints.load, which creates an
        # instance of a Blueprints object and initializes it with values using setattr.
        # This __init__ only runs when a Blueprints is constructed directly in code.
        self._assembliesBySpecifier = {}
        self._prepped = False
        self.systemDesigns = Systems()
        self.assemDesigns = AssemblyKeyedList()
        self.blockDesigns = BlockKeyedList()
        self.assemblies = {}
        self.grids = Grids()
        self.elementsToExpand = []
    def __repr__(self):
        return f"<{self.__class__.__name__} Assemblies:{len(self.assemDesigns)} Blocks:{len(self.blockDesigns)}>"
    def constructAssem(self, cs, name=None, specifier=None, orientation=0.0):
        """
        Construct a new assembly instance from the assembly designs in this Blueprints object.

        Parameters
        ----------
        cs : Settings
            Used to apply various modeling options when constructing an assembly.
        name : str (optional, and should be exclusive with specifier)
            Name of the assembly to construct. This should match the key that was used to define the
            assembly in the Blueprints YAML file.
        specifier : str (optional, and should be exclusive with name)
            Identifier of the assembly to construct. This should match the identifier that was used
            to define the assembly in the Blueprints YAML file.
        orientation : float (optional, is usually just zero)
            Rotate the Assembly at creation.

        Raises
        ------
        ValueError
            If neither name nor specifier are passed

        Notes
        -----
        There is some possibility for "compiling" the logic with closures to make constructing an
        assembly / block / component faster. At this point it is pretty much irrelevant because we
        are currently just deepcopying already constructed assemblies.

        Currently, this method is backward compatible with other code in ARMI and generates the
        `.assemblies` attribute (the BOL assemblies). Eventually, this should be removed.
        """
        self._prepConstruction(cs)
        if name is not None:
            assem = self.assemblies[name]
        elif specifier is not None:
            assem = self._assembliesBySpecifier[specifier]
        else:
            raise ValueError("Must supply assembly name or specifier to construct")
        a = copy.deepcopy(assem)
        # since a deepcopy has the same assembly numbers and block id's, we need to make it unique
        a.makeUnique()
        if orientation:
            a.rotate(math.radians(orientation))
        return a
    def _prepConstruction(self, cs):
        """
        This method initializes a bunch of information within a Blueprints object such as assigning
        assembly and block type numbers, resolving the nuclides in the problem, and pre-populating
        assemblies.

        Ideally, it would not be necessary at all, but the ``cs`` currently contains a bunch of
        information necessary to create the applicable model. If it were possible, it would be
        terrific to override the Yamlizable.from_yaml method to run this code after the instance has
        been created, but we need additional information in order to build the assemblies that is
        not within the YAML file.

        This method should not be called directly, but it is used in testing.
        """
        if not self._prepped:
            self._assignTypeNums()
            # plugin-contributed section resolvers run before nuclide resolution
            for func in self._resolveFunctions:
                func(self, cs)
            self._resolveNuclides(cs)
            self._assembliesBySpecifier.clear()
            self.assemblies.clear()
            for aDesign in self.assemDesigns:
                a = aDesign.construct(cs, self)
                self._assembliesBySpecifier[aDesign.specifier] = a
                self.assemblies[aDesign.name] = a
            runLog.header("=========== Verifying Assembly Configurations ===========")
            self._checkAssemblyAreaConsistency(cs)
            if not cs[CONF_DETAILED_AXIAL_EXPANSION]:
                # this is required to set up assemblies so they know how to snap to the reference
                # mesh. They won't know the mesh to conform to otherwise....
                axialExpansionChanger.makeAssemsAbleToSnapToUniformMesh(
                    self.assemblies.values(), cs[CONF_NON_UNIFORM_ASSEM_FLAGS]
                )
            if not cs[CONF_INPUT_HEIGHTS_HOT]:
                runLog.header("=========== Axially expanding all assemblies from Tinput to Thot ===========")
                # expand axial heights from cold to hot so dims and masses are consistent with
                # specified component hot temperatures.
                assemsToSkip = [Flags.fromStringIgnoreErrors(t) for t in cs[CONF_ASSEM_FLAGS_SKIP_AXIAL_EXP]]
                assemsToExpand = list(
                    a for a in list(self.assemblies.values()) if not any(a.hasFlags(f) for f in assemsToSkip)
                )
                axialExpander = getPluginManagerOrFail().hook.getAxialExpansionChanger()
                if axialExpander is not None:
                    axialExpander.expandColdDimsToHot(
                        assemsToExpand,
                        cs[CONF_DETAILED_AXIAL_EXPANSION],
                    )
            getPluginManagerOrFail().hook.afterConstructionOfAssemblies(assemblies=self.assemblies.values(), cs=cs)
        self._prepped = True
    def _assignTypeNums(self):
        """Register every block design referenced by an assembly in ``blockDesigns``."""
        if self.blockDesigns is None:
            # this happens when directly defining assemblies.
            self.blockDesigns = BlockKeyedList()
            for aDesign in self.assemDesigns:
                for bDesign in aDesign.blocks:
                    if bDesign not in self.blockDesigns:
                        self.blockDesigns.add(bDesign)
    def _resolveNuclides(self, cs):
        """
        Process elements and determine how to expand them to natural isotopics.

        Also builds meta-data about which nuclides are in the problem.

        This system works by building a dictionary in the ``elementsToExpand`` attribute with
        ``Element`` keys and list of ``NuclideBase`` values.

        The actual expansion of elementals to isotopics occurs during
        :py:meth:`Component construction `.
        """
        from armi import utils
        actives = set()
        inerts = set()
        nuclideFlags = self.nuclideFlags or isotopicOptions.genDefaultNucFlags()
        nucsToForceInXsGen = set()
        # just expanding flags now. ndense gets expanded in comp blueprints
        self.elementsToExpand = []
        for nucFlag in nuclideFlags:
            # this returns any nuclides that are flagged specifically for expansion by input
            (
                expandedElements,
                undefBurnChainActiveNuclides,
            ) = nucFlag.fileAsActiveOrInert(
                actives,
                inerts,
            )
            self.elementsToExpand.extend(expandedElements)
        # a nuclide flagged both burn and xs counts as active only
        inerts -= actives
        self.customIsotopics = self.customIsotopics or isotopicOptions.CustomIsotopics()
        eleKeep, eleExpand = isotopicOptions.eleExpandInfoBasedOnCodeENDF(cs)
        # Flag all elementals for expansion unless they've been flagged otherwise by
        # user input or automatic lattice/datalib rules.
        for nucBase in nuclideBases.instances:
            isAlreadyIsotopic = not isinstance(nucBase, nuclideBases.NaturalNuclideBase)
            if isAlreadyIsotopic:
                # `elemental` may be a NaturalNuclideBase or a NuclideBase
                # skip all NuclideBases (isotopics)
                continue
            # we now know its an elemental
            elemental = nucBase
            if elemental in eleKeep:
                continue
            if elemental.name in actives:
                currentSet = actives
            elif elemental.name in inerts:
                currentSet = inerts
            else:
                # This was not specified in the nuclide flags at all as burn or xs.
                # If a material with this in its composition is brought in it's nice from a user
                # perspective to allow it.
                # But current behavior is that all nuclides in problem must be declared up front.
                continue
            self.elementsToExpand.append(elemental.element)
            if elemental.name in nuclideFlags and nuclideFlags[elemental.element.symbol].expandTo:
                # user-input expandTo has precedence
                newNuclides = [nuclideBases.byName[nn] for nn in nuclideFlags[elemental.element.symbol].expandTo]
            elif elemental in eleExpand and elemental.element.symbol in nuclideFlags:
                # code-specific expansion required based on code and ENDF
                newNuclides = eleExpand[elemental]
                # Overlay code details onto nuclideFlags for other parts of the code that use them.
                # Also, if this element is not in nuclideFlags at all, we just don't add it.
                nuclideFlags[elemental.element.symbol].expandTo = [nb.name for nb in newNuclides]
            else:
                # expand to all possible natural isotopics
                newNuclides = elemental.element.getNaturalIsotopics()
            # remove the elemental and add the isotopic
            currentSet.remove(elemental.name)
            for nb in newNuclides:
                currentSet.add(nb.name)
        # force everything asked for in xsGen
        nucsToForceInXsGen = ordered_set.OrderedSet(sorted(actives.union(inerts)))
        # add all detailed isotopes in ENDF if requested
        isotopicOptions.autoUpdateNuclideFlags(cs, nuclideFlags, inerts)
        self.nuclideFlags = nuclideFlags
        if self.elementsToExpand:
            runLog.info(
                "Will expand {} elementals to have natural isotopics".format(
                    ", ".join(element.symbol for element in self.elementsToExpand)
                )
            )
        # sorted OrderedSets give deterministic iteration order for downstream lookups
        self.activeNuclides = ordered_set.OrderedSet(sorted(actives))
        self.inertNuclides = ordered_set.OrderedSet(sorted(inerts))
        self.allNuclidesInProblem = ordered_set.OrderedSet(sorted(actives.union(inerts)))
        self.nucsToForceInXsGen = ordered_set.OrderedSet(sorted(nucsToForceInXsGen))
        # Inform user which nuclides are truncating the burn chain.
        # NOTE(review): undefBurnChainActiveNuclides holds only the last loop
        # iteration's value here — confirm fileAsActiveOrInert accumulates in place.
        if undefBurnChainActiveNuclides and nuclideBases.burnChainImposed:
            runLog.info(
                tabulate.tabulate(
                    [
                        [
                            "Nuclides truncating the burn-chain:",
                            utils.createFormattedStrWithDelimiter(list(undefBurnChainActiveNuclides)),
                        ]
                    ],
                    tableFmt="plain",
                ),
                single=True,
            )
    def _checkAssemblyAreaConsistency(self, cs):
        """Raise InputError if assemblies (or blocks within one) have inconsistent areas."""
        references = None
        for a in self.assemblies.values():
            # first assembly encountered becomes the area reference
            if references is None:
                references = (a, a.getArea())
                continue
            assemblyArea = a.getArea()
            if isinstance(a, assemblies.RZAssembly):
                # R-Z assemblies by definition have different areas, so skip the check
                continue
            if abs(references[1] - assemblyArea) > 1e-9:
                runLog.error("REFERENCE COMPARISON ASSEMBLY:")
                references[0][0].printContents()
                runLog.error("CURRENT COMPARISON ASSEMBLY:")
                a[0].printContents()
                raise InputError(
                    "Assembly {} has a different area {} than assembly {} {}. Check inputs for accuracy".format(
                        a, assemblyArea, references[0], references[1]
                    )
                )
            # within one assembly, all blocks must match the first block's area
            blockArea = a[0].getArea()
            for b in a[1:]:
                if abs(b.getArea() - blockArea) / blockArea > cs[CONF_ACCEPTABLE_BLOCK_AREA_ERROR]:
                    runLog.error("REFERENCE COMPARISON BLOCK:")
                    a[0].printContents(includeNuclides=False)
                    runLog.error("CURRENT COMPARISON BLOCK:")
                    b.printContents(includeNuclides=False)
                    for c in b:
                        runLog.error(
                            "{0} area {1} effective area {2}".format(c, c.getArea(), c.getVolume() / b.getHeight())
                        )
                    raise InputError(
                        "Block {} has a different area {} than block {} {}. Check inputs for accuracy".format(
                            b, b.getArea(), a[0], blockArea
                        )
                    )
    @classmethod
    def migrate(cls, inp: typing.TextIO) -> typing.TextIO:
        """Given a stream representation of a blueprints file, migrate it.

        Parameters
        ----------
        inp : typing.TextIO
            Input stream to migrate.
        """
        # apply each registered blueprints migration in order, chaining streams
        for migI in migration.ACTIVE_MIGRATIONS:
            if issubclass(migI, migration.base.BlueprintsMigration):
                mig = migI(stream=inp)
                inp = mig.apply()
        return inp
    @classmethod
    def load(cls, stream, roundTrip=False):
        """This method is a wrapper around the `yamlize.Object.load()` method."""
        # With the release of ruamel.yaml 0.19.1, we began getting the following error:
        # AttributeError: 'RoundTripLoader' object has no attribute 'max_depth'
        # Setting that attribute to `None` solved the issue. However, it would be prudent to rework blueprints loading
        # to side step the issue entirely. This occurs because of the way `yamlize` works when it calls
        # `get_single_node`.
        RoundTripLoader.max_depth = None
        return super().load(stream, Loader=RoundTripLoader)
    def addDefaultSFP(self):
        """Create a default SFP if it's not in the blueprints."""
        if self.systemDesigns is not None:
            if not any(structure.typ == "sfp" for structure in self.systemDesigns):
                sfp = SystemBlueprint("Spent Fuel Pool", "sfp", Triplet())
                sfp.typ = "sfp"
                self.systemDesigns["Spent Fuel Pool"] = sfp
        else:
            runLog.warning(f"Can't add default SFP to {self}, there are no systemDesigns!")
def migrate(bp: Blueprints, cs):
    """
    Apply migrations to the input structure.

    This is a good place to perform migrations that address changes to the system design
    description (settings, blueprints). We have access to both here, so we can even move
    stuff between files. Ensures system/grid design containers exist, then auto-creates
    the default ``core`` system — refusing if a ``core`` entry already exists.
    """
    from armi.reactor.blueprints import gridBlueprint

    if bp.systemDesigns is None:
        bp.systemDesigns = Systems()
    if bp.gridDesigns is None:
        bp.gridDesigns = gridBlueprint.Grids()

    # refuse to create a duplicate "core" entry in either container
    for designCollection in (bp.gridDesigns, bp.systemDesigns):
        if any(rd.name == "core" for rd in designCollection):
            raise ValueError("Cannot auto-create a 2nd `core` grid. Adjust input.")

    bp.systemDesigns["core"] = SystemBlueprint("core", "core", Triplet())
================================================
FILE: armi/reactor/blueprints/assemblyBlueprint.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module defines the blueprints input object for assemblies.
In addition to defining the input format, the ``AssemblyBlueprint`` class is responsible for
constructing ``Assembly`` objects. An attempt has been made to decouple ``Assembly`` construction
from the rest of ARMI as much as possible. For example, an assembly does not require a reactor to be
constructed, or a geometry file (but uses contained Block geometry type as a surrogate).
"""
import yamlize
from armi import getPluginManagerOrFail, runLog
from armi.reactor import assemblies, grids, parameters
from armi.reactor.blueprints import blockBlueprint
from armi.reactor.flags import Flags
from armi.settings.fwSettings.globalSettings import CONF_INPUT_HEIGHTS_HOT
def _configureAssemblyTypes():
    """Collect the block-type -> assembly-type mapping contributed by all plugins."""
    pm = getPluginManagerOrFail()
    # later plugin entries overwrite earlier ones, matching dict-update semantics
    return {
        blockType: assemType
        for pluginAssemTypes in pm.hook.defineAssemblyTypes()
        for blockType, assemType in pluginAssemTypes
    }
class Modifications(yamlize.Map):
    """
    The names of material modifications and lists of the modification values for each block in the
    assembly.
    """
    # keys: material-modification names; values: one entry per axial block
    key_type = yamlize.Typed(str)
    value_type = yamlize.Sequence
class ByComponentModifications(yamlize.Map):
    """The name of a component within the block and an associated Modifications object."""
    # keys: component names within the block; values: that component's Modifications
    key_type = yamlize.Typed(str)
    value_type = Modifications
class MaterialModifications(yamlize.Map):
    """
    A yamlize map for reading and holding material modifications.

    A user may specify material modifications directly as keys/values on this class, in which case
    these material modifications will be blanket applied to the entire block.

    If the user wishes to specify material modifications specific to a component within the block,
    they should use the `by component` attribute, specifying the keys/values underneath the name of
    a specific component in the block.

    .. impl:: User-impact on material definitions.
        :id: I_ARMI_MAT_USER_INPUT0
        :implements: R_ARMI_MAT_USER_INPUT

        Defines a yaml map attribute for the assembly portion of the blueprints (see
        :py:class:`~armi.blueprints.assemblyBlueprint.AssemblyBlueprint`) that allows users to
        specify material attributes as lists corresponding to each axial block in the assembly. Two
        types of specifications can be made:

        1. Key-value pairs can be specified directly, where the key is the name of the
           modification and the value is the list of block values.

        2. The "by component" attribute can be used, in which case the user can specify material
           attributes that are specific to individual components in each block. This is enabled
           through the
           :py:class:`~armi.reactor.blueprints.assemblyBlueprint.ByComponentModifications` class,
           which basically just allows for one additional layer of attributes corresponding to the
           component names.

        These material attributes can be used during the resolution of material classes during core
        instantiation (see
        :py:meth:`~armi.reactor.blueprints.blockBlueprint.BlockBlueprint.construct` and
        :py:meth:`~armi.reactor.blueprints.componentBlueprint.ComponentBlueprint.construct`).
    """
    # keys: modification names applied block-wide; values: one entry per axial block
    key_type = yamlize.Typed(str)
    value_type = yamlize.Sequence
    # NOTE(review): a single ByComponentModifications instance is shared as the
    # default across all MaterialModifications objects — verify yamlize copies
    # defaults rather than aliasing this mutable instance.
    byComponent = yamlize.Attribute(
        key="by component",
        type=ByComponentModifications,
        default=ByComponentModifications(),
    )
class AssemblyBlueprint(yamlize.Object):
    """
    A data container for holding information needed to construct an ARMI assembly.

    This class utilizes ``yamlize`` to enable serialization to and from the blueprints YAML file.

    .. impl:: Create assembly from blueprint file.
        :id: I_ARMI_BP_ASSEM
        :implements: R_ARMI_BP_ASSEM

        Defines a yaml construct that allows the user to specify attributes of an
        assembly from within their blueprints file, including a name, flags, specifier
        for use in defining a core map, a list of blocks, a list of block heights,
        a list of axial mesh points in each block, a list of cross section identifiers
        for each block, and material options (see :need:`I_ARMI_MAT_USER_INPUT0`).

        Relies on the underlying infrastructure from the ``yamlize`` package for
        reading from text files, serialization, and internal storage of the data.

        Is implemented as part of a blueprints file by being imported and used
        as an attribute within the larger :py:class:`~armi.reactor.blueprints.Blueprints`
        class.

        Includes a ``construct`` method, which instantiates an instance of
        :py:class:`~armi.reactor.assemblies.Assembly` with the characteristics
        as specified in the blueprints.
    """

    name = yamlize.Attribute(type=str)
    # Optional space-delimited Flags string; when omitted, flags derive from the name.
    flags = yamlize.Attribute(type=str, default=None)
    # Short identifier used to place this assembly design in the core map.
    specifier = yamlize.Attribute(type=str)
    blocks = yamlize.Attribute(type=blockBlueprint.BlockList)
    # Per-block axial heights; must be the same length as ``blocks`` (checked in
    # _checkParamConsistency).
    height = yamlize.Attribute(type=yamlize.FloatList)
    axialMeshPoints = yamlize.Attribute(key="axial mesh points", type=yamlize.IntList)
    radialMeshPoints = yamlize.Attribute(key="radial mesh points", type=int, default=None)
    azimuthalMeshPoints = yamlize.Attribute(key="azimuthal mesh points", type=int, default=None)
    # NOTE(review): a single MaterialModifications instance is used as the default for
    # every AssemblyBlueprint -- confirm yamlize copies (rather than shares) defaults.
    materialModifications = yamlize.Attribute(
        key="material modifications",
        type=MaterialModifications,
        default=MaterialModifications(),
    )
    # Per-block cross-section type identifiers; same length as ``blocks``.
    xsTypes = yamlize.Attribute(key="xs types", type=yamlize.StrList)

    # note: yamlizable does not call an __init__ method, instead it uses __new__ and setattr
    _assemTypes = _configureAssemblyTypes()

    @classmethod
    def getAssemClass(cls, blocks):
        """
        Get the ARMI ``Assembly`` class for the specified blocks.

        Parameters
        ----------
        blocks : list of Blocks
            Blocks for which to determine appropriate containing Assembly type

        Raises
        ------
        ValueError
            If none of the registered block types appear among the given blocks' classes.
        """
        blockClasses = {b.__class__ for b in blocks}
        # First registered block type that matches wins; registration order matters.
        for bType, aType in cls._assemTypes.items():
            if bType in blockClasses:
                return aType
        # NOTE(review): ``cls.name`` here resolves to the class-level yamlize Attribute,
        # not a specific assembly's name string -- confirm the intended message content.
        raise ValueError('Unsupported block geometries in {}: "{}"'.format(cls.name, blocks))

    def construct(self, cs, blueprint):
        """
        Construct an instance of this specific assembly blueprint.

        Parameters
        ----------
        cs : Settings
            Settings object which containing relevant modeling options.
        blueprint : Blueprint
            Root blueprint object containing relevant modeling options.
        """
        runLog.info("Constructing assembly `{}`".format(self.name))
        # Fail fast if per-block lists (heights, xs types, mat mods) disagree in length.
        self._checkParamConsistency()
        a = self._constructAssembly(cs, blueprint)
        # All blocks now have heights, so the axial z-coordinates can be computed.
        a.calculateZCoords()
        return a

    def _constructAssembly(self, cs, blueprint):
        """Construct the current assembly."""
        blocks = []
        for axialIndex, bDesign in enumerate(self.blocks):
            b = self._createBlock(cs, blueprint, bDesign, axialIndex)
            blocks.append(b)

        # The assembly class depends on the block classes, so blocks come first.
        assemblyClass = self.getAssemClass(blocks)
        a = assemblyClass(self.name)

        flags = None
        if self.flags is not None:
            flags = Flags.fromString(self.flags)
            a.p.flags = flags

        # set a basic grid with the right number of blocks with bounds to be adjusted.
        a.spatialGrid = grids.AxialGrid.fromNCells(len(blocks))
        a.spatialGrid.armiObject = a

        # init submeshes; default to a single radial/azimuthal mesh point when unspecified
        radMeshPoints = self.radialMeshPoints or 1
        a.p.RadMesh = radMeshPoints
        aziMeshPoints = self.azimuthalMeshPoints or 1
        a.p.AziMesh = aziMeshPoints

        # Loop a second time because we needed all the blocks before choosing the assembly class.
        for axialIndex, b in enumerate(blocks):
            b.name = b.makeName(a.p.assemNum, axialIndex)
            a.add(b)

        # Assign values for the parameters if they are defined on the blueprints
        for paramDef in a.p.paramDefs.inCategory(parameters.Category.assignInBlueprints):
            val = getattr(self, paramDef.name)
            if val is not None:
                a.p[paramDef.name] = val

        return a

    @staticmethod
    def _shouldMaterialModiferBeApplied(value) -> bool:
        """Determine if a material modifier entry is applicable.

        Two exceptions:

        1. Modifiers that are empty strings are not applied.
        2. Modifiers that are ``None`` are not applied

        Parameters
        ----------
        value : object
            Entry in a material modifications array

        Returns
        -------
        bool: Result of the check
        """
        # NOTE(review): the method name contains a typo ("Modifer"); kept for compatibility.
        return bool(value != "" and value is not None)

    def _createBlock(self, cs, blueprint, bDesign, axialIndex):
        """Create a block based on the block design and the axial index."""
        meshPoints = self.axialMeshPoints[axialIndex]
        height = self.height[axialIndex]
        xsType = self.xsTypes[axialIndex]

        # Build the two-level material-modification dict for this axial position:
        # "byBlock" holds block-wide mods; every other key is a component name with
        # component-specific mods. Empty-string/None entries are filtered out.
        materialInput = {}
        for key, mod in {
            "byBlock": {**self.materialModifications},
            **self.materialModifications.byComponent,
        }.items():
            materialInput[key] = {
                modName: modList[axialIndex]
                for modName, modList in mod.items()
                if self._shouldMaterialModiferBeApplied(modList[axialIndex])
            }

        b = bDesign.construct(cs, blueprint, axialIndex, meshPoints, height, xsType, materialInput)
        b.completeInitialLoading()

        # set b10 volume cc since its a cold dim param
        b.setB10VolParam(cs[CONF_INPUT_HEIGHTS_HOT])
        return b

    def _checkParamConsistency(self) -> None:
        """Check that the number of block params specified is equal to the number of blocks specified."""
        # general things to check
        paramsToCheck = {
            "mesh points": self.axialMeshPoints,
            "heights": self.height,
            "xs types": self.xsTypes,
        }

        # check by-block mat mods
        for modName, modList in self.materialModifications.items():
            paramName = f"mat mod for {modName}"
            paramsToCheck[paramName] = modList

        # check by-component mat mods
        for comp in self.materialModifications.byComponent.values():
            for modName, modList in comp.items():
                paramName = f"material modifications for {modName}"
                paramsToCheck[paramName] = modList

        # perform the check
        for paramName, blockVals in paramsToCheck.items():
            if len(self.blocks) != len(blockVals):
                msg = (
                    f"Assembly {self.name} had {len(self.blocks)} block(s), but {len(blockVals)} "
                    f"'{paramName}'. These numbers should be equal. Check input for errors."
                )
                runLog.error(msg)
                raise ValueError(msg)
# Expose every Assembly parameter flagged for direct assignment in blueprints as an
# optional YAML attribute on AssemblyBlueprint, so users can set them per-assembly.
for paramDef in parameters.forType(assemblies.Assembly).inCategory(parameters.Category.assignInBlueprints):
    setattr(AssemblyBlueprint, paramDef.name, yamlize.Attribute(name=paramDef.name, default=None))
class AssemblyKeyedList(yamlize.KeyedList):
    """
    Effectively an OrderedDict of assembly items, keyed on the assembly name.

    This uses yamlize KeyedList for YAML serialization.
    """

    # yamlize mapping configuration: entries are AssemblyBlueprints keyed by name.
    item_type = AssemblyBlueprint
    key_attr = AssemblyBlueprint.name
    heights = yamlize.Attribute(type=yamlize.FloatList, default=None)
    axialMeshPoints = yamlize.Attribute(key="axial mesh points", type=yamlize.IntList, default=None)

    # NOTE: yamlize does not call an __init__ method, instead it uses __new__ and setattr

    @property
    def bySpecifier(self):
        """Map each design's two-character specifier to the design; used by the reactor in ``_loadComposites``."""
        specifierMap = {}
        for design in self:
            specifierMap[design.specifier] = design
        return specifierMap
================================================
FILE: armi/reactor/blueprints/blockBlueprint.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module defines the ARMI input for a block definition, and code for constructing an ARMI ``Block``."""
import collections
from inspect import signature
from typing import Iterable, Iterator, Set
import yamlize
from armi import getPluginManagerOrFail, runLog
from armi.materials.material import Material
from armi.reactor import blocks, parameters
from armi.reactor.blueprints import componentBlueprint
from armi.reactor.components.component import Component
from armi.reactor.composites import Composite
from armi.reactor.converters import blockConverters
from armi.reactor.flags import Flags
from armi.settings.fwSettings.globalSettings import CONF_INPUT_HEIGHTS_HOT
def _configureGeomOptions():
    """Collect the component-type -> block-type mapping contributed by all registered plugins.

    Later plugin entries override earlier ones for the same component type.
    """
    pm = getPluginManagerOrFail()
    return {
        compType: blockType
        for pluginBlockTypes in pm.hook.defineBlockTypes()
        for compType, blockType in pluginBlockTypes
    }
class BlockBlueprint(yamlize.KeyedList):
    """Input definition for Block.

    .. impl:: Create a Block from blueprint file.
        :id: I_ARMI_BP_BLOCK
        :implements: R_ARMI_BP_BLOCK

        Defines a yaml construct that allows the user to specify attributes of a block from within
        their blueprints file, including a name, flags, a radial grid to specify locations of pins,
        and the name of a component which drives the axial expansion of the block (see
        :py:mod:`~armi.reactor.converters.axialExpansionChanger`).

        In addition, the user may specify key-value pairs to specify the components contained within
        the block, where the keys are component names and the values are component blueprints (see
        :py:class:`~armi.reactor.blueprints.ComponentBlueprint.ComponentBlueprint`).

        Relies on the underlying infrastructure from the ``yamlize`` package for reading from text
        files, serialization, and internal storage of the data.

        Is implemented into a blueprints file by being imported and used as an attribute within the
        larger :py:class:`~armi.reactor.blueprints.Blueprints` class.

        Includes a ``construct`` method, which instantiates an instance of
        :py:class:`~armi.reactor.blocks.Block` with the characteristics as specified in the
        blueprints.
    """

    # yamlize mapping configuration: entries are ComponentBlueprints keyed by name.
    item_type = componentBlueprint.ComponentBlueprint
    key_attr = componentBlueprint.ComponentBlueprint.name
    name = yamlize.Attribute(key="name", type=str)
    # Optional name of a lattice defined in the blueprints `lattices` section.
    gridName = yamlize.Attribute(key="grid name", type=str, default=None)
    flags = yamlize.Attribute(type=str, default=None)
    # Name of the component that drives this block's axial expansion.
    axialExpTargetComponent = yamlize.Attribute(key="axial expansion target component", type=str, default=None)
    _geomOptions = _configureGeomOptions()

    def _getBlockClass(self, outerComponent):
        """
        Get the ARMI ``Block`` class for the specified outerComponent.

        Parameters
        ----------
        outerComponent : Component
            Largest component in block.

        Raises
        ------
        ValueError
            If no registered block geometry matches the outer component's type.
        """
        for compCls, blockCls in self._geomOptions.items():
            if isinstance(outerComponent, compCls):
                return blockCls

        # NOTE(review): the concatenated message yields a double space ("is  not").
        raise ValueError(
            "Block input for {} has outer component {} which is "
            " not a supported Block geometry subclass. Update geometry."
            "".format(self.name, outerComponent)
        )

    def construct(self, cs, blueprint, axialIndex, axialMeshPoints, height, xsType, materialInput):
        """
        Construct an ARMI ``Block`` to be placed in an ``Assembly``.

        Parameters
        ----------
        cs : Settings
            Settings object for the appropriate simulation.
        blueprint : Blueprints
            Blueprints object containing various detailed information, such as nuclides to model
        axialIndex : int
            The Axial index this block exists within the parent assembly
        axialMeshPoints : int
            number of mesh points for use in the neutronics kernel
        height : float
            initial height of the block
        xsType : str
            String representing the xsType of this block.
        materialInput : dict
            Double-layered dict.
            Top layer groups the by-block material modifications under the `byBlock` key
            and the by-component material modifications under the component's name.
            The inner dict under each key contains material modification names and values.

        Returns
        -------
        Block
            The fully constructed (and possibly component-merged) block.
        """
        runLog.debug("Constructing block {}".format(self.name))
        components = collections.OrderedDict()
        # build grid before components so you can load
        # the components into the grid.
        gridDesign = self._getGridDesign(blueprint)
        if gridDesign:
            spatialGrid = gridDesign.construct()
        else:
            spatialGrid = None

        self._checkByComponentMaterialInput(materialInput)

        # Track every lattice ID claimed by a component so the grid can be validated below.
        allLatticeIds = set()
        for componentDesign in self:
            filteredMaterialInput, byComponentMatModKeys = self._filterMaterialInput(materialInput, componentDesign)
            c = componentDesign.construct(
                blueprint,
                filteredMaterialInput,
                cs[CONF_INPUT_HEIGHTS_HOT],
            )
            components[c.name] = c

            # check that the mat mods for this component are valid options
            # this will only examine by-component mods, block mods are done later
            if isinstance(c, Component):
                # there are other things like composite groups that don't get
                # material modifications -- skip those
                validMatModOptions = self._getMaterialModsFromBlockChildren(c)
                for key in byComponentMatModKeys:
                    if key not in validMatModOptions:
                        raise ValueError(f"{c} in block {self.name} has invalid material modification: {key}")

            if spatialGrid:
                componentLocators = gridDesign.getMultiLocator(spatialGrid, componentDesign.latticeIDs)
                if componentLocators:
                    # this component is defined in the block grid
                    # We can infer the multiplicity from the grid.
                    # Otherwise it's a component that is in a block
                    # with grids but that's not in the grid itself.
                    c.spatialLocator = componentLocators
                    mult = c.getDimension("mult")
                    if mult and mult != 1.0 and mult != len(c.spatialLocator):
                        raise ValueError(
                            f"For {c} in {self.name} there is a conflicting ``mult`` input ({mult}) "
                            f"and number of lattice positions ({len(c.spatialLocator)}). "
                            "Recommend leaving off ``mult`` input when using grids."
                        )
                    elif not mult or mult == 1.0:
                        # learn mult from grid definition
                        c.setDimension("mult", len(c.spatialLocator))

                idsInGrid = list(gridDesign.gridContents.values())
                if componentDesign.latticeIDs:
                    for latticeID in componentDesign.latticeIDs:
                        allLatticeIds.add(str(latticeID))
                        # the user has given this component latticeIDs. check that
                        # each of the ids appears in the grid, otherwise
                        # their blueprints are probably wrong
                        if len([i for i in idsInGrid if i == str(latticeID)]) == 0:
                            raise ValueError(
                                f"latticeID {latticeID} in block blueprint '{self.name}' is expected "
                                "to be present in the associated block grid. "
                                "Check that the component's latticeIDs align with the block's grid."
                            )

        # for every id in grid, confirm that at least one component had it
        if gridDesign:
            idsInGrid = list(gridDesign.gridContents.values())
            for idInGrid in idsInGrid:
                if str(idInGrid) not in allLatticeIds:
                    raise ValueError(
                        f"ID {idInGrid} in grid {gridDesign.name} is not in any components of block {self.name}. "
                        "All IDs in the grid must appear in at least one component."
                    )

        # check that the block level mat mods use valid options in the same way
        # as we did for the by-component mods above
        validMatModOptions = self._getBlockwiseMaterialModifierOptions(components.values())
        if "byBlock" in materialInput:
            for key in materialInput["byBlock"]:
                if key not in validMatModOptions:
                    raise ValueError(f"Block {self.name} has invalid material modification key: {key}")

        # Resolve linked dims after all components in the block are created
        for c in components.values():
            c.resolveLinkedDims(components)

        # NOTE(review): components sort presumably by size, so [-1] is the outermost one
        # -- confirm against Component ordering semantics.
        boundingComp = sorted(components.values())[-1]
        # give a temporary name (will be updated by b.makeName as real blocks populate systems)
        b = self._getBlockClass(boundingComp)(name=f"block-bol-{axialIndex:03d}")

        # Assign any block parameters that are settable directly from blueprints.
        for paramDef in b.p.paramDefs.inCategory(parameters.Category.assignInBlueprints):
            val = getattr(self, paramDef.name)
            if val is not None:
                b.p[paramDef.name] = val

        flags = None
        if self.flags is not None:
            flags = Flags.fromString(self.flags)

        # setType derives flags from the name when ``flags`` is None.
        b.setType(self.name, flags)

        if self.axialExpTargetComponent is not None:
            try:
                b.setAxialExpTargetComp(components[self.axialExpTargetComponent])
            except KeyError as noMatchingComponent:
                raise RuntimeError(
                    f"Block {b} --> axial expansion target component {self.axialExpTargetComponent} "
                    "specified in the blueprints does not match any component names. "
                    "Revise axial expansion target component in blueprints "
                    "to match the name of a component and retry."
                ) from noMatchingComponent

        for c in components.values():
            b.add(c)

        b.p.nPins = b.getNumPins()
        b.p.axMesh = _setBlueprintNumberOfAxialMeshes(axialMeshPoints, cs["axialMeshRefinementFactor"])
        b.p.height = height
        b.p.heightBOL = height  # for fuel performance
        b.p.xsType = xsType
        b.setBuLimitInfo()

        # Merging may replace the block instance entirely; use the returned one.
        b = self._mergeComponents(b)
        b.verifyBlockDims()
        b.spatialGrid = spatialGrid

        return b

    def _getBlockwiseMaterialModifierOptions(self, children: Iterable[Composite]) -> Set[str]:
        """Collect all the material modifiers that exist on a block."""
        validMatModOptions = set()
        for c in children:
            perChildModifiers = self._getMaterialModsFromBlockChildren(c)
            validMatModOptions.update(perChildModifiers)
        return validMatModOptions

    def _getMaterialModsFromBlockChildren(self, c: Composite) -> Set[str]:
        """Collect all the material modifiers from a child of a block.

        Valid modifier names are taken from the keyword parameters of
        ``applyInputParams`` across the material's full class hierarchy.
        """
        perChildModifiers = set()
        for material in self._getMaterialsInComposite(c):
            for materialParentClass in material.__class__.__mro__:
                # we must loop over parents as well, since applyInputParams
                # could call to Parent.applyInputParams()
                if issubclass(materialParentClass, Material):
                    perChildModifiers.update(signature(materialParentClass.applyInputParams).parameters.keys())

        # self is a parameter to methods, so it gets picked up here
        # but that's obviously not a real material modifier
        perChildModifiers.discard("self")
        return perChildModifiers

    def _getMaterialsInComposite(self, child: Composite) -> Iterator[Material]:
        """Collect all the materials in a composite."""
        # Leaf node, no need to traverse further down
        if isinstance(child, Component):
            yield child.material
            return
        # Don't apply modifications to other things that could reside
        # in a block e.g., component groups

    def _checkByComponentMaterialInput(self, materialInput):
        # Every by-component key (anything other than "byBlock") must name a component
        # actually present in this block -- unless its modification dict is empty.
        for component in materialInput:
            if component != "byBlock":
                if component not in [componentDesign.name for componentDesign in self]:
                    if materialInput[component]:  # ensure it is not empty
                        raise ValueError(
                            f"The component '{component}' used to specify a by-component"
                            f" material modification is not in block '{self.name}'."
                        )

    @staticmethod
    def _filterMaterialInput(materialInput, componentDesign):
        """
        Get the by-block material modifications and those specifically for this
        component.

        If a material modification is specified both by-block and by-component
        for a given component, the by-component value will be used.

        Returns
        -------
        tuple of (dict, set)
            The merged modifications applicable to this component, and the set of
            modification names that came from by-component input.
        """
        filteredMaterialInput = {}
        byComponentMatModKeys = set()

        # first add the by-block modifications without question
        if "byBlock" in materialInput:
            for modName, modVal in materialInput["byBlock"].items():
                filteredMaterialInput[modName] = modVal

        # then get the by-component modifications as appropriate
        for component, mod in materialInput.items():
            if component == "byBlock":
                pass  # we already added these
            else:
                # these are by-component mods, first test if the component matches
                # before adding. if component matches, add the modifications,
                # overwriting any by-block modifications of the same type
                if component == componentDesign.name:
                    for modName, modVal in mod.items():
                        byComponentMatModKeys.add(modName)
                        filteredMaterialInput[modName] = modVal

        return filteredMaterialInput, byComponentMatModKeys

    def _getGridDesign(self, blueprint):
        """
        Get the appropriate grid design.

        This happens when a lattice input is provided on the block. Otherwise all
        components are ambiguously defined in the block.
        """
        if self.gridName:
            if self.gridName not in blueprint.gridDesigns:
                raise KeyError(
                    f"Lattice {self.gridName} defined on {self} is not defined in the blueprints `lattices` section."
                )
            return blueprint.gridDesigns[self.gridName]
        return None

    @staticmethod
    def _mergeComponents(b):
        """Merge any components whose ``mergeWith`` parameter names a solvent component.

        Returns the (possibly replaced) block produced by the converter.
        """
        solventNamesToMergeInto = set(c.p.mergeWith for c in b.iterComponents() if c.p.mergeWith)

        if solventNamesToMergeInto:
            runLog.warning(
                "Component(s) {} in block {} has merged components inside it. The merge was valid at hot "
                "temperature, but the merged component only has the basic thermal expansion factors "
                "of the component(s) merged into. Expansion properties or dimensions of non hot "
                "temperature may not be representative of how the original components would have acted had "
                "they not been merged. It is recommended that merging happen right before "
                "a physics calculation using a block converter to avoid this."
                "".format(solventNamesToMergeInto, b.name),
                single=True,
            )

        for solventName in solventNamesToMergeInto:
            soluteNames = []
            for c in b:
                if c.p.mergeWith == solventName:
                    soluteNames.append(c.name)
            converter = blockConverters.MultipleComponentMerger(b, soluteNames, solventName)
            b = converter.convert()

        return b
# Expose every Block parameter flagged for direct assignment in blueprints as an
# optional YAML attribute on BlockBlueprint, so users can set them per-block.
for paramDef in parameters.forType(blocks.Block).inCategory(parameters.Category.assignInBlueprints):
    setattr(BlockBlueprint, paramDef.name, yamlize.Attribute(name=paramDef.name, default=None))
def _setBlueprintNumberOfAxialMeshes(meshPoints, factor):
"""Set the blueprint number of axial mesh based on the axial mesh refinement factor."""
if factor <= 0:
raise ValueError(f"A positive axial mesh refinement factor must be provided. A value of {factor} is invalid.")
if factor != 1:
runLog.important(
"An axial mesh refinement factor of {} is applied to blueprint based on setting specification.".format(
factor
),
single=True,
)
return int(meshPoints) * factor
class BlockKeyedList(yamlize.KeyedList):
    """
    An OrderedDict of BlockBlueprints keyed on the name. Utilizes yamlize for serialization to and from YAML.

    This is used within the ``blocks:`` main entry of the blueprints.
    """

    # yamlize mapping configuration: entries are BlockBlueprints keyed by their name.
    item_type = BlockBlueprint
    key_attr = BlockBlueprint.name
class BlockList(yamlize.Sequence):
    """
    A list of BlockBlueprints keyed on the name. Utilizes yamlize for serialization to and from YAML.

    This is used to define the ``blocks:`` attribute of the assembly definitions.
    """

    # yamlize sequence configuration: each entry is a BlockBlueprint.
    item_type = BlockBlueprint
================================================
FILE: armi/reactor/blueprints/componentBlueprint.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module defines the ARMI input for a component definition, and code for constructing an ARMI ``Component``.
Special logic is required for handling component links.
"""
import yamlize
from armi import materials, runLog
from armi.nucDirectory import nuclideBases
from armi.reactor import components, composites
from armi.reactor.flags import Flags
from armi.utils import densityTools
# Sentinel value for ComponentBlueprint.shape marking a "component group" (a composite
# of previously defined component designs) rather than a physical geometric shape.
COMPONENT_GROUP_SHAPE = "group"
class ComponentDimension(yamlize.Object):
    """
    Dummy object for ensuring well-formed component links are specified within the YAML input.

    This can be either a number (float or int), or a conformation string (``name.dimension``).
    """

    def __init__(self, value):
        # note: yamlizable does not call an __init__ method, instead it uses __new__ and setattr
        self.value = value
        if isinstance(value, str):
            if not components.COMPONENT_LINK_REGEX.search(value):
                raise ValueError(f"Bad component link `{value}`, must be in form `name.dimension`")

    def __repr__(self):
        # Fix: this previously returned an empty f-string, which made dimension links
        # invisible in logs and debugger output.
        return f"<ComponentDimension value: {self.value}>"

    @classmethod
    def from_yaml(cls, loader, node, _rtd=None):
        """
        Override the ``Yamlizable.from_yaml`` to inject custom interpretation of component dimension.

        This allows us to create a new object with either a string or numeric value.
        """
        try:
            val = loader.construct_object(node)
            self = ComponentDimension(val)
            loader.constructed_objects[node] = self
            return self
        except ValueError as ve:
            raise yamlize.YamlizingError(str(ve), node)

    @classmethod
    def to_yaml(cls, dumper, self, _rtd=None):
        """
        Override the ``Yamlizable.to_yaml`` to remove the object-like behavior, otherwise we'd end up with a
        ``{value: ...}`` dictionary.

        This allows someone to programmatically edit the component dimensions without using the ``ComponentDimension``
        class.
        """
        if not isinstance(self, cls):
            self = cls(self)
        node = dumper.represent_data(self.value)
        dumper.represented_objects[self] = node
        return node

    # Arithmetic and comparison operators delegate to the wrapped value so a
    # ComponentDimension behaves like a plain number in expressions.
    def __mul__(self, other):
        return self.value * other

    def __add__(self, other):
        return self.value + other

    def __div__(self, other):
        # Python 2 legacy name; kept for any code that calls it directly.
        return self.value / other

    def __truediv__(self, other):
        # Fix: Python 3 dispatches the ``/`` operator to __truediv__, not __div__;
        # without this, dividing a ComponentDimension raised TypeError.
        return self.value / other

    def __sub__(self, other):
        return self.value - other

    def __eq__(self, other):
        return self.value == other

    def __ne__(self, other):
        return self.value != other

    def __gt__(self, other):
        return self.value > other

    def __ge__(self, other):
        return self.value >= other

    def __lt__(self, other):
        return self.value < other

    def __le__(self, other):
        return self.value <= other

    def __hash__(self):
        # Identity-based hash: instances compare equal to raw numbers via __eq__, but
        # each instance hashes uniquely (needed as a key in dumper.represented_objects).
        return id(self)
class ComponentBlueprint(yamlize.Object):
"""
This class defines the inputs necessary to build ARMI component objects. It uses ``yamlize`` to enable serialization
to and from YAML.
.. impl:: Construct component from blueprint file.
:id: I_ARMI_BP_COMP
:implements: R_ARMI_BP_COMP
Defines a yaml construct that allows the user to specify attributes of a component from within their blueprints
file, including a name, flags, shape, material and/or isotopic vector, input temperature, corresponding
component dimensions, and ID for placement in a block lattice (see
:py:class:`~armi.reactor.blueprints.blockBlueprint.BlockBlueprint`). Component dimensions that can be defined
for a given component are dependent on the component's ``shape`` attribute, and the dimensions defining each
shape can be found in the :py:mod:`~armi.reactor.components` module.
Limited validation on the inputs is performed to ensure that the component shape corresponds to a valid shape
defined by the ARMI application.
Relies on the underlying infrastructure from the ``yamlize`` package for reading from text files, serialization,
and internal storage of the data.
Is implemented as part of a blueprints file by being imported and used as an attribute within the larger
:py:class:`~armi.reactor.blueprints.Blueprints` class. Can also be used within the
:py:class:`~armi.reactor.blueprints.blockBlueprint.BlockBlueprint` class to enable specification of components
directly within the "blocks" portion of the blueprint file.
Includes a ``construct`` method, which instantiates an instance of
:py:class:`~armi.reactor.components.component.Component` with the characteristics specified in the blueprints
(see :need:`I_ARMI_MAT_USER_INPUT1`).
"""
name = yamlize.Attribute(type=str)
flags = yamlize.Attribute(type=str, default=None)
@name.validator
def name(self, name):
"""Validate component names."""
if name == "cladding":
# many users were mixing cladding and clad and it caused issues downstream where physics plugins checked for
# clad.
raise ValueError(f"Cannot set ComponentBlueprint.name to {name}. Prefer 'clad'.")
shape = yamlize.Attribute(type=str)
@shape.validator
def shape(self, shape):
normalizedShape = shape.strip().lower()
if normalizedShape not in components.ComponentType.TYPES and normalizedShape != COMPONENT_GROUP_SHAPE:
raise ValueError(f"Cannot set ComponentBlueprint.shape to unknown shape: {shape}")
material = yamlize.Attribute(type=str, default=None)
Tinput = yamlize.Attribute(type=float, default=None)
Thot = yamlize.Attribute(type=float, default=None)
isotopics = yamlize.Attribute(type=str, default=None)
latticeIDs = yamlize.Attribute(type=list, default=None)
origin = yamlize.Attribute(type=list, default=None)
orientation = yamlize.Attribute(type=str, default=None)
mergeWith = yamlize.Attribute(type=str, default=None)
area = yamlize.Attribute(type=float, default=None)
def construct(self, blueprint, matMods, inputHeightsConsideredHot):
"""Construct a component or group.
.. impl:: User-defined on material alterations are applied here.
:id: I_ARMI_MAT_USER_INPUT1
:implements: R_ARMI_MAT_USER_INPUT
Allows for user input to impact a component's materials by applying the "material modifications" section of
a blueprints file (see :need:`I_ARMI_MAT_USER_INPUT0`) to the material during construction. This takes place
during lower calls to ``_conformKwargs()`` and subsequently ``_constructMaterial()``, which operate using
the component blueprint and associated material modifications from the component's block.
Within ``_constructMaterial()``, the material class is resolved into a material object by calling
:py:func:`~armi.materials.resolveMaterialClassByName`. The ``applyInputParams()`` method of that material
class is then called, passing in the associated material modifications data, which the material class can
then use to modify the isotopics as necessary.
Parameters
----------
blueprint : Blueprints
Blueprints object containing various detailed information, such as nuclides to model
matMods : dict
Material modifications to apply to the component.
inputHeightsConsideredHot : bool
See the case setting of the same name.
"""
runLog.debug(f"Constructing component {self.name}")
kwargs = self._conformKwargs(blueprint, matMods)
shape = self.shape.lower().strip()
if shape == COMPONENT_GROUP_SHAPE:
group = blueprint.componentGroups[self.name]
constructedObject = composites.Composite(self.name)
for groupedComponent in group:
componentDesign = blueprint.componentDesigns[groupedComponent.name]
component = componentDesign.construct(blueprint, {}, inputHeightsConsideredHot)
# override free component multiplicity if it's set based on the group definition
component.setDimension("mult", groupedComponent.mult)
_setComponentFlags(component, self.flags, blueprint)
insertDepletableNuclideKeys(component, blueprint)
constructedObject.add(component)
else:
constructedObject = components.factory(shape, [], kwargs)
_setComponentFlags(constructedObject, self.flags, blueprint)
insertDepletableNuclideKeys(constructedObject, blueprint)
constructedObject.p.theoreticalDensityFrac = constructedObject.material.getTD()
self._setComponentCustomDensity(
constructedObject,
blueprint,
matMods,
inputHeightsConsideredHot,
)
if hasattr(constructedObject, "material") and "Custom" in str(constructedObject.material):
if len(constructedObject.material.massFrac) == 0:
msg = f"Custom material does not have isotopics: {self}"
runLog.error(msg, single=True)
raise IOError(msg)
return constructedObject
def _setComponentCustomDensity(self, comp, blueprint, matMods, inputHeightsConsideredHot):
"""Apply a custom density to a material with custom isotopics but not a 'custom material'."""
if self.isotopics is None:
# No custom isotopics specified
return
densityFromCustomIsotopic = blueprint.customIsotopics[self.isotopics].density
if densityFromCustomIsotopic is None:
# Nothing to do
return
if densityFromCustomIsotopic <= 0:
runLog.error(
"A zero or negative density was specified in a custom isotopics input. This is not permitted, if a 0 "
f"density material is needed, use 'Void'. The component is {comp} and the isotopics entry is "
f"{self.isotopics}."
)
raise ValueError("A zero or negative density was specified in the custom isotopics for a component")
elif len(matMods):
runLog.warning(
f"Custom isotopics and material modifications have both been defined for {self.material} for component"
f"{comp}. Please consider carefully if these are in conflict.",
single=True,
label=f"custom iso + mat mods {self.material} {comp}",
)
mat = materials.resolveMaterialClassByName(self.material)()
if not isinstance(mat, materials.Custom):
# check for some problem cases
overSpecs = [k for k in matMods if k.endswith("_frac")]
if len(overSpecs):
runLog.error(
f"Both {overSpecs} and a custom isotopic with density {blueprint.customIsotopics[self.isotopics]} "
f"have been specified for material {self.material}. This is an overspecification.",
single=True,
)
if not mat.density(Tc=self.Tinput) > 0:
runLog.error(
f"A custom density has been assigned to material '{self.material}', which has no baseline density. "
"Only materials with a starting density may be assigned a density. This comes up e.g. if isotopics "
"are assigned to 'Void'."
)
raise ValueError("Cannot apply custom densities to materials without density.")
# Apply a density scaling to account for the temperature change between Tinput and Thot
if isinstance(mat, materials.Fluid):
densityRatio = densityFromCustomIsotopic / mat.density(Tc=comp.inputTemperatureInC)
else:
# For solids we need to consider if the input heights are hot or cold, in order to get the density
# correct. There may be a better place in the initialization to determine if the block height will be
# interpreted as hot dimensions, which would allow us to not have to pass the case settings this far.
dLL = mat.linearExpansionFactor(Tc=comp.temperatureInC, T0=comp.inputTemperatureInC)
if inputHeightsConsideredHot:
f = 1.0 / (1 + dLL) ** 2
else:
f = 1.0 / (1 + dLL) ** 3
scaledDensity = comp.density() / f
densityRatio = densityFromCustomIsotopic / scaledDensity
comp.changeNDensByFactor(densityRatio)
runLog.important(
f"A custom material density was specified in the custom isotopics for non-custom material {mat}. The "
f"component density has been altered to {comp.density()} at temperature {comp.temperatureInC} C",
single=True,
)
def _conformKwargs(self, blueprint, matMods):
"""This method gets the relevant kwargs to construct the component."""
kwargs = {"mergeWith": self.mergeWith or "", "isotopics": self.isotopics or ""}
for attr in self.attributes: # yamlize magic
val = attr.get_value(self)
if attr.name == "shape" or val == attr.default:
continue
elif attr.name == "material":
# value is a material instance
value = self._constructMaterial(blueprint, matMods)
elif attr.name == "latticeIDs":
# Don't pass latticeIDs on to the component constructor.
# They're applied during block construction.
continue
elif attr.name == "flags":
# Don't pass these to the component constructor. These are used to
# override the flags derived from the type, if present.
continue
else:
value = attr.get_value(self)
# Keep digging until the actual value is found. This is a bit of a hack to get around an issue in
# yamlize/ComponentDimension where Dimensions can end up chained.
while isinstance(value, ComponentDimension):
value = value.value
kwargs[attr.name] = value
return kwargs
def _constructMaterial(self, blueprint, matMods):
    """
    Build the Material instance for this component.

    Applies custom isotopics (if any) and then material modifications, and validates
    that every nuclide in the resulting composition is declared in the problem.

    Parameters
    ----------
    blueprint : Blueprints
        The full blueprints object; provides custom isotopics and the set of all
        nuclides in the problem.
    matMods : dict
        Material modification inputs; mutated here to also carry the custom
        isotopics mass fractions, since some ``applyInputParams`` implementations
        need to see them.

    Returns
    -------
    Material
        The constructed and modified material.

    Raises
    ------
    ValueError
        If applying material modifications fails for a reason other than an
        unsupported keyword, or if the material contains nuclides missing from
        the `nuclide flags` input.
    """
    nucsInProblem = blueprint.allNuclidesInProblem
    # make material with defaults
    mat = materials.resolveMaterialClassByName(self.material)()

    if self.isotopics is not None:
        # Apply custom isotopics before processing input mods so
        # the input mods have the final word
        blueprint.customIsotopics.apply(mat, self.isotopics)

    # add mass fraction custom isotopics info, since some material modifications need to see them e.g. in the base
    # Material.applyInputParams
    matMods.update({"customIsotopics": {k: v.massFracs for k, v in blueprint.customIsotopics.items()}})
    if len(matMods) > 1:
        # don't apply if only customIsotopics is in there
        try:
            # update material with updated input params from blueprints file.
            mat.applyInputParams(**matMods)
        except TypeError as ee:
            errorMessage = ee.args[0]
            if "got an unexpected keyword argument" in errorMessage:
                # This component does not accept material modification inputs of the names passed in
                # Keep going since the modification could work for another component
                pass
            else:
                # chain the original TypeError so the real traceback isn't lost
                raise ValueError(
                    f"Something went wrong in applying the material modifications {matMods} "
                    f"to component {self.name}.\nError message is: \n{errorMessage}."
                ) from ee

    expandElementals(mat, blueprint)

    missing = set(mat.massFrac.keys()).difference(nucsInProblem)
    if missing:
        raise ValueError(
            f"The nuclides {missing} are present in material {mat} by compositions, but are not specified in the "
            "`nuclide flags` section of the input file. They need to be added, or custom isotopics need to be "
            "applied."
        )

    return mat
def expandElementals(mat, blueprint):
    """
    Expand elements to isotopics during material construction.

    Does so as required by modeling options or user input.

    See Also
    --------
    armi.reactor.blueprints.Blueprints._resolveNuclides
        Sets the metadata defining this behavior.
    """
    pairs = []
    for element in blueprint.elementsToExpand:
        if element.symbol not in mat.massFrac:
            # this element is not in the material, so there is nothing to expand
            continue
        flags = blueprint.nuclideFlags.get(element.symbol)
        if flags and flags.expandTo:
            # the user restricted the expansion to an explicit set of isotopes
            targets = [nuclideBases.byName[nn] for nn in flags.expandTo]
        else:
            # None lets the expansion routine pick its default isotopics
            targets = None
        pairs.append((element, targets))
    densityTools.expandElementalMassFracsToNuclides(mat.massFrac, pairs)
def insertDepletableNuclideKeys(c, blueprint):
    """
    Auto update number density keys on all DEPLETABLE components.

    .. impl:: Insert any depletable blueprint flags onto this component.
        :id: I_ARMI_BP_NUC_FLAGS0
        :implements: R_ARMI_BP_NUC_FLAGS

        This is called during the component construction process for each component from within
        :py:meth:`~armi.reactor.blueprints.componentBlueprint.ComponentBlueprint.construct`.

        For a given initialized component, check its flags to determine if it has been marked as
        depletable. If it is, use
        :py:func:`~armi.nucDirectory.nuclideBases.initReachableActiveNuclidesThroughBurnChain` to
        apply the user-specifications in the "nuclide flags" section of the blueprints to the
        Component such that all active isotopes and derivatives of those isotopes in the burn chain
        are initialized to have an entry in the component's ``nuclides`` array.

        Note that certain case settings, including ``fpModel`` and ``fpModelLibrary``, may trigger
        modifications to the active nuclides specified by the user in the "nuclide flags" section of
        the blueprints.

    Notes
    -----
    This should be moved to a neutronics/depletion plugin hook but requires some refactoring in how
    active nuclides and reactors are initialized first.

    See Also
    --------
    armi.physics.neutronics.isotopicDepletion.isotopicDepletionInterface.isDepletable :
        contains design docs describing the ``DEPLETABLE`` flagging situation
    """
    if not c.hasFlags(Flags.DEPLETABLE):
        return
    # depletable components, whether auto-derived or explicitly flagged, need expanded nucs
    c.p.nuclides, c.p.numberDensities = nuclideBases.initReachableActiveNuclidesThroughBurnChain(
        c.p.nuclides,
        c.p.numberDensities,
        blueprint.activeNuclides,
    )
class ComponentKeyedList(yamlize.KeyedList):
    """
    An OrderedDict of ComponentBlueprints keyed on the name.

    This is used within the ``components:`` main entry of the blueprints.

    This is *not* (yet) used when components are defined within a block blueprint. That is handled in the blockBlueprint
    construct method.
    """

    # yamlize KeyedList configuration: values are ComponentBlueprints, keyed by their `name` attribute
    item_type = ComponentBlueprint
    key_attr = ComponentBlueprint.name
class GroupedComponent(yamlize.Object):
    """
    A pointer to a component with a multiplicity to be used in a ComponentGroup.

    Multiplicity can be a fraction (e.g. to set volume fractions)
    """

    # name of the referenced component
    name = yamlize.Attribute(type=str)
    # multiplicity; may be fractional (e.g. a volume fraction)
    mult = yamlize.Attribute(type=float)
class ComponentGroup(yamlize.KeyedList):
    """
    A single component group containing multiple GroupedComponents.

    Example
    -------
    triso:
        kernel:
            mult: 0.7
        buffer:
            mult: 0.3
    """

    # the group's name; used as the key in the enclosing ComponentGroups mapping
    group_name = yamlize.Attribute(type=str)
    # yamlize KeyedList configuration: values are GroupedComponents keyed by component name
    key_attr = GroupedComponent.name
    item_type = GroupedComponent
class ComponentGroups(yamlize.KeyedList):
    """
    A list of component groups.

    This is used in the top-level blueprints file.
    """

    # yamlize KeyedList configuration: values are ComponentGroups keyed by their group name
    key_attr = ComponentGroup.group_name
    item_type = ComponentGroup
# This import-time magic requires all possible components be imported before this module imports. The intent was to make
# registration basically automatic. This has proven to be quite problematic and will be replaced with an explicit
# plugin-level component registration system.
# Expose every known component dimension as an optional ComponentDimension attribute on ComponentBlueprint.
# (Set comprehension instead of set([...]) — the intermediate list was unnecessary.)
for dimName in {kw for cType in components.ComponentType.TYPES.values() for kw in cType.DIMENSION_NAMES}:
    setattr(
        ComponentBlueprint,
        dimName,
        yamlize.Attribute(name=dimName, type=ComponentDimension, default=None),
    )
def _setComponentFlags(component, flags, blueprint):
    """Update component flags based on user input in blueprint."""
    # setType() in the component __init__ already made an initial guess at the flags.
    if flags is not None:
        # The blueprint specifies flags explicitly; these replace the derived ones.
        # WARNING: explicit flags turn off the automatic DEPLETABLE tagging below, so
        # include `depletable` in the list if depletion is expected.
        component.p.flags = Flags.fromString(flags)
        return
    # No explicit flags: add DEPLETABLE if the component contains any active nuclide.
    for nuc in component.getNuclides():
        if nuc in blueprint.activeNuclides:
            component.p.flags |= Flags.DEPLETABLE
            break
================================================
FILE: armi/reactor/blueprints/gridBlueprint.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Input definitions for Grids.
Grids are given names which can be referred to on other input structures (like core maps and pin
maps).
These are in turn interpreted into concrete things at lower levels. For example:
* Core map lattices get turned into :py:mod:`armi.reactor.grids`, which get set to
``core.spatialGrid``.
* Block pin map lattices get applied to the components to provide some subassembly spatial details.
Lattice inputs here are floating in space. Specific dimensions and anchor points are handled by the
lower-level objects definitions. This is intended to maximize lattice reusability.
See Also
--------
armi.utils.asciimaps
Description of the ascii maps and their formats.
Examples
--------
::
grids:
control:
geom: hex
symmetry: full
lattice map: |
- - - - - - - - - 1 1 1 1 1 1 1 1 1 4
- - - - - - - - 1 1 1 1 1 1 1 1 1 1 1
- - - - - - - 1 8 1 1 1 1 1 1 1 1 1 1
- - - - - - 1 1 1 1 1 1 1 1 1 1 1 1 1
- - - - - 1 1 1 1 1 1 1 1 1 1 1 1 1 1
- - - - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
- - - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
- - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
- 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
7 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1
1 1 1 1 1 1 1 1 2 1 1 1 1 1 1 1 1 1
1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
1 1 1 1 1 1 1 1 1 1 1 1 1 1
1 1 1 1 1 1 1 1 1 3 1 1 1
1 1 1 1 1 1 1 1 1 1 1 1
1 6 1 1 1 1 1 1 1 1 1
1 1 1 1 1 1 1 1 1 1
sfp:
geom: cartesian
lattice pitch:
x: 25.0
y: 25.0
lattice map: |
2 2 2 2 2
2 1 1 1 2
2 1 3 1 2
2 3 1 1 2
2 2 2 2 2
core:
geom: hex
symmetry: third periodic
origin:
x: 0.0
y: 10.1
z: 1.1
lattice map: |
- SH SH SH
- SH SH SH SH
SH RR RR RR SH
RR RR RR RR SH
RR RR RR RR RR SH
RR OC OC RR RR SH
OC OC OC RR RR SH
OC OC OC OC RR RR
OC MC OC OC RR SH
MC MC PC OC RR SH
MC MC MC OC OC RR
MC MC MC OC RR SH
PC MC MC OC RR SH
MC MC MC MC OC RR
IC MC MC OC RR SH
IC US MC OC RR
IC IC MC OC RR SH
IC MC MC OC RR
IC IC MC PC RR SH
"""
import copy
import itertools
from io import StringIO
from typing import Tuple
import numpy as np
import yamlize
from ruamel.yaml import scalarstring
from armi import runLog
from armi.reactor import blueprints, geometry, grids
from armi.utils import asciimaps
from armi.utils.customExceptions import InputError
from armi.utils.mathematics import isMonotonic
class Triplet(yamlize.Object):
    """An (x, y, z) triple used for coordinates or a lattice pitch."""

    # yamlize-serializable coordinates; y and z may be omitted in the input
    x = yamlize.Attribute(type=float)
    y = yamlize.Attribute(type=float, default=0.0)
    z = yamlize.Attribute(type=float, default=0.0)

    def __init__(self, x=0.0, y=0.0, z=0.0):
        # yamlize bypasses __init__; this exists only for programmatic construction
        self.x, self.y, self.z = x, y, z
class Pitch(yamlize.Object):
    """Lattice pitch input: either an (x, y, z) triple or a triangular/hex pitch for hexagonal grids."""

    hex = yamlize.Attribute(type=float, default=0.0)
    x = yamlize.Attribute(type=float, default=0.0)
    y = yamlize.Attribute(type=float, default=0.0)
    z = yamlize.Attribute(type=float, default=0.0)

    def __init__(self, hexPitch=0.0, x=0.0, y=0.0, z=0.0):
        """
        Parameters
        ----------
        hexPitch : float, optional
            Triangular/hex lattice pitch
        x : float, optional
            Cartesian grid: pitch in the x direction.
            Hexagonal grid: interpreted as the hex lattice pitch.
        y : float, optional
            Cartesian grid: pitch in the y direction
        z : float, optional
            Pitch in the z direction

        Raises
        ------
        InputError
            * If a `hexPitch` and `x` or `y` pitch are provided simultaneously.
            * If no non-zero value is provided for any parameter.
        """
        mixedStyles = hexPitch and (x or y)
        if mixedStyles:
            raise InputError("Cannot mix `hex` with `x` and `y` attributes of `latticePitch`.")
        if not (hexPitch or x or y or z):
            raise InputError("`lattice pitch` must have at least one non-zero attribute! Check the blueprints.")
        # for hex grids, an `x` pitch is accepted as an alias for the hex pitch
        self.hex = hexPitch or x
        self.x = x
        self.y = y
        self.z = z
class GridBlueprint(yamlize.Object):
    """
    A grid input blueprint.

    These directly build Grid objects and contain information about how to populate the Grid with
    child ArmiObjects for the Reactor Model.

    The grids get origins either from a parent block (for pin lattices) or from a System (for Cores,
    SFPs, and other components).

    .. impl:: Define a lattice map in reactor core.
        :id: I_ARMI_BP_GRID
        :implements: R_ARMI_BP_GRID

        Defines a yaml construct that allows the user to specify a grid from within their blueprints
        file, including a name, geometry, dimensions, symmetry, and a map with the relative
        locations of components within that grid.

        Relies on the underlying infrastructure from the ``yamlize`` package for reading from text
        files, serialization, and internal storage of the data.

        Is implemented as part of a blueprints file by being used in key-value pairs within the
        :py:class:`~armi.reactor.blueprints.gridBlueprint.Grid` class, which is imported and used as
        an attribute within the larger :py:class:`~armi.reactor.blueprints.Blueprints` class.

        Includes a ``construct`` method, which instantiates an instance of one of the subclasses of
        :py:class:`~armi.reactor.grids.structuredgrid.StructuredGrid`. This is typically called from
        within :py:meth:`~armi.reactor.blueprints.blockBlueprint.BlockBlueprint.construct`, which
        then also associates the individual components in the block with locations specified in the
        grid.

    Attributes
    ----------
    name : str
        The grid name
    geom : str
        The geometry of the grid (e.g. 'cartesian')
    latticeMap : str
        An asciimap representation of the lattice contents
    latticeDimensions : Pitch
        An x/y/z Triplet or hex pitch with grid dimensions in cm. This is used to specify a
        uniform grid, such as Cartesian or Hex. Mutually exclusive with gridBounds.
    gridBounds : dict
        A dictionary containing explicit grid boundaries. Specific keys used will depend on the type
        of grid being defined. Mutually exclusive with latticeDimensions.
    symmetry : str
        A string defining the symmetry mode of the grid
    gridContents : dict
        A {(i,j): str} dictionary mapping spatialGrid indices in 2-D to string specifiers of what's
        supposed to be in the grid.
    orientationBOL : dict
        A {(i,j): float} dictionary mapping spatialGrid indices in 2-D to the orientation of
        what's supposed to be in the grid.
    """

    name = yamlize.Attribute(key="name", type=str)
    geom = yamlize.Attribute(key="geom", type=str, default=geometry.HEX)
    latticeMap = yamlize.Attribute(key="lattice map", type=str, default=None)
    latticeDimensions = yamlize.Attribute(key="lattice pitch", type=Pitch, default=None)
    gridBounds = yamlize.Attribute(key="grid bounds", type=dict, default=None)
    symmetry = yamlize.Attribute(
        key="symmetry",
        type=str,
        default=str(geometry.SymmetryType(geometry.DomainType.THIRD_CORE, geometry.BoundaryType.PERIODIC)),
    )
    # gridContents is the final form of grid contents information; it is set regardless of how the
    # input is read. When writing, we attempt to preserve the input mode and write ascii map if that
    # was what was originally provided.
    gridContents = yamlize.Attribute(key="grid contents", type=dict, default=None)
    # allowing us to add custom orientations to the objects on this grid, at BOL
    orientationBOL = yamlize.Attribute(key="orientationBOL", type=dict, default=None)

    @gridContents.validator
    def gridContents(self, value):
        # yamlize validator: keys must already be (i, j) tuples by the time this runs
        if value is None:
            return True
        if not all(isinstance(key, tuple) for key in value.keys()):
            raise InputError("Grid contents Keys need to be like [i, j]. Check the blueprints.")

        return True

    @orientationBOL.validator
    def orientationBOL(self, value):
        # yamlize validator: same tuple-key requirement as gridContents
        if value is None:
            return True
        if not all(isinstance(key, tuple) for key in value.keys()):
            raise InputError("Orientation BOL Keys need to be like [i, j]. Check the blueprints.")

        return True

    def __init__(
        self,
        name=None,
        geom=geometry.HEX,
        latticeMap=None,
        symmetry=str(geometry.SymmetryType(geometry.DomainType.THIRD_CORE, geometry.BoundaryType.PERIODIC)),
        gridContents=None,
        orientationBOL=None,
        gridBounds=None,
    ):
        """
        A Grid blueprint.

        Notes
        -----
        yamlize does not call an ``__init__`` method, instead it uses ``__new__`` and setattr this
        is only needed for when you want to make this object from a non-YAML source.

        Warning
        -------
        This is a Yamlize object, so ``__init__`` never really gets called. Only ``__new__`` does.
        """
        self.name = name
        self.geom = str(geom)
        self.latticeMap = latticeMap
        self._readFromLatticeMap = False
        self.symmetry = str(symmetry)
        self.gridContents = gridContents
        self.orientationBOL = orientationBOL
        self.gridBounds = gridBounds

    @property
    def readFromLatticeMap(self):
        """
        True if the grid contents were read from an ascii lattice map.

        This is implemented as a property, since as a Yamlize object, ``__init__`` is not always
        called and we have to lazily evaluate its default value.
        """
        return getattr(self, "_readFromLatticeMap", False)

    @readFromLatticeMap.setter
    def readFromLatticeMap(self, value):
        self._readFromLatticeMap = value

    def construct(self):
        """Build a Grid from a grid definition."""
        self._readGridContents()
        grid = self._constructSpatialGrid()
        return grid

    def _constructSpatialGrid(self):
        """
        Build spatial grid.

        If you do not enter ``latticeDimensions``, a unit grid will be produced which must be adjusted to the proper
        dimensions (often by inspection of children) at a later time.
        """
        symmetry = geometry.SymmetryType.fromStr(self.symmetry) if self.symmetry else None
        geom = self.geom
        maxIndex = self._getMaxIndex()
        runLog.extra(f"Creating the spatial grid {self.name}", single=True)
        if geom in (geometry.RZT, geometry.RZ):
            if self.gridBounds is None:
                # This check is regrettably late. It would be nice if we could validate that bounds
                # are provided if R-Theta mesh is being used.
                raise InputError(
                    f"Grid bounds must be provided for `{self.name}` to specify a grid with r-theta components."
                )
            for key in ("theta", "r"):
                if key not in self.gridBounds:
                    raise InputError(f"{key} grid bounds were not provided for `{self.name}`.")
            # convert to numpy arrays; the raw YAML values come in as a CommentedSeq
            theta = np.array(self.gridBounds["theta"])
            radii = np.array(self.gridBounds["r"])
            for lst, name in ((theta, "theta"), (radii, "radii")):
                if not isMonotonic(lst, "<"):
                    raise InputError(
                        f"Grid bounds for {self.name}:{name} is not sorted or contains duplicates. Check blueprints."
                    )
            spatialGrid = grids.ThetaRZGrid(bounds=(theta, radii, (0.0, 0.0)))
        if geom in (geometry.HEX, geometry.HEX_CORNERS_UP):
            if not self.latticeDimensions:
                pitch = 1.0
            else:
                # re-validate here since yamlize may have bypassed Pitch.__init__
                ld = self.latticeDimensions
                if ld.hex and (ld.x or ld.y):
                    raise InputError("Cannot mix `hex` with `x` and `y` attributes of `latticePitch`.")
                if not any([ld.hex, ld.x, ld.y, ld.z]):
                    raise InputError("`lattice pitch` must have at least one non-zero attribute! Check the blueprints.")
                pitch = ld.hex or ld.x
            # add 2 for potential dummy assems
            spatialGrid = grids.HexGrid.fromPitch(
                pitch,
                numRings=maxIndex + 2,
                cornersUp=geom == geometry.HEX_CORNERS_UP,
            )
        elif geom == geometry.CARTESIAN:
            # if full core or not cut-off, bump the first assembly from the center of the mesh into
            # the positive values.
            xw, yw = (self.latticeDimensions.x, self.latticeDimensions.y) if self.latticeDimensions else (1.0, 1.0)

            # Specifically in the case of grid blueprints, where we have grid contents available, we
            # can also infer "through center" based on the contents. Note that the "through center"
            # symmetry check cannot be performed when the grid contents has not been provided (i.e.,
            # None or empty).
            if self.gridContents and symmetry.domain == geometry.DomainType.FULL_CORE:
                nx, ny = _getGridSize(self.gridContents.keys())
                if nx == ny and nx % 2 == 1:
                    symmetry.isThroughCenterAssembly = True

            isOffset = symmetry is not None and not symmetry.isThroughCenterAssembly
            spatialGrid = grids.CartesianGrid.fromRectangle(xw, yw, numRings=maxIndex + 1, isOffset=isOffset)
        runLog.debug("Built grid: {}".format(spatialGrid))
        # set geometric metadata on spatialGrid. This information is needed in various parts of the
        # code and is best encapsulated on the grid itself rather than on the container state.
        spatialGrid._geomType: str = str(self.geom)
        self.symmetry = str(symmetry)
        spatialGrid._symmetry: str = self.symmetry
        return spatialGrid

    def _getMaxIndex(self):
        """
        Find the max index in the grid contents.

        Used to limit the size of the spatialGrid. Used to be called maxNumRings.
        """
        if self.gridContents:
            return max(itertools.chain(*zip(*self.gridContents.keys())))
        else:
            # no contents given yet; fall back to a reasonable default size
            return 6

    def expandToFull(self):
        """
        Unfold the blueprints to represent full symmetry.

        Notes
        -----
        This relatively rudimentary, and copies entries from the currently-represented domain to
        their corresponding locations in full symmetry. This may not produce the desired behavior
        for some scenarios, such as when expanding fuel shuffling paths or the like. Future work may
        make this more sophisticated.
        """
        if geometry.SymmetryType.fromAny(self.symmetry).domain == geometry.DomainType.FULL_CORE:
            # already full core; nothing to unfold
            return

        # fill the new grid contents
        grid = self.construct()
        self._expandToFullOrientationBOL(grid)
        newContents = copy.copy(self.gridContents)
        for idx, contents in self.gridContents.items():
            equivs = grid.getSymmetricEquivalents(idx)
            for idx2 in equivs:
                newContents[idx2] = contents
        self.gridContents = newContents

        # set the grid symmetry
        split = geometry.THROUGH_CENTER_ASSEMBLY in self.symmetry
        self.symmetry = str(
            geometry.SymmetryType(
                geometry.DomainType.FULL_CORE,
                geometry.BoundaryType.NO_SYMMETRY,
                throughCenterAssembly=split,
            )
        )

    def _expandToFullOrientationBOL(self, grid):
        """Set the orientationBOL parameter during expandToFull().

        Parameters
        ----------
        grid : Grid
            Spatial grid for the current ARMI object.
        """
        if self.orientationBOL is None:
            return

        # NOTE(review): lookups below key orientationBOL by (ring, pos) from indicesToRingPos,
        # while the class validator checks for [i, j]-style tuple keys — confirm the key convention.
        newOrientations = copy.copy(self.orientationBOL)
        for idx, contents in self.gridContents.items():
            equivs = grid.getSymmetricEquivalents(idx)
            # rotate each symmetric copy by an equal share of a full revolution
            angle = 360.0 / (len(equivs) + 1)
            for count, idx2 in enumerate(equivs):
                loc = grid.indicesToRingPos(*idx)
                if loc in self.orientationBOL:
                    loc2 = grid.indicesToRingPos(*idx2)
                    newOrientation = self.orientationBOL[loc] + (count + 1) * angle
                    newOrientations[loc2] = newOrientation % 360.0
        self.orientationBOL = newOrientations

    def _readGridContents(self):
        """
        Read the specifiers as a function of grid position.

        The contents can either be provided as:

        * A dict mapping indices to specifiers (default output of this)
        * An asciimap

        The output will always be stored in ``self.gridContents``.
        """
        if self.gridContents:
            return
        elif self.latticeMap:
            self._readGridContentsLattice()

        if self.gridContents is None:
            # Make sure we have at least something; clients shouldn't have to worry about whether
            # gridContents exist at all.
            self.gridContents = dict()

    def _readGridContentsLattice(self):
        """Read an ascii map of grid contents.

        This updates the gridContents attribute, which is a dict mapping grid i,j,k indices to textual specifiers
        (e.g. ``IC``)).
        """
        self.readFromLatticeMap = True
        symmetry = geometry.SymmetryType.fromStr(self.symmetry)
        geom = geometry.GeomType.fromStr(self.geom)

        latticeCls = asciimaps.asciiMapFromGeomAndDomain(self.geom, symmetry.domain)
        asciimap = latticeCls()
        asciimap.readAscii(self.latticeMap)
        self.gridContents = dict()
        iOffset = 0
        jOffset = 0

        if geom == geometry.GeomType.CARTESIAN and symmetry.domain == geometry.DomainType.FULL_CORE:
            # asciimaps is not smart about where the center should be, so we need to offset
            # appropriately to get (0,0) in the middle
            nx, ny = _getGridSize(asciimap.keys())

            # turns out this works great for even and odd cases. love it when integer math works in your favor
            iOffset = int(-nx / 2)
            jOffset = int(-ny / 2)

        for (i, j), spec in asciimap.items():
            if spec == "-":
                # skip placeholders
                continue
            self.gridContents[i + iOffset, j + jOffset] = spec

    def getLocators(self, spatialGrid: grids.Grid, latticeIDs: list):
        """
        Return spatialLocators in grid corresponding to lattice IDs.

        This requires a fully-populated ``gridContents`` attribute.
        """
        if latticeIDs is None:
            return []
        if self.gridContents is None:
            return []
        # tried using yamlize to coerce ints to strings but failed after much struggle, so we just
        # auto-convert here to deal with int-like specifications. (yamlize.StrList fails to coerce
        # when ints are provided)
        latticeIDs = [str(i) for i in latticeIDs]
        locators = []
        for (i, j), spec in self.gridContents.items():
            locator = spatialGrid[i, j, 0]
            if spec in latticeIDs:
                locators.append(locator)
        return locators

    def getMultiLocator(self, spatialGrid, latticeIDs):
        """Create a MultiIndexLocation based on lattice IDs."""
        spatialLocator = grids.MultiIndexLocation(grid=spatialGrid)
        spatialLocator.extend(self.getLocators(spatialGrid, latticeIDs))
        return spatialLocator
class Grids(yamlize.KeyedList):
    """Keyed list of grid blueprints, keyed by grid name (the ``grids:`` blueprints section)."""

    item_type = GridBlueprint
    key_attr = GridBlueprint.name
def _getGridSize(idx) -> Tuple[int, int]:
"""
Return the number of spaces between the min and max of a collection of (int, int) tuples, inclusive.
This essentially returns the number of grid locations along the i, and j dimensions, given the (i,j) indices of each
occupied location. This is useful for determining certain grid offset behavior.
"""
nx = max(key[0] for key in idx) - min(key[0] for key in idx) + 1
ny = max(key[1] for key in idx) - min(key[1] for key in idx) + 1
return nx, ny
def _filterOutsideDomain(gridBp):
    """Remove grid contents that lie outside the represented domain.

    This removes extra objects; ARMI allows the user input specifiers in regions outside of the
    represented domain, which is fine as long as the contained specifier is consistent with the
    corresponding region in the represented domain given the symmetry condition. For instance, if we
    have a 1/3-core hex model, it is typically okay for an assembly to be specified outside of the
    first 1/3rd of the core, as long as it is the same assembly as would be there when expanding the
    first 1/3rd into a full-core model.

    However, we do not really want these hanging around, since editing the represented 1/Nth of the
    core will probably lead to consistency issues, so we remove them.
    """
    grid = gridBp.construct()

    # collect all indices whose locator falls outside the represented domain
    outside = set()
    for idx in gridBp.gridContents:
        if not grid.locatorInDomain(grid[idx + (0,)], symmetryOverlap=False):
            outside.add(idx)

    for idx in outside:
        # before dropping, verify the entry agrees with its in-domain symmetric equivalents
        for symmetric in grid.getSymmetricEquivalents(idx):
            if symmetric not in gridBp.gridContents:
                continue
            if gridBp.gridContents[symmetric] != gridBp.gridContents[idx]:
                raise ValueError(
                    "The contents at `{}` (`{}`) in grid `{}` is not the "
                    "same as it's symmetric equivalent at `{}` (`{}`). "
                    "Check your grid blueprints for symmetry.".format(
                        idx,
                        gridBp.gridContents[idx],
                        gridBp.name,
                        symmetric,
                        gridBp.gridContents[symmetric],
                    )
                )
        del gridBp.gridContents[idx]
def saveToStream(stream, bluep, full=False, tryMap=False):
    """
    Save the blueprints to the passed stream.

    This can save either the entire blueprints, or just the `grids:` section of the blueprints, based on the passed
    ``full`` argument. Saving just the grid blueprints can be useful when cobbling blueprints together with !include
    flags.

    .. impl:: Write a blueprint file from a blueprint object.
        :id: I_ARMI_BP_TO_DB
        :implements: R_ARMI_BP_TO_DB

        First makes a copy of the blueprints that are passed in. Then modifies any grids specified in the blueprints
        into a canonical lattice map style, if needed. Then uses the ``dump`` method that is inherent to all ``yamlize``
        subclasses to write the blueprints to the given ``stream`` object.

        If called with the ``full`` argument, the entire blueprints is dumped. If not, only the grids portion is dumped.

    Parameters
    ----------
    stream :
        file output stream of some kind
    bluep : armi.reactor.blueprints.Blueprints, or Grids
    full : bool
        Is this a full output file, or just a partial/grids?
    tryMap : bool
        regardless of input form, attempt to output as a lattice map
    """
    # To save, we want to try our best to output our grid blueprints in the lattice map style. However, we do not want
    # to wreck the state that the current blueprints are in. So we make a copy and do some manipulations to try to
    # canonicalize it and save that, leaving the original blueprints unmolested.
    bp = copy.deepcopy(bluep)

    if isinstance(bp, blueprints.Blueprints):
        gridDesigns = bp.gridDesigns
    elif isinstance(bp, blueprints.Grids):
        gridDesigns = bp
    else:
        raise TypeError(f"Expected Blueprints or Grids, got {type(bp)}")

    for gridDesignType, gridDesign in gridDesigns.items():
        # The core equilibrium path should be put into the grid contents rather than a lattice map until we write a
        # string-> tuple parser for reading it back in. Skip this type of grid.
        if gridDesignType == "coreEqPath":
            continue

        # drop specifiers outside the represented domain (validated for symmetry consistency)
        _filterOutsideDomain(gridDesign)

        if not gridDesign.gridContents:
            # there is no grid, so there must be lattice, and that goes to output
            continue

        if gridDesign.readFromLatticeMap or tryMap:
            symmetry = geometry.SymmetryType.fromStr(gridDesign.symmetry)
            aMap = asciimaps.asciiMapFromGeomAndDomain(gridDesign.geom, symmetry.domain)()
            try:
                if gridDesign.latticeMap:
                    # Try to use the lattice map first, it was the original source of truth.
                    aMap.readAscii(gridDesign.latticeMap)
                else:
                    # If there is no original lattice map, use the current grid of data.
                    aMap.asciiLabelByIndices = {(key[0], key[1]): val for key, val in gridDesign.gridContents.items()}
                    aMap.gridContentsToAscii()
            except Exception as e:
                # best-effort: fall back to the `grid contents` dictionary form on any failure
                runLog.warning(
                    "The `lattice map` for the current assembly arrangement cannot be written. Defaulting to using the "
                    f"`grid contents` dictionary instead. Exception: {e}"
                )
                aMap = None

            if aMap is not None:
                # If there is an ascii map available then use it to fill out the contents of the lattice map section of
                # the grid design. This also clears out the grid contents so there is not duplicate data.
                gridDesign.gridContents = None
                mapString = StringIO()
                aMap.writeAscii(mapString)
                gridDesign.latticeMap = scalarstring.LiteralScalarString(mapString.getvalue())
            else:
                gridDesign.latticeMap = None
        else:
            # Grid contents were supplied as a dictionary, so we shouldn't even have a latticeMap, unless it was set
            # explicitly in code somewhere. Discard if there is one.
            gridDesign.latticeMap = None

    toSave = bp if full else gridDesigns
    # NOTE: type(bp) here used because importing Blueprints causes a circular import
    type(toSave).dump(toSave, stream)
================================================
FILE: armi/reactor/blueprints/isotopicOptions.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Defines nuclide flags and custom isotopics via input.
Nuclide flags control meta-data about nuclides. Custom isotopics allow specification of arbitrary isotopic compositions.
"""
import yamlize
from armi import materials, runLog
from armi.nucDirectory import elements, nucDir, nuclideBases
from armi.physics.neutronics.fissionProductModel.fissionProductModelSettings import (
CONF_FISSION_PRODUCT_LIBRARY_NAME,
CONF_FP_MODEL,
)
from armi.physics.neutronics.settings import (
CONF_MCNP_LIB_BASE,
CONF_NEUTRONICS_KERNEL,
CONF_XS_KERNEL,
)
from armi.utils import densityTools, units
from armi.utils.customExceptions import InputError
class NuclideFlag(yamlize.Object):
    """
    Defines whether or not each nuclide is included in the burn chain and cross sections.

    Also controls which nuclides get expanded from elementals to isotopics and which natural
    isotopics to exclude (if any). Oftentimes, cross section library creators include some natural
    isotopes but not all. For example, it is common to include O16 but not O17 or O18. Each code has
    slightly different interpretations of this so we give the user full control here.

    We also try to provide useful defaults.

    There are lots of complications that can arise in these choices. It makes reasonable sense to
    use elemental compositions for things that are typically used without isotopic modifications
    (Fe, O, Zr, Cr, Na). If we choose to expand some or all of these to isotopics at initialization
    based on cross section library requirements, a single case will work fine with a given lattice
    physics option. However, restarting from that case with different cross section needs is
    challenging.

    .. impl:: The blueprint object that represents a nuclide flag.
        :id: I_ARMI_BP_NUC_FLAGS1
        :implements: R_ARMI_BP_NUC_FLAGS

        This class creates a yaml interface for the user to specify in their blueprints which
        isotopes should be depleted. It is incorporated into the "nuclide flags" section of a
        blueprints file by being included as key-value pairs within the
        :py:class:`~armi.reactor.blueprints.isotopicOptions.NuclideFlags` class, which is in turn
        included into the overall blueprints within :py:class:`~armi.reactor.blueprints.Blueprints`.

        This class includes a boolean ``burn`` attribute which can be specified for any nuclide.
        This attribute is examined by the
        :py:meth:`~armi.reactor.blueprints.isotopicOptions.NuclideFlag.fileAsActiveOrInert` method
        to sort the nuclides into sets of depletable or not, which is typically called during
        construction of assemblies in :py:meth:`~armi.reactor.blueprints.Blueprints.constructAssem`.

        Note that while the ``burn`` attribute can be set by the user in the blueprints, other
        methods may also set it based on case settings (see, for instance,
        :py:func:`~armi.reactor.blueprints.isotopicOptions.genDefaultNucFlags`,
        :py:func:`~armi.reactor.blueprints.isotopicOptions.autoUpdateNuclideFlags`, and
        :py:func:`~armi.reactor.blueprints.isotopicOptions.getAllNuclideBasesByLibrary`).

    Attributes
    ----------
    nuclideName : str
        The name of the nuclide
    burn : bool
        True if this nuclide should be added to the burn chain. If True, all reachable nuclides via
        transmutation and decay must be included as well.
    xs : bool
        True if this nuclide should be included in the cross section libraries. Effectively, if this
        nuclide is in the problem at all, this should be true.
    expandTo : list of str, optional
        isotope nuclideNames to expand to. For example, if nuclideName is ``O`` then this could be
        ``["O16", "O17"]`` to expand it into those two isotopes (but not ``O18``). The nuclides will
        be scaled up uniformly to account for any missing natural nuclides.
    """

    nuclideName = yamlize.Attribute(type=str)

    @nuclideName.validator
    def nuclideName(self, value):
        """Reject any name that is neither a known nuclide nor a known element symbol."""
        if value not in nuclideBases.byName and value not in elements.bySymbol:
            # Use a union here: ``set(...).update(...)`` returns None, which previously
            # made this error message print "None" instead of the valid names.
            allowedKeys = set(nuclideBases.byName) | set(elements.bySymbol)
            raise ValueError(f"`{value}` is not a valid nuclide name, must be one of: {allowedKeys}")

    burn = yamlize.Attribute(type=bool)
    xs = yamlize.Attribute(type=bool)
    expandTo = yamlize.Attribute(type=yamlize.StrList, default=None)

    def __init__(self, nuclideName, burn, xs, expandTo):
        # note: yamlize does not call an __init__ method, instead it uses __new__ and setattr
        self.nuclideName = nuclideName
        self.burn = burn
        self.xs = xs
        self.expandTo = expandTo

    def __repr__(self):
        # Previously returned an empty string, which made debugging collections
        # of flags (e.g. in logs or interactive sessions) useless.
        return f"<NuclideFlag name:{self.nuclideName} burn:{self.burn} xs:{self.xs}>"

    def fileAsActiveOrInert(self, activeSet, inertSet):
        """
        Given a nuclide or element name, file it as either active or inert.

        If isotopic expansions are requested, include the isotopics rather than the
        NaturalNuclideBase, as the NaturalNuclideBase will never occur in such a problem.

        Parameters
        ----------
        activeSet : set
            Mutated in place; receives names of nuclides that participate in the burn chain.
        inertSet : set
            Mutated in place; receives names of nuclides included in cross sections.

        Returns
        -------
        expanded : list
            Elements that should be expanded to isotopics (empty if no expansion requested).
        undefBurnChainActiveNuclides : set
            Active nuclides with no transmutation/decay data (DUMPs and LFPs usually).
        """
        undefBurnChainActiveNuclides = set()
        nb = nuclideBases.byName[self.nuclideName]
        if self.expandTo:
            nucBases = [nuclideBases.byName[nn] for nn in self.expandTo]
            expanded = [nb.element]  # error to expand non-elements
        else:
            nucBases = [nb]
            expanded = []
        for nuc in nucBases:
            if self.burn:
                if not nuc.trans and not nuc.decays:
                    # DUMPs and LFPs usually
                    undefBurnChainActiveNuclides.add(nuc.name)
                activeSet.add(nuc.name)
            if self.xs:
                inertSet.add(nuc.name)
        return expanded, undefBurnChainActiveNuclides
class NuclideFlags(yamlize.KeyedList):
    """An OrderedDict of ``NuclideFlags``, keyed by their ``nuclideName``."""

    # each entry in the YAML mapping is parsed into a single NuclideFlag
    item_type = NuclideFlag
    # the mapping key for each entry is the flag's nuclide name
    key_attr = NuclideFlag.nuclideName
class CustomIsotopic(yamlize.Map):
    """
    User specified, custom isotopics input defined by a name (such as MOX), and key/pairs of nuclide
    names and numeric values consistent with the ``input format``.

    .. impl:: Certain material modifications will be applied using this code.
        :id: I_ARMI_MAT_USER_INPUT2
        :implements: R_ARMI_MAT_USER_INPUT

        Defines a yaml construct that allows the user to define a custom isotopic vector from within
        their blueprints file, including a name and key-value pairs corresponding to nuclide names
        and their concentrations.

        Relies on the underlying infrastructure from the ``yamlize`` package for reading from text
        files, serialization, and internal storage of the data.

        Is implemented as part of a blueprints file by being used in key-value pairs within the
        :py:class:`~armi.reactor.blueprints.isotopicOptions.CustomIsotopics` class, which is
        imported and used as an attribute within the larger
        :py:class:`~armi.reactor.blueprints.Blueprints` class.

        These isotopics are linked to a component during calls to
        :py:meth:`~armi.reactor.blueprints.componentBlueprint.ComponentBlueprint.construct`, where
        the name specified in the ``isotopics`` attribute of the component blueprint is searched
        against the available ``CustomIsotopics`` defined in the "custom isotopics" section of the
        blueprints. Once linked, the
        :py:meth:`~armi.reactor.blueprints.isotopicOptions.CustomIsotopic.apply` method is called,
        which adjusts the ``massFrac`` attribute of the component's material class.
    """

    key_type = yamlize.Typed(str)
    value_type = yamlize.Typed(float)
    name = yamlize.Attribute(type=str)
    inputFormat = yamlize.Attribute(key="input format", type=str)

    @inputFormat.validator
    def inputFormat(self, value):
        """Only the three supported input formats are accepted."""
        if value not in self._allowedFormats:
            raise ValueError(f"Cannot set `inputFormat` to `{value}`, must be one of: {self._allowedFormats}")

    _density = yamlize.Attribute(key="density", type=float, default=None)

    _allowedFormats = {"number fractions", "number densities", "mass fractions"}

    def __new__(cls, *args):
        self = yamlize.Map.__new__(cls, *args)
        # the density as computed by source number densities
        self._computedDensity = None
        return self

    def __init__(self, name, inputFormat, density):
        # note: yamlize does not call an __init__ method, instead it uses __new__ and setattr
        self._name = None
        self.name = name
        self._inputFormat = None
        self.inputFormat = inputFormat
        self.density = density
        self.massFracs = {}

    def __setitem__(self, key, value):
        """Only allow known nuclide names or element symbols as keys."""
        if key not in nuclideBases.byName and key not in elements.bySymbol:
            # Use a union here: ``set(...).update(...)`` returns None, which previously
            # made this error message print "None" instead of the valid names.
            allowedKeys = set(nuclideBases.byName) | set(elements.bySymbol)
            raise ValueError(f"Key `{key}` is not valid, must be one of: {allowedKeys}")
        yamlize.Map.__setitem__(self, key, value)

    @property
    def density(self):
        # prefer the density derived from number densities, if one was computed
        return self._computedDensity or self._density

    @density.setter
    def density(self, value):
        if self._computedDensity is not None:
            raise AttributeError("Density was computed from number densities, and should not be set directly.")
        # Validate *before* storing so an invalid value is never left assigned
        # (previously the bad value was stored and then the error raised).
        if value is not None and value < 0:
            raise ValueError(f"Cannot set `density` to `{value}`, must be non-negative")
        self._density = value

    @classmethod
    def from_yaml(cls, loader, node, rtd):
        """
        Override the ``Yamlizable.from_yaml`` to inject custom data validation logic, and complete
        initialization of the object.
        """
        self = yamlize.Map.from_yaml.__func__(cls, loader, node, rtd)
        try:
            self._initializeMassFracs()
            self._expandElementMassFracs()
        except Exception as ex:
            # use a YamlizingError to get line/column of erroneous input
            raise yamlize.YamlizingError(str(ex), node) from ex
        return self

    @classmethod
    def from_yaml_key_val(cls, loader, key_node, val_node, key_attr, rtd):
        """
        Override the ``Yamlizable.from_yaml`` to inject custom data validation logic, and complete
        initialization of the object.
        """
        self = yamlize.Map.from_yaml_key_val.__func__(cls, loader, key_node, val_node, key_attr, rtd)
        try:
            self._initializeMassFracs()
            self._expandElementMassFracs()
        except Exception as ex:
            # use a YamlizingError to get line/column of erroneous input
            raise yamlize.YamlizingError(str(ex), val_node) from ex
        return self

    def _initializeMassFracs(self):
        """Convert the raw user-input values into ``massFracs``, interpreting ``inputFormat``."""
        self.massFracs = dict()  # defaults to 0.0, __init__ is not called
        if any(v < 0.0 for v in self.values()):
            raise ValueError(f"Custom isotopic input for {self.name} is negative")
        valSum = sum(self.values())
        # fractional formats (number/mass fractions) must be normalized by the user
        if not abs(valSum - 1.0) < 1e-5 and "fractions" in self.inputFormat:
            raise ValueError(f"Fractional custom isotopic input values must sum to 1.0 in: {self.name}")
        if self.inputFormat == "number fractions":
            # mass fraction of i = N_i * A_i / sum_j(N_j * A_j)
            sumNjAj = 0.0
            for nuc, nj in self.items():
                if nj:
                    sumNjAj += nj * nucDir.getAtomicWeight(nuc)
            for nuc, value in self.items():
                massFrac = value * nucDir.getAtomicWeight(nuc) / sumNjAj
                self.massFracs[nuc] = massFrac
        elif self.inputFormat == "number densities":
            if self._density is not None:
                raise InputError(
                    f"Custom isotopic `{self.name}` is over-specified. It was provided as number densities, but "
                    f"density ({self.density}) was also provided. Is the input format correct?"
                )
            # partial mass density (g/cc) of each nuclide from its number density
            M = {
                nuc: Ni / units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM * nucDir.getAtomicWeight(nuc)
                for nuc, Ni in self.items()
            }
            densityTotal = sum(M.values())
            if densityTotal < 0:
                raise ValueError("Computed density is negative")
            for nuc, Mi in M.items():
                self.massFracs[nuc] = Mi / densityTotal
            # remember the implied bulk density; the `density` property reports it
            self._computedDensity = densityTotal
        elif self.inputFormat == "mass fractions":
            self.massFracs = dict(self)  # as input
        else:
            raise ValueError(f"Unrecognized custom isotopics input format {self.inputFormat}.")

    def _expandElementMassFracs(self):
        """
        Expand the custom isotopics input entries that are elementals to isotopics.

        This is necessary when the element name is not a elemental nuclide. Most everywhere else
        expects Nuclide objects (or nuclide names). This input allows a user to enter "U" which
        would expand to the naturally occurring uranium isotopics.

        This is different than the isotopic expansion done for meeting user-specified modeling
        options (such as an MC**2, or MCNP expecting elements or isotopes), because it translates
        the user input into something that can be used later on.
        """
        elementsToExpand = []
        for nucName in self.massFracs:
            if nucName not in nuclideBases.byName:
                element = elements.bySymbol.get(nucName)
                if element is not None:
                    runLog.info(f"Expanding custom isotopic `{self.name}` element `{nucName}` to natural isotopics")
                    # include all natural isotopes with None flag
                    elementsToExpand.append((element, None))
                else:
                    raise InputError(f"Unrecognized nuclide/isotope/element in input: {nucName}")
        densityTools.expandElementalMassFracsToNuclides(self.massFracs, elementsToExpand)

    def apply(self, material):
        """
        Apply specific isotopic compositions to a component.

        Generically, materials have composition-dependent bulk properties such as mass density.
        Note that this operation does not update these material properties. Use with care.

        Parameters
        ----------
        material : armi.materials.material.Material
            An ARMI Material instance.
        """
        material.massFrac = dict(self.massFracs)
        if self.density is not None:
            if not isinstance(material, materials.Custom):
                runLog.important(
                    "A custom isotopic with associated density has been specified for non-`Custom` material "
                    f"{material}. The reference density of materials in the materials library will not be changed, but "
                    "the associated components will use the density implied by the custom isotopics.",
                    single=True,
                )
                # specifically, non-Custom materials only use refDensity and dLL, mat.customDensity has no effect
                return
            material.customDensity = self.density
class CustomIsotopics(yamlize.KeyedList):
    """OrderedDict of CustomIsotopic objects, keyed by their name."""

    item_type = CustomIsotopic
    key_attr = CustomIsotopic.name

    # note: yamlize does not call an __init__ method, instead it uses __new__ and setattr

    def apply(self, material, customIsotopicsName):
        """
        Apply specific isotopic compositions to a component.

        Generically, materials have composition-dependent bulk properties such as mass density.
        Note that this operation does not update these material properties. Use with care.

        Parameters
        ----------
        material : armi.materials.material.Material
            Material instance to adjust.
        customIsotopicsName : str
            String corresponding to the ``CustomIsotopic.name``.

        Raises
        ------
        KeyError
            If ``customIsotopicsName`` is not one of the defined custom isotopics.
        """
        if customIsotopicsName not in self:
            raise KeyError(
                "The input custom isotopics do not include {}. The only present specifications are {}".format(
                    customIsotopicsName, self.keys()
                )
            )
        # delegate the actual mass-fraction/density adjustment to the named CustomIsotopic
        custom = self[customIsotopicsName]
        custom.apply(material)
def getDefaultNuclideFlags():
    """
    Return a default set of nuclides to model and deplete.

    Notes
    -----
    The nuclideFlags input on blueprints has confused new users and is infrequently changed. It
    will be moved to be a user setting, but in any case a reasonable default should be provided.
    We will by default model medium-lived and longer actinides between U234 and CM247.

    We will include B10 and B11 without depletion, sodium, and structural elements.

    We will include LFPs with depletion.

    Returns
    -------
    dict
        Maps nuclide/element name to a ``{"burn": bool, "xs": bool, "expandTo": None}`` dict.
    """
    defaults = {}

    def _flag(name, burn):
        # every default entry has xs=True and no explicit isotopic expansion
        defaults[name] = {"burn": burn, "xs": True, "expandTo": None}

    # medium-lived and longer actinides, U234 through CM247
    heavyMetalMasses = {
        "U": (234, 235, 236, 238),
        "NP": (237, 238),
        "PU": (236, 238, 239, 240, 241, 242),
        "AM": (241, 242, 243),
        "CM": (242, 243, 244, 245, 246, 247),
    }
    for symbol, massNumbers in heavyMetalMasses.items():
        for massNumber in massNumbers:
            _flag(f"{symbol}{massNumber}", burn=True)

    # lumped fission products and dump nuclides deplete
    for lfpId in (35, 38, 39, 40, 41):
        _flag(f"LFP{lfpId}", burn=True)
    for dumpId in (1, 2):
        _flag(f"DUMP{dumpId}", burn=True)

    # boron isotopes and structural/coolant elements get cross sections but no depletion
    for boronMass in (10, 11):
        _flag(f"B{boronMass}", burn=False)
    for symbol in ("ZR", "C", "SI", "V", "CR", "MN", "FE", "NI", "MO", "W", "NA", "HE", "AL", "CO", "NB"):
        _flag(symbol, burn=False)

    return defaults
def eleExpandInfoBasedOnCodeENDF(cs):
    """
    Intelligently choose elements to expand based on code and ENDF version.

    If settings point to a particular code and library and we know that combo requires certain
    elementals to be expanded, we flag them here to make the user input as simple as possible.

    This determines both which elementals to keep and which specific expansion subsets to use.

    Notes
    -----
    This logic is expected to be moved to respective plugins in time.

    Returns
    -------
    elementalsToKeep : set
        Set of NaturalNuclideBase instances to not expand into natural isotopics.
    expansions : dict
        Element to list of nuclides for expansion.
        For example: {oxygen: [oxygen16]} indicates that all
        oxygen should be expanded to O16, ignoring natural
        O17 and O18. (variables are Natural/NuclideBases)
    """
    elementalsToKeep = set()
    oxygenElementals = [nuclideBases.byName["O"]]
    hydrogenElementals = [nuclideBases.byName[name] for name in ["H"]]
    # per-library elemental availability: which elementals each ENDF release carries
    endf70Elementals = [nuclideBases.byName[name] for name in ["C", "V", "ZN"]]
    endf71Elementals = [nuclideBases.byName[name] for name in ["C"]]
    endf80Elementals = []
    elementalsInMC2 = set()
    expansionStrings = {}
    # MC2-style expansions neglect trace natural isotopes
    mc2Expansions = {
        "HE": ["HE4"],  # neglect HE3
        "O": ["O16"],  # neglect O17 and O18
        "W": ["W182", "W183", "W184", "W186"],  # neglect W180
    }
    mcnpExpansions = {"O": ["O16"]}

    for element in elements.byName.values():
        # any NaturalNuclideBase that's available in MC2 libs
        nnb = nuclideBases.byName.get(element.symbol)
        if nnb and nnb.getMcc2Id():
            elementalsInMC2.add(nnb)

    if "MCNP" in cs[CONF_NEUTRONICS_KERNEL]:
        expansionStrings.update(mcnpExpansions)
        if cs[CONF_MCNP_LIB_BASE] == "ENDF/B-V.0":
            # ENDF/B V.0
            elementalsToKeep.update(nuclideBases.instances)  # skip expansion
        elif cs[CONF_MCNP_LIB_BASE] == "ENDF/B-VII.0":
            # ENDF/B VII.0
            elementalsToKeep.update(endf70Elementals)
        elif cs[CONF_MCNP_LIB_BASE] == "ENDF/B-VII.1":
            # ENDF/B VII.1
            elementalsToKeep.update(endf71Elementals)
        elif cs[CONF_MCNP_LIB_BASE] == "ENDF/B-VIII.0":
            # ENDF/B VIII.0
            elementalsToKeep.update(endf80Elementals)
        else:
            raise InputError(
                "Failed to determine nuclides for modeling. The `mcnpLibraryVersion` "
                f"setting value ({cs[CONF_MCNP_LIB_BASE]}) is not supported."
            )
    elif cs[CONF_XS_KERNEL] == "SERPENT":
        elementalsToKeep.update(endf70Elementals)
        expansionStrings.update(mc2Expansions)
    elif cs[CONF_XS_KERNEL] in ["", "MC2v3", "MC2v3-PARTISN"]:
        # empty kernel string defaults to the MC2v3 behavior
        elementalsToKeep.update(endf71Elementals)
        expansionStrings.update(mc2Expansions)
    elif cs[CONF_XS_KERNEL] == "DRAGON":
        # Users need to use default nuclear lib name. This is documented.
        dragLib = cs["dragonDataPath"]
        # only supports ENDF/B VII/VIII at the moment.
        if "7r0" in dragLib:
            elementalsToKeep.update(endf70Elementals)
        elif "7r1" in dragLib:
            elementalsToKeep.update(endf71Elementals)
        elif "8r0" in dragLib:
            elementalsToKeep.update(endf80Elementals)
            # NOTE(review): H and O elementals are only kept for the 8r0 DRAGLIB branch here,
            # not for 7r0/7r1 — presumably intentional for ENDF/B-VIII DRAGLIBs; confirm.
            elementalsToKeep.update(hydrogenElementals)
            elementalsToKeep.update(oxygenElementals)
        else:
            raise ValueError(f"Unrecognized DRAGLIB name: {dragLib} Use default file name.")
    elif cs[CONF_XS_KERNEL] == "MC2v2":
        # strip out any NaturalNuclideBase with no getMcc2Id() (not on mcc-nuclides.yaml)
        elementalsToKeep.update(elementalsInMC2)
        expansionStrings.update(mc2Expansions)

    # convert convenient string notation to actual NuclideBase objects
    expansions = {}
    for nnb, nbs in expansionStrings.items():
        expansions[nuclideBases.byName[nnb]] = [nuclideBases.byName[nb] for nb in nbs]

    return elementalsToKeep, expansions
def genDefaultNucFlags():
    """Perform all the yamlize-required type conversions."""
    nucFlags = NuclideFlags()
    # wrap each plain-dict default entry in a yamlize NuclideFlag object
    for name, info in getDefaultNuclideFlags().items():
        nucFlags[name] = NuclideFlag(name, info["burn"], info["xs"], info["expandTo"])
    return nucFlags
def autoUpdateNuclideFlags(cs, nuclideFlags, inerts):
    """
    This function is responsible for examining the fission product model treatment that is selected
    by the user and adding a set of nuclides to the `nuclideFlags` list.

    Notes
    -----
    The reason for adding this method is that when switching between fission product modeling
    treatments it can be time-consuming to manually adjust the ``nuclideFlags`` inputs.

    See Also
    --------
    genDefaultNucFlags
    """
    nbs = getAllNuclideBasesByLibrary(cs)
    if not nbs:
        # nothing to add for this fission product model / library combination
        return

    runLog.info(
        "Adding explicit fission products to the nuclide flags based on the fission product model set to "
        f"`{cs[CONF_FP_MODEL]}`."
    )
    for nb in nbs:
        nucName = nb.name
        # skip anything already flagged, either as the nuclide itself or via its element
        if nucName in nuclideFlags or elements.byZ[nb.z] in nuclideFlags:
            continue
        nuclideFlags[nucName] = NuclideFlag(nucName, burn=False, xs=True, expandTo=[])
        # inert since burn is False
        inerts.add(nucName)
def getAllNuclideBasesByLibrary(cs):
    """
    Return a list of nuclide bases available for cross section modeling
    based on the ``CONF_FISSION_PRODUCT_LIBRARY_NAME`` setting.

    Returns
    -------
    list or dict_values
        Nuclide bases for the selected library, or an empty list when no
        explicit-fission-product library applies.
    """
    nbs = []
    if cs[CONF_FP_MODEL] == "explicitFissionProducts":
        # Must be a single if/elif/else chain: with the previous separate
        # `if not ...: pass` statement, an *unset* library name fell through
        # to the `else` below and raised a misleading ValueError instead of
        # returning an empty list.
        if not cs[CONF_FISSION_PRODUCT_LIBRARY_NAME]:
            # no library selected: model no extra explicit fission products
            pass
        elif cs[CONF_FISSION_PRODUCT_LIBRARY_NAME] == "MC2-3":
            nbs = nuclideBases.byMcc3Id.values()
        else:
            raise ValueError(
                "An option to handle the `CONF_FISSION_PRODUCT_LIBRARY_NAME` set to "
                f"`{cs[CONF_FISSION_PRODUCT_LIBRARY_NAME]}` has not been implemented."
            )
    return nbs
================================================
FILE: armi/reactor/blueprints/reactorBlueprint.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Definitions of top-level reactor arrangements like the Core (default), SFP, etc.
See documentation of blueprints in :ref:`bp-input-file` for more context. See example in
:py:mod:`armi.reactor.blueprints.tests.test_reactorBlueprints`.
This was built to replace the old system that loaded the core geometry from the ``cs['geometry']`` setting. Until the
geom file-based input is completely removed, this system will attempt to migrate the core layout from geom files. When
geom files are used, explicitly specifying a ``core`` system will result in an error.
System Blueprints are a big step in the right direction to generalize user input, but was still mostly adapted from the
old Core layout input. As such, they still only really support Core-like systems. Future work should generalize the
concept of "system" to more varied scenarios.
See Also
--------
armi.reactor.blueprints.gridBlueprints : Method for storing system assembly layouts.
"""
import yamlize
from armi import context, getPluginManagerOrFail, runLog
from armi.reactor import geometry, grids
from armi.reactor.blueprints.gridBlueprint import Triplet
from armi.utils import tabulate
class SystemBlueprint(yamlize.Object):
    """
    The reactor-level structure input blueprint.

    .. impl:: Build core and spent fuel pool from blueprints
        :id: I_ARMI_BP_SYSTEMS
        :implements: R_ARMI_BP_SYSTEMS, R_ARMI_BP_CORE

        This class creates a yaml interface for the user to define systems with grids, such as
        cores or spent fuel pools, each having their own name, type, grid, and position in space.
        It is incorporated into the "systems" section of a blueprints file by being included as
        key-value pairs within the :py:class:`~armi.reactor.blueprints.reactorBlueprint.Systems`
        class, which is in turn included into the overall blueprints within
        :py:class:`~armi.reactor.blueprints.Blueprints`.

        This class includes a
        :py:meth:`~armi.reactor.blueprints.reactorBlueprint.SystemBlueprint.construct` method,
        which is typically called from within :py:func:`~armi.reactor.reactors.factory` during the
        initialization of the reactor object to instantiate the core and/or spent fuel pool
        objects. During that process, a spatial grid is constructed based on the grid blueprints
        specified in the "grids" section of the blueprints (see :need:`I_ARMI_BP_GRID`) and the
        assemblies needed to fill the lattice are built from blueprints using
        :py:meth:`~armi.reactor.blueprints.Blueprints.constructAssem`.

    Notes
    -----
    We use string keys to link grids to objects that use them. This differs from how blocks /
    assemblies are specified, which use YAML anchors. YAML anchors have proven to be problematic
    and difficult to work with.
    """

    name = yamlize.Attribute(key="name", type=str)
    typ = yamlize.Attribute(key="type", type=str, default="core")
    gridName = yamlize.Attribute(key="grid name", type=str)
    origin = yamlize.Attribute(key="origin", type=Triplet, default=None)

    def __init__(self, name=None, gridName=None, origin=None):
        """
        A Reactor-level structure like a core, or ex-core like SFP.

        Notes
        -----
        yamlize does not call an __init__ method, instead it uses __new__ and setattr this is only
        needed for when you want to make this object from a non-YAML source.
        """
        self.name = name
        self.gridName = gridName
        self.origin = origin

    @staticmethod
    def _resolveSystemType(typ: str):
        """Loop over all plugins that could be attached and determine if any tell us how to build a
        specific systems attribute.
        """
        manager = getPluginManagerOrFail()

        # Only need this to handle the case we don't find the system we expect
        seen = set()
        for options in manager.hook.defineSystemBuilders():
            for key, builder in options.items():
                # Take the first match we find. This would allow other plugins to define a new core
                # builder before finding those defined by the ReactorPlugin
                if key == typ:
                    return builder
                seen.add(key)

        raise ValueError(
            f"Could not determine an appropriate class for handling a system of type `{typ}`. "
            f"Supported types are {seen}."
        )

    def construct(self, cs, bp, reactor, loadComps=True):
        """Build a core or ex-core grid and fill it with children.

        Parameters
        ----------
        cs : :py:class:`Settings <armi.settings.Settings>`
            armi settings to apply
        bp : :py:class:`Blueprints <armi.reactor.blueprints.Blueprints>`
            armi blueprints to apply
        reactor : :py:class:`Reactor <armi.reactor.reactors.Reactor>`
            reactor to fill
        loadComps : bool, optional
            whether to fill reactor with assemblies, as defined in blueprints, or not. Is False in
            :py:class:`UniformMeshGeometryConverter
            <armi.reactor.converters.uniformMesh.UniformMeshGeometryConverter>`
            within the initNewReactor() method.

        Returns
        -------
        Composite
            A Composite object with a grid, like a Spent Fuel Pool or other ex-core structure.

        Raises
        ------
        ValueError
            input error, no grid design provided
        ValueError
            objects were added to non-existent grid locations
        """
        runLog.info(f"Constructing the `{self.name}`")

        if not bp.gridDesigns:
            raise ValueError("The input must define grids to construct a reactor, but does not. Update input.")

        gridDesign = bp.gridDesigns.get(self.gridName, None)
        # plugins decide which Composite subclass (Core, SFP, ...) backs this system type
        system = self._resolveSystemType(self.typ)(self.name)

        # Some systems may not require a prescribed grid design. Only use one if provided
        if gridDesign is not None:
            spatialGrid = gridDesign.construct()
            system.spatialGrid = spatialGrid
            system.spatialGrid.armiObject = system

        reactor.add(system)  # ensure the reactor is the parent
        # NOTE(review): `origin` defaults to None; this assumes an origin was provided in the
        # blueprints (or by the caller) — an unset origin would raise AttributeError here. Confirm.
        spatialLocator = grids.CoordinateLocation(self.origin.x, self.origin.y, self.origin.z, None)
        system.spatialLocator = spatialLocator

        if context.MPI_RANK != 0:
            # Non-primary nodes get the reactor via DistributeState.
            return None

        system = self._constructComposites(cs, bp, loadComps, system, gridDesign)
        return system

    def _constructComposites(self, cs, bp, loadComps, system, gridDesign):
        """Fill a grid with composities, if there are any to fill.

        Parameters
        ----------
        cs : Settings object.
            armi settings to apply
        bp : Blueprints object.
            armi blueprints to apply
        loadComps : bool
            whether to fill reactor with composities, as defined in blueprints, or not
        system : Composite
            The composite we are building.
        gridDesign : GridBlueprint
            The definition of the grid on the object.

        Returns
        -------
        Composite
            A Composite object with a grid, like a Spent Fuel Pool or other ex-core structure.
        """
        from armi.reactor.reactors import Core  # avoid circular import

        if loadComps and gridDesign is not None:
            self._loadComposites(cs, bp, system, gridDesign.gridContents, gridDesign.orientationBOL)
            # Core-specific post-load steps: geometry mods, material summary, settings processing
            if isinstance(system, Core):
                self._modifyGeometry(system, gridDesign)
                summarizeMaterialData(system)
                system.processLoading(cs)
        return system

    def _loadComposites(self, cs, bp, container, gridContents, orientationBOL):
        # construct each assembly/composite from blueprints and place it on the container's grid
        from armi.reactor.cores import Core

        runLog.header(f"=========== Adding Composites to {container} ===========")
        badLocations = set()
        for locationInfo, aTypeID in gridContents.items():
            # handle the hex-grid special case, where the user enters (ring, pos)
            i, j = locationInfo
            if isinstance(container, Core) and container.geomType == geometry.GeomType.HEX:
                loc = container.spatialGrid.indicesToRingPos(i, j)
            else:
                loc = locationInfo

            # correctly rotate the Composite
            if orientationBOL is None or loc not in orientationBOL:
                orientation = 0.0
            else:
                orientation = orientationBOL[loc]

            # create a new Composite to add to the grid
            newAssembly = bp.constructAssem(cs, specifier=aTypeID, orientation=orientation)

            # add the Composite to the grid
            posi = container.spatialGrid[i, j, 0]
            try:
                container.add(newAssembly, posi)
            except LookupError:
                # collect all bad locations so the user sees every problem at once
                badLocations.add(posi)

        if badLocations:
            raise ValueError(f"Attempted to add objects to non-existent locations on the grid: {badLocations}.")

        # init position history param on each assembly
        for a in container:
            loc = a.getLocation()
            if loc in a.NOT_IN_CORE:
                a.p.ringPosHist = [(loc, loc)]
            else:
                try:
                    ring, pos, _ = grids.locatorLabelToIndices(a.getLocation())
                    a.p.ringPosHist = [(ring, pos)]
                except ValueError:
                    # some ex-core structures may not have valid locator label indices
                    a.p.ringPosHist = [(a.NOT_CREATED_YET, a.NOT_CREATED_YET)]

    def _modifyGeometry(self, container, gridDesign):
        """Perform post-load geometry conversions like full core, edge assems."""
        # all cases should have no edge assemblies. They are added ephemerally when needed
        from armi.reactor.converters import geometryConverters

        runLog.header("=========== Applying Geometry Modifications ===========")
        if not container.isFullCore:
            runLog.extra("Applying non-full core modifications")
            converter = geometryConverters.EdgeAssemblyChanger()
            converter.scaleParamsRelatedToSymmetry(container)
            converter.removeEdgeAssemblies(container)

        # now update the spatial grid dimensions based on the populated children (unless specified on input)
        if not gridDesign.latticeDimensions:
            runLog.info(f"Updating spatial grid pitch data for {container.geomType} geometry")
            if container.geomType == geometry.GeomType.HEX:
                container.spatialGrid.changePitch(container[0][0].getPitch())
            elif container.geomType == geometry.GeomType.CARTESIAN:
                xw, yw = container[0][0].getPitch()
                container.spatialGrid.changePitch(xw, yw)
class Systems(yamlize.KeyedList):
    """Keyed list of all reactor-level systems (core, SFP, etc.), keyed by system name."""

    item_type = SystemBlueprint
    key_attr = SystemBlueprint.name
def summarizeMaterialData(container):
    """
    Create a summary of the material objects and source data for a reactor container.

    Parameters
    ----------
    container : Core object
        Any Core object with Blocks and Components defined.

    Returns
    -------
    list of tuple
        Sorted ``(material name, data source)`` pairs, one per unique material.
    """
    runLog.header(f"=========== Summarizing Source of Material Data for {container} ===========")
    seenNames = set()
    rows = []
    for comp in container.iterComponents():
        matName = comp.material.name
        # only report each distinct material once
        if matName in seenNames:
            continue
        seenNames.add(matName)
        rows.append((matName, comp.material.DATA_SOURCE))
    rows = sorted(rows)
    runLog.info(tabulate.tabulate(data=rows, headers=["Material Name", "Source Location"], tableFmt="armi"))
    return rows
================================================
FILE: armi/reactor/blueprints/tests/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
================================================
FILE: armi/reactor/blueprints/tests/test_assemblyBlueprints.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Assembly Blueprints."""
import unittest
from armi import settings
from armi.reactor import blueprints
# Tests that assembly blueprints validate that every per-block list
# (axial mesh points, heights, xs types, material modifications) has one
# entry per block; each "wrong*" input below breaks exactly one of them.
class TestMaterialModifications(unittest.TestCase):
# A valid two-block assembly: every per-block list has exactly two entries.
twoBlockInput_correct = r"""
nuclide flags:
U: {burn: false, xs: true}
ZR: {burn: false, xs: true}
blocks:
fuel: &block_fuel
fuel1: &component_fuel_fuel1
shape: Hexagon
material: UZr
Tinput: 600.0
Thot: 600.0
ip: 0.0
mult: 1
op: 10.0
fuel2: &component_fuel_fuel2
shape: Hexagon
material: UZr
Tinput: 600.0
Thot: 600.0
ip: 0.0
mult: 1
op: 10.0
assemblies:
fuel a: &assembly_a
specifier: IC
blocks: [*block_fuel, *block_fuel]
height: [1.0, 1.0]
axial mesh points: [1, 1]
xs types: [A, A]
"""
# Invalid: only one axial mesh point for two blocks.
twoBlockInput_wrongMeshPoints = r"""
nuclide flags:
U: {burn: false, xs: true}
ZR: {burn: false, xs: true}
blocks:
fuel: &block_fuel
fuel1: &component_fuel_fuel1
shape: Hexagon
material: UZr
Tinput: 600.0
Thot: 600.0
ip: 0.0
mult: 1
op: 10.0
fuel2: &component_fuel_fuel2
shape: Hexagon
material: UZr
Tinput: 600.0
Thot: 600.0
ip: 0.0
mult: 1
op: 10.0
assemblies:
fuel a: &assembly_a
specifier: IC
blocks: [*block_fuel, *block_fuel]
height: [1.0, 1.0]
axial mesh points: [1]
xs types: [A, A]
"""
# Invalid: only one height for two blocks.
twoBlockInput_wrongHeights = r"""
nuclide flags:
U: {burn: false, xs: true}
ZR: {burn: false, xs: true}
blocks:
fuel: &block_fuel
fuel1: &component_fuel_fuel1
shape: Hexagon
material: UZr
Tinput: 600.0
Thot: 600.0
ip: 0.0
mult: 1
op: 10.0
fuel2: &component_fuel_fuel2
shape: Hexagon
material: UZr
Tinput: 600.0
Thot: 600.0
ip: 0.0
mult: 1
op: 10.0
assemblies:
fuel a: &assembly_a
specifier: IC
blocks: [*block_fuel, *block_fuel]
height: [1.0]
axial mesh points: [1, 1]
xs types: [A, A]
"""
# Invalid: only one xs type for two blocks.
twoBlockInput_wrongXSTypes = r"""
nuclide flags:
U: {burn: false, xs: true}
ZR: {burn: false, xs: true}
blocks:
fuel: &block_fuel
fuel1: &component_fuel_fuel1
shape: Hexagon
material: UZr
Tinput: 600.0
Thot: 600.0
ip: 0.0
mult: 1
op: 10.0
fuel2: &component_fuel_fuel2
shape: Hexagon
material: UZr
Tinput: 600.0
Thot: 600.0
ip: 0.0
mult: 1
op: 10.0
assemblies:
fuel a: &assembly_a
specifier: IC
blocks: [*block_fuel, *block_fuel]
height: [1.0, 1.0]
axial mesh points: [1, 1]
xs types: [A]
"""
# Invalid: only one material-modification value for two blocks.
twoBlockInput_wrongMatMods = r"""
nuclide flags:
U: {burn: false, xs: true}
ZR: {burn: false, xs: true}
blocks:
fuel: &block_fuel
fuel1: &component_fuel_fuel1
shape: Hexagon
material: UZr
Tinput: 600.0
Thot: 600.0
ip: 0.0
mult: 1
op: 10.0
fuel2: &component_fuel_fuel2
shape: Hexagon
material: UZr
Tinput: 600.0
Thot: 600.0
ip: 0.0
mult: 1
op: 10.0
assemblies:
fuel a: &assembly_a
specifier: IC
blocks: [*block_fuel, *block_fuel]
height: [1.0, 1.0]
axial mesh points: [1, 1]
xs types: [A, A]
material modifications:
U235_wt_frac: [0.5]
"""
# Load a blueprint YAML string and return the constructed "fuel a" assembly.
def loadCustomAssembly(self, assemblyInput):
yamlString = assemblyInput
design = blueprints.Blueprints.load(yamlString)
design._prepConstruction(settings.Settings())
return design.assemblies["fuel a"]
def test_checkParamConsistency(self):
"""
Load assembly from a blueprint file.
.. test:: Create assembly from blueprint file.
:id: T_ARMI_BP_ASSEM
:tests: R_ARMI_BP_ASSEM
"""
# make sure a good example doesn't error
a = self.loadCustomAssembly(self.twoBlockInput_correct)
blockAxialMesh = a.getAxialMesh()
blockXSTypes = [a[0].p.xsType, a[1].p.xsType]
# NOTE(review): assertAlmostEqual on two lists only passes via the
# first == second short-circuit, so this is effectively an exact comparison.
self.assertAlmostEqual(blockAxialMesh, [1.0, 2.0])
self.assertEqual(blockXSTypes, ["A", "A"])
# each malformed variant must be rejected with a ValueError
with self.assertRaises(ValueError):
a = self.loadCustomAssembly(self.twoBlockInput_wrongMeshPoints)
with self.assertRaises(ValueError):
a = self.loadCustomAssembly(self.twoBlockInput_wrongHeights)
with self.assertRaises(ValueError):
a = self.loadCustomAssembly(self.twoBlockInput_wrongXSTypes)
with self.assertRaises(ValueError):
a = self.loadCustomAssembly(self.twoBlockInput_wrongMatMods)
================================================
FILE: armi/reactor/blueprints/tests/test_blockBlueprints.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for block blueprints."""
import io
import unittest
from armi import settings
from armi.reactor import blueprints
from armi.reactor.flags import Flags
from armi.reactor.tests import test_blocks
# Reference blueprint: two gridded fuel blocks sharing pin lattice "fuelgrid",
# two assemblies ("fuel" and "fuel other"), and an explicit hex lattice map.
FULL_BP = """
blocks:
fuel: &block_fuel
grid name: fuelgrid
fuel:
shape: Circle
material: UZr
Tinput: 25.0
Thot: 600.0
id: 0.0
od: 0.7
latticeIDs: [1]
clad: # same args as test_blocks (except mult)
shape: Circle
material: HT9
Tinput: 25.0
Thot: 450.0
id: .77
od: .80
latticeIDs: [1,2]
coolant:
shape: DerivedShape
material: Sodium
Tinput: 450.0
Thot: 450.0
duct:
shape: Hexagon
material: HT9
Tinput: 25.0
Thot: 450.0
ip: 16.0
mult: 1.0
op: 16.6
intercoolant:
shape: Hexagon
material: Sodium
Tinput: 450.0
Thot: 450.0
ip: duct.op
mult: 1.0
op: 16.75
other fuel: &block_fuel_other
grid name: fuelgrid
flags: fuel test depletable
fuel:
shape: Circle
material: UZr
Tinput: 25.0
Thot: 600.0
id: 0.0
od: 0.67
latticeIDs: [1]
clad:
shape: Circle
material: HT9
Tinput: 25.0
Thot: 450.0
id: .77
od: .80
latticeIDs: [1,2]
coolant:
shape: DerivedShape
material: Sodium
Tinput: 450.0
Thot: 450.0
duct:
shape: Hexagon
material: HT9
Tinput: 25.0
Thot: 450.0
ip: 16.0
mult: 1.0
op: 16.6
intercoolant:
shape: Hexagon
material: Sodium
Tinput: 450.0
Thot: 450.0
ip: duct.op
mult: 1.0
op: 16.75
assemblies:
fuel:
specifier: IC
blocks: [*block_fuel, *block_fuel_other]
height: [25.0, 25.0]
axial mesh points: [1, 1]
material modifications:
U235_wt_frac: [0.11, 0.11]
ZR_wt_frac: [0.06, 0.06]
xs types: [A, A]
fuel other:
flags: fuel test
specifier: ID
blocks: [*block_fuel, *block_fuel_other]
height: [25.0, 25.0]
axial mesh points: [1, 1]
material modifications:
U235_wt_frac: [0.11, 0.11]
ZR_wt_frac: [0.06, 0.06]
xs types: [A, A]
grids:
fuelgrid:
geom: hex_corners_up
symmetry: full
lattice map: |
- - - 1 1 1 1
- - 1 1 2 1 1
- 1 1 1 1 1 1
1 2 1 2 1 2 1
1 1 1 1 1 1
1 1 2 1 1
1 1 1 1
"""
# Variant whose lattice map contains no "2" entries even though the clad
# component declares latticeIDs [1,2]; construction should fail.
FULL_BP_ERRANT_ID = (
FULL_BP.split("lattice map:")[0]
+ """lattice map: |
- - - 1 1 1 1
- - 1 1 1 1 1
- 1 1 1 1 1 1
1 1 1 1 1 1 1
1 1 1 1 1 1
1 1 1 1 1
1 1 1 1
"""
)
# Variant whose lattice map uses latticeID "3", which no component declares;
# construction should fail.
FULL_BP_NO_COMP = (
FULL_BP.split("lattice map:")[0]
+ """lattice map: |
- - - 1 1 1 1
- - 1 1 1 1 1
- 1 1 1 1 1 1
1 3 1 1 1 3 1
1 1 1 1 1 1
1 1 1 1 1
1 1 1 1
"""
)
# Variant specifying the lattice as explicit "grid contents" (i, j) -> ID
# entries instead of an ASCII lattice map.
FULL_BP_GRID = (
FULL_BP.split("lattice map:")[0]
+ """grid contents:
? - -3
- 3
: '1'
? - -2
- 3
: '1'
? - -1
- 3
: '1'
? - 0
- 3
: '1'
? - -3
- 2
: '1'
? - -2
- 2
: '1'
? - -1
- 2
: '2'
? - 0
- 2
: '1'
? - 1
- 2
: '1'
? - -3
- 1
: '1'
? - -2
- 1
: '1'
? - -1
- 1
: '1'
? - 0
- 1
: '1'
? - 1
- 1
: '1'
? - 2
- 1
: '1'
? - -3
- 0
: '1'
? - -2
- 0
: '3'
? - -1
- 0
: '1'
? - 0
- 0
: '2'
? - 1
- 0
: '1'
? - 2
- 0
: '3'
? - 3
- 0
: '1'
? - -2
- -1
: '1'
? - -1
- -1
: '1'
? - 0
- -1
: '1'
? - 1
- -1
: '1'
? - 2
- -1
: '1'
? - 3
- -1
: '1'
? - -1
- -2
: '1'
? - 0
- -2
: '1'
? - 1
- -2
: '2'
? - 2
- -2
: '1'
? - 3
- -2
: '1'
? - 0
- -3
: '1'
? - 1
- -3
: '1'
? - 2
- -3
: '1'
? - 3
- -3
: '1'
"""
)
class TestGriddedBlock(unittest.TestCase):
    """Tests for a block that has components in a lattice."""

    def setUp(self):
        self.cs = settings.Settings()
        with io.StringIO(FULL_BP) as stream:
            self.blueprints = blueprints.Blueprints.load(stream)
            self.blueprints._prepConstruction(self.cs)

    def test_constructSpatialGrid(self):
        """Test intermediate grid construction function."""
        bDesign = self.blueprints.blockDesigns["fuel"]
        gridDesign = bDesign._getGridDesign(self.blueprints)
        self.assertEqual(gridDesign.gridContents[0, 0], "2")

    def test_getLocatorsAtLatticePositions(self):
        """Ensure extraction of specifiers results in locators."""
        bDesign = self.blueprints.blockDesigns["fuel"]
        gridDesign = bDesign._getGridDesign(self.blueprints)
        grid = gridDesign.construct()
        locators = gridDesign.getLocators(grid, ["2"])
        self.assertEqual(len(locators), 5)
        self.assertIs(grid[locators[0].getCompleteIndices()], locators[0])

    def test_blockLattice(self):
        """Make sure constructing a block with grid specifiers works as a whole.

        .. test:: Create block with blueprint file.
            :id: T_ARMI_BP_BLOCK
            :tests: R_ARMI_BP_BLOCK
        """
        aDesign = self.blueprints.assemDesigns.bySpecifier["IC"]
        a = aDesign.construct(self.cs, self.blueprints)
        fuelBlock = a.getFirstBlock(Flags.FUEL)
        fuel = fuelBlock.getComponent(Flags.FUEL)
        self.assertTrue(fuel.spatialLocator)
        seen = False
        for locator in fuel.spatialLocator:
            if locator == (1, 0, 0):
                seen = True
        self.assertTrue(seen)

    def test_componentsNotInLattice(self):
        """
        Ensure that we catch cases when a component is expected to be in the grid,
        but is not. In this case, latticeID "2" is not in the lattice.
        """
        with self.assertRaises(ValueError) as ee:
            with io.StringIO(FULL_BP_ERRANT_ID) as stream:
                self.blueprints = blueprints.Blueprints.load(stream)
                self.blueprints._prepConstruction(self.cs)
        # BUG FIX: the raised exception is exposed as ``ee.exception``; the
        # assertRaises context-manager object itself has no ``args`` attribute.
        self.assertIn(
            "Check that the component's latticeIDs align with the block's grid.",
            ee.exception.args[0],
        )

    def test_latticeNotInComponents(self):
        """
        Ensure that we catch cases when a latticeID listed in the grid is not present
        in any of the components on the block. In this case, latticeID "3" appears in
        the grid but in no component.
        """
        with self.assertRaises(ValueError) as ee:
            with io.StringIO(FULL_BP_NO_COMP) as stream:
                self.blueprints = blueprints.Blueprints.load(stream)
                self.blueprints._prepConstruction(self.cs)
        # BUG FIX: same as above — inspect ``ee.exception``, not ``ee``.
        self.assertIn(
            "All IDs in the grid must appear in at least one component.",
            ee.exception.args[0],
        )

    def test_nonLatticeComponentHasRightMult(self):
        """Make sure non-grid components in blocks with grids get the right multiplicity."""
        aDesign = self.blueprints.assemDesigns.bySpecifier["IC"]
        a = aDesign.construct(self.cs, self.blueprints)
        fuelBlock = a.getFirstBlock(Flags.FUEL)
        duct = fuelBlock.getComponent(Flags.DUCT)
        self.assertEqual(duct.getDimension("mult"), 1.0)

    def test_explicitFlags(self):
        """
        Test flags are created from blueprint file.

        .. test:: Nuc flags can define depletable objects.
            :id: T_ARMI_BP_NUC_FLAGS0
            :tests: R_ARMI_BP_NUC_FLAGS
        """
        a1 = self.blueprints.assemDesigns.bySpecifier["IC"].construct(self.cs, self.blueprints)
        b1 = a1[0]
        b2 = a1[1]
        a2 = self.blueprints.assemDesigns.bySpecifier["ID"].construct(self.cs, self.blueprints)
        self.assertTrue(b1.hasFlags(Flags.FUEL, exact=True))
        self.assertTrue(b2.hasFlags(Flags.FUEL | Flags.TEST | Flags.DEPLETABLE, exact=True))
        self.assertEqual(a1.p.flags, Flags.FUEL)
        self.assertTrue(a1.hasFlags(Flags.FUEL, exact=True))
        self.assertTrue(a2.hasFlags(Flags.FUEL | Flags.TEST, exact=True))

    def test_densConsistentCompConstructor(self):
        """Clad density from blueprints matches both its material and a programmatic build."""
        a1 = self.blueprints.assemDesigns.bySpecifier["IC"].construct(self.cs, self.blueprints)
        fuelBlock = a1[0]
        clad = fuelBlock.getComponent(Flags.CLAD)
        # now construct clad programmatically like in test_Blocks
        programmaticBlock = test_blocks.buildSimpleFuelBlock()
        programaticClad = programmaticBlock.getComponent(Flags.CLAD)
        self.assertAlmostEqual(
            clad.density(),
            clad.material.density(Tc=clad.temperatureInC),
        )
        self.assertAlmostEqual(
            clad.density(),
            programaticClad.density(),
        )
================================================
FILE: armi/reactor/blueprints/tests/test_blueprints.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the blueprints (loading input) file."""
import io
import os
import pathlib
import unittest
import yamlize
from armi import settings
from armi.nucDirectory.nuclideBases import NuclideBases
from armi.physics.neutronics.settings import CONF_XS_KERNEL
from armi.reactor import blueprints, parameters
from armi.reactor.blueprints.componentBlueprint import ComponentBlueprint
from armi.reactor.blueprints.gridBlueprint import saveToStream
from armi.reactor.blueprints.isotopicOptions import CustomIsotopics, NuclideFlags
from armi.reactor.flags import Flags
from armi.settings.fwSettings.globalSettings import CONF_INPUT_HEIGHTS_HOT
from armi.tests import TEST_ROOT
from armi.utils import directoryChangers, textProcessors
class TestBlueprints(unittest.TestCase):
    """Test that the basic functionality of faithfully receiving user input to construct ARMI data
    model objects works as expected.

    Try to ensure you test for ideas and not exact matches here, to make the tests more robust.
    """

    @classmethod
    def setUpClass(cls):
        cls.cs = settings.Settings()
        cls.directoryChanger = directoryChangers.DirectoryChanger(TEST_ROOT)
        cls.directoryChanger.open()
        y = textProcessors.resolveMarkupInclusions(pathlib.Path(os.getcwd()) / "refSmallReactor.yaml")
        cls.blueprints = blueprints.Blueprints.load(y)
        cls.blueprints._prepConstruction(cls.cs)

    @classmethod
    def tearDownClass(cls):
        cls.directoryChanger.close()

    @staticmethod
    def __stubify(latticeMap):
        """Little helper method to allow lattice maps to be compared free of whitespace."""
        return latticeMap.replace(" ", "").replace("-", "").replace("\n", "")

    def test_roundTripCompleteBP(self):
        """Test the round-trip of reading and writing blueprint files.

        .. test:: Validates the round trip of reading and writing blueprints.
            :id: T_ARMI_BP_TO_DB1
            :tests: R_ARMI_BP_TO_DB
        """
        # the correct lattice map (compared whitespace-free via __stubify)
        latticeMap = """- - SH
        - SH SH
        - SH OC SH
        SH OC OC SH
        OC IC OC SH
        OC IC IC OC SH
        IC IC IC OC SH
        IC IC PC OC SH
        IC PC IC IC OC SH
        LA IC IC IC OC
        IC IC IC IC SH
        IC LB IC IC OC
        IC IC PC IC SH
        LA IC IC OC
        IC IC IC IC SH
        IC IC IC OC
        IC IC IC PC SH"""
        latticeMap = self.__stubify(latticeMap)

        # validate some core elements from the blueprints
        self.assertEqual(self.blueprints.gridDesigns["core"].symmetry, "third periodic")
        map0 = self.__stubify(self.blueprints.gridDesigns["core"].latticeMap)
        self.assertEqual(map0, latticeMap)

        # save the blueprint to a stream; dump() is also exercised for coverage
        # even though its return value is unused here
        stream = io.StringIO()
        self.blueprints.dump(self.blueprints)
        saveToStream(stream, self.blueprints, True, True)
        stream.seek(0)
        with directoryChangers.TemporaryDirectoryChanger():
            # save the stream to a file
            filePath = "test_roundTripCompleteBP.yaml"
            with open(filePath, "w") as fout:
                fout.write(stream.read())

            # load the blueprint from that file again; using a context manager
            # closes the handle deterministically (previously it was leaked)
            with open(filePath, "r") as fin:
                bp = blueprints.Blueprints.load(fin.read())

            # re-validate some core elements from the blueprints
            self.assertEqual(bp.gridDesigns["core"].symmetry, "third periodic")
            map1 = self.__stubify(bp.gridDesigns["core"].latticeMap)
            self.assertEqual(map1, latticeMap)

    def test_nuclides(self):
        """Tests the available sets of nuclides work as expected."""
        actives = set(self.blueprints.activeNuclides)
        inerts = set(self.blueprints.inertNuclides)
        self.assertEqual(actives.union(inerts), set(self.blueprints.allNuclidesInProblem))
        self.assertEqual(actives.intersection(inerts), set())

    def test_getAssemblyTypeBySpecifier(self):
        """An assembly design can be looked up by its specifier string."""
        aDesign = self.blueprints.assemDesigns.bySpecifier["IC"]
        self.assertEqual(aDesign.name, "igniter fuel")
        self.assertEqual(aDesign.specifier, "IC")

    def test_specialIsotopicVectors(self):
        """Custom isotopic vectors only reference nuclides that are in the problem."""
        mox = self.blueprints.customIsotopics["MOX"]
        allNucsInProblem = set(self.blueprints.allNuclidesInProblem)
        for a in mox.keys():
            self.assertIn(a, allNucsInProblem)
        self.assertIn("U235", mox)
        self.assertAlmostEqual(mox["PU239"], 0.00286038)

    def test_componentDimensions(self):
        """Tests that the user can specify the dimensions of a component with arbitrary fidelity.

        .. test:: A component can be correctly created from a blueprint file.
            :id: T_ARMI_BP_COMP
            :tests: R_ARMI_BP_COMP
        """
        fuelAssem = self.blueprints.constructAssem(self.cs, name="igniter fuel")
        fuel = fuelAssem.getComponents(Flags.FUEL)[0]
        self.assertAlmostEqual(fuel.getDimension("od", cold=True), 0.86602)
        self.assertAlmostEqual(fuel.getDimension("id", cold=True), 0.0)
        self.assertAlmostEqual(fuel.getDimension("od"), 0.87763665, 4)
        self.assertAlmostEqual(fuel.getDimension("id"), 0.0)
        self.assertAlmostEqual(fuel.getDimension("mult"), 169)

    def test_traceNuclides(self):
        """Ensure that armi.reactor.blueprints.componentBlueprint.insertDepletableNuclideKeys runs.

        .. test:: Users marking components as depletable will affect number densities.
            :id: T_ARMI_BP_NUC_FLAGS1
            :tests: R_ARMI_BP_NUC_FLAGS
        """
        fuel = (
            self.blueprints.constructAssem(self.cs, "igniter fuel").getFirstBlock(Flags.FUEL).getComponent(Flags.FUEL)
        )
        self.assertIn("AM241", fuel.getNuclides())
        self.assertLess(fuel.getNumberDensity("AM241"), 1e-5)
class TestBlueprintsSchema(unittest.TestCase):
"""Test the blueprint schema checks."""
# Blueprint exercising regular blocks, a Group component, free-standing
# top-level components, component groups, and YAML merge keys.
_yamlString = r"""blocks:
fuel: &block_fuel
fuel: &component_fuel_fuel
shape: Hexagon
material: UZr
Tinput: 25.0
Thot: 600.0
ip: 0.0
mult: 1.0
op: 10.0
fuel2: &block_fuel2
group1:
shape: Group
duct:
shape: Hexagon
material: UZr
Tinput: 25.0
Thot: 600.0
ip: 9.0
mult: 1.0
op: 10.0
matrix:
shape: DerivedShape
material: Graphite
Tinput: 25.0
Thot: 600.0
components:
freefuel:
shape: Sphere
material: UZr
Tinput: 25.0
Thot: 600.0
id: 0.0
mult: 1.0
od: 4.0
freeclad:
shape: Sphere
material: HT9
Tinput: 25.0
Thot: 600.0
id: 4.0
mult: 1.0
od: 4.1
component groups:
group1:
freefuel:
mult: 1.0
freeclad:
mult: 1.0
assemblies:
fuel a: &assembly_a
specifier: IC
blocks: [*block_fuel]
height: [1.0]
axial mesh points: [1]
xs types: [A]
fuel b:
<<: *assembly_a
hotChannelFactors: Reactor
fuel c: &assembly_c
specifier: OC
blocks: [*block_fuel2]
height: [1.0]
axial mesh points: [1]
xs types: [A]
grids:
pins:
geom: cartesian
lattice map: |
2 2 2 2 2
2 1 1 1 2
2 1 3 1 2
2 3 1 1 2
2 2 2 2 2
"""
def test_noDuplicateKeysInYamlBlueprints(self):
"""
Prove that if you duplicate a section of a YAML blueprint file,
a hard error will be thrown.
"""
# loop through a few different sections, to test blueprints broadly
sections = ["blocks:", "components:", "component groups:"]
for sectionName in sections:
# modify blueprint YAML to duplicate this section
yamlString = str(self._yamlString)
i = yamlString.find(sectionName)
lenSection = yamlString[i:].find("\n\n")
section = yamlString[i : i + lenSection]
# NOTE(review): this produces prefix + section + section but drops
# everything after i+lenSection (the tail slice should presumably be
# yamlString[i:] to keep the rest of the document) — the duplicate-key
# error is still triggered, but confirm the truncation is intentional.
yamlString = yamlString[:i] + section + yamlString[i : i + lenSection]
# validate that this is now an invalid YAML blueprint
with self.assertRaises(Exception):
_design = blueprints.Blueprints.load(yamlString)
# Assembly parameters assigned in blueprints propagate to constructed assemblies.
def test_assemblyParameters(self):
cs = settings.Settings()
design = blueprints.Blueprints.load(self._yamlString)
fa = design.constructAssem(cs, name="fuel a")
fb = design.constructAssem(cs, name="fuel b")
for paramDef in fa.p.paramDefs.inCategory(parameters.Category.assignInBlueprints):
# Semantics of __iter__() and items() is different now in the parameter system. We use the parameter
# definitions (which have a global-ish sense of `assigned`ness), so we can't tell, per-object, whether
# they've been set.
self.assertEqual(paramDef.default, fa.p[paramDef.name])
self.assertIn(paramDef.name, fb.p)
self.assertEqual(fa.p.hotChannelFactors, "Default")
self.assertEqual(fb.p.hotChannelFactors, "Reactor")
def test_nuclidesMc2v2(self):
"""Tests that ZR is not expanded to its isotopics for this setting."""
cs = settings.Settings()
newSettings = {CONF_XS_KERNEL: "MC2v2"}
cs = cs.modified(newSettings=newSettings)
design = blueprints.Blueprints.load(self._yamlString)
design._prepConstruction(cs)
self.assertTrue(set({"U238", "U235", "ZR"}).issubset(set(design.allNuclidesInProblem)))
assem = design.constructAssem(cs, name="fuel a")
self.assertTrue(set(assem.getNuclides()).issubset(set(design.allNuclidesInProblem)))
def test_nuclidesMc2v3(self):
"""Tests that ZR is expanded to its isotopics for MC2v3."""
cs = settings.Settings()
newSettings = {CONF_XS_KERNEL: "MC2v3"}
cs = cs.modified(newSettings=newSettings)
design = blueprints.Blueprints.load(self._yamlString)
design._prepConstruction(cs)
# 93 and 95 are not naturally occurring.
zrNucs = {"ZR" + str(A) for A in range(90, 97)} - {"ZR93", "ZR95"}
self.assertTrue(set({"U238", "U235"} | zrNucs).issubset(set(design.allNuclidesInProblem)))
self.assertTrue(zrNucs.issubset(set(design.inertNuclides)))
assem = design.constructAssem(cs, name="fuel a")
# the assembly won't get non-naturally occurring nuclides
nb = NuclideBases()
unnaturalZr = (n.name for n in nb.elements.bySymbol["ZR"].nuclides if n.abundance == 0.0)
designNucs = set(design.allNuclidesInProblem).difference(unnaturalZr)
self.assertTrue(set(assem.getNuclides()).issubset(designNucs))
# Components merged via "mergeWith" must conserve area, mass, and number
# densities relative to the equivalent unmerged block.
def test_merge(self):
yamlString = r"""
nuclide flags:
B10: {burn: true, xs: true}
B11: {burn: true, xs: true}
DUMP1: {burn: true, xs: true}
FE: {burn: true, xs: true}
NI: {burn: true, xs: true}
C: {burn: true, xs: true}
MO: {burn: true, xs: true}
SI: {burn: true, xs: true}
CR: {burn: true, xs: true}
MN: {burn: true, xs: true}
NA: {burn: true, xs: true}
V: {burn: true, xs: true}
W: {burn: true, xs: true}
blocks:
nomerge block: &unmerged_block
A: &comp_a
shape: Circle
material: B4C
Tinput: 50.0
Thot: 500.0
id: 0.0
mult: 1
od: .5
Gap1: &comp_gap
shape: Circle
material: Void
Tinput: 50.0
Thot: 500.0
id: A.od
mult: 1
od: B.id
B: &gcomp_b
shape: Circle
material: HT9
Tinput: 20.0
Thot: 600.0
id: .5
mult: 1
od: .75
Gap2: &comp_gap2
shape: Circle
material: Void
Tinput: 50.0
Thot: 500.0
id: B.od
mult: 1
od: Clad.id
Clad: &comp_clad
shape: Circle
material: HT9
Tinput: 20.0
Thot: 700.0
id: .75
mult: 1
od: 1.0
coolant: &comp_coolant
shape: DerivedShape
material: Sodium
Tinput: 600.0
Thot: 600.0
duct: &comp_duct
shape: Hexagon
material: HT9
Tinput: 20.0
Thot: 500.0
ip: 1.2
mult: 1
op: 1.4
intercoolant: &comp_intercoolant
shape: Hexagon
material: Sodium
Tinput: 500.0
Thot: 500.0
ip: duct.op
mult: 1
op: 1.6
merge block: &merged_block
A:
<<: *comp_a
mergeWith: Clad
Gap1: *comp_gap
B:
<<: *gcomp_b
mergeWith: Clad
Gap2: *comp_gap2
Clad: *comp_clad
coolant: *comp_coolant
duct: *comp_duct
intercoolant: *comp_intercoolant
assemblies:
a: &assembly_a
specifier: IC
blocks: [*merged_block, *unmerged_block]
height: [1.0, 1.0]
axial mesh points: [1, 1]
xs types: [A, A]
"""
bp = blueprints.Blueprints.load(yamlString)
a = bp.constructAssem(settings.Settings(), name="a")
mergedBlock, unmergedBlock = a
# merged components A and B disappear into Clad...
self.assertNotIn("A", mergedBlock.getComponentNames())
self.assertNotIn("B", mergedBlock.getComponentNames())
self.assertEqual(len(mergedBlock) + 4, len(unmergedBlock))
# ...but total area, number densities, and mass are conserved
self.assertAlmostEqual(
sum(c.getArea() for c in mergedBlock),
sum(c.getArea() for c in unmergedBlock),
)
mergedNucs, unmergedNucs = (
mergedBlock.getNumberDensities(),
unmergedBlock.getNumberDensities(),
)
errorMessage = ""
for nucName in set(unmergedNucs) | set(mergedNucs):
n1, n2 = unmergedNucs[nucName], mergedNucs[nucName]
try:
self.assertAlmostEqual(n1, n2)
except AssertionError:
errorMessage += "\nnuc {} not equal. unmerged: {} merged: {}".format(nucName, n1, n2)
self.assertTrue(not errorMessage, errorMessage)
self.assertAlmostEqual(mergedBlock.getMass(), unmergedBlock.getMass())
# Bad nuclide names or non-boolean flag values must be rejected.
def test_nuclideFlags(self):
with self.assertRaises(yamlize.YamlizingError):
NuclideFlags.load("{potato: {burn: true, xs: true}}")
with self.assertRaises(yamlize.YamlizingError):
NuclideFlags.load("{U238: {burn: 12, xs: 0}}")
# Invalid custom-isotopics formats, densities, and nuclides must be rejected.
def test_customIsotopics(self):
with self.assertRaises(yamlize.YamlizingError):
CustomIsotopics.load("MOX: {input format: applesauce}")
with self.assertRaises(yamlize.YamlizingError):
CustomIsotopics.load("MOX: {input format: number densities, density: -0.1}")
with self.assertRaises(yamlize.YamlizingError):
CustomIsotopics.load("MOX: {input format: number densities, density: 1.5, FAKENUC234: 0.000286}")
# Each malformed component blueprint below must raise a YamlizingError.
def test_components(self):
bads = [
# bad shape
{
"shape": "potato",
"name": "name",
"material": "HT9",
"Tinput": 1.0,
"Thot": 1.0,
},
# bad merge
{
"shape": "circle",
"name": "name",
"material": "HT9",
"Tinput": 1.0,
"Thot": 1.0,
"mergeWith": 6,
},
# bad isotopics
{
"shape": "circle",
"name": "name",
"material": "HT9",
"Tinput": 1.0,
"Thot": 1.0,
"isotopics": 4,
},
# bad key
{
"shape": "circle",
"name": "name",
"material": "HT9",
"Tinput": 1.0,
"Thot": 1.0,
5: "od",
},
# bad linked dimension
{
"shape": "circle",
"name": "name",
"material": "HT9",
"Tinput": 1.0,
"Thot": 1.0,
"mult": "potato,mult",
},
]
for bad in bads:
with self.assertRaises(yamlize.YamlizingError):
ComponentBlueprint.load(repr(bad))
def test_cladding_invalid(self):
"""Make sure cladding input components are flagged as invalid."""
bad = {
"name": "cladding",
"shape": "Circle",
"material": "HT9",
"Tinput": 1.0,
"Thot": 1.0,
}
with self.assertRaises(yamlize.YamlizingError):
ComponentBlueprint.load(repr(bad))
def test_withoutBlocks(self):
# Some projects use a script to generate an input that has completely unique blocks,
# so the blocks: section is not needed
yamlWithoutBlocks = """
nuclide flags:
U238: {burn: true, xs: true}
U235: {burn: true, xs: true}
LFP35: {burn: true, xs: true}
U236: {burn: true, xs: true}
PU239: {burn: true, xs: true}
DUMP2: {burn: true, xs: true}
DUMP1: {burn: true, xs: true}
NP237: {burn: true, xs: true}
PU238: {burn: true, xs: true}
PU236: {burn: true, xs: true}
LFP39: {burn: true, xs: true}
PU238: {burn: true, xs: true}
LFP40: {burn: true, xs: true}
PU241: {burn: true, xs: true}
LFP38: {burn: true, xs: true}
U234: {burn: true, xs: true}
AM241: {burn: true, xs: true}
LFP41: {burn: true, xs: true}
PU242: {burn: true, xs: true}
AM243: {burn: true, xs: true}
CM244: {burn: true, xs: true}
CM242: {burn: true, xs: true}
AM242: {burn: true, xs: true}
PU240: {burn: true, xs: true}
CM245: {burn: true, xs: true}
NP238: {burn: true, xs: true}
CM243: {burn: true, xs: true}
CM246: {burn: true, xs: true}
CM247: {burn: true, xs: true}
ZR: {burn: false, xs: true}
assemblies:
fuel a: &assembly_a
specifier: FF
blocks:
- { name: fuel,
fuel: { shape: Hexagon, material: UZr, Tinput: 25.0, Thot: 600.0, ip: 0.0, mult: 1.0, op: 10.0} }
height: [1.0]
axial mesh points: [1]
xs types: [A]
fuel b:
<<: *assembly_a
specifier: IF
"""
cs = settings.Settings()
design = blueprints.Blueprints.load(yamlWithoutBlocks)
design.constructAssem(cs, name="fuel a")
fa = design.constructAssem(cs, name="fuel a")
fb = design.constructAssem(cs, name="fuel b")
for a in (fa, fb):
self.assertEqual(1, len(a))
self.assertEqual(1, len(a[0]))
def test_topLevelComponentInput(self):
"""
Make sure components defined at the top level are loaded.
Components can be loaded either within the block blueprint
or on their own outside of blocks. This checks the latter
form.
We specified a 3D component in the test input (sphere)
so that it has a height and therefore a volume
without requiring a parent.
"""
cs = settings.Settings()
design = blueprints.Blueprints.load(self._yamlString)
# The following is needed to prep customisotopics
# which is required during construction of a component
design._resolveNuclides(cs)
componentDesign = design.componentDesigns["freefuel"]
topComponent = componentDesign.construct(design, {}, cs[CONF_INPUT_HEIGHTS_HOT])
self.assertEqual(topComponent.getDimension("od", cold=True), 4.0)
self.assertGreater(topComponent.getVolume(), 0.0)
self.assertGreater(topComponent.getMass("U235"), 0.0)
def test_componentGroupInput(self):
"""Make sure component groups can be input in blueprints."""
design = blueprints.Blueprints.load(self._yamlString)
componentGroup = design.componentGroups["group1"]
self.assertEqual(componentGroup["freefuel"].name, "freefuel")
self.assertEqual(componentGroup["freefuel"].mult, 1.0)
================================================
FILE: armi/reactor/blueprints/tests/test_componentBlueprint.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing componentBlueprint."""
import inspect
import unittest
from armi import settings
from armi.reactor import blueprints
from armi.reactor.flags import Flags
# Tests constructing components/assemblies from blueprints with varying
# materials, custom isotopics, and flags.
class TestComponentBlueprint(unittest.TestCase):
# Template blueprint; {material}, {isotopics}, and {flags} are filled in via
# str.format by the individual tests.
componentString = r"""
blocks:
block: &block
component:
flags: {flags}
shape: Hexagon
material: {material} # This is being used to format a string to allow for different materials to be added
{isotopics} # This is being used to format a string to allow for different isotopics to be added
Tinput: 25.0
Thot: 600.0
ip: 0.0
mult: 169.0
op: 0.86602
assemblies:
assembly: &assembly_a
specifier: IC
blocks: [*block]
height: [1.0]
axial mesh points: [1]
xs types: [A]
"""
# Constructing an assembly whose nuclide flags request burnable nuclides
# without the full burn chain (per the method name) must raise ValueError.
def test_compInitIncompleteBurnChain(self):
nuclideFlagsFuelWithBurn = (
inspect.cleandoc(
r"""
nuclide flags:
U238: {burn: true, xs: true}
U235: {burn: true, xs: true}
ZR: {burn: false, xs: true}
"""
)
+ "\n"
)
bp = blueprints.Blueprints.load(
nuclideFlagsFuelWithBurn + self.componentString.format(material="UZr", isotopics="", flags="")
)
cs = settings.Settings()
with self.assertRaises(ValueError):
bp.constructAssem(cs, "assembly")
# A Custom-material component with a B4C custom-isotopic vector must
# construct without error.
def test_compInitControlCustomIso(self):
nuclideFlags = (
inspect.cleandoc(
"""
nuclide flags:
U234: {burn: true, xs: true}
U235: {burn: true, xs: true}
U238: {burn: true, xs: true}
B10: {burn: true, xs: true}
B11: {burn: true, xs: true}
C: {burn: true, xs: true}
DUMP1: {burn: true, xs: true}
custom isotopics:
B4C:
input format: number densities
B10: 1.0
B11: 1.0
C: 1.0
"""
)
+ "\n"
)
bp = blueprints.Blueprints.load(
nuclideFlags + self.componentString.format(material="Custom", isotopics="isotopics: B4C", flags="")
)
cs = settings.Settings()
_ = bp.constructAssem(cs, "assembly")
def test_autoDepletable(self):
    """Nuclide flags and custom isotopics drive a component's nuclides and flags.

    Without explicit flags, a component containing depletable nuclides gets
    the DEPLETABLE flag automatically; explicitly supplied flags suppress it.
    """
    nuclideFlags = (
        inspect.cleandoc(
            """
            nuclide flags:
              U234: {burn: true, xs: true}
              U235: {burn: true, xs: true}
              U238: {burn: true, xs: true}
              B10: {burn: true, xs: true}
              B11: {burn: true, xs: true}
              C: {burn: true, xs: true}
              DUMP1: {burn: true, xs: true}
            custom isotopics:
              B4C:
                input format: number densities
                B10: 1.0
                B11: 1.0
                C: 1.0
            """
        )
        + "\n"
    )
    bp = blueprints.Blueprints.load(
        nuclideFlags + self.componentString.format(material="Custom", isotopics="isotopics: B4C", flags="")
    )
    cs = settings.Settings()
    a = bp.constructAssem(cs, "assembly")
    expectedNuclides = ["B10", "B11", "C", "DUMP1"]
    # BUG FIX: this list previously contained "U325" — a nonexistent nuclide
    # (typo for U235) — which made that assertNotIn check trivially true.
    unexpectedNuclides = ["U234", "U235", "U238"]
    for nuc in expectedNuclides:
        self.assertIn(nuc, a[0][0].getNuclides())
    for nuc in unexpectedNuclides:
        self.assertNotIn(nuc, a[0][0].getNuclides())
    c = a[0][0]
    # Since we didn't supply flags, we should get the DEPLETABLE flag added
    # automatically, since this one has depletable nuclides
    self.assertEqual(c.p.flags, Flags.DEPLETABLE)
    # More robust test, but worse unittest.py output when it fails
    self.assertTrue(c.hasFlags(Flags.DEPLETABLE))

    # repeat the process with some flags set explicitly
    bp = blueprints.Blueprints.load(
        nuclideFlags + self.componentString.format(material="Custom", isotopics="isotopics: B4C", flags="fuel test")
    )
    cs = settings.Settings()
    a = bp.constructAssem(cs, "assembly")
    c = a[0][0]
    # Since we supplied flags, we should NOT get the DEPLETABLE flag added
    self.assertEqual(c.p.flags, Flags.FUEL | Flags.TEST)
    # More robust test, but worse unittest.py output when it fails
    self.assertTrue(c.hasFlags(Flags.FUEL | Flags.TEST))
    def test_compInitAmericiumCustomIso(self):
        """Build a Custom component from a pure-AM241 custom isotopic.

        The AM241 starting composition should pull its transmutation/decay
        chain (AM/CM/PU/NP/U isotopes and lumped fission products) into the
        component's nuclide list, while nuclides that are flagged in
        ``nuclide flags`` but unreachable from AM241 (B, W, C, S, P) stay out.
        """
        nuclideFlags = (
            inspect.cleandoc(
                r"""
                nuclide flags:
                    CM242: {burn: true, xs: true}
                    PU241: {burn: true, xs: true}
                    AM242G: {burn: true, xs: true}
                    AM242M: {burn: true, xs: true}
                    AM241: {burn: true, xs: true}
                    LFP41: {burn: true, xs: true}
                    PU240: {burn: true, xs: true}
                    AM243: {burn: true, xs: true}
                    NP238: {burn: true, xs: true}
                    PU242: {burn: true, xs: true}
                    CM243: {burn: true, xs: true}
                    PU238: {burn: true, xs: true}
                    DUMP2: {burn: true, xs: true}
                    DUMP1: {burn: true, xs: true}
                    U238: {burn: true, xs: true}
                    CM244: {burn: true, xs: true}
                    LFP40: {burn: true, xs: true}
                    U236: {burn: true, xs: true}
                    PU236: {burn: true, xs: true}
                    U234: {burn: true, xs: true}
                    CM245: {burn: true, xs: true}
                    PU239: {burn: true, xs: true}
                    NP237: {burn: true, xs: true}
                    U235: {burn: true, xs: true}
                    LFP39: {burn: true, xs: true}
                    LFP35: {burn: true, xs: true}
                    LFP38: {burn: true, xs: true}
                    CM246: {burn: true, xs: true}
                    CM247: {burn: true, xs: true}
                    B10: {burn: true, xs: true}
                    B11: {burn: true, xs: true}
                    W186: {burn: true, xs: true}
                    C: {burn: true, xs: true}
                    S: {burn: true, xs: true}
                    P: {burn: true, xs: true}
                custom isotopics:
                    AM:
                        input format: number densities
                        AM241: 1.0
                """
            )
            + "\n"
        )
        bp = blueprints.Blueprints.load(
            nuclideFlags + self.componentString.format(material="Custom", isotopics="isotopics: AM", flags="")
        )
        cs = settings.Settings()
        a = bp.constructAssem(cs, "assembly")
        # The full actinide chain reachable from AM241, plus lumped fission products.
        expectedNuclides = [
            "AM241",
            "U238",
            "AM243",
            "AM242M",
            "NP237",
            "NP238",
            "U234",
            "U235",
            "LFP38",
            "LFP39",
            "PU239",
            "PU238",
            "LFP35",
            "U236",
            "CM247",
            "CM246",
            "CM245",
            "CM244",
            "PU240",
            "PU241",
            "PU242",
            "PU236",
            "CM243",
            "CM242",
            "DUMP2",
            "LFP41",
            "LFP40",
        ]
        # Flagged nuclides that have no path from AM241 must not appear.
        unexpectedNuclides = ["B10", "B11", "W186", "C", "S", "P"]
        for nuc in expectedNuclides:
            self.assertIn(nuc, a[0][0].getNuclides())
        for nuc in unexpectedNuclides:
            self.assertNotIn(nuc, a[0][0].getNuclides())
    def test_compInitThoriumBurnCustomIso(self):
        """Build a burnable thorium component from a pure-TH232 custom isotopic.

        With ``burn: true`` on TH232, the component should also carry the
        downstream chain members (PA isotopes, dump and lumped-fission-product
        nuclides) even though only TH232 has a nonzero input density.
        """
        nuclideFlags = (
            inspect.cleandoc(
                r"""
                nuclide flags:
                    TH232: {burn: true, xs: true}
                    PA233: {burn: true, xs: true}
                    PA231: {burn: true, xs: true}
                    U232: {burn: true, xs: true}
                    U233: {burn: true, xs: true}
                    CM242: {burn: true, xs: true}
                    PU241: {burn: true, xs: true}
                    AM242G: {burn: true, xs: true}
                    AM242M: {burn: true, xs: true}
                    AM241: {burn: true, xs: true}
                    LFP41: {burn: true, xs: true}
                    PU240: {burn: true, xs: true}
                    AM243: {burn: true, xs: true}
                    NP238: {burn: true, xs: true}
                    PU242: {burn: true, xs: true}
                    CM243: {burn: true, xs: true}
                    PU238: {burn: true, xs: true}
                    DUMP2: {burn: true, xs: true}
                    DUMP1: {burn: true, xs: true}
                    U238: {burn: true, xs: true}
                    CM244: {burn: true, xs: true}
                    LFP40: {burn: true, xs: true}
                    U236: {burn: true, xs: true}
                    PU236: {burn: true, xs: true}
                    U234: {burn: true, xs: true}
                    CM245: {burn: true, xs: true}
                    PU239: {burn: true, xs: true}
                    NP237: {burn: true, xs: true}
                    U235: {burn: true, xs: true}
                    LFP39: {burn: true, xs: true}
                    LFP35: {burn: true, xs: true}
                    LFP38: {burn: true, xs: true}
                    CM246: {burn: true, xs: true}
                    CM247: {burn: true, xs: true}
                custom isotopics:
                    Thorium:
                        input format: number densities
                        TH232: 1.0
                """
            )
            + "\n"
        )
        bp = blueprints.Blueprints.load(
            nuclideFlags + self.componentString.format(material="Custom", isotopics="isotopics: Thorium", flags="")
        )
        cs = settings.Settings()
        a = bp.constructAssem(cs, "assembly")
        # Spot-check a few chain members rather than the whole actinide chain.
        expectedNuclides = ["TH232", "PA233", "PA231", "DUMP2", "LFP35"]
        for nuc in expectedNuclides:
            self.assertIn(nuc, a[0][0].getNuclides())
    def test_compInitThoriumNoBurnCustomIso(self):
        """Build a non-burnable thorium component from a pure-TH232 custom isotopic.

        With ``burn: false``, no transmutation chain is attached; only TH232
        itself needs to be present in the component.
        """
        nuclideFlags = (
            inspect.cleandoc(
                r"""
                nuclide flags:
                    TH232: {burn: false, xs: true}
                custom isotopics:
                    Thorium:
                        input format: number densities
                        TH232: 1.0
                """
            )
            + "\n"
        )
        bp = blueprints.Blueprints.load(
            nuclideFlags + self.componentString.format(material="Custom", isotopics="isotopics: Thorium", flags="")
        )
        cs = settings.Settings()
        a = bp.constructAssem(cs, "assembly")
        expectedNuclides = ["TH232"]
        for nuc in expectedNuclides:
            self.assertIn(nuc, a[0][0].getNuclides())
================================================
FILE: armi/reactor/blueprints/tests/test_customIsotopics.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit test custom isotopics."""
import unittest
from logging import DEBUG
import numpy as np
import yamlize
from armi import runLog, settings
from armi.materials import Fluid, Sodium
from armi.physics.neutronics.settings import (
CONF_MCNP_LIB_BASE,
CONF_NEUTRONICS_KERNEL,
CONF_XS_KERNEL,
)
from armi.reactor import blueprints
from armi.reactor.blueprints import isotopicOptions
from armi.reactor.flags import Flags
from armi.tests import mockRunLogs
from armi.utils.customExceptions import InputError
from armi.utils.directoryChangers import TemporaryDirectoryChanger
class TestCustomIsotopics(unittest.TestCase):
    """Tests constructing assemblies whose components use `custom isotopics` blueprints.

    The YAML below is split into a shared preamble (nuclide flags + custom
    isotopics definitions) and two blocks/assemblies sections: one that is
    expected to build cleanly and one engineered to fail.
    """

    # Shared nuclide flags and every custom-isotopic definition used by the tests.
    yamlPreamble = r"""
nuclide flags:
    U238: {burn: true, xs: true}
    U235: {burn: true, xs: true}
    U234: {burn: true, xs: true}
    ZR: {burn: false, xs: true}
    AL: {burn: false, xs: true}
    FE: {burn: false, xs: true}
    C: {burn: false, xs: true}
    NA: {burn: false, xs: true}
    DUMP2: {burn: true, xs: true}
    DUMP1: {burn: true, xs: true}
    LFP35: {burn: true, xs: true}
    PU239: {burn: true, xs: true}
    NP237: {burn: true, xs: true}
    LFP38: {burn: true, xs: true}
    LFP39: {burn: true, xs: true}
    PU240: {burn: true, xs: true}
    PU236: {burn: true, xs: true}
    PU238: {burn: true, xs: true}
    U236: {burn: true, xs: true}
    LFP40: {burn: true, xs: true}
    PU241: {burn: true, xs: true}
    AM241: {burn: true, xs: true}
    LFP41: {burn: true, xs: true}
    PU242: {burn: true, xs: true}
    AM243: {burn: true, xs: true}
    CM244: {burn: true, xs: true}
    CM242: {burn: true, xs: true}
    AM242: {burn: true, xs: true}
    CM245: {burn: true, xs: true}
    NP238: {burn: true, xs: true}
    CM243: {burn: true, xs: true}
    CM246: {burn: true, xs: true}
    CM247: {burn: true, xs: true}
    NI: {burn: true, xs: true}
    W: {burn: true, xs: true, expandTo: ["W182", "W183", "W184", "W186"]}
    MN: {burn: true, xs: true}
    CR: {burn: true, xs: true}
    V: {burn: true, xs: true}
    SI: {burn: true, xs: true}
    MO: {burn: true, xs: true}
custom isotopics:
    uranium isotopic mass fractions:
        input format: mass fractions
        U238: 0.992742
        U235: 0.007204
        U234: 0.000054
        density: 19.1
    uranium isotopic number fractions:
        input format: number fractions
        U238: 0.992650
        U235: 0.007295
        U234: 0.000055
        density: 19.1
    uranium isotopic number densities: &u_isotopics
        input format: number densities
        U234: 2.6539102e-06
        U235: 3.5254048e-04
        U238: 4.7967943e-02
    bad uranium isotopic mass fractions:
        input format: mass fractions
        U238: 0.992742
        U235: 0.007204
        U234: 0.000054
        density: 0
    negative uranium isotopic mass fractions:
        input format: mass fractions
        U238: 0.992742
        U235: 0.007204
        U234: 0.000054
        density: -1
    linked uranium number densities: *u_isotopics
    steel:
        input format: mass fractions
        FE: 0.7
        C: 0.3
        density: 7.0
    sodium custom isotopics:
        input format: mass fractions
        NA: 1
        density: 666
"""

    # Blocks/assemblies expected to construct successfully.
    yamlGoodBlocks = r"""
blocks:
    uzr fuel: &block_0
        fuel: &basic_fuel
            shape: Hexagon
            material: UZr
            Tinput: 25.0
            Thot: 600.0
            ip: 0.0
            mult: 1.0
            op: 10.0
        clad:
            shape: Circle
            material: HT9
            Tinput: 25.0
            Thot: 600.0
            id: 0.0
            mult: 1.0
            od: 10.0
        sodium1:
            shape: Circle
            material: Sodium
            Tinput: 100
            Thot: 600
            id: 0
            mult: 1
            od: 1
        sodium2:
            shape: Circle
            material: Sodium
            isotopics: sodium custom isotopics
            Tinput: 100
            Thot: 600
            id: 0
            mult: 1
            od: 1
    uranium fuel from isotopic mass fractions : &block_1
        fuel:
            <<: *basic_fuel
            material: Custom
            isotopics: uranium isotopic mass fractions
    wrong material: &block_2
        fuel:
            <<: *basic_fuel
            isotopics: uranium isotopic mass fractions
    uranium fuel from number fractions: &block_3
        fuel:
            <<: *basic_fuel
            material: Custom
            isotopics: uranium isotopic number fractions
    uranium fuel from number densities: &block_4
        fuel:
            <<: *basic_fuel
            material: Custom
            isotopics: uranium isotopic number densities
    uranium fuel from nd link: &block_5
        fuel:
            <<: *basic_fuel
            material: Custom
            isotopics: linked uranium number densities
    fuel with no modifications: &block_6 # after a custom density has been set
        fuel:
            <<: *basic_fuel
    overspecified fuel: &block_7
        fuel:
            <<: *basic_fuel
            material: UraniumOxide
            isotopics: uranium isotopic number densities
    density set via number density: &block_8
        fuel:
            <<: *basic_fuel
            isotopics: uranium isotopic number densities
    steel: &block_9
        clad:
            shape: Hexagon
            material: Custom
            isotopics: steel
            Tinput: 100
            Thot: 600.0
            ip: 0.0
            mult: 169.0
            op: 0.86602
assemblies:
    fuel a: &assembly_a
        specifier: IC
        blocks: [*block_0, *block_1, *block_2, *block_3, *block_4, *block_5, *block_6, *block_7, *block_8, *block_9]
        height: [10, 10, 10, 10, 10, 10, 10, 10, 10, 10]
        axial mesh points: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
        xs types: [A, A, A, A, A, A, A, A, A, A]
        material modifications:
            TD_frac: ["", "", "", "", "", "", "", 0.1, "", ""]
"""

    # Blocks/assemblies engineered to fail during construction (Void with
    # isotopics, zero TD_frac, zero/negative custom densities).
    yamlBadBlocks = r"""
blocks:
    uzr fuel: &block_0
        fuel: &basic_fuel
            shape: Hexagon
            material: UZr
            Tinput: 100
            Thot: 600.0
            ip: 0.0
            mult: 1.0
            op: 10.0
        clad:
            shape: Circle
            material: HT9
            Tinput: 100
            Thot: 600.0
            id: 0.0
            mult: 1.0
            od: 10.0
    custom void: &block_1
        fuel:
            <<: *basic_fuel
            material: Void
            isotopics: uranium isotopic number densities
    steel: &block_2
        clad:
            shape: Hexagon
            material: Custom
            isotopics: steel
            Tinput: 100
            Thot: 600.0
            ip: 0.0
            mult: 169.0
            op: 0.86602
    no density uo2: &block_3
        fuel:
            <<: *basic_fuel
            material: UraniumOxide
            isotopics: uranium isotopic number densities
    no density uo2: &block_4
        fuel:
            <<: *basic_fuel
            material: UraniumOxide
            isotopics: bad uranium isotopic mass fractions
    no density uo2: &block_5
        fuel:
            <<: *basic_fuel
            material: UraniumOxide
            isotopics: bad uranium isotopic mass fractions
assemblies:
    fuel a: &assembly_a
        specifier: IC
        blocks: [*block_0, *block_1, *block_2]
        height: [10, 10, 10]
        axial mesh points: [1, 1, 1]
        xs types: [A, A, A]
        material modifications:
            TD_frac: ["", "", ""]
    fuel b: &assembly_b
        specifier: IC
        blocks: [*block_0, *block_3, *block_2]
        height: [10, 10, 10]
        axial mesh points: [1, 1, 1]
        xs types: [A, A, A]
        material modifications:
            TD_frac: ["", "0.0", ""] # set density to 0 to cause error in custom density
    fuel c: &assembly_c
        specifier: IC
        blocks: [*block_0, *block_4, *block_2]
        height: [10, 10, 10]
        axial mesh points: [1, 1, 1]
        xs types: [A, A, A]
    fuel d: &assembly_d
        specifier: IC
        blocks: [*block_0, *block_5, *block_2]
        height: [10, 10, 10]
        axial mesh points: [1, 1, 1]
        xs types: [A, A, A]
"""

    # this yaml is supposed to successfully build
    yamlString = yamlPreamble + yamlGoodBlocks

    # This yaml is designed to raise an error when built
    yamlStringWithError = yamlPreamble + yamlBadBlocks
    """:meta hide-value:"""
    @classmethod
    def setUpClass(cls):
        """Construct one shared assembly from the good YAML for all tests in this class."""
        cs = settings.Settings()
        cs = cs.modified(
            newSettings={
                CONF_XS_KERNEL: "MC2v2",
                # Cold input heights: thermal-expansion factors apply during construction.
                "inputHeightsConsideredHot": False,
            }
        )
        cls.bp = blueprints.Blueprints.load(cls.yamlString)
        cls.a = cls.bp.constructAssem(cs, name="fuel a")
        cls.numUZrNuclides = 29  # Number of nuclides defined `nuclide flags`
        cls.numCustomNuclides = 28  # Number of nuclides defined in `nuclide flags` without Zr
    def test_unmodified(self):
        """Ensure that unmodified components have the correct isotopics."""
        fuel = self.a[0].getComponent(Flags.FUEL)
        self.assertEqual(self.numUZrNuclides, len(fuel.p.numberDensities))
        # NOTE: This density does not come from the material but is based on number densities.
        self.assertAlmostEqual(15.5, fuel.density(), 0)  # i.e. it is not 19.1
def test_massFractionsAreApplied(self):
"""Ensure that the custom isotopics can be specified via mass fractions.
.. test:: Test that custom isotopics can be specified via mass fractions.
:id: T_ARMI_MAT_USER_INPUT3
:tests: R_ARMI_MAT_USER_INPUT
"""
fuel1 = self.a[1].getComponent(Flags.FUEL)
fuel2 = self.a[2].getComponent(Flags.FUEL)
self.assertEqual(self.numCustomNuclides, len(fuel1.p.numberDensities))
self.assertAlmostEqual(19.1, fuel1.density())
# keys are same
keys1 = set([i for i, v in enumerate(fuel1.p.numberDensities) if v == 0.0])
keys2 = set([i for i, v in enumerate(fuel2.p.numberDensities) if v == 0.0])
self.assertEqual(keys1, keys2)
    def test_densAppliedToNonCustomMats(self):
        """Ensure that a density can be set in custom isotopics for components using library materials."""
        # The template block
        fuel0 = self.a[0].getComponent(Flags.FUEL)
        # The block with custom density but not the 'Custom' material
        fuel2 = self.a[2].getComponent(Flags.FUEL)
        # A block like the template block, but made after the custom block
        fuel6 = self.a[6].getComponent(Flags.FUEL)
        # A block with custom density set via number density
        fuel8 = self.a[8].getComponent(Flags.FUEL)
        dLL = fuel2.material.linearExpansionFactor(Tc=600, T0=25)
        # the exponent here is 3 because inputHeightsConsideredHot = False.
        # if inputHeightsConsideredHot were True, then we would use a factor of 2 instead
        f = 1 / ((1 + dLL) ** 3)
        # Check that the density is set correctly on the custom density block,
        # and that it is not the same as the original
        self.assertAlmostEqual(19.1 * f, fuel2.density())
        self.assertNotAlmostEqual(fuel0.density(), fuel2.density(), places=2)
        # Check that the custom density block has the correct material
        self.assertEqual("UZr", fuel2.material.name)
        # Check that the block with only number densities set has a new density
        self.assertAlmostEqual(19.1 * f, fuel8.density())
        # original material density should not be changed after setting a custom density component,
        # so a new block without custom isotopics and density should have the same density as the original
        self.assertAlmostEqual(fuel6.density(), fuel0.density())
        self.assertEqual(fuel6.material.name, fuel0.material.name)
        self.assertEqual("UZr", fuel0.material.name)
    def test_densAppliedToNonCustomMatsFluid(self):
        """
        Ensure that a density can be set in custom isotopics for components using library materials, specifically in the
        case of a fluid component. In this case, inputHeightsConsideredHot does not matter, and the material has a zero
        dLL value.
        """
        # The template block
        sodium1 = self.a[0].getComponentByName("sodium1")
        sodium2 = self.a[0].getComponentByName("sodium2")
        self.assertEqual(sodium1.material.name, "Sodium")
        self.assertEqual(sodium2.material.name, "Sodium")
        self.assertTrue(isinstance(sodium1.material, Fluid))
        self.assertTrue(isinstance(sodium2.material, Fluid))
        # Only sodium2 carries the custom isotopics (density 666) from the blueprint.
        self.assertEqual(sodium1.p.customIsotopicsName, "")
        self.assertEqual(sodium2.p.customIsotopicsName, "sodium custom isotopics")
        # show that, even though the two components have the same material class
        # and the same temperatures, their densities are different
        self.assertNotEqual(sodium1.density(), sodium2.density())
        # show that sodium1 has a density from the material class, while sodium2
        # has a density from the blueprint and adjusted from Tinput -> Thot
        s = Sodium()
        self.assertAlmostEqual(sodium1.density(), s.density(Tc=600))
        self.assertAlmostEqual(sodium2.density(), s.density(Tc=600) * (666 / s.density(Tc=100)))
    def test_customDensityLogsAndErrors(self):
        """Test that the right warning messages and errors are emitted when applying custom densities."""
        # Check for warnings when specifying both TD_frac and custom isotopics
        with mockRunLogs.BufferLog() as mockLog:
            # we should start with a clean slate
            self.assertEqual("", mockLog.getStdout())
            runLog.LOG.startLog("test_customDensityLogsAndErrors")
            runLog.LOG.setVerbosity(DEBUG)
            # rebuild the input to capture the logs
            cs = settings.Settings()
            cs = cs.modified(newSettings={CONF_XS_KERNEL: "MC2v2"})
            bp = blueprints.Blueprints.load(self.yamlString)
            bp.constructAssem(cs, name="fuel a")
            # Check for log messages
            streamVal = mockLog.getStdout()
            self.assertIn("and a custom isotopic with density", streamVal, msg=streamVal)
            self.assertIn("Custom isotopics and material modifications have both", streamVal, msg=streamVal)
            self.assertIn("A custom material density was specified", streamVal, msg=streamVal)
            self.assertIn(
                "A custom isotopic with associated density has been specified for non-`Custom`",
                streamVal,
                msg=streamVal,
            )
        # Check that assigning a custom density to the Void material fails
        cs = settings.Settings()
        cs = cs.modified(newSettings={CONF_XS_KERNEL: "MC2v2"})
        bp = blueprints.Blueprints.load(self.yamlStringWithError)
        # Ensure we have some Void
        self.assertEqual(bp.blockDesigns["custom void"]["fuel"].material, "Void")
        # Can't have stuff in Void
        with self.assertRaises(ValueError):
            bp.constructAssem(cs, name="fuel a")
        # Try making a 0 density non-Void material by setting TD_frac to 0.0
        with self.assertRaises(ValueError):
            bp.constructAssem(cs, name="fuel b")
        # Try making a material with mass fractions with a density of 0
        with self.assertRaises(ValueError):
            bp.constructAssem(cs, name="fuel c")
        # Try making a material with mass fractions with a negative density
        with self.assertRaises(ValueError):
            bp.constructAssem(cs, name="fuel d")
def test_numberFractions(self):
"""Ensure that the custom isotopics can be specified via number fractions.
.. test:: Test that custom isotopics can be specified via number fractions.
:id: T_ARMI_MAT_USER_INPUT4
:tests: R_ARMI_MAT_USER_INPUT
"""
# fuel blocks 2 and 4 should be the same, one is defined as mass fractions, and the other as number fractions
fuel2 = self.a[1].getComponent(Flags.FUEL)
fuel4 = self.a[3].getComponent(Flags.FUEL)
self.assertAlmostEqual(fuel2.density(), fuel4.density())
keys2 = set([i for i, v in enumerate(fuel2.p.numberDensities) if v == 0.0])
keys4 = set([i for i, v in enumerate(fuel4.p.numberDensities) if v == 0.0])
self.assertEqual(keys2, keys4)
np.testing.assert_almost_equal(fuel2.p.numberDensities, fuel4.p.numberDensities)
    def test_numberDensities(self):
        """Ensure that the custom isotopics can be specified via number densities.

        .. test:: Test that custom isotopics can be specified via number fractions.
            :id: T_ARMI_MAT_USER_INPUT5
            :tests: R_ARMI_MAT_USER_INPUT
        """
        # fuel blocks 2 and 5 should be the same, one is defined as mass fractions, and the other as number densities
        fuel2 = self.a[1].getComponent(Flags.FUEL)
        fuel5 = self.a[4].getComponent(Flags.FUEL)
        self.assertAlmostEqual(fuel2.density(), fuel5.density())
        # Compare densities nuclide-by-nuclide; the two components may store
        # their nuclides in different orders, hence the np.where lookup.
        for i, nuc in enumerate(fuel2.p.nuclides):
            self.assertIn(nuc, fuel5.p.nuclides)
            j = np.where(fuel5.p.nuclides == nuc)[0][0]
            self.assertAlmostEqual(fuel2.p.numberDensities[i], fuel5.p.numberDensities[j])
    def test_numberDensitiesAnchor(self):
        """A YAML anchor/alias (*u_isotopics) yields the same isotopics as the anchored entry."""
        fuel4 = self.a[4].getComponent(Flags.FUEL)
        fuel5 = self.a[5].getComponent(Flags.FUEL)
        self.assertAlmostEqual(fuel4.density(), fuel5.density())
        np.testing.assert_almost_equal(fuel4.p.numberDensities, fuel5.p.numberDensities)
    def test_expandedNatural(self):
        """With the MC2v3 kernel, natural elements are expanded into natural isotopics."""
        cs = settings.Settings()
        cs = cs.modified(newSettings={CONF_XS_KERNEL: "MC2v3"})
        bp = blueprints.Blueprints.load(self.yamlString)
        a = bp.constructAssem(cs, name="fuel a")
        b = a[-1]
        c = b.getComponent(Flags.CLAD)
        self.assertIn("FE56", c.getNumberDensities())  # natural isotopic
        self.assertNotIn("FE51", c.getNumberDensities())  # un-natural
        self.assertNotIn("FE", c.getNumberDensities())
    def test_infDiluteAreOnlyNatural(self):
        """Make sure nuclides specified as In-Problem but not actually in any material are only natural isotopics."""
        self.assertIn("AL27", self.bp.allNuclidesInProblem)
        self.assertNotIn("AL26", self.bp.allNuclidesInProblem)
    def test_getDefaultNuclideFlags(self):
        """Spot-check the default nuclide-flag dictionary."""
        # This is a bit of a silly test. We are checking what is essentially a hard coded dictionary
        nucDict = isotopicOptions.getDefaultNuclideFlags()
        # Burnable defaults: dump nuclides, heavy metals, lumped fission products.
        entry = {"burn": True, "xs": True, "expandTo": None}
        self.assertEqual(nucDict["DUMP1"], entry)
        self.assertEqual(nucDict["CM244"], entry)
        self.assertEqual(nucDict["LFP38"], entry)
        # Non-burnable defaults: structural nuclides.
        entry = {"burn": False, "xs": True, "expandTo": None}
        self.assertEqual(nucDict["B10"], entry)
        self.assertEqual(nucDict["NI"], entry)
class TestCustomIsotopicsErrors(unittest.TestCase):
    """Invalid custom isotopic inputs must fail with yamlize errors at load time."""

    def test_densityMustBePositive(self):
        # Negative density in a mass-fraction specification is rejected.
        with self.assertRaises(yamlize.YamlizingError):
            _ = isotopicOptions.CustomIsotopic.load(
                r"""
                name: atom repellent
                input format: mass fractions
                U234: 2.6539102e-06
                U235: 3.5254048e-04
                U238: 4.7967943e-02
                density: -0.0001
                """
            )

    def test_nonConformantElementName(self):
        # Nuclide/element names must be upper case ("Au" rather than "AU").
        with self.assertRaises(yamlize.YamlizingError):
            _ = isotopicOptions.CustomIsotopic.load(
                r"""
                name: non-upper case
                input format: number densities
                Au: 0.01
                """
            )

    def test_numberDensitiesCannotSpecifyDensity(self):
        # Number densities already fix the density; an explicit density over-specifies.
        with self.assertRaises(yamlize.YamlizingError):
            _ = isotopicOptions.CustomIsotopic.load(
                r"""
                name: over-specified isotopics
                input format: number densities
                AU: 0.01
                density: 10.0
                """
            )
class TestIsotopicsMissingData(unittest.TestCase):
    """Custom materials must define isotopics."""

    # A Custom-material component with the `isotopics:` entry commented out.
    yamlBlocksBadIsotopics = r"""
blocks:
    steel: &block_0
        clad:
            shape: Hexagon
            material: Custom
            #isotopics: sodium custom isotopics
            Tinput: 25.0
            Thot: 600.0
            ip: 0.0
            mult: 169.0
            op: 0.86602
assemblies:
    fuel a: &assembly_a
        specifier: IC
        blocks: [*block_0]
        height: [10]
        axial mesh points: [1]
        xs types: [A]
"""

    def test_customComponentsWithoutComposition(self):
        """Constructing a Custom component without isotopics raises an input error."""
        cs = settings.Settings()
        bp = blueprints.Blueprints.load(self.yamlBlocksBadIsotopics)
        with self.assertRaises(IOError):
            _a = bp.constructAssem(cs, name="fuel a")
class TestNuclideFlagsExpansion(unittest.TestCase):
    """Tests for expanding natural elements via `expandTo` and code/library rules."""

    # FE is restricted to FE54 only via `expandTo`; other elements use defaults.
    yamlString = r"""
nuclide flags:
    U238: {burn: false, xs: true}
    U235: {burn: false, xs: true}
    ZR: {burn: false, xs: true}
    AL: {burn: false, xs: true}
    FE: {burn: false, xs: true, expandTo: ["FE54"]}
    C: {burn: false, xs: true}
    NI: {burn: true, xs: true}
    MN: {burn: true, xs: true}
    CR: {burn: true, xs: true}
    V: {burn: true, xs: true}
    SI: {burn: true, xs: true}
    MO: {burn: true, xs: true}
    W: {burn: true, xs: true}
    ZN: {burn: true, xs: true}
    O: {burn: true, xs: true}
blocks:
    uzr fuel: &block_0
        fuel:
            shape: Hexagon
            material: UZr
            Tinput: 25.0
            Thot: 600.0
            mult: 1.0
            op: 10.0
        clad:
            shape: Circle
            material: HT9
            Tinput: 25.0
            Thot: 600.0
            id: 0.0
            mult: 1.0
            od: 10.0
        dummy:
            shape: Circle
            material: ZnO
            Tinput: 25.0
            Thot: 600.0
            id: 0.0
            mult: 1.0
            od: 10.0
assemblies:
    fuel a:
        specifier: IC
        blocks: [*block_0]
        height: [10]
        axial mesh points: [1]
        xs types: [A]
"""

    def test_expandedNatural(self):
        """`expandTo` limits a natural element's expansion to the listed isotopes."""
        cs = settings.Settings()
        cs = cs.modified(newSettings={CONF_XS_KERNEL: "MC2v3"})
        bp = blueprints.Blueprints.load(self.yamlString)
        a = bp.constructAssem(cs, name="fuel a")
        b = a[-1]
        c = b.getComponent(Flags.CLAD)
        nd = c.getNumberDensities()
        self.assertIn("FE54", nd)  # natural isotopic as requested
        self.assertNotIn("FE56", nd)  # natural isotopic not requested
        self.assertNotIn("FE51", nd)  # un-natural
        self.assertNotIn("FE", nd)

    def test_eleExpandInfoBasedOnCodeENDF(self):
        """Element-expansion decisions follow the selected MCNP ENDF/B library base."""
        with TemporaryDirectoryChanger():
            # Reference elements to expand by library
            ref_E70_elem = ["C", "V", "ZN"]
            ref_E71_elem = ["C"]
            ref_E80_elem = []
            # Load settings and set neutronics kernel to MCNP
            cs = settings.Settings()
            cs = cs.modified(newSettings={CONF_NEUTRONICS_KERNEL: "MCNP"})
            # Set ENDF/B-VII.0 as MCNP cross section library base
            cs = cs.modified(newSettings={CONF_MCNP_LIB_BASE: "ENDF/B-VII.0"})
            eleToKeep, expansions = isotopicOptions.eleExpandInfoBasedOnCodeENDF(cs)
            E70_elem = [x.label for x in eleToKeep]
            # Set ENDF/B-VII.1 as MCNP cross section library base
            cs = cs.modified(newSettings={CONF_MCNP_LIB_BASE: "ENDF/B-VII.1"})
            eleToKeep, expansions = isotopicOptions.eleExpandInfoBasedOnCodeENDF(cs)
            E71_elem = [x.label for x in eleToKeep]
            # Set ENDF/B-VIII.0 as MCNP cross section library base
            cs = cs.modified(newSettings={CONF_MCNP_LIB_BASE: "ENDF/B-VIII.0"})
            eleToKeep, expansions = isotopicOptions.eleExpandInfoBasedOnCodeENDF(cs)
            E80_elem = [x.label for x in eleToKeep]
            # Assert equality of returned elements to reference elements
            self.assertEqual(sorted(E70_elem), sorted(ref_E70_elem))
            self.assertEqual(sorted(E71_elem), sorted(ref_E71_elem))
            self.assertEqual(sorted(E80_elem), sorted(ref_E80_elem))
            # Disallowed inputs
            not_allowed = ["ENDF/B-VIIII.0", "ENDF/B-VI.0", "JEFF-3.3"]
            # Assert raise InputError in case of invalid library setting
            for x in not_allowed:
                with self.assertRaises(InputError) as context:
                    cs = cs.modified(newSettings={CONF_MCNP_LIB_BASE: x})
                    _ = isotopicOptions.eleExpandInfoBasedOnCodeENDF(cs)
                self.assertTrue("Failed to determine nuclides for modeling" in str(context.exception))
================================================
FILE: armi/reactor/blueprints/tests/test_gridBlueprints.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for grid blueprints."""
import io
import os
import unittest
from armi import configure, isConfigured
if not isConfigured():
configure()
from armi.reactor.blueprints import Blueprints
from armi.reactor.blueprints.gridBlueprint import Grids, Pitch, saveToStream
from armi.utils.customExceptions import InputError
from armi.utils.directoryChangers import TemporaryDirectoryChanger
LATTICE_BLUEPRINT = """
control:
geom: hex_corners_up
symmetry: full
lattice pitch:
hex: 1.2
lattice map: |
- - - - - - - - - 1 1 1 1 1 1 1 1 1 4
- - - - - - - - 1 1 1 1 1 1 1 1 1 1 1
- - - - - - - 1 8 1 1 1 1 1 1 1 1 1 1
- - - - - - 1 1 1 1 1 1 1 1 1 1 1 1 1
- - - - - 1 1 1 1 1 1 1 1 1 1 1 1 1 1
- - - - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
- - - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
- - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
- 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
7 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1
1 1 1 1 1 1 1 1 2 1 1 1 1 1 1 1 1 1
1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
1 1 1 1 1 1 1 1 1 1 1 1 1 1
1 1 1 1 1 1 1 1 1 3 1 1 1
1 1 1 1 1 1 1 1 1 1 1 1
1 6 1 1 1 1 1 1 1 1 1
1 1 1 1 1 1 1 1 1 1
pins:
geom: hex
symmetry: full
lattice pitch:
hex: 1.3
lattice map: |
- - FP
- FP FP
- CL CL CL
FP FP FP FP
FP FP FP FP FP
CL CL CL CL
FP FP FP FP FP
FP FP FP FP
CL CL CL CL CL
FP FP FP FP
FP FP FP FP FP
CL CL CL CL
FP FP FP FP FP
FP FP FP FP
CL CL CL
FP FP
FP
sfp:
geom: cartesian
symmetry: full
lattice map: |
2 2 2 2 2
2 1 1 1 2
2 1 3 1 2
2 3 1 1 2
2 2 2 2 2
sfp quarter:
geom: cartesian
symmetry: quarter through center assembly
lattice map: |
2 2 2 2 2
2 1 1 1 2
2 1 3 1 2
2 3 1 1 2
2 2 2 2 2
sfp quarter even:
geom: cartesian
symmetry: quarter core
lattice map: |
2 2 2 2 2
2 1 1 1 2
2 1 3 1 2
2 3 1 1 2
2 2 2 2 2
sfp even:
geom: cartesian
symmetry: full
lattice map: |
1 2 2 2 2 2
1 2 1 1 1 2
1 2 1 4 1 2
1 2 2 1 1 2
1 2 2 2 2 2
1 1 1 1 1 1
"""
RZT_BLUEPRINT = """
rzt_core:
geom: thetarz
symmetry: eighth core periodic
grid bounds:
r:
- 0.0
- 14.2857142857
- 28.5714285714
- 42.8571428571
- 57.1428571429
- 71.4285714286
- 85.7142857143
- 100.001
- 115.001
- 130.001
theta:
- 0.0
- 0.11556368446681414
- 0.2311273689343264
- 0.34669105340061696
- 0.43870710999683127
- 0.5542707944631219
- 0.6698344789311578
- 0.7853981633974483
grid contents:
[0,0]: assembly1_1 fuel
[0,1]: assembly1_2 fuel
[0,2]: assembly1_3 fuel
[0,3]: assembly1_4 fuel
[0,4]: assembly1_5 fuel
[0,5]: assembly1_6 fuel
[0,6]: assembly1_7 fuel
[1,0]: assembly2_1 fuel
[1,1]: assembly2_2 fuel
[1,2]: assembly2_3 fuel
[1,3]: assembly2_4 fuel
[1,4]: assembly2_5 fuel
[1,5]: assembly2_6 fuel
[1,6]: assembly2_7 fuel
[2,0]: assembly3_1 fuel
[2,1]: assembly3_2 fuel
[2,2]: assembly3_3 fuel
[2,3]: assembly3_4 fuel
[2,4]: assembly3_5 fuel
[2,5]: assembly3_6 fuel
[2,6]: assembly3_7 fuel
[3,0]: assembly4_1 fuel
[3,1]: assembly4_2 fuel
[3,2]: assembly4_3 fuel
[3,3]: assembly4_4 fuel
[3,4]: assembly4_5 fuel
[3,5]: assembly4_6 fuel
[3,6]: assembly4_7 fuel
[4,0]: assembly5_1 fuel
[4,1]: assembly5_2 fuel
[4,2]: assembly5_3 fuel
[4,3]: assembly5_4 fuel
[4,4]: assembly5_5 fuel
[4,5]: assembly5_6 fuel
[4,6]: assembly5_7 fuel
[5,0]: assembly6_1 fuel
[5,1]: assembly6_2 fuel
[5,2]: assembly6_3 fuel
[5,3]: assembly6_4 fuel
[5,4]: assembly6_5 fuel
[5,5]: assembly6_6 fuel
[5,6]: assembly6_7 fuel
[6,0]: assembly7_1 fuel
[6,1]: assembly7_2 fuel
[6,2]: assembly7_3 fuel
[6,3]: assembly7_4 fuel
[6,4]: assembly7_5 fuel
[6,5]: assembly7_6 fuel
[6,6]: assembly7_7 fuel
[7,0]: assembly8_1 fuel
[7,1]: assembly8_2 fuel
[7,2]: assembly8_3 fuel
[7,3]: assembly8_4 fuel
[7,4]: assembly8_5 fuel
[7,5]: assembly8_6 fuel
[7,6]: assembly8_7 fuel
[8,0]: assembly9_1 fuel
[8,1]: assembly9_2 fuel
[8,2]: assembly9_3 fuel
[8,3]: assembly9_4 fuel
[8,4]: assembly9_5 fuel
[8,5]: assembly9_6 fuel
[8,6]: assembly9_7 fuel
"""
SMALL_HEX = """core:
geom: hex
symmetry: third periodic
lattice map: |
F
F
F F
F
F F
pins:
geom: hex
symmetry: full
lattice map: |
- - FP
- FP FP
- CL CL CL
FP FP FP FP
FP FP FP FP FP
CL CL CL CL
FP FP FP FP FP
FP FP FP FP
CL CL CL CL CL
FP FP FP FP
FP FP FP FP FP
CL CL CL CL
FP FP FP FP FP
FP FP FP FP
CL CL CL
FP FP
FP
"""
TINY_GRID = """core:
geom: hex
lattice map:
grid bounds:
symmetry: full
grid contents:
? - 0
- 0
: IF
"""
BIG_FULL_HEX_CORE = """core:
geom: hex
symmetry: full
lattice map: |
- - - - - - SS SS
- - - - SS SS SS SS SS
- - - - SS DD DD DD DD SS
- - - SS DD DD DD DD DD SS
- - - SS DD DD DD DD DD DD SS
- - SS DD DD DD DD DD DD DD SS
- - SS DD DD DD DD DD DD DD DD SS
- - SS DD DD DD RB DD DD DD SS
- - SS DD DD RB RB RB RB DD DD SS
- SS DD DD RB RB FF RB RB DD DD SS
- SS SS DD RB FF FF FF FF RB DD DD SS
- SS DD RB FF FF FF FF FF RB DD RR
- SS DD DD FF FF PC PC PC FF DD DD SS
SS SS DD RB FF II PC FF FF RB DD DD SS
- SS DD RB FF SS II II PC FF RB DD RR
SS DD DD FF II II II II II FF DD DD SS
- SS DD RB II II II II II II RB DD SS
SS DD RB FF RC II SS II II FF RB DD SS
SS DD DD FF II II II RC PC II FF DD DD SS
SS DD RB II PC II II II PC II RB DD SS
SS DD RB FF II II II II II II FF RB DD SS
SS DD FF II II WW II II II II FF DD SS
SS DD RB FF II II WW XX PC II FF RB DD SS
SS DD FF PC II BB AA YY SS DC FF DD SS
SS DD RB FF II RC CC ZZ II II FF RB DD SS
SS DD FF II II II II II II II FF DD SS
SS DD RB FF II II II II II II FF RB DD SS
SS DD RB II II II II RC II II RB DD SS
SS DD DD FF PC II SS II II PC FF DD DD SS
SS DD RB II II II II II II II RB DD SS
- SS DD FF II PC II II II II FF DD SS
SS DD RB FF II II PC II II FF RB DD SS
- SS DD RB FF SS II II PC FF RB DD SS
SS SS DD RB FF II II II FF RB DD SS SS
- SS DD DD FF FF II II FF FF DD DD SS
- SS DD RB FF FF FF FF FF RB DD SS
- SS SS DD RB FF FF FF FF RB DD SS SS
- SS DD DD RB RB RB RB RB DD DD SS
- SS DD DD RB RB RB RB DD DD SS
- SS DD DD DD DD DD DD DD SS
SS DD DD DD DD DD DD DD DD SS
SS DD DD DD DD DD DD DD SS
SS DD DD DD DD DD DD SS
SS DD DD DD DD DD SS
SS DD DD DD DD SS
SS SS SS SS SS
- SS SS -
"""
class TestGridBPRoundTrip(unittest.TestCase):
    """Round-trip a small hex grid blueprint through an in-memory stream."""

    def setUp(self):
        self.grids = Grids.load(SMALL_HEX)

    def test_contents(self):
        # The loaded grid collection keys on grid names from the YAML.
        self.assertIn("core", self.grids)

    def test_roundTrip(self):
        """
        Test saving blueprint data to a stream.

        .. test:: Grid blueprints can be written to disk.
            :id: T_ARMI_BP_TO_DB0
            :tests: R_ARMI_BP_TO_DB
        """
        stream = io.StringIO()
        saveToStream(stream, self.grids, False, True)
        stream.seek(0)
        gridBp = Grids.load(stream)
        # The "third periodic" symmetry must survive the write/read cycle.
        self.assertIn("third", gridBp["core"].symmetry)

    def test_tinyMap(self):
        """
        Test that a lattice map can be defined, written, and read in from blueprint file.

        .. test:: Define a lattice map in reactor core.
            :id: T_ARMI_BP_GRID1
            :tests: R_ARMI_BP_GRID
        """
        grid = Grids.load(TINY_GRID)
        stream = io.StringIO()
        saveToStream(stream, grid, full=True, tryMap=True)
        stream.seek(0)
        text = stream.read()
        # The single "IF" specifier should appear in the serialized text...
        self.assertIn("IF", text)
        stream.seek(0)
        gridBp = Grids.load(stream)
        # ...and survive a reload, with full symmetry and a lattice map present.
        self.assertIn("full", gridBp["core"].symmetry)
        self.assertIn("IF", gridBp["core"].latticeMap)
class TestGridBPRoundTripFull(unittest.TestCase):
    """Round-trip a full-core hex grid blueprint and verify its contents survive."""

    def _checkGridContents(self, gridDesign):
        """Assert the expected specifiers at known (i, j) indices of the big full-hex core.

        Shared by the pre- and post-round-trip checks so the two assertion sets
        cannot drift apart.
        """
        self.assertEqual(gridDesign.gridContents[0, 0], "AA")
        self.assertEqual(gridDesign.gridContents[-2, 1], "BB")
        self.assertEqual(gridDesign.gridContents[-1, 0], "CC")
        self.assertEqual(gridDesign.gridContents[-1, 1], "WW")
        self.assertEqual(gridDesign.gridContents[1, 0], "XX")
        self.assertEqual(gridDesign.gridContents[2, -1], "YY")
        self.assertEqual(gridDesign.gridContents[1, -1], "ZZ")
        self.assertEqual(gridDesign.gridContents[-3, 1], "RC")
        self.assertEqual(gridDesign.gridContents[3, -1], "PC")

    def test_fullMap(self):
        """
        Test that a lattice map can be defined, written, and read in from blueprint file.

        .. test:: Define a lattice map in reactor core.
            :id: T_ARMI_BP_GRID2
            :tests: R_ARMI_BP_GRID
        """
        grid = Grids.load(BIG_FULL_HEX_CORE)
        gridDesign = grid["core"]
        _ = gridDesign.construct()

        # test before the round-trip
        self._checkGridContents(gridDesign)

        # perform a roundtrip
        stream = io.StringIO()
        saveToStream(stream, grid, full=True, tryMap=True)
        stream.seek(0)
        gridBp = Grids.load(stream)
        gridDesign = gridBp["core"]
        _ = gridDesign.construct()

        # test again after the round-trip
        self._checkGridContents(gridDesign)
class TestGridBlueprintsSection(unittest.TestCase):
    """Tests for lattice blueprint section."""

    def setUp(self):
        self.td = TemporaryDirectoryChanger()
        self.td.__enter__()
        self.grids = Grids.load(LATTICE_BLUEPRINT.format(self._testMethodName))

    def tearDown(self):
        self.td.__exit__(None, None, None)

    def _saveBlueprintsToFile(self, bp, filePath):
        """Write full blueprints to ``filePath`` and return the saved text.

        Shared by the lattice-map save/re-read tests so the file round-trip
        boilerplate is not duplicated.
        """
        with open(filePath, "w") as stream:
            saveToStream(stream, bp, True)
        with open(filePath, "r") as f:
            return f.read()

    def test_simpleRead(self):
        """Read hex and Cartesian lattice sections and spot-check their contents."""
        gridDesign = self.grids["control"]
        grid = gridDesign.construct()
        self.assertAlmostEqual(grid.pitch, 1.2)
        self.assertEqual(gridDesign.gridContents[-8, 0], "6")

        gridDesign = self.grids["pins"]
        grid = gridDesign.construct()
        self.assertAlmostEqual(grid.pitch, 1.3)
        self.assertEqual(gridDesign.gridContents[-4, 0], "FP")
        self.assertEqual(gridDesign.gridContents[-3, 3], "CL")

        # Cartesian full, odd
        gridDesign2 = self.grids["sfp"]
        _ = gridDesign2.construct()
        self.assertEqual(gridDesign2.gridContents[1, 1], "1")
        self.assertEqual(gridDesign2.gridContents[0, 0], "3")
        self.assertEqual(gridDesign2.gridContents[-1, -1], "3")

        # Cartesian quarter, odd
        gridDesign3 = self.grids["sfp quarter"]
        grid = gridDesign3.construct()
        self.assertEqual(gridDesign3.gridContents[0, 0], "2")
        self.assertEqual(gridDesign3.gridContents[1, 1], "3")
        self.assertEqual(gridDesign3.gridContents[2, 2], "3")
        self.assertEqual(gridDesign3.gridContents[3, 3], "1")
        self.assertTrue(grid.symmetry.isThroughCenterAssembly)

        # cartesian quarter, even not through center
        gridDesign3 = self.grids["sfp quarter even"]
        grid = gridDesign3.construct()
        self.assertFalse(grid.symmetry.isThroughCenterAssembly)

        # Cartesian full, even/odd hybrid
        gridDesign4 = self.grids["sfp even"]
        grid = gridDesign4.construct()
        self.assertEqual(gridDesign4.gridContents[0, 0], "4")
        self.assertEqual(gridDesign4.gridContents[-1, -1], "2")
        self.assertEqual(gridDesign4.gridContents[2, 2], "2")
        self.assertEqual(gridDesign4.gridContents[-3, -3], "1")
        with self.assertRaises(KeyError):
            self.assertEqual(gridDesign4.gridContents[-4, -3], "1")

    def test_pitchBasics(self):
        """A Pitch can be defined as hex-only or as X/Y/Z; hex mirrors x when unset."""
        # use only hex input
        p = Pitch(123, 0, 0, 0)
        self.assertEqual(p.hex, 123)
        self.assertEqual(p.x, 0)
        self.assertEqual(p.y, 0)
        self.assertEqual(p.z, 0)

        # use only X, Y, Z inputs
        p = Pitch(0, 1, 2, 3)
        self.assertEqual(p.hex, 1)
        self.assertEqual(p.x, 1)
        self.assertEqual(p.y, 2)
        self.assertEqual(p.z, 3)

    def test_pitchEdgeCases(self):
        """Invalid pitch combinations raise InputError."""
        with self.assertRaises(InputError):
            # cannot mix hex with x,y,z pitch
            Pitch(1, 2, 3, 4)

        with self.assertRaises(InputError):
            # SOMETHING needs to be non-zero
            Pitch(0, 0, 0, 0)

    def test_simpleReadLatticeMap(self):
        """Read lattice map and create a grid.

        .. test:: Define a lattice map in reactor core.
            :id: T_ARMI_BP_GRID0
            :tests: R_ARMI_BP_GRID
        """
        from armi.reactor.blueprints.tests.test_blockBlueprints import FULL_BP

        # Cartesian full, even/odd hybrid
        gridDesign4 = self.grids["sfp even"]
        _grid = gridDesign4.construct()

        # test that we can correctly save this to a YAML
        bp = Blueprints.load(FULL_BP)
        filePath = "TestGridBlueprintsSection__test_simpleReadLatticeMap.log"
        outText = self._saveBlueprintsToFile(bp, filePath)

        # test that the output looks valid, and includes a lattice map
        self.assertIn("blocks:", outText)
        self.assertIn("shape: Circle", outText)
        self.assertIn("assemblies:", outText)
        self.assertIn("flags: fuel test", outText)
        self.assertIn("grid contents:", outText)
        self.assertIn("lattice map:", outText)
        before, after = outText.split("lattice map:")
        self.assertGreater(len(before), 100)
        self.assertGreater(len(after), 20)
        self.assertIn("1 2 1 2 1 2 1", after, msg="lattice map not showing up")
        self.assertNotIn("- -3", after, msg="grid contents are showing up when they shouldn't")
        self.assertNotIn("readFromLatticeMap", outText)
        self.assertTrue(os.path.exists(filePath))

    def test_simpleReadNoLatticeMap(self):
        """A blueprint with grid contents (no lattice map) saves grid contents verbatim."""
        from armi.reactor.blueprints.tests.test_blockBlueprints import FULL_BP_GRID

        # Cartesian full, even/odd hybrid
        gridDesign4 = self.grids["sfp even"]
        _grid = gridDesign4.construct()

        # test that we can correctly save this to a YAML
        bp = Blueprints.load(FULL_BP_GRID)
        filePath = "TestGridBlueprintsSection__test_simpleReadNoLatticeMap.log"
        outText = self._saveBlueprintsToFile(bp, filePath)

        # test that the output looks valid, and includes a lattice map
        self.assertIn("blocks:", outText)
        self.assertIn("shape: Circle", outText)
        self.assertIn("assemblies:", outText)
        self.assertIn("flags: fuel test", outText)
        self.assertIn("grid contents:", outText)
        self.assertIn("lattice map:", outText)
        before, after = outText.split("grid contents:")
        self.assertGreater(len(before), 100)
        self.assertGreater(len(after), 20)
        self.assertIn("- -3", after, msg="grid contents not showing up")
        self.assertNotIn("1 3 1 2 1 3 1", after, msg="lattice map showing up when it shouldn't")
        self.assertNotIn("readFromLatticeMap", outText)
        self.assertTrue(os.path.exists(filePath))
class TestRZTGridBlueprint(unittest.TestCase):
    """Tests for R-Z-Theta grid inputs."""

    def setUp(self):
        self.grids = Grids.load(RZT_BLUEPRINT)

    def test_construct(self):
        """Build the R-Z-Theta grid and check contents plus ring/position bounds."""
        rztDesign = self.grids["rzt_core"]
        rztGrid = rztDesign.construct()
        self.assertEqual(rztDesign.gridContents[2, 2], "assembly3_3 fuel")
        # radial/azimuthal bounds should map back to the expected (r, theta, z) indices
        indices = rztGrid.indicesOfBounds(57.1428571429, 71.4285714286, 0.5542707944631219, 0.6698344789311578)
        self.assertEqual(indices, (5, 4, 0))
================================================
FILE: armi/reactor/blueprints/tests/test_materialModifications.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for material modifications."""
import unittest
from numpy.testing import assert_allclose
from armi import materials, settings
from armi.reactor import blueprints
from armi.reactor.blueprints.blockBlueprint import BlockBlueprint
class TestMaterialModifications(unittest.TestCase):
    """Tests of the ``material modifications`` blueprint section (assembly-wide and by-component)."""

    # Minimal two-component UZr fuel assembly blueprint; material modification
    # snippets are appended under the ``fuel a`` assembly by the load helpers.
    uZrInput = r"""
nuclide flags:
    U: {burn: false, xs: true}
    ZR: {burn: false, xs: true}
blocks:
    fuel: &block_fuel
        fuel1: &component_fuel_fuel1
            shape: Hexagon
            material: UZr
            Tinput: 600.0
            Thot: 600.0
            ip: 0.0
            mult: 1
            op: 10.0
        fuel2: &component_fuel_fuel2
            shape: Hexagon
            material: UZr
            Tinput: 600.0
            Thot: 600.0
            ip: 0.0
            mult: 1
            op: 10.0
assemblies:
    fuel a: &assembly_a
        specifier: IC
        blocks: [*block_fuel]
        height: [1.0]
        axial mesh points: [1]
        xs types: [A]
"""

    # Single-component B4C assembly, used for theoretical-density modifications.
    b4cInput = r"""
nuclide flags:
    B: {burn: false, xs: true}
    C: {burn: false, xs: true}
blocks:
    poison: &block_poison
        poison:
            shape: Hexagon
            material: B4C
            Tinput: 600.0
            Thot: 600.0
            ip: 0.0
            mult: 1
            op: 10.0
assemblies:
    assem a: &assembly_a
        specifier: IC
        blocks: [*block_poison]
        height: [1.0]
        axial mesh points: [1]
        xs types: [A]
"""

    def loadUZrAssembly(self, materialModifications):
        """Load the UZr assembly with the given material-modification YAML appended."""
        return self._loadAssembly(self.uZrInput, materialModifications, "fuel a")

    @staticmethod
    def _loadAssembly(bpBase: str, materialModifications: str, assem: str):
        """Concatenate base blueprints with a modifications snippet and construct ``assem``."""
        yamlString = bpBase + "\n" + materialModifications
        design = blueprints.Blueprints.load(yamlString)
        design._prepConstruction(settings.Settings())
        return design.assemblies[assem]

    def loadB4CAssembly(self, materialModifications: str):
        """Load the B4C assembly with the given material-modification YAML appended."""
        return self._loadAssembly(self.b4cInput, materialModifications, "assem a")

    def test_noMaterialModifications(self):
        """With no modifications, component mass fractions match the raw UZr material."""
        a = self.loadUZrAssembly("")

        # mass fractions should be whatever UZr is
        uzr = materials.UZr()
        fuelComponent = a[0][0]
        totalMass = fuelComponent.getMass()
        for nucName in uzr.massFrac:
            massFrac = fuelComponent.getMass(nucName) / totalMass
            assert_allclose(uzr.massFrac[nucName], massFrac)

    def test_u235_wt_frac_modification(self):
        """Test constructing a component where the blueprints specify a material
        modification for one nuclide.

        .. test:: A material modification can be applied to all the components in an assembly.
            :id: T_ARMI_MAT_USER_INPUT0
            :tests: R_ARMI_MAT_USER_INPUT
        """
        a = self.loadUZrAssembly(
            """
        material modifications:
            U235_wt_frac: [0.20]
"""
        )
        # both components pick up the assembly-wide enrichment
        fuelComponent = a[0][0]
        u235 = fuelComponent.getMass("U235")
        u = fuelComponent.getMass("U")
        assert_allclose(0.20, u235 / u)

        fuelComponent = a[0][1]
        u235 = fuelComponent.getMass("U235")
        u = fuelComponent.getMass("U")
        assert_allclose(0.20, u235 / u)

    def test_u235_wt_frac_byComponent_modification1(self):
        """Test constructing a component where the blueprints specify a material
        modification for one nuclide, for just one component.

        .. test:: A material modification can be applied to one component in an assembly.
            :id: T_ARMI_MAT_USER_INPUT1
            :tests: R_ARMI_MAT_USER_INPUT
        """
        a = self.loadUZrAssembly(
            """
        material modifications:
            by component:
                fuel1:
                    U235_wt_frac: [0.20]
            U235_wt_frac: [0.30]
"""
        )
        # fuel1 gets the by-component value; fuel2 falls back to the block-level value
        fuelComponent = a[0][0]
        u235 = fuelComponent.getMass("U235")
        u = fuelComponent.getMass("U")
        assert_allclose(0.20, u235 / u)

        fuelComponent = a[0][1]
        u235 = fuelComponent.getMass("U235")
        u = fuelComponent.getMass("U")
        assert_allclose(0.30, u235 / u)

    def test_u235_wt_frac_byComponent_modification2(self):
        """Test constructing a component where the blueprints specify a material
        modification for one nuclide, for multiple components.

        .. test:: A material modification can be applied to multiple components in an assembly.
            :id: T_ARMI_MAT_USER_INPUT2
            :tests: R_ARMI_MAT_USER_INPUT
        """
        a = self.loadUZrAssembly(
            """
        material modifications:
            by component:
                fuel1:
                    U235_wt_frac: [0.20]
                fuel2:
                    U235_wt_frac: [0.50]
            U235_wt_frac: [0.30]
"""
        )
        # by-component values win over the block-level 0.30 for both components
        fuelComponent = a[0][0]
        u235 = fuelComponent.getMass("U235")
        u = fuelComponent.getMass("U")
        assert_allclose(0.20, u235 / u)

        fuelComponent = a[0][1]
        u235 = fuelComponent.getMass("U235")
        u = fuelComponent.getMass("U")
        assert_allclose(0.50, u235 / u)

    def test_materialModificationLength(self):
        """If the wrong number of material modifications are defined, there is an error."""
        with self.assertRaises(ValueError):
            _a = self.loadUZrAssembly(
                """
        material modifications:
            by component:
                fuel1:
                    U235_wt_frac: [0.2]
            U235_wt_frac: [0.11, 0.22, 0.33, 0.44]
"""
            )

    def test_invalidComponentModification(self):
        """Referencing a component name that does not exist in the block is an error."""
        with self.assertRaises(ValueError):
            _a = self.loadUZrAssembly(
                """
        material modifications:
            by component:
                invalid component:
                    U235_wt_frac: [0.2]
"""
            )

    def test_zrWtFracModification(self):
        """ZR weight fraction modification shifts the component composition."""
        a = self.loadUZrAssembly(
            """
        material modifications:
            ZR_wt_frac: [0.077]
"""
        )
        fuelComponent = a[0][0]
        totalMass = fuelComponent.getMass()
        zr = fuelComponent.getMass("ZR")
        assert_allclose(0.077, zr / totalMass)

    def test_bothU235ZrWtFracModification(self):
        """Enrichment and ZR fraction modifications can be applied together."""
        a = self.loadUZrAssembly(
            """
        material modifications:
            ZR_wt_frac: [0.077]
            U235_wt_frac: [0.20]
"""
        )
        fuelComponent = a[0][0]

        # check u235 enrichment
        u235 = fuelComponent.getMass("U235")
        u = fuelComponent.getMass("U")
        assert_allclose(0.20, u235 / u)

        # check zr frac
        totalMass = fuelComponent.getMass()
        zr = fuelComponent.getMass("ZR")
        assert_allclose(0.077, zr / totalMass)

    def test_checkByComponentMaterialInput(self):
        """Unknown component names in by-component input are rejected."""
        a = self.loadUZrAssembly("")
        materialInput = {"fake_material": {"ZR_wt_frac": 0.5}}
        with self.assertRaises(ValueError):
            BlockBlueprint._checkByComponentMaterialInput(a, materialInput)

    def test_filterMaterialInput(self):
        """By-component input overrides byBlock input for the matching component only."""
        a = self.loadUZrAssembly("")
        materialInput = {
            "byBlock": {"ZR_wt_frac": 0.1, "U235_wt_frac": 0.1},
            "fuel1": {"U235_wt_frac": 0.2},
            "fuel2": {"ZR_wt_frac": 0.3, "U235_wt_frac": 0.3},
        }
        componentDesign = a[0][0]
        filteredMaterialInput, _ = BlockBlueprint._filterMaterialInput(materialInput, componentDesign)
        filteredMaterialInput_reference = {"ZR_wt_frac": 0.1, "U235_wt_frac": 0.2}
        self.assertEqual(filteredMaterialInput, filteredMaterialInput_reference)

    def test_invalidMatModName(self):
        """
        This test shows that we can detect invalid material modification
        names when they are specified on an assembly blueprint. We happen to know
        that ZR_wt_frac is a valid modification for the UZr material class, so we
        use that in the first call to prove that things initially work fine.
        """
        a = self.loadUZrAssembly(
            """
        material modifications:
            ZR_wt_frac: [1]
            by component:
                fuel2:
                    ZR_wt_frac: [0]
"""
        )
        # just to prove that the above works fine before we modify it
        self.assertAlmostEqual(a[0][0].getMassFrac("ZR"), 1)
        self.assertAlmostEqual(a[0][1].getMassFrac("ZR"), 0)

        with self.assertRaises(ValueError):
            a = self.loadUZrAssembly(
                """
        material modifications:
            this_is_a_fake_name: [1]
            by component:
                fuel2:
                    ZR_wt_frac: [0]
"""
            )

        with self.assertRaises(ValueError):
            a = self.loadUZrAssembly(
                """
        material modifications:
            ZR_wt_frac: [1]
            by component:
                fuel2:
                    this_is_a_fake_name: [0]
"""
            )

    def test_invalidMatModType(self):
        """
        This test shows that we can detect material modifications that are invalid
        because of their values, not just their names.

        We happen to know that ZR_wt_frac is a valid modification for UZr, so we
        use that in the first call to prove that things initially work fine.
        """
        a = self.loadUZrAssembly(
            """
        material modifications:
            ZR_wt_frac: [1]
"""
        )
        # just to prove that the above works fine before we modify it
        self.assertAlmostEqual(a[0][0].getMassFrac("ZR"), 1)

        with self.assertRaises(ValueError) as ee:
            a = self.loadUZrAssembly(
                """
        material modifications:
            ZR_wt_frac: [this_is_a_value_of_incompatible_type]
"""
            )
        # BUGFIX: the assertRaises context manager has no ``args``; the raised
        # exception is on ``ee.exception``.
        self.assertIn(
            "Something went wrong in applying the material modifications",
            ee.exception.args[0],
        )

    def test_matModsUpTheMRO(self):
        """
        Make sure that valid/invalid material modifications are searched up
        the MRO for a material class.
        """
        _a = self.loadUZrAssembly(
            """
        material modifications:
            ZR_wt_frac: [1]
            class1_wt_frac: [1]
            class1_custom_isotopics: [dummy]
            class2_custom_isotopics: [dummy]
            by component:
                fuel2:
                    ZR_wt_frac: [0]
                    class1_wt_frac: [1]
                    class1_custom_isotopics: [dummy]
                    class2_custom_isotopics: [dummy]
custom isotopics:
    dummy:
        input format: mass fractions
        density: 1
        U: 1
"""
        )

        with self.assertRaises(ValueError):
            _a = self.loadUZrAssembly(
                """
        material modifications:
            ZR_wt_frac: [1]
            klass1_wt_frac: [1]
            klass1_custom_isotopics: [dummy]
            klass2_custom_isotopics: [dummy]
            by component:
                fuel2:
                    ZR_wt_frac: [0]
                    klass1_wt_frac: [1]
                    klass1_custom_isotopics: [dummy]
                    klass2_custom_isotopics: [dummy]
custom isotopics:
    dummy:
        input format: mass fractions
        density: 1
        U: 1
"""
            )

    def test_theoreticalDensity(self):
        """Test the theoretical density can be loaded from material modifications."""
        mods = """
        material modifications:
            TD_frac: [0.5]
"""
        a = self.loadB4CAssembly(mods)
        comp = a[0][0]
        mat = comp.material
        self.assertEqual(mat.getTD(), 0.5)
        self.assertEqual(comp.p.theoreticalDensityFrac, 0.5)
================================================
FILE: armi/reactor/blueprints/tests/test_reactorBlueprints.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for reactor blueprints."""
import logging
import os
import unittest
from armi import runLog, settings
from armi.reactor import blueprints, reactors
from armi.reactor.blueprints import gridBlueprint, reactorBlueprint
from armi.reactor.blueprints.tests import test_customIsotopics
from armi.reactor.composites import Composite
from armi.reactor.excoreStructure import ExcoreStructure
from armi.reactor.reactors import Core, loadFromCs
from armi.reactor.spentFuelPool import SpentFuelPool
from armi.settings.caseSettings import Settings
from armi.testing import TESTING_ROOT
from armi.tests import mockRunLogs
CORE_BLUEPRINT = """
core:
grid name: core
origin:
x: 0.0
y: 10.1
z: 1.1
sfp:
type: sfp
grid name: sfp
origin:
x: 0.0
y: 12.1
z: 1.1
evst:
type: excore
grid name: evst
origin:
x: 0.0
y: 100.0
z: 0.0
"""
GRIDS = """
core:
geom: hex
symmetry: third core periodic
grid contents:
[0, 0]: IC
[1, 1]: IC
orientationBOL:
[1, 1]: 60.0
[3, 2]: 120.0
sfp:
lattice pitch:
x: 25.0
y: 25.0
geom: cartesian
symmetry: full
lattice map: |
IC IC
IC IC
orientationBOL:
[0, 0]: 60.0
[0, -1]: 120.0
evst:
lattice pitch:
x: 32.0
y: 32.0
geom: cartesian
symmetry: full
lattice map: |
IC IC
IC IC
"""
SMALL_YAML = """
systems:
core:
grid name: core
origin:
x: 0.0
y: 0.0
z: 0.0
sfp:
type: sfp
grid name: sfp
origin:
x: 1000.0
y: 1000.0
z: 1000.0
evst:
type: excore
grid name: evst
origin:
x: 2000.0
y: 2000.0
z: 2000.0
grids:
core:
geom: hex
symmetry: third core periodic
grid contents:
[0, 0]: IC
[1, 1]: IC
sfp:
lattice pitch:
x: 25.0
y: 25.0
geom: cartesian
symmetry: full
lattice map: |
IC IC
IC IC
evst:
lattice pitch:
x: 32.0
y: 32.0
geom: hex
symmetry: full
"""
class TestReactorBlueprints(unittest.TestCase):
    """Tests for reactor blueprints."""

    def setUp(self):
        # add testMethodName to avoid I/O collisions during parallel testing
        self.systemDesigns = reactorBlueprint.Systems.load(CORE_BLUEPRINT)
        self.gridDesigns = gridBlueprint.Grids.load(GRIDS)

    def test_simpleRead(self):
        # each system picks up the origin declared in CORE_BLUEPRINT
        self.assertAlmostEqual(self.systemDesigns["core"].origin.y, 10.1)
        self.assertAlmostEqual(self.systemDesigns["sfp"].origin.y, 12.1)
        self.assertAlmostEqual(self.systemDesigns["evst"].origin.y, 100)

    def _setupReactor(self):
        # Construct a core, sfp, and evst from blueprints; writes (and removes)
        # per-test-method geometry files to avoid collisions in parallel runs.
        fnames = [self._testMethodName + n for n in ["geometry.yaml", "sfp-geom.yaml"]]
        for fn in fnames:
            with open(fn, "w") as f:
                f.write(SMALL_YAML)

        cs = settings.Settings()
        bp = blueprints.Blueprints.load(test_customIsotopics.TestCustomIsotopics.yamlString)
        bp.systemDesigns = self.systemDesigns
        bp.gridDesigns = self.gridDesigns
        reactor = reactors.Reactor(cs.caseTitle, bp)
        core = bp.systemDesigns["core"].construct(cs, bp, reactor)
        sfp = bp.systemDesigns["sfp"].construct(cs, bp, reactor)
        evst = bp.systemDesigns["evst"].construct(cs, bp, reactor)

        for fn in fnames:
            os.remove(fn)

        return core, sfp, evst

    def test_construct(self):
        """Actually construct some reactor systems.

        .. test:: Create core and spent fuel pool with blueprint.
            :id: T_ARMI_BP_SYSTEMS
            :tests: R_ARMI_BP_SYSTEMS

        .. test:: Create core object with blueprint.
            :id: T_ARMI_BP_CORE
            :tests: R_ARMI_BP_CORE
        """
        core, sfp, evst = self._setupReactor()
        # core holds 2 assemblies (third-core grid contents); sfp/evst hold 4 each
        self.assertEqual(len(core), 2)
        self.assertEqual(len(sfp), 4)
        self.assertEqual(len(evst), 4)
        self.assertIsInstance(core, Core)
        self.assertIsInstance(sfp, SpentFuelPool)
        self.assertIsInstance(evst, ExcoreStructure)

    def test_materialDataSummary(self):
        """Test that the material data summary for the core is valid as a printout to the stdout."""
        expectedMaterialData = [
            ("Custom", "ARMI"),
            ("HT9", "ARMI"),
            ("Sodium", "ARMI"),
            ("UZr", "ARMI"),
        ]
        core, _sfp, _evst = self._setupReactor()
        materialData = reactorBlueprint.summarizeMaterialData(core)
        for actual, expected in zip(materialData, expectedMaterialData):
            self.assertEqual(actual, expected)

    def test_excoreStructure(self):
        _core, _sfp, evst = self._setupReactor()
        self.assertIsInstance(evst, ExcoreStructure)
        self.assertEqual(evst.parent.__class__.__name__, "Reactor")
        self.assertEqual(evst.spatialGrid.__class__.__name__, "CartesianGrid")

        # add one composite object and validate
        comp1 = Composite("thing1")
        loc = evst.spatialGrid[(0, 0, 0)]
        self.assertEqual(len(evst.getChildren()), 4)
        evst.add(comp1, loc)
        self.assertEqual(len(evst.getChildren()), 5)

    def test_spentFuelPool(self):
        _core, sfp, evst = self._setupReactor()
        self.assertIsInstance(sfp, SpentFuelPool)
        self.assertEqual(sfp.parent.__class__.__name__, "Reactor")
        self.assertEqual(sfp.spatialGrid.__class__.__name__, "CartesianGrid")
        self.assertEqual(sfp.numColumns, 2)

        # add one assembly and validate
        self.assertEqual(len(sfp.getChildren()), 4)
        sfp.add(evst.getChildren()[0])
        self.assertEqual(len(sfp.getChildren()), 5)

    def test_orientationBOL(self):
        core, sfp, _evst = self._setupReactor()

        # test for hex core
        a0 = core.getAssembly(locationString="001-001")
        self.assertAlmostEqual(a0.p.orientation[2], 60.0, delta=1e-9)
        a1 = core.getAssembly(locationString="003-002")
        self.assertAlmostEqual(a1.p.orientation[2], 120.0, delta=1e-9)

        # test cartesian, non-core
        a0 = sfp.getAssembly("A0005")
        self.assertAlmostEqual(a0.p.orientation[2], 60.0, delta=1e-9)
        a1 = sfp.getAssembly("A0003")
        self.assertAlmostEqual(a1.p.orientation[2], 120.0, delta=1e-9)

    def test_fullCoreAreNotConverted(self):
        """Prove that geometries aren't being converted when reading in a full-core BP."""
        cs = Settings(os.path.join(TESTING_ROOT, "reactors", "smallHexReactor", "smallHexReactor.yaml"))
        runLog.setVerbosity(logging.INFO)
        with mockRunLogs.BufferLog() as log:
            self.assertEqual("", log.getStdout())
            r = loadFromCs(cs)

            # ensure that, for full core, only the correct parts of the geom modification are hit
            self.assertIn("Applying Geometry Modifications", log.getStdout())
            self.assertIn("Updating spatial grid", log.getStdout())
            self.assertNotIn("Applying non-full core", log.getStdout())

        a = r.core.getAssemblyWithStringLocation("003-012")
        self.assertIn("fuel assembly", str(a).lower())
        b = a[2]
        self.assertIn("fuel", str(b).lower())
        self.assertEqual(b.p.molesHmBOL, b.getHMMoles())
================================================
FILE: armi/reactor/components/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Components package contains components and shapes.
These objects hold the dimensions, temperatures, composition, and shape of reactor primitives.
.. _component-class-diagram:
.. pyreverse:: armi.reactor.components -A -k --ignore=componentParameters.py
:align: center
:alt: Component class diagram
:width: 100%
Class inheritance diagram for :py:mod:`armi.reactor.components`.
"""
# ruff: noqa: F405, I001
import math
import numpy as np
from armi import runLog
from armi.reactor.components.component import * # noqa: F403
from armi.reactor.components.basicShapes import * # noqa: F403
from armi.reactor.components.complexShapes import * # noqa: F403
from armi.reactor.components.volumetricShapes import * # noqa: F403
def factory(shape, bcomps, kwargs):
"""
Build a new component object.
Parameters
----------
shape : str
lowercase string corresponding to the component type name
bcomps : list(Component)
list of "sibling" components. This list is used to find component links, which are of the form
``. d = sqrt(4*area/pi)
return math.sqrt(4.0 * self.getComponentArea() / math.pi)
    def computeVolume(self):
        """Cannot compute volume until it is derived.

        .. impl:: The volume of a DerivedShape depends on the solid shapes surrounding
            them.
            :id: I_ARMI_COMP_FLUID0
            :implements: R_ARMI_COMP_FLUID

            Computing the volume of a ``DerivedShape`` means looking at the solid
            materials around it, and finding what shaped space is left over in between
            them. This method calls the method ``_deriveVolumeAndArea``, which makes
            use of the fact that the ARMI reactor data model is hierarchical. It starts
            by finding the parent of this object, and then finding the volume of all
            the other objects at this level. Whatever is left over, is the volume of
            this object. Obviously, you can only have one ``DerivedShape`` child of any
            parent for this logic to work.

        Returns
        -------
        float
            Derived volume in cm^3 (whatever space the siblings leave behind).
        """
        return self._deriveVolumeAndArea()
def getMaxVolume(self):
"""
The maximum volume of the parent Block.
Returns
-------
vol : float
volume in cm^3.
"""
return self.parent.getMaxArea() * self.parent.getHeight()
def _deriveVolumeAndArea(self):
"""
Derive the volume and area of a ``DerivedShape``.
Notes
-----
If a parent exists, this will iterate over it and then determine both the volume and area
based on its context within the scope of the parent object by considering the volumes and
areas of the surrounding components.
Since some components are volumetric shapes, this must consider the volume so that it wraps
around in all three dimensions.
But there are also situations where we need to handle zero-height blocks with purely 2D
components. Thus we track area and volume fractions here when possible.
"""
if self.parent is None:
raise ValueError(f"Cannot compute volume/area of {self} without a parent object.")
# Determine the volume/areas of the non-derived shape components within the parent.
siblingVolume = 0.0
siblingArea = 0.0
for sibling in self.parent:
if sibling is self:
continue
elif not self and isinstance(sibling, DerivedShape):
raise ValueError(f"More than one ``DerivedShape`` component in {self.parent} is not allowed.")
siblingVolume += sibling.getVolume()
try:
if siblingArea is not None:
siblingArea += sibling.getArea()
except Exception:
siblingArea = None
remainingVolume = self.getMaxVolume() - siblingVolume
if siblingArea:
remainingArea = self.parent.getMaxArea() - siblingArea
# Check for negative
if remainingVolume < 0:
msg = (
f"The component areas in {self.parent} exceed the maximum "
"allowable volume based on the geometry. Check that the "
"geometry is defined correctly.\n"
f"Maximum allowable volume: {self.getMaxVolume()} "
f"cm^3\nVolume of all non-derived shape components: {siblingVolume} cm^3\n"
)
runLog.error(msg)
raise ValueError(f"Negative area/volume errors occurred for {self.parent}. Check log for errors.")
height = self.parent.getHeight()
if not height:
# special handling for 0-height blocks
if not remainingArea:
raise ValueError(f"Cannot derive area in 0-height block {self.parent}")
self.p.area = remainingArea
else:
self.p.area = remainingVolume / height
return remainingVolume
def getVolume(self):
"""
Get volume of derived shape.
The DerivedShape must pay attention to all of the companion objects, because if
they change, this changes. However it's inefficient to always recompute the
derived volume, so we have to rely on the parent to know if anything has changed.
Since each parent is only allowed one DerivedShape, we can reset the update flag
here.
Returns
-------
float
volume of component in cm^3.
"""
if self.parent.derivedMustUpdate:
# tell _updateVolume to update it during the below getVolume call
self.p.volume = None
self.parent.derivedMustUpdate = False
vol = UnshapedComponent.getVolume(self)
return vol
    def getComponentArea(self, cold=False, Tc=None):
        """
        Get the area of this component in cm^2.

        Parameters
        ----------
        cold : bool, optional
            If True, compute the area with as-input dimensions, instead of thermally-expanded.
        Tc : float, optional
            Temperature in C to compute the area at

        Raises
        ------
        ValueError
            If both ``cold`` and ``Tc`` are supplied; the two requests conflict.
        """
        if cold and Tc is not None:
            raise ValueError(f"Cannot compute component area at {Tc} and cold dimensions simultaneously.")

        if cold:
            # At cold temp, the DerivedShape has the area of the parent minus the other siblings
            parentArea = self.parent.getMaxArea()
            # NOTE: Here we assume there is one-and-only-one DerivedShape in each Component
            siblings = sum([c.getArea(cold=True) for c in self.parent if not isinstance(c, DerivedShape)])
            return parentArea - siblings

        if Tc is not None:
            # The DerivedShape has the area of the parent minus the other siblings
            parentArea = self.parent.getMaxArea()
            # NOTE: Here we assume there is one-and-only-one DerivedShape in each Component
            siblings = sum([c.getArea(Tc=Tc) for c in self.parent if not isinstance(c, DerivedShape)])
            return parentArea - siblings

        # hot-dimension path: refresh the cached derived area if siblings changed
        if self.parent.derivedMustUpdate:
            self.computeVolume()

        return self.p.area
================================================
FILE: armi/reactor/components/basicShapes.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Components represented by basic shapes.
Many reactor components can be described in 2D by circles, hexagons, rectangles, etc. These
are defined in this subpackage.
"""
import math
from armi.reactor.components import ShapedComponent, componentParameters
class Circle(ShapedComponent):
    """A Circle.

    .. impl:: Circle shaped Component
        :id: I_ARMI_COMP_SHAPES0
        :implements: R_ARMI_COMP_SHAPES

        This class provides the implementation of a Circle Component. This includes
        setting key parameters such as its material, temperature, and dimensions. It
        also includes a method to retrieve the area of a Circle
        Component via the ``getComponentArea`` method.
    """

    is3D = False

    THERMAL_EXPANSION_DIMS = {"od", "id"}

    pDefs = componentParameters.getCircleParameterDefinitions()

    def __init__(
        self,
        name,
        material,
        Tinput,
        Thot,
        od,
        id=0.0,
        mult=1.0,
        modArea=None,
        isotopics=None,
        mergeWith=None,
        components=None,
    ):
        ShapedComponent.__init__(
            self,
            name,
            material,
            Tinput,
            Thot,
            isotopics=isotopics,
            mergeWith=mergeWith,
            components=components,
        )
        # register od/id/mult/modArea so they can be linked and thermally expanded
        self._linkAndStoreDimensions(components, od=od, id=id, mult=mult, modArea=modArea)

    def getBoundingCircleOuterDiameter(self, Tc=None, cold=False):
        """Largest of the two ring diameters (normally the outer diameter)."""
        innerDiam = self.getDimension("id", Tc, cold)
        outerDiam = self.getDimension("od", Tc, cold)
        return max(innerDiam, outerDiam)

    def getCircleInnerDiameter(self, Tc=None, cold=False):
        """Smallest of the two ring diameters (normally the inner diameter)."""
        innerDiam = self.getDimension("id", Tc, cold)
        outerDiam = self.getDimension("od", Tc, cold)
        return min(innerDiam, outerDiam)

    def getComponentArea(self, cold=False, Tc=None):
        """Computes the area for the circle component in cm^2."""
        innerDiam = self.getDimension("id", cold=cold, Tc=Tc)
        outerDiam = self.getDimension("od", cold=cold, Tc=Tc)
        mult = self.getDimension("mult", cold=cold, Tc=Tc)
        # annulus area pi*(od^2 - id^2)/4, scaled by the multiplicity
        ringArea = math.pi * (outerDiam**2 - innerDiam**2) / 4.0
        return ringArea * mult

    def isEncapsulatedBy(self, other):
        """Return True if this ring lies completely inside the argument component."""
        otherID, otherOD = other.getDimension("id"), other.getDimension("od")
        myID, myOD = self.getDimension("id"), self.getDimension("od")
        innerEdgeInside = otherID <= myID < otherOD
        outerEdgeInside = otherID < myOD <= otherOD
        return innerEdgeInside and outerEdgeInside
class Hexagon(ShapedComponent):
"""A Hexagon.
This hexagonal shape has a hexagonal hole cut out of the center of it. By default, that inner
hole has a diameter of zero, making this a solid object with no hole.
.. impl:: Hexagon shaped Component
:id: I_ARMI_COMP_SHAPES1
:implements: R_ARMI_COMP_SHAPES
This class provides the implementation of a hexagonal Component. This includes setting key
parameters such as its material, temperature, and dimensions. It also includes methods for
retrieving geometric dimension information unique to hexagons such as the ``getPitchData``
method.
"""
is3D = False
pDefs = componentParameters.getHexagonParameterDefinitions()
THERMAL_EXPANSION_DIMS = {"ip", "op"}
def __init__(
self,
name,
material,
Tinput,
Thot,
op,
ip=0.0,
mult=1.0,
modArea=None,
isotopics=None,
mergeWith=None,
components=None,
):
ShapedComponent.__init__(
self,
name,
material,
Tinput,
Thot,
isotopics=isotopics,
mergeWith=mergeWith,
components=components,
)
self._linkAndStoreDimensions(components, op=op, ip=ip, mult=mult, modArea=modArea)
def getBoundingCircleOuterDiameter(self, Tc=None, cold=False):
sideLength = self.getDimension("op", Tc, cold) / math.sqrt(3)
return 2.0 * sideLength
def getCircleInnerDiameter(self, Tc=None, cold=False):
sideLength = self.getDimension("ip", Tc, cold) / math.sqrt(3)
return 2.0 * sideLength
def getComponentArea(self, cold=False, Tc=None):
"""Computes the area for the hexagon component in cm^2."""
op = self.getDimension("op", cold=cold, Tc=Tc)
ip = self.getDimension("ip", cold=cold, Tc=Tc)
mult = self.getDimension("mult")
area = math.sqrt(3.0) / 2.0 * (op**2 - ip**2)
area *= mult
return area
def getPitchData(self):
"""
Return the pitch data that should be used to determine block pitch.
Notes
-----
This pitch data should only be used if this is the pitch defining component in
a block. The block is responsible for determining which component in it is the
pitch defining component.
"""
return self.getDimension("op")
class Rectangle(ShapedComponent):
"""A Rectangle.
.. impl:: Rectangle shaped Component
:id: I_ARMI_COMP_SHAPES2
:implements: R_ARMI_COMP_SHAPES
This class provides the implementation for a rectangular Component. This
includes setting key parameters such as its material, temperature, and
dimensions. It also includes methods for computing geometric
information related to rectangles, such as the
``getBoundingCircleOuterDiameter`` and ``getPitchData`` methods.
"""
is3D = False
THERMAL_EXPANSION_DIMS = {"lengthInner", "lengthOuter", "widthInner", "widthOuter"}
pDefs = componentParameters.getRectangleParameterDefinitions()
def __init__(
self,
name,
material,
Tinput,
Thot,
lengthOuter=None,
lengthInner=0.0,
widthOuter=None,
widthInner=0.0,
mult=None,
modArea=None,
isotopics=None,
mergeWith=None,
components=None,
):
ShapedComponent.__init__(
self,
name,
material,
Tinput,
Thot,
isotopics=isotopics,
mergeWith=mergeWith,
components=components,
)
self._linkAndStoreDimensions(
components,
lengthOuter=lengthOuter,
lengthInner=lengthInner,
widthOuter=widthOuter,
widthInner=widthInner,
mult=mult,
modArea=modArea,
)
def getBoundingCircleOuterDiameter(self, Tc=None, cold=False):
lengthO = self.getDimension("lengthOuter", Tc, cold=cold)
widthO = self.getDimension("widthOuter", Tc, cold=cold)
return math.sqrt(widthO**2 + lengthO**2)
def getCircleInnerDiameter(self, Tc=None, cold=False):
lengthI = self.getDimension("lengthInner", Tc, cold=cold)
widthI = self.getDimension("widthInner", Tc, cold=cold)
return math.sqrt(widthI**2 + lengthI**2)
def getComponentArea(self, cold=False, Tc=None):
"""Computes the area of the rectangle in cm^2."""
lengthO = self.getDimension("lengthOuter", cold=cold, Tc=Tc)
widthO = self.getDimension("widthOuter", cold=cold, Tc=Tc)
lengthI = self.getDimension("lengthInner", cold=cold, Tc=Tc)
widthI = self.getDimension("widthInner", cold=cold, Tc=Tc)
mult = self.getDimension("mult")
area = mult * (lengthO * widthO - lengthI * widthI)
return area
def isLatticeComponent(self):
"""Return true if the component is a `lattice component` containing void material and zero area."""
return self.containsVoidMaterial() and self.getArea() == 0.0
def getPitchData(self):
"""
Return the pitch data that should be used to determine block pitch.
Notes
-----
For rectangular components there are two pitches, one for each dimension.
This pitch data should only be used if this is the pitch defining component in
a block. The block is responsible for determining which component in it is the
pitch defining component.
"""
return (self.getDimension("lengthOuter"), self.getDimension("widthOuter"))
class SolidRectangle(Rectangle):
"""Solid rectangle component."""
is3D = False
THERMAL_EXPANSION_DIMS = {"lengthOuter", "widthOuter"}
def __init__(
self,
name,
material,
Tinput,
Thot,
lengthOuter=None,
widthOuter=None,
mult=None,
modArea=None,
isotopics=None,
mergeWith=None,
components=None,
):
ShapedComponent.__init__(
self,
name,
material,
Tinput,
Thot,
isotopics=isotopics,
mergeWith=mergeWith,
components=components,
)
self._linkAndStoreDimensions(
components,
lengthOuter=lengthOuter,
widthOuter=widthOuter,
mult=mult,
modArea=modArea,
)
# these need to be set so that we don't try to write NoDefaults to the database.
# Ultimately, it makes more sense to have the non-Solid Rectangle inherit from
# this (and probably be called a HollowRectangle or RectangularShell or
# whatever), since a solid rectangle is more generic of the two. Then the
# Parameter definitions for the hollow rectangle could inherit from the ones,
# adding the inner dimensions so that we wouldn't need to do this here.
self.p.lengthInner = 0
self.p.widthInner = 0
def getComponentArea(self, cold=False, Tc=None):
"""Computes the area of the solid rectangle in cm^2."""
lengthO = self.getDimension("lengthOuter", cold=cold, Tc=Tc)
widthO = self.getDimension("widthOuter", cold=cold, Tc=Tc)
mult = self.getDimension("mult")
area = mult * (lengthO * widthO)
return area
class Square(Rectangle):
"""Square component that can be solid or hollow.
.. impl:: Square shaped Component
:id: I_ARMI_COMP_SHAPES3
:implements: R_ARMI_COMP_SHAPES
This class provides the implementation for a square Component. This class
subclasses the ``Rectangle`` class because a square is a type of rectangle.
This includes setting key parameters such as its material, temperature, and
dimensions.
"""
is3D = False
def __init__(
self,
name,
material,
Tinput,
Thot,
widthOuter=None,
widthInner=0.0,
mult=None,
modArea=None,
isotopics=None,
mergeWith=None,
components=None,
):
ShapedComponent.__init__(
self,
name,
material,
Tinput,
Thot,
isotopics=isotopics,
mergeWith=mergeWith,
components=components,
)
self._linkAndStoreDimensions(
components,
lengthOuter=widthOuter,
widthOuter=widthOuter,
widthInner=widthInner,
lengthInner=widthInner,
mult=mult,
modArea=modArea,
)
def getComponentArea(self, cold=False, Tc=None):
"""Computes the area of the square in cm^2."""
widthO = self.getDimension("widthOuter", cold=cold, Tc=Tc)
widthI = self.getDimension("widthInner", cold=cold, Tc=Tc)
mult = self.getDimension("mult")
area = mult * (widthO * widthO - widthI * widthI)
return area
def getBoundingCircleOuterDiameter(self, Tc=None, cold=False):
widthO = self.getDimension("widthOuter", Tc, cold=cold)
return math.sqrt(widthO**2 + widthO**2)
def getCircleInnerDiameter(self, Tc=None, cold=False):
widthI = self.getDimension("widthInner", Tc, cold=cold)
return math.sqrt(widthI**2 + widthI**2)
def getPitchData(self):
"""
Return the pitch data that should be used to determine block pitch.
Notes
-----
For rectangular components there are two pitches, one for each dimension.
This pitch data should only be used if this is the pitch defining component in
a block. The block is responsible for determining which component in it is the
pitch defining component.
"""
# both dimensions are the same for a square.
return (self.getDimension("widthOuter"), self.getDimension("widthOuter"))
class Triangle(ShapedComponent):
"""
Triangle with defined base and height.
.. impl:: Triangle shaped Component
:id: I_ARMI_COMP_SHAPES4
:implements: R_ARMI_COMP_SHAPES
This class provides the implementation for defining a triangular Component. This
includes setting key parameters such as its material, temperature, and
dimensions. It also includes providing a method for retrieving the area of a
Triangle Component via the ``getComponentArea`` method.
Notes
-----
The exact angles of the triangle are undefined. The exact side lengths and angles
are not critical to calculation of component area, so area can still be calculated.
"""
is3D = False
THERMAL_EXPANSION_DIMS = {"base", "height"}
pDefs = componentParameters.getTriangleParameterDefinitions()
def __init__(
self,
name,
material,
Tinput,
Thot,
base=None,
height=None,
mult=None,
modArea=None,
isotopics=None,
mergeWith=None,
components=None,
):
ShapedComponent.__init__(
self,
name,
material,
Tinput,
Thot,
isotopics=isotopics,
mergeWith=mergeWith,
components=components,
)
self._linkAndStoreDimensions(components, base=base, height=height, mult=mult, modArea=modArea)
def getComponentArea(self, cold=False, Tc=None):
"""Computes the area of the triangle in cm^2."""
base = self.getDimension("base", cold=cold, Tc=Tc)
height = self.getDimension("height", cold=cold, Tc=Tc)
mult = self.getDimension("mult")
area = mult * base * height / 2.0
return area
================================================
FILE: armi/reactor/components/complexShapes.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Components represented by complex shapes, and typically less widely used."""
import math
from armi.reactor.components import ShapedComponent, basicShapes, componentParameters
class HoledHexagon(basicShapes.Hexagon):
"""Hexagon with n uniform circular holes hollowed out of it.
.. impl:: Holed hexagon shaped Component
:id: I_ARMI_COMP_SHAPES5
:implements: R_ARMI_COMP_SHAPES
This class provides an implementation for a holed hexagonal Component. This includes setting
key parameters such as its material, temperature, and dimensions. It also provides the
capability to retrieve the diameter of the inner hole via the ``getCircleInnerDiameter``
method.
"""
THERMAL_EXPANSION_DIMS = {"op", "holeOD", "holeRadFromCenter"}
pDefs = componentParameters.getHoledHexagonParameterDefinitions()
def __init__(
self,
name,
material,
Tinput,
Thot,
op,
holeOD,
nHoles,
holeRadFromCenter=0.0,
mult=1.0,
modArea=None,
isotopics=None,
mergeWith=None,
components=None,
):
ShapedComponent.__init__(
self,
name,
material,
Tinput,
Thot,
isotopics=isotopics,
mergeWith=mergeWith,
components=components,
)
self._linkAndStoreDimensions(
components,
op=op,
holeOD=holeOD,
nHoles=nHoles,
holeRadFromCenter=holeRadFromCenter,
mult=mult,
modArea=modArea,
)
def getComponentArea(self, cold=False, Tc=None):
"""Computes the area for the hexagon with n number of circular holes in cm^2."""
op = self.getDimension("op", cold=cold, Tc=Tc)
holeOD = self.getDimension("holeOD", cold=cold, Tc=Tc)
nHoles = self.getDimension("nHoles", cold=cold, Tc=Tc)
mult = self.getDimension("mult")
hexArea = math.sqrt(3.0) / 2.0 * (op**2)
circularArea = nHoles * math.pi * ((holeOD / 2.0) ** 2)
area = mult * (hexArea - circularArea)
return area
def getCircleInnerDiameter(self, Tc=None, cold=False):
"""
For the special case of only one single hole, returns the diameter of that hole.
For any other case, returns 0.0 because an "circle inner diameter" becomes undefined.
"""
if self.getDimension("nHoles") == 1:
return self.getDimension("holeOD", Tc, cold)
else:
return 0.0
class HexHoledCircle(basicShapes.Circle):
"""Circle with a single uniform hexagonal hole hollowed out of it."""
THERMAL_EXPANSION_DIMS = {"od", "holeOP"}
pDefs = componentParameters.getHexHoledCircleParameterDefinitions()
def __init__(
self,
name,
material,
Tinput,
Thot,
od,
holeOP,
mult=1.0,
modArea=None,
isotopics=None,
mergeWith=None,
components=None,
):
ShapedComponent.__init__(
self,
name,
material,
Tinput,
Thot,
isotopics=isotopics,
mergeWith=mergeWith,
components=components,
)
self._linkAndStoreDimensions(components, od=od, holeOP=holeOP, mult=mult, modArea=modArea)
def getComponentArea(self, cold=False, Tc=None):
r"""Computes the area for the circle with one hexagonal hole."""
od = self.getDimension("od", cold=cold, Tc=Tc)
holeOP = self.getDimension("holeOP", cold=cold, Tc=Tc)
mult = self.getDimension("mult")
hexArea = math.sqrt(3.0) / 2.0 * (holeOP**2)
circularArea = math.pi * ((od / 2.0) ** 2)
area = mult * (circularArea - hexArea)
return area
def getCircleInnerDiameter(self, Tc=None, cold=False):
"""Returns the diameter of the hole equal to the hexagon outer pitch."""
return self.getDimension("holeOP", Tc, cold)
class FilletedHexagon(basicShapes.Hexagon):
"""
A hexagon with a hexagonal hole cut out of the center of it, where the corners of both the
outer and inner hexagons are rounded, with independent radii of curvature.
By default, the inner hole has a diameter of zero, making this a solid object with no hole.
"""
THERMAL_EXPANSION_DIMS = {"iR", "oR", "ip", "op"}
pDefs = componentParameters.getFilletedHexagonParameterDefinitions()
def __init__(
self,
name,
material,
Tinput,
Thot,
op,
ip=0.0,
iR=0.0,
oR=0.0,
mult=1.0,
modArea=None,
isotopics=None,
mergeWith=None,
components=None,
):
ShapedComponent.__init__(
self,
name,
material,
Tinput,
Thot,
isotopics=isotopics,
mergeWith=mergeWith,
components=components,
)
self._linkAndStoreDimensions(components, op=op, ip=ip, iR=iR, oR=oR, mult=mult, modArea=modArea)
@staticmethod
def _area(D, r):
"""Helper function, to calculate the area of a hexagon with rounded corners."""
if D <= 0.0:
return 0.0
area = 1.0 - (1.0 - (math.pi / (2.0 * math.sqrt(3)))) * (2 * r / D) ** 2
area *= (math.sqrt(3.0) / 2.0) * D**2
return area
def getComponentArea(self, cold=False, Tc=None):
"""Computes the area for the rounded hexagon component in cm^2."""
op = self.getDimension("op", cold=cold, Tc=Tc)
ip = self.getDimension("ip", cold=cold, Tc=Tc)
oR = self.getDimension("oR", cold=cold, Tc=Tc)
iR = self.getDimension("iR", cold=cold, Tc=Tc)
mult = self.getDimension("mult")
area = self._area(op, oR) - self._area(ip, iR)
area *= mult
return area
class HoledRectangle(basicShapes.Rectangle):
"""Rectangle with one circular hole in it."""
THERMAL_EXPANSION_DIMS = {"lengthOuter", "widthOuter", "holeOD"}
pDefs = componentParameters.getHoledRectangleParameterDefinitions()
def __init__(
self,
name,
material,
Tinput,
Thot,
holeOD,
lengthOuter=None,
widthOuter=None,
mult=1.0,
modArea=None,
isotopics=None,
mergeWith=None,
components=None,
):
ShapedComponent.__init__(
self,
name,
material,
Tinput,
Thot,
isotopics=isotopics,
mergeWith=mergeWith,
components=components,
)
self._linkAndStoreDimensions(
components,
lengthOuter=lengthOuter,
widthOuter=widthOuter,
holeOD=holeOD,
mult=mult,
modArea=modArea,
)
def getComponentArea(self, cold=False, Tc=None):
"""Computes the area (in cm^2) for the the rectangle with one hole in it."""
length = self.getDimension("lengthOuter", cold=cold, Tc=Tc)
width = self.getDimension("widthOuter", cold=cold, Tc=Tc)
rectangleArea = length * width
holeOD = self.getDimension("holeOD", cold=cold, Tc=Tc)
circularArea = math.pi * ((holeOD / 2.0) ** 2)
mult = self.getDimension("mult")
area = mult * (rectangleArea - circularArea)
return area
def getCircleInnerDiameter(self, Tc=None, cold=False):
"""Returns the ``holeOD``."""
return self.getDimension("holeOD", Tc, cold)
class HoledSquare(basicShapes.Square):
"""Square with one circular hole in it.
.. impl:: Holed square shaped Component
:id: I_ARMI_COMP_SHAPES6
:implements: R_ARMI_COMP_SHAPES
This class provides an implementation for a holed square Component. This includes setting
key parameters such as its material, temperature, and dimensions. It also includes methods
to retrieve geometric dimension information unique to holed squares via the
``getComponentArea`` and ``getCircleInnerDiameter`` methods.
"""
THERMAL_EXPANSION_DIMS = {"widthOuter", "holeOD"}
pDefs = componentParameters.getHoledRectangleParameterDefinitions()
def __init__(
self,
name,
material,
Tinput,
Thot,
holeOD,
widthOuter=None,
mult=1.0,
modArea=None,
isotopics=None,
mergeWith=None,
components=None,
):
ShapedComponent.__init__(
self,
name,
material,
Tinput,
Thot,
isotopics=isotopics,
mergeWith=mergeWith,
components=components,
)
self._linkAndStoreDimensions(components, widthOuter=widthOuter, holeOD=holeOD, mult=mult, modArea=modArea)
def getComponentArea(self, cold=False, Tc=None):
"""Computes the area (in cm^2) for the the square with one hole in it."""
width = self.getDimension("widthOuter", cold=cold, Tc=Tc)
rectangleArea = width**2
holeOD = self.getDimension("holeOD", cold=cold, Tc=Tc)
circularArea = math.pi * ((holeOD / 2.0) ** 2)
mult = self.getDimension("mult")
area = mult * (rectangleArea - circularArea)
return area
def getCircleInnerDiameter(self, Tc=None, cold=False):
"""Returns the ``holeOD``."""
return self.getDimension("holeOD", Tc, cold)
class Helix(ShapedComponent):
"""A spiral wire component used to model a pin wire-wrap.
.. impl:: Helix shaped Component
:id: I_ARMI_COMP_SHAPES7
:implements: R_ARMI_COMP_SHAPES
This class provides the implementation for a helical Component. This includes setting key
parameters such as its material, temperature, and dimensions. It also includes the
``getComponentArea`` method to retrieve the area of a helix. Helixes can be used for wire
wrapping around fuel pins in fast reactor designs.
Notes
-----
http://mathworld.wolfram.com/Helix.html
In a single rotation with an axial climb of P, the length of the helix will be a factor of
2*pi*sqrt(r^2+c^2)/2*pi*c longer than vertical length L. P = 2*pi*c.
- od: outer diameter of the helix wire
- id: inner diameter of the helix wire (if non-zero, helix wire is annular.)
- axialPitch: vertical distance between wraps. Is also the axial distance required to complete a
full 2*pi rotation.
- helixDiameter: The helix diameter is the distance from the center of the wire-wrap on one side
to the center of the wire-wrap on the opposite side (can be visualized if the
axial pitch is 0.0 - creates a circle).
"""
is3D = False
THERMAL_EXPANSION_DIMS = {"od", "id", "axialPitch", "helixDiameter"}
pDefs = componentParameters.getHelixParameterDefinitions()
def __init__(
self,
name,
material,
Tinput,
Thot,
od,
axialPitch,
helixDiameter,
mult=1.0,
id=0.0,
modArea=None,
isotopics=None,
mergeWith=None,
components=None,
):
ShapedComponent.__init__(
self,
name,
material,
Tinput,
Thot,
isotopics=isotopics,
mergeWith=mergeWith,
components=components,
)
self._linkAndStoreDimensions(
components,
od=od,
axialPitch=axialPitch,
mult=mult,
helixDiameter=helixDiameter,
id=id,
modArea=modArea,
)
def getBoundingCircleOuterDiameter(self, Tc=None, cold=False):
"""The diameter of a circle which is encompassed by the exterior of the wire-wrap."""
return self.getDimension("helixDiameter", Tc, cold=cold) + self.getDimension("od", Tc, cold)
def getCircleInnerDiameter(self, Tc=None, cold=False):
"""The diameter of a circle which is encompassed by the interior of the wire-wrap.
This should be equal to the outer diameter of the pin in which the wire is wrapped around.
"""
return self.getDimension("helixDiameter", Tc, cold=cold) - self.getDimension("od", Tc, cold)
def getComponentArea(self, cold=False, Tc=None):
"""Computes the area for the helix in cm^2."""
ap = self.getDimension("axialPitch", cold=cold, Tc=Tc)
hd = self.getDimension("helixDiameter", cold=cold, Tc=Tc)
id = self.getDimension("id", cold=cold, Tc=Tc)
od = self.getDimension("od", cold=cold, Tc=Tc)
mult = self.getDimension("mult")
c = ap / (2.0 * math.pi)
helixFactor = math.sqrt((hd / 2.0) ** 2 + c**2) / c
area = mult * math.pi * ((od / 2.0) ** 2 - (id / 2.0) ** 2) * helixFactor
return area
================================================
FILE: armi/reactor/components/component.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Components represent geometric objects within an assembly such as fuel, bond, coolant, ducts, wires, etc.
This module contains the abstract definition of a Component.
"""
import copy
import re
from typing import Union
import numpy as np
from armi import materials, runLog
from armi.bookkeeping import report
from armi.materials import custom, material, void
from armi.reactor import composites, flags, parameters
from armi.reactor.components import componentParameters
from armi.utils import densityTools
from armi.utils.units import C_TO_K
COMPONENT_LINK_REGEX = re.compile(r"^\s*(.+?)\s*\.\s*(.+?)\s*$")
_NICE_DIM_NAMES = {
"id": "Inner Diameter (cm)",
"od": "Outer Diameter (cm)",
"ip": "Inner Pitch (cm)",
"op": "Outer Pitch (cm)",
"mult": "Multiplicity",
"axialPitch": "Axial Pitch (cm)",
"helixDiameter": "Helix Diameter (cm)",
"length": "Length (cm)",
"height": "Height (cm)",
"width": "Width (cm)",
"areaMod": "Area Mod. Factor",
}
class _DimensionLink(tuple):
"""
A linked dimension, where one component uses a dimension from another.
Useful when the boundaries are physically shared and should move together.
The tuple contains (linkedComponent, linkedDimensionName).
In equating two components, we need the linked dimensions to resolve responsibly/precisely.
"""
def getLinkedComponent(self):
"""Return the linked component."""
return self[0]
def resolveDimension(self, Tc=None, cold=False):
"""Return the current value of the linked dimension."""
linkedComponent = self[0]
dimID = self[1]
return linkedComponent.getDimension(dimID, Tc=Tc, cold=cold)
def __eq__(self, other):
otherDimension = other.resolveDimension() if isinstance(other, _DimensionLink) else other
return self.resolveDimension() == otherDimension
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
"""Return a string representation of a dimension link.
These look like ``otherComponentName.otherDimensionName``. For example, if a link were to a
``fuel`` component's ``od`` param, the link would render as ``fuel.od``.
"""
return f"{self[0].name}.{self[1]}"
class ComponentType(composites.CompositeModelType):
"""
ComponetType is a metaclass for storing and initializing Component subclass types.
The construction of Component subclasses is being done through factories for ease of user input.
As a consequence, the ``__init__`` methods' arguments need to be known in order to conform them
to the correct format. Additionally, the constructors arguments can be used to determine the
Component subclasses dimensions.
Warning
-------
The import-time metaclass-based component subclass registration was a good idea, but in practice
has caused significant confusion and trouble. We will replace this soon with an explicit
plugin-based component subclass registration system.
"""
TYPES = dict() #: :meta hide-value:
NON_DIMENSION_NAMES = (
"Tinput",
"Thot",
"isotopics",
"mergeWith",
"material",
"name",
"components",
"area",
)
def __new__(cls, name, bases, attrs):
newType = composites.CompositeModelType.__new__(cls, name, bases, attrs)
ComponentType.TYPES[name.lower()] = newType
# the co_varnames attribute contains arguments and then locals so we must
# restrict it to just the arguments.
signature = newType.__init__.__code__.co_varnames[1 : newType.__init__.__code__.co_argcount]
# INIT_SIGNATURE and DIMENSION_NAMES are in the same order as the method signature
newType.INIT_SIGNATURE = tuple(signature)
newType.DIMENSION_NAMES = tuple(k for k in newType.INIT_SIGNATURE if k not in ComponentType.NON_DIMENSION_NAMES)
return newType
class Component(composites.Composite, metaclass=ComponentType):
"""
A primitive object in a reactor that has definite area/volume, material and composition.
Could be fuel pins, cladding, duct, wire wrap, etc. One component object may represent
multiple physical components via the ``multiplicity`` mechanism.
.. impl:: Define a physical piece of a reactor.
:id: I_ARMI_COMP_DEF
:implements: R_ARMI_COMP_DEF
The primitive object in an ARMI reactor is a Component. A Component is comprised
of a shape and composition. This class serves as a base class which all
Component types within ARMI are built upon. All primitive shapes (such as a
square, circle, holed hexagon, helix etc.) are derived from this base class.
Fundamental capabilities of this class include the ability to store parameters
and attributes which describe the physical state of each Component within the
ARMI data model.
.. impl:: Order Components by their outermost diameter (using the < operator).
:id: I_ARMI_COMP_ORDER
:implements: R_ARMI_COMP_ORDER
Determining Component order by outermost diameters is implemented via
the ``__lt__()`` method, which is used to control ``sort()`` as the
standard approach in Python. However, ``__lt__()`` does not show up in the API.
Attributes
----------
temperatureInC : float
Current temperature of component in celsius.
inputTemperatureInC : float
Reference temperature in C at which dimension definitions were input
temperatureInC : float
Temperature in C to which dimensions were thermally-expanded upon input.
material : str or material.Material
The material object that makes up this component and give it its thermo-mechanical properties.
"""
DIMENSION_NAMES = tuple() # will be assigned by ComponentType
INIT_SIGNATURE = tuple() # will be assigned by ComponentType
is3D = False # flag to show that area is 2D by default
_COMP_REPORT_GROUPS = {
"intercoolant": report.INTERCOOLANT_DIMS,
"bond": report.BOND_DIMS,
"duct": report.DUCT_DIMS,
"coolant": report.COOLANT_DIMS,
"clad": report.CLAD_DIMS,
"fuel": report.FUEL_DIMS,
"wire": report.WIRE_DIMS,
"liner": report.LINER_DIMS,
"gap": report.GAP_DIMS,
}
_TOLERANCE = 1e-10
THERMAL_EXPANSION_DIMS = set()
pDefs = componentParameters.getComponentParameterDefinitions()
material: materials.Material
def __init__(
self,
name,
material,
Tinput,
Thot,
area=None,
isotopics="",
mergeWith="",
components=None,
):
if components and name in components:
raise ValueError(f"Non-unique component name {name} repeated in same block.")
composites.Composite.__init__(self, str(name))
self.p.area = area
self.inputTemperatureInC = Tinput
self.temperatureInC = Thot
self.material = None
self.setProperties(material)
self.applyMaterialMassFracsToNumberDensities() # not necessary when duplicating
self.setType(name)
self.p.mergeWith = mergeWith
self.p.customIsotopicsName = isotopics
@property
def temperatureInC(self):
"""Return the hot temperature in Celsius."""
return self.p.temperatureInC
@temperatureInC.setter
def temperatureInC(self, value):
"""Set the hot temperature in Celsius."""
self.p.temperatureInC = value
@property
def temperatureInK(self):
"""Current hot temperature in Kelvin."""
return self.temperatureInC + C_TO_K
def __lt__(self, other):
"""
True if a circle encompassing this object has a smaller diameter than one encompassing
another component.
If the bounding circles for both components have identical size, then revert to checking the
inner diameter of each component for sorting.
This allows sorting because the Python sort functions only use this method.
"""
thisOD = self.getBoundingCircleOuterDiameter(cold=True)
thatOD = other.getBoundingCircleOuterDiameter(cold=True)
try:
if thisOD == thatOD:
thisID = self.getCircleInnerDiameter(cold=True)
thatID = other.getCircleInnerDiameter(cold=True)
return thisID < thatID
else:
return thisOD < thatOD
except (NotImplementedError, Exception) as e:
if isinstance(e, NotImplementedError):
raise NotImplementedError(f"getCircleInnerDiameter not implemented for at least one of {self}, {other}")
else:
raise ValueError(
f"Components 1 ({self} with OD {thisOD}) and 2 ({other} and OD {thatOD}) cannot be ordered because "
"their bounding circle outer diameters are not comparable."
)
def __setstate__(self, state):
composites.Composite.__setstate__(self, state)
self.material.parent = self
def _linkAndStoreDimensions(self, components, **dims):
"""Link dimensions to another component."""
for key, val in dims.items():
self.setDimension(key, val)
if components:
self.resolveLinkedDims(components)
def resolveLinkedDims(self, components):
"""Convert dimension link strings to actual links.
.. impl:: The volume of some defined shapes depend on the solid components surrounding them.
:id: I_ARMI_COMP_FLUID1
:implements: R_ARMI_COMP_FLUID
Some Components are fluids and are thus defined by the shapes surrounding
them. This method cycles through each dimension defining the border of this
Component and converts the name of that Component to a link to the object
itself. This series of links is then used downstream to resolve dimensional information.
"""
for dimName in self.DIMENSION_NAMES:
value = self.p[dimName]
if not isinstance(value, str):
continue
match = COMPONENT_LINK_REGEX.search(value)
if match:
try:
name = match.group(1)
comp = components[name]
linkedKey = match.group(2)
self.p[dimName] = _DimensionLink((comp, linkedKey))
except Exception:
if value.count(".") > 1:
raise ValueError(
f"Name of {self} has a period in it. "
f"Components cannot not have periods in their names: `{value}`"
)
else:
raise KeyError(f"Bad component link `{dimName}` defined as `{value}` in {self}")
def setLink(self, key, otherComp, otherCompKey):
"""Set the dimension link."""
self.p[key] = _DimensionLink((otherComp, otherCompKey))
def setProperties(self, properties):
"""Apply thermo-mechanical properties of a Material."""
if isinstance(properties, str):
mat = materials.resolveMaterialClassByName(properties)()
# note that the material will not be expanded to natural isotopics
# here because the user-input blueprints information is not available
else:
mat = properties
self.material = mat
self.material.parent = self
self.clearLinkedCache()
def applyMaterialMassFracsToNumberDensities(self):
"""
Set the hot number densities for the component based on material mass fractions/density.
Notes
-----
- the density returned accounts for the expansion of the component
due to the difference in self.inputTemperatureInC and self.temperatureInC
- After the expansion, the density of the component should reflect the 3d
density of the material
"""
# note, that this is not the actual material density, but rather 2D expanded
# `density` is 3D density
# call getProperty to cache and improve speed
density = self.material.getProperty("pseudoDensity", Tc=self.temperatureInC)
self.p.numberDensities = densityTools.getNDensFromMasses(density, self.material.massFrac)
# Sometimes material thermal expansion depends on its parent's composition (e.g. Pu frac) so
# setting number densities can sometimes change thermal expansion behavior. Call again so
# the material has access to its parent's comp when providing the reference initial density.
densityBasedOnParentComposition = self.material.getProperty("pseudoDensity", Tc=self.temperatureInC)
self.p.nuclides, self.p.numberDensities = densityTools.getNDensFromMasses(
densityBasedOnParentComposition, self.material.massFrac
)
# material needs to be expanded from the material's cold temp to hot,
# not components cold temp, so we don't use mat.linearExpansionFactor or
# component.getThermalExpansionFactor.
# Materials don't typically define the temperature for which their references
# density is defined so linearExpansionPercent must be called
coldMatAxialExpansionFactor = 1.0 + self.material.linearExpansionPercent(Tc=self.temperatureInC) / 100
self.changeNDensByFactor(1.0 / coldMatAxialExpansionFactor)
def adjustDensityForHeightExpansion(self, newHot):
    """
    Rescale number densities when the height of the block/component changes with expansion.

    Notes
    -----
    Call before setTemperature since the old hot temperature is needed. This works well if
    there is only one solid component. If multiple components expand at different rates
    during thermal expansion this becomes more complicated, and axial expansion should be
    used instead; multiple expansion rates cannot trivially be accommodated here.
    """
    heightFactor = self.getHeightFactor(newHot)
    self.changeNDensByFactor(1.0 / heightFactor)
def getHeightFactor(self, newHot):
    """
    Return the factor by which the height would change if we did 3D expansion.

    Notes
    -----
    Call before setTemperature since the current (old) hot temperature is used as T0.
    """
    oldHot = self.temperatureInC
    return self.getThermalExpansionFactor(Tc=newHot, T0=oldHot)
def getProperties(self):
    """Return the active Material object defining thermo-mechanical properties.

    .. impl:: Material properties are retrievable.
        :id: I_ARMI_COMP_MAT0
        :implements: R_ARMI_COMP_MAT

        This method returns the material object that is assigned to the Component.

    .. impl:: Components have one-and-only-one material.
        :id: I_ARMI_COMP_1MAT
        :implements: R_ARMI_COMP_1MAT

        This method returns the material object that is assigned to the Component.
    """
    return self.material
@property
def liquidPorosity(self):
    """Liquid porosity; stored on the parent block's parameters, not on this component."""
    return self.parent.p.liquidPorosity

@liquidPorosity.setter
def liquidPorosity(self, porosity):
    # delegate storage to the parent block parameter
    self.parent.p.liquidPorosity = porosity

@property
def gasPorosity(self):
    """Gas porosity; stored on the parent block's parameters, not on this component."""
    return self.parent.p.gasPorosity

@gasPorosity.setter
def gasPorosity(self, porosity):
    # delegate storage to the parent block parameter
    self.parent.p.gasPorosity = porosity
def __copy__(self):
    """Duplicate a component; used for breaking fuel into separate components."""
    # Linked dimensions hold references to other ARMI components, so detach them
    # before deep-copying and re-attach afterwards on both the original and the copy.
    savedLinks = self._getLinkedDimsAndValues()
    duplicate = copy.deepcopy(self)
    self._restoreLinkedDims(savedLinks)
    duplicate._restoreLinkedDims(savedLinks)
    return duplicate
def setLumpedFissionProducts(self, lfpCollection):
    """Set a lumped fission product collection on an LFP-compatible material, if possible."""
    mat = self.getProperties()
    try:
        mat.setLumpedFissionProducts(lfpCollection)
    except AttributeError:
        # Regular (non-LFP-compatible) materials have no setLumpedFissionProducts;
        # silently skip them on purpose.
        pass
def getArea(self, cold=False, Tc=None):
    """
    Get the area of a Component in cm^2.

    .. impl:: Get a dimension of a Component.
        :id: I_ARMI_COMP_VOL0
        :implements: R_ARMI_COMP_VOL

        This method returns the area of a Component.

    See Also
    --------
    block.getVolumeFractions: component coolant is typically the "leftover" and is calculated and set here
    """
    area = self.getComponentArea(cold=cold, Tc=Tc)
    modArea = self.p.get("modArea", None)
    if modArea:
        # another component's area modifies this one (e.g. for overlap bookkeeping)
        otherComp, mode = modArea
        if mode == "sub":
            area -= otherComp.getComponentArea(cold=cold, Tc=Tc)
        elif mode == "add":
            area += otherComp.getComponentArea(cold=cold, Tc=Tc)
        else:
            raise ValueError(f"Option {mode} does not exist")

    self._checkNegativeArea(area, cold)
    return area
def getVolume(self):
    """
    Return the volume [cm^3] of the Component.

    .. impl:: Get a dimension of a Component.
        :id: I_ARMI_COMP_VOL1
        :implements: R_ARMI_COMP_VOL

        This method returns the volume of a Component.

    Notes
    -----
    ``self.p.volume`` is computed lazily and is not set until this method is called, so under
    most circumstances it is probably not safe to access ``self.p.volume`` directly. This is
    because not all components (e.g., ``DerivedShape``) can compute their volume during
    initialization.
    """
    if self.p.volume is None:
        self._updateVolume()
        if self.p.volume is None:
            raise ValueError(f"{self} has undefined volume.")
    return self.p.volume
def clearCache(self):
    """
    Invalidate the stored volume so it is recomputed from current dimensions on next access.

    The updated value will be based on the component's shape and current dimensions. If there
    is a parent container and that container contains a DerivedShape, then that must be
    updated as well since its volume may be changing.

    See Also
    --------
    clearLinkedCache: Clears cache of components that depend on this component's dimensions.
    """
    self.p.volume = None
    parent = self.parent
    if parent:
        parent.derivedMustUpdate = True
def _updateVolume(self):
    """Recompute the volume from current dimensions and store it on the parameter."""
    self.p.volume = self.computeVolume()
def computeVolume(self):
    """Compute and return the volume [cm^3], checking that it is not invalidly negative."""
    if self.is3D:
        volume = self.getComponentVolume()
    else:
        # 2D components are extruded over the parent's height
        volume = self.getArea() * self.parent.getHeight()
    self._checkNegativeVolume(volume)
    return volume
def _checkNegativeArea(self, area, cold):
    """
    Check for negative area and raise when it is not permissible.

    Negative component area is allowed for Void materials (such as gaps) which may be placed
    between components that will overlap during thermal expansion (such as liners and cladding
    and annular fuel).

    Overlapping is allowed to maintain conservation of atoms while sticking close to the
    as-built geometry. Modules that need true geometries will have to handle this themselves.
    """
    if np.isnan(area) or area >= 0.0:
        return
    coldNonVoid = cold and not self.containsVoidMaterial()
    if coldNonVoid or self.containsSolidMaterial():
        raise ArithmeticError(
            f"Component {self} with {self.material} has cold negative area of {area} cm^2. "
            "This can be caused by component overlap with component dimension linking or by invalid inputs."
        )
def _checkNegativeVolume(self, volume):
    """Check for negative volume; raise for solid materials only.

    See Also
    --------
    self._checkNegativeArea
    """
    if np.isnan(volume) or volume >= 0.0:
        return
    if self.containsSolidMaterial():
        raise ArithmeticError(
            f"Component {self} with {self.material} has cold negative volume of {volume} cm^3. "
            "This can be caused by component overlap with component dimension linking or by invalid inputs."
        )
def containsVoidMaterial(self):
    """Return True if the component material is void (e.g. a gap)."""
    return isinstance(self.material, void.Void)

def containsSolidMaterial(self):
    """Return True if the component material is a solid (i.e., not a Fluid)."""
    return not isinstance(self.material, material.Fluid)
def getComponentArea(self, cold=False, Tc=None):
    """
    Get the area of this component in cm^2.

    Abstract; shape subclasses must override this.

    Parameters
    ----------
    cold : bool, optional
        Compute the area with as-input dimensions instead of thermally-expanded
    Tc : float, optional
        Temperature to compute the area at

    Raises
    ------
    NotImplementedError
        Always, on this base implementation.
    """
    raise NotImplementedError
def getComponentVolume(self):
    """Return the stored volume parameter [cm^3]; may be None if never computed."""
    return self.p.volume

def setVolume(self, val):
    """Setting volume directly is unsupported; volume derives from shape dimensions."""
    raise NotImplementedError

def setArea(self, val):
    """Setting area directly is unsupported; area derives from shape dimensions."""
    raise NotImplementedError
def setTemperature(self, temperatureInC):
    r"""
    Adjust temperature of this component.

    This will cause thermal expansion or contraction of solid or liquid components and will
    accordingly adjust number densities to conserve mass.

    Liquids still have a number density adjustment, but some mass tends to expand in or out of
    the bounding area.

    Since some composites have multiple materials in them that thermally expand differently,
    the axial dimension is generally left unchanged. Hence, this is a 2-D thermal expansion.

    Number density change is proportional to mass density change :math:`\frac{d\rho}{\rho}`.
    A multiplicative factor :math:`f_N` to apply to number densities when going from T to T'
    is as follows:

    .. math::

        N^{\prime} = N \cdot f_N \\
        \frac{dN}{N} = f_N - 1

    Since :math:`\frac{dN}{N} \sim\frac{d\rho}{\rho}`, we have:

    .. math::

        f_N = \frac{d\rho}{\rho} + 1 = \frac{\rho^{\prime}}{\rho}
    """
    previousTempInC = self.temperatureInC
    self.temperatureInC = float(temperatureInC)
    # scale number densities by the material's density change over the temperature step
    densityFactor = self.material.getThermalExpansionDensityReduction(previousTempInC, self.temperatureInC)
    self.changeNDensByFactor(densityFactor)
    self.clearLinkedCache()
def getNuclides(self):
    """
    Return the nuclide names present in this component.

    This includes anything that has been specified in here, including trace nuclides.
    """
    storedNucs = self.p.nuclides
    if storedNucs is None:
        return []
    # stored as encoded byte strings; decode back to str
    return [raw.decode() for raw in storedNucs]
def getNumberDensity(self, nucName):
    """
    Get the number density of nucName; return zero if it does not exist here.

    Parameters
    ----------
    nucName : str
        Nuclide name

    Returns
    -------
    number density : float
        number density in atoms/bn-cm.
    """
    matches = np.where(self.p.nuclides == nucName.encode())[0]
    if matches.size == 0:
        return 0.0
    return self.p.numberDensities[matches[0]]
def getNuclideNumberDensities(self, nucNames: list[str]) -> list[float]:
    """Return a list of number densities [atoms/bn-cm] for the nuc names requested."""
    if isinstance(nucNames, (list, tuple, np.ndarray)):
        requested = np.asanyarray(nucNames, dtype="S6")
    else:
        requested = [name.encode() for name in nucNames]

    stored = self.p.numberDensities
    if stored is None:
        # nothing stored: everything requested is zero
        return np.zeros(len(requested), dtype=np.float64)

    # trivial case where the request is the full nuclide set in the same order
    if np.array_equal(requested, self.p.nuclides):
        return np.array(stored)

    if len(requested) < len(self.p.nuclides) / 10:
        # small subset: direct index lookups beat rebuilding a dict
        return self._getNumberDensitiesArray(requested)

    lookup = dict(zip(self.p.nuclides, stored))
    return [lookup.get(nuc, 0.0) for nuc in requested]
def _getNumberDensitiesArray(self, byteNucs):
    """
    Get number densities using direct per-nuclide array lookup.

    When only a small subset of nuclide number densities is requested, looking up the index
    for each nuclide is likely faster than recreating the entire dictionary for one lookup.

    Parameters
    ----------
    byteNucs : np.ndarray, dtype="S6"
        List of nuclides for which to retrieve number densities, as encoded byte strings
    """
    storedNucs = self.p.nuclides
    storedDens = self.p.numberDensities
    result = np.zeros(len(byteNucs), dtype=np.float64)
    # np.where gives the direct index of each requested nuclide; missing ones stay 0.0
    for idx, nuc in enumerate(byteNucs):
        hits = np.where(storedNucs == nuc)[0]
        if hits.size:
            result[idx] = storedDens[hits[0]]
    return result
def _getNdensHelper(self):
    """Return a dict of nuclide name -> number density (empty when no nuclides exist)."""
    names = self.getNuclides()
    if not names:
        return {}
    return dict(zip(names, self.p.numberDensities))
def setName(self, name):
    """Components use name for both the composite name and the type."""
    composites.Composite.setName(self, name)
    self.setType(name)
def setNumberDensity(self, nucName, val):
    """
    Set heterogeneous number density.

    .. impl:: Setting nuclide fractions.
        :id: I_ARMI_COMP_NUCLIDE_FRACS0
        :implements: R_ARMI_COMP_NUCLIDE_FRACS

        The method allows a user or plugin to set the number density of a Component. It also
        indicates to other processes that may depend on a Component's status about this change
        via the ``assigned`` attribute.

    Parameters
    ----------
    nucName : str
        nuclide to modify
    val : float
        Number density to set in atoms/bn-cm (heterogeneous)
    """
    # delegate so the mass-conservation and param-sync logic lives in one place
    self.updateNumberDensities({nucName: val})
def setNumberDensities(self, numberDensities):
    """
    Set one or more multiple number densities. Clears out any number density not listed.

    .. impl:: Setting nuclide fractions.
        :id: I_ARMI_COMP_NUCLIDE_FRACS1
        :implements: R_ARMI_COMP_NUCLIDE_FRACS

        The method allows a user or plugin to set the number densities of a Component. In
        contrast to the ``setNumberDensity`` method, it sets all densities within a Component.

    Parameters
    ----------
    numberDensities : dict
        nucName: ndens pairs.

    Notes
    -----
    We don't just call setNumberDensity for each nuclide because we don't want to call
    ``getVolumeFractions`` for each nuclide (it's inefficient).
    """
    # wipe=True discards any nuclide not present in the provided dict
    self.updateNumberDensities(numberDensities, wipe=True)
def updateNumberDensities(self, numberDensities, wipe=False):
    """
    Set one or more multiple number densities. Leaves unlisted number densities alone.

    Parameters
    ----------
    numberDensities : dict
        nucName: ndens pairs.
    wipe : bool, optional
        Controls whether the old number densities are wiped. Any nuclide densities not provided
        in numberDensities will be effectively set to 0.0.

    Notes
    -----
    Sometimes volume/dimensions change due to number density change when the material thermal
    expansion depends on the component's composition (e.g. its plutonium fraction). In this
    case, changing the density will implicitly change the area/volume. Since it is difficult to
    predict the new dimensions, and perturbation/depletion calculations almost exclusively
    assume constant volume, the densities sent are automatically adjusted to conserve mass with
    the original dimensions. That is, the component's densities are not exactly as passed, but
    whatever they would need to be to preserve volume integrated number densities (moles) from
    the pre-perturbed component's volume/dimensions.

    This has no effect if the material thermal expansion has no dependence on component
    composition. If this is not desired, `self.p.numberDensities` and `self.p.nuclides` can be
    set directly.
    """
    # prepare to change the densities with knowledge that dims could change due to material
    # thermal expansion dependence on composition
    if self.p.numberDensities is not None and self.p.numberDensities.size > 0:
        dLLprev = self.material.linearExpansionPercent(Tc=self.temperatureInC) / 100.0
        materialExpansion = True
    else:
        dLLprev = 0.0
        materialExpansion = False

    try:
        vol = self.getVolume()
    except (AttributeError, TypeError):
        # Either no parent to get height or parent's height is None. Which would be
        # AttributeError and TypeError respectively, but other errors could be possible.
        vol = None
        # fall back to area-based conservation when volume is unavailable
        area = self.getArea()

    # change the densities
    if wipe:
        # replace the inventory with exactly the provided nuclides
        self.p.nuclides = np.asanyarray(list(numberDensities.keys()), dtype="S6")
        self.p.numberDensities = np.array(list(numberDensities.values()))
    else:
        # update existing entries in place; collect brand-new nuclides to append at the end
        newNucs = []
        newNumDens = []
        nucs = self.p.nuclides
        ndens = self.p.numberDensities
        for nucName, dens in numberDensities.items():
            i = np.where(nucs == nucName.encode())[0]
            if i.size > 0:
                ndens[i[0]] = dens
            else:
                newNucs.append(nucName.encode())
                newNumDens.append(dens)
        self.p.nuclides = np.append(nucs, newNucs)
        self.p.numberDensities = np.append(ndens, newNumDens)

    # check if thermal expansion changed
    dLLnew = self.material.linearExpansionPercent(Tc=self.temperatureInC) / 100.0
    if dLLprev != dLLnew and materialExpansion:
        # the thermal expansion changed so the volume change is happening at same time as
        # density change was requested. Attempt to make mass consistent with old dims (since the
        # density change was for the old volume and otherwise mass wouldn't be conserved).
        self.clearLinkedCache()  # enable recalculation of volume, otherwise it uses cached
        if vol is not None:
            factor = vol / self.getVolume()
        else:
            factor = area / self.getArea()
        self.changeNDensByFactor(factor)

    # since we are updating the object the param points to but not the param itself, we have to
    # inform the param system to flag it as modified so it syncs during ``syncMpiState``.
    self.p.assigned = parameters.SINCE_ANYTHING
    self.p.paramDefs["numberDensities"].assigned = parameters.SINCE_ANYTHING
def changeNDensByFactor(self, factor):
    """Multiply the number density of every nuclide in this component by ``factor``."""
    if self.p.numberDensities is not None:
        # keep the in-place multiply: it mutates the backing array AND goes through the
        # parameter's setattr so the param system sees the assignment
        self.p.numberDensities *= factor
    self._changeOtherDensParamsByFactor(factor)
def _changeOtherDensParamsByFactor(self, factor):
    """Scale the auxiliary density parameters (detailed and pin-wise) by ``factor``."""
    if self.p.detailedNDens is not None:
        self.p.detailedNDens *= factor
    if self.p.pinNDens is not None:
        self.p.pinNDens *= factor
def getEnrichment(self):
    """Get the mass enrichment of this component, as defined by the material."""
    return self.getMassEnrichment()
def getMassEnrichment(self):
    """
    Get the mass enrichment of this component, as defined by the material.

    Notes
    -----
    Getting mass enrichment on any level higher than this is ambiguous because you may have
    enriched boron in one pin and uranium in another and blending those doesn't make sense.
    """
    enrichedName = self.material.enrichedNuclide
    if enrichedName is None:
        raise ValueError(f"Cannot get enrichment of {self.material} because `enrichedNuclide` is not defined.")
    enrichedNucBase = self.nuclideBases.byName[enrichedName]
    # all nuclides of the enriched nuclide's element define the denominator
    elementNucNames = {nb.name for nb in enrichedNucBase.element.nuclides}
    massFracs = self.getMassFracs()
    elementMassFrac = sum(frac for name, frac in massFracs.items() if name in elementNucNames)
    try:
        return massFracs.get(enrichedName, 0.0) / elementMassFrac
    except ZeroDivisionError:
        # none of the element is present at all
        return 0.0
def getMass(self, nuclideNames: Union[None, str, list[str]] = None) -> float:
    r"""
    Determine the mass in grams of nuclide(s) and/or elements in this object.

    .. math::

        \text{mass} = \frac{\sum_i (N_i \cdot V \cdot A_i)}{N_A \cdot 10^{-24}}

    where
    :math:`N_i` is number density of nuclide i in (1/bn-cm),
    :math:`V` is the object volume in :math:`cm^3`,
    :math:`N_A` is Avogadro's number in 1/moles,
    :math:`A_i` is the atomic weight of nuclide i in grams/mole

    Parameters
    ----------
    nuclideNames : str, optional
        The nuclide/element specifier to get the mass of in the object.
        If omitted, total mass is returned.

    Returns
    -------
    mass : float
        The mass in grams.
    """
    symmetryFactor = self.parent.getSymmetryFactor() if self.parent else 1.0
    volume = self.getVolume() / symmetryFactor
    if nuclideNames is None:
        ndens = self._getNdensHelper()
    else:
        requested = self._getNuclidesFromSpecifier(nuclideNames)
        # densities come from self.p.numberDensities
        if len(requested) > 0:
            ndens = dict(zip(requested, self.getNuclideNumberDensities(requested)))
        else:
            ndens = {}
    return densityTools.calculateMassDensity(ndens) * volume
def setDimension(self, key, val, retainLink=False, cold=True):
    """
    Set a single dimension on the component.

    .. impl:: Set a Component dimension, considering thermal expansion.
        :id: I_ARMI_COMP_EXPANSION1
        :implements: R_ARMI_COMP_EXPANSION

        Dimensions should be set considering the impact of thermal expansion. This
        method allows for a user or plugin to set a dimension and indicate if the
        dimension is for a cold configuration or not. If it is not for a cold
        configuration, the thermal expansion factor is considered when setting the dimension.

        If the ``retainLink`` argument is ``True``, any Components linked to this one will also
        have its dimensions changed consistently. After a dimension is updated, the
        ``clearLinkedCache`` method is called which sets the volume of this Component to
        ``None``. This ensures that when the volume is next accessed it is recomputed using the
        updated dimensions.

    Parameters
    ----------
    key : str
        The dimension key (op, ip, mult, etc.)
    val : float
        The value to set on the dimension
    retainLink : bool, optional
        If True, the val will be applied to the dimension of linked component which indirectly
        changes this component's dimensions.
    cold : bool, optional
        If True sets the component cold dimension to the specified value.
    """
    if not key:
        return
    if retainLink and self.dimensionIsLinked(key):
        # delegate to the component we are linked to; our dimension follows it
        linkedComp, linkedDimName = self.p[key]
        linkedComp.setDimension(linkedDimName, val, cold=cold)
    else:
        if not cold and key in self.THERMAL_EXPANSION_DIMS:
            # convert the hot value back to an equivalent cold (as-input) dimension
            val /= self.getThermalExpansionFactor()
        self.p[key] = val
    self.clearLinkedCache()
def getDimension(self, key, Tc=None, cold=False):
    """
    Return a specific dimension at temperature as determined by key.

    .. impl:: Retrieve a dimension at a specified temperature.
        :id: I_ARMI_COMP_DIMS
        :implements: R_ARMI_COMP_DIMS

        Due to thermal expansion, Component dimensions depend on their temperature. This method
        retrieves a dimension from the Component at a particular temperature, if provided. If
        the Component is a LinkedComponent then the dimensions are resolved to ensure that any
        thermal expansion that has occurred to the Components that the LinkedComponent depends
        on is reflected in the returned dimension.

    Parameters
    ----------
    key : str
        The dimension key (op, ip, mult, etc.)
    Tc : float
        Temperature in C. If None, the current temperature of the component is used.
    cold : bool, optional
        If true, will return cold (input) value of the requested dimension
    """
    value = self.p[key]
    # linked dimensions resolve through the component they point at
    if isinstance(value, _DimensionLink):
        return value.resolveDimension(Tc=Tc, cold=cold)
    if cold or not value or key not in self.THERMAL_EXPANSION_DIMS:
        return value
    return self.getThermalExpansionFactor(Tc) * value
def getBoundingCircleOuterDiameter(self, Tc=None, cold=False):
    """Abstract bounding circle method that should be overwritten by each shape subclass."""
    raise NotImplementedError
def getCircleInnerDiameter(self, Tc=None, cold=False):
    """Abstract inner circle method that should be overwritten by each shape subclass.

    Notes
    -----
    The inner circle is meaningful for annular shapes, i.e., circle with non-zero ID, hexagon
    with non-zero IP, etc. For shapes with corners (e.g., hexagon, rectangle, etc) the inner
    circle intersects the corners of the inner bound, opposed to intersecting the "flats".
    """
    raise NotImplementedError
def dimensionIsLinked(self, key):
    """True if the specified dimension is linked to another component's dimension."""
    return key in self.p and isinstance(self.p[key], _DimensionLink)
def getDimensionNamesLinkedTo(self, otherComponent):
    """Return (myDimName, otherDimName) pairs for dimensions of this component linked to ``otherComponent``."""
    linkedNames = []
    for dimName in self.DIMENSION_NAMES:
        if not self.dimensionIsLinked(dimName):
            continue
        if self.p[dimName].getLinkedComponent() is otherComponent:
            # the link's second element is the dimension name on the other component
            linkedNames.append((dimName, self.p[dimName][1]))
    return linkedNames
def clearLinkedCache(self):
    """Clear this cache and any other dependent volumes.

    See Also
    --------
    clearCache: invalidates this component's own volume.
    """
    self.p.volume = None
    if self.parent:
        # changes in dimensions can affect cached variables such as pitch
        self.parent.cached = {}
        # getLinkedComponents iterates self.parent, so this only runs when a parent exists
        for c in self.getLinkedComponents():
            # no clearCache since parent already updated derivedMustUpdate in self.clearCache()
            c.p.volume = None
def getLinkedComponents(self):
    """Find sibling components whose dimensions are linked to this component."""
    dependents = []
    for sibling in self.parent:
        for dimName in sibling.DIMENSION_NAMES:
            if sibling.dimensionIsLinked(dimName) and sibling.p[dimName].getLinkedComponent() is self:
                dependents.append(sibling)
    return dependents
def getThermalExpansionFactor(self, Tc=None, T0=None):
    """
    Retrieve the material thermal expansion fraction.

    .. impl:: Calculates radial thermal expansion factor.
        :id: I_ARMI_COMP_EXPANSION0
        :implements: R_ARMI_COMP_EXPANSION

        This method enables the calculation of the thermal expansion factor for a given
        material. If the material is solid, the difference between ``T0`` and ``Tc`` is used to
        calculate the thermal expansion factor. If a solid material does not have a linear
        expansion factor defined and the temperature difference is greater than a predetermined
        tolerance, an error is raised. Thermal expansion of fluids or custom materials is
        neglected, currently.

    Parameters
    ----------
    Tc : float, optional
        Adjusted temperature to get the thermal expansion factor at relative to the reference temperature
    T0 : float, optional
        Reference temperature; defaults to the component's input (cold) temperature.

    Returns
    -------
    Thermal expansion factor as a percentage (1.0 + dLL), where dLL is the linear expansion factor.
    """
    if isinstance(self.material, (material.Fluid, custom.Custom)):
        # No thermal expansion of fluids or custom materials
        return 1.0
    refTemp = self.inputTemperatureInC if T0 is None else T0
    adjTemp = self.temperatureInC if Tc is None else Tc
    dLL = self.material.linearExpansionFactor(Tc=adjTemp, T0=refTemp)
    if not dLL and abs(adjTemp - refTemp) > self._TOLERANCE:
        # a meaningful temperature delta with zero expansion means the material likely
        # never implemented linearExpansionFactor
        runLog.error(
            f"Linear expansion percent may not be implemented in the {self.material} material class.\n"
            "This method needs to be implemented on the material to allow thermal expansion."
            f".\nReference temperature: {refTemp}, Adjusted temperature: {adjTemp}, Temperature difference: {(adjTemp - refTemp)}, "
            f"Specified tolerance: {self._TOLERANCE}",
            single=True,
        )
        raise RuntimeError(
            f"Linear expansion percent may not be implemented in the {self.material} material class."
        )
    return 1.0 + dLL
def printContents(self, includeNuclides=True):
    """Log a listing of the dimensions and (optionally) the composition of this component."""
    runLog.important(self)
    runLog.important(self.setDimensionReport())
    if not includeNuclides:
        return
    for nuc in self.getNuclides():
        runLog.important(f"{nuc:10s} {self.getNumberDensity(nuc):.7e}")
def setDimensionReport(self):
    """Gives a report of the dimensions of this component.

    Returns either the populated report group for this component type, or a message string
    when no report group is registered for it.
    """
    # find the report group registered for this component type, if any
    reportGroup = None
    for componentType, componentReport in self._COMP_REPORT_GROUPS.items():
        if componentType in self.getName():
            reportGroup = componentReport
            break
    if not reportGroup:
        return f"No report group designated for {self.getName()} component."
    reportGroup.header = [
        "",
        f"Tcold ({self.inputTemperatureInC})",
        f"Thot ({self.temperatureInC})",
    ]

    dimensions = {
        k: self.p[k] for k in self.DIMENSION_NAMES if k not in ("modArea", "area") and self.p[k] is not None
    }  # py3 cannot format None

    # Set component name and material
    report.setData("Name", [self.getName(), ""], reportGroup)
    report.setData("Material", [self.getProperties().name, ""], reportGroup)

    # report each dimension in its cold (input) and hot (expanded) state
    for dimName in dimensions:
        niceName = _NICE_DIM_NAMES.get(dimName, dimName)
        refVal = self.getDimension(dimName, cold=True)
        hotVal = self.getDimension(dimName)
        try:
            report.setData(niceName, [refVal, hotVal], reportGroup)
        except ValueError:
            runLog.warning(f"{self} has an invalid dimension for {dimName}. refVal: {refVal} hotVal: {hotVal}")

    # calculate thickness if applicable.
    suffix = None
    if "id" in dimensions:
        suffix = "d"
    elif "ip" in dimensions:
        suffix = "p"

    if suffix:
        coldIn = self.getDimension(f"i{suffix}", cold=True)
        hotIn = self.getDimension(f"i{suffix}")
        coldOut = self.getDimension(f"o{suffix}", cold=True)
        hotOut = self.getDimension(f"o{suffix}")

    # only annular shapes (non-zero inner dimension) get a thickness row
    if suffix and coldIn > 0.0:
        hotThick = (hotOut - hotIn) / 2.0
        coldThick = (coldOut - coldIn) / 2.0
        vals = (
            "Thickness (cm)",
            f"{coldThick:.7f}",
            f"{hotThick:.7f}",
        )
        report.setData(vals[0], [vals[1], vals[2]], reportGroup)

    return report.ALL[reportGroup]
def updateDims(self, key="", val=None):
    """Update a single dimension; thin wrapper around ``setDimension``."""
    self.setDimension(key, val)
def mergeNuclidesInto(self, compToMergeWith):
    """
    Set another component's number densities to reflect this one merged into it.

    You must also modify the geometry of the other component and remove this component to
    conserve atoms.
    """
    myArea = self.getArea()
    # if negative-area gap, treat it as 0.0 and return
    if myArea <= 0.0:
        return
    targetArea = compToMergeWith.getArea()
    # area-weight my densities into the target's area
    scale = myArea / targetArea
    myNDens = {nuc: scale * self.getNumberDensity(nuc) for nuc in self.getNuclides()}
    targetNDens = {nuc: compToMergeWith.getNumberDensity(nuc) for nuc in compToMergeWith.getNuclides()}
    # set the new homogenized number densities from both; overlapping nuclides are summed
    for nuc in set(myNDens) | set(targetNDens):
        compToMergeWith.setNumberDensity(nuc, myNDens.get(nuc, 0.0) + targetNDens.get(nuc, 0.0))
def iterComponents(self, typeSpec=None, exact=False):
    """Yield this component if it matches ``typeSpec``; components have no children."""
    if not self.hasFlags(typeSpec, exact):
        return
    yield self
def backUp(self):
    """
    Create and store a backup of the state.

    This needed to be overridden due to linked components which actually have a parameter value
    of another ARMI component.
    """
    # detach links so component references are not captured in the backup, then re-attach
    linkedDims = self._getLinkedDimsAndValues()
    composites.Composite.backUp(self)
    self._restoreLinkedDims(linkedDims)
def restoreBackup(self, paramsToApply):
    """
    Restore the parameters from previously created backup.

    This needed to be overridden due to linked components which actually have a parameter value
    of another ARMI component.
    """
    # detach links so the restore doesn't clobber them, then re-attach
    linkedDims = self._getLinkedDimsAndValues()
    composites.Composite.restoreBackup(self, paramsToApply)
    self._restoreLinkedDims(linkedDims)
def _getLinkedDimsAndValues(self):
    """
    Detach and return all linked-dimension parameters as (fieldName, link) pairs.

    Used by backUp/restoreBackup/__copy__ because linked dimensions hold references to
    other ARMI components and must not be deep-copied or serialized as-is.
    """
    linkedDims = []
    for dimName in self.DIMENSION_NAMES:
        # backUp and restore are called in tight loops, getting the value and checking here is
        # faster than calling self.dimensionIsLinked because that requires an extra
        # p.__getitem__
        try:
            val = self.p[dimName]
        except Exception as err:
            # chain the original error so the real cause is preserved in the traceback
            raise RuntimeError(
                f"Could not find parameter {dimName} defined for {self}. Is the desired Component class?"
            ) from err
        if isinstance(val, _DimensionLink):
            linkedDims.append((self.p.paramDefs[dimName].fieldName, val))
            del self.p[dimName]
    return linkedDims
def _restoreLinkedDims(self, linkedDims):
    """Re-attach previously detached linked dimensions without flagging them as assigned."""
    # setattr on self.p directly forces the update without setting the ".assigned" flag
    for fieldName, linkValue in linkedDims:
        setattr(self.p, fieldName, linkValue)
def adjustMassEnrichment(self, massFraction):
    """
    Change the mass fraction of this component.

    The nuclides to adjust are defined by the material. This changes whichever nuclides are to
    be enriched vs. the baseline nuclides of that element while holding mass constant. For
    example it might adjust boron or uranium enrichment.

    Conceptually, you could hold number of atoms, volume, or mass constant during this
    operation. Historically ARMI adjusted mass fractions which was meant to keep mass constant.

    If you have 20 mass % Uranium and adjust the enrichment, you will still have 20% Uranium
    mass. But the actual mass might change a bit because the enriched nuclide weighs less.

    See Also
    --------
    Material.enrichedNuclide
    """
    enrichedName = self.material.enrichedNuclide
    if enrichedName is None:
        raise ValueError(f"Cannot adjust enrichment of {self.material} because `enrichedNuclide` is not defined.")
    enrichedNucBase = self.nuclideBases.byName[enrichedName]
    baselineNucNames = [nb.name for nb in enrichedNucBase.element.nuclides]

    massFracsBefore = self.getMassFracs()
    elementMassFrac = sum(frac for name, frac in massFracsBefore.items() if name in baselineNucNames)

    # the enriched nuclide takes ``massFraction`` of the element's total mass fraction
    adjustedMassFracs = {enrichedName: elementMassFrac * massFraction}

    # distribute the remainder over the baseline nuclides, keeping their relative ratios
    baselineNucNames.remove(enrichedName)
    unenrichedMassFrac = elementMassFrac - massFracsBefore[enrichedName]
    for baseName in baselineNucNames:
        ratio = massFracsBefore.get(baseName, 0.0) / unenrichedMassFrac
        if ratio:
            adjustedMassFracs[baseName] = elementMassFrac * (1 - massFraction) * ratio
    self.setMassFracs(adjustedMassFracs)
def getMgFlux(self, adjoint=False, average=False, gamma=False):
    """
    Return the multigroup neutron flux in [n/cm^2/s].

    The first entry is the first energy group (fastest neutrons). Each additional group is the next energy group, as
    set in the ISOTXS library.

    Parameters
    ----------
    adjoint : bool, optional
        Return adjoint flux instead of real
    average : bool, optional
        If True, will return average flux between latest and previous. Does not work for pin detailed.
    gamma : bool, optional
        Whether to return the neutron flux or the gamma flux.

    Returns
    -------
    flux : np.ndarray
        multigroup neutron flux in [n/cm^2/s]

    Raises
    ------
    NotImplementedError
        If ``average=True``; averaging is only available at the block level.
    """
    if average:
        # fixed typo in message: "tryusing" -> "try using"
        raise NotImplementedError("Component has no method for producing average MG flux -- try using blocks")
    volume = self.getVolume() / self.parent.getSymmetryFactor()
    return self.getIntegratedMgFlux(adjoint=adjoint, gamma=gamma) / volume
def getIntegratedMgFlux(self, adjoint=False, gamma=False):
    """
    Return the multigroup neutron tracklength in [n-cm/s].

    The first entry is the first energy group (fastest neutrons). Each additional group is the
    next energy group, as set in the ISOTXS library.

    Parameters
    ----------
    adjoint : bool, optional
        Return adjoint flux instead of real
    gamma : bool, optional
        Whether to return the neutron flux or the gamma flux.

    Returns
    -------
    integratedFlux : multigroup neutron tracklength in [n-cm/s]
    """
    pinNum = self.p.pinNum
    if pinNum is None:
        # no pin-level flux is available: scale the parent's integrated flux by volume fraction
        if not self.parent:
            return np.zeros(1)
        myVolume = self.getVolume() / self.parent.getSymmetryFactor()
        volumeFraction = myVolume / self.parent.getVolume()
        return volumeFraction * self.parent.getIntegratedMgFlux(adjoint, gamma)

    # pin-level flux is available. Note that it is NOT integrated on the param level.
    if gamma and adjoint:
        raise ValueError("Adjoint gamma flux is currently unsupported.")
    if gamma:
        pinFluxes = self.parent.p.pinMgFluxesGamma
    elif adjoint:
        pinFluxes = self.parent.p.pinMgFluxesAdj
    else:
        pinFluxes = self.parent.p.pinMgFluxes
    return pinFluxes[pinNum - 1] * self.getVolume() / self.parent.getSymmetryFactor()
def getPinMgFluxes(self, adjoint: bool = False, gamma: bool = False) -> np.ndarray[tuple[int, int], float]:
    """Retrieve the pin multigroup fluxes for the component.

    Parameters
    ----------
    adjoint : bool, optional
        Return adjoint flux instead of real
    gamma : bool, optional
        Whether to return the neutron flux or the gamma flux.

    Returns
    -------
    np.ndarray
        A ``(N, nGroup)`` array of pin multigroup fluxes, where ``N`` is equivalent to the
        multiplicity of the component (``self.p.mult``) and ``nGroup`` is the number of
        energy groups of the flux.

    Raises
    ------
    ValueError
        If the location(s) of the component are not aligned with pin indices from the block.
        This would happen if this component is not actually a pin.
    """
    # A non-pin component yields an index map the lookup below will choke on; the except
    # clause reports that to the user, so no extra guard rails are needed here.
    indexMap = self.getPinIndices()

    # Resolve which parent parameter holds the requested flavor of pin flux.
    if gamma and adjoint:
        raise ValueError("Adjoint gamma flux is currently unsupported.")
    if gamma:
        param = "pinMgFluxesGamma"
    elif adjoint:
        param = "pinMgFluxesAdj"
    else:
        param = "pinMgFluxes"

    try:
        return self.parent.p[param][indexMap]
    except Exception as ee:
        msg = f"Failure getting {param} from {self} via parent {self.parent}"
        runLog.error(msg)
        runLog.error(ee)
        raise ValueError(msg) from ee
def getPinIndices(self) -> np.ndarray[tuple[int], np.uint16]:
    """Find the indices for the locations where this component can be found in the block.

    Returns
    -------
    np.array[int]
        The indices in various Block-level pin methods, e.g.,
        :meth:`armi.reactor.blocks.Block.getPinLocations`, that correspond to this component.

    Raises
    ------
    ValueError
        If this does not have pin indices. This can be the case for components that live
        on blocks without spatial grids, or if they do not share lattice sites, via
        ``spatialLocator``, with other pins.

    See Also
    --------
    :meth:`armi.reactor.blocks.HexBlock.assignPinIndices`
    """
    myIndices = self.p.pinIndices
    if isinstance(myIndices, np.ndarray):
        return myIndices
    # No indices of our own; borrow from a sibling that occupies the same lattice sites.
    for sibling in self.parent:
        if sibling is self or sibling.p.pinIndices is None:
            continue
        if sibling.spatialLocator == self.spatialLocator:
            return sibling.p.pinIndices
    raise ValueError(f"{self} on {self.parent} has no pin indices.")
def density(self) -> float:
    """Returns the mass density of the object in g/cc.

    Falls back to the material's own density whenever the composite-level density is
    falsy (e.g., no number densities assigned yet), except for void materials, whose
    zero density is legitimate.
    """
    density = composites.Composite.density(self)
    if not density and not isinstance(self.material, void.Void):
        # possible that there are no nuclides in this component yet. In that case,
        # defer to Material. Material.density is wrapped to warn if it's attached
        # to a parent. Avoid that by calling the inner function directly
        density = self.material.density.__wrapped__(self.material, Tc=self.temperatureInC)
    return density
def getLumpedFissionProductCollection(self):
    """
    Get collection of LFP objects. Will work for global or block-level LFP models.

    Delegates to the parent when one exists; otherwise falls back to the generic
    ``ArmiObject`` implementation.

    Returns
    -------
    lfps : LumpedFissionProduct
        lfpName keys , lfp object values

    See Also
    --------
    armi.physics.neutronics.fissionProductModel.lumpedFissionProduct.LumpedFissionProduct
    """
    if not self.parent:
        return composites.ArmiObject.getLumpedFissionProductCollection(self)
    return self.parent.getLumpedFissionProductCollection()
def getMicroSuffix(self):
    # Components carry no cross-section suffix of their own; defer to the parent
    # (presumably the enclosing block -- confirm against the Block implementation).
    return self.parent.getMicroSuffix()
def getPitchData(self):
    """
    Return the pitch data that should be used to determine block pitch.

    Notes
    -----
    This pitch data should only be used if this is the pitch defining component in a block. The
    block is responsible for determining which component in it is the pitch defining component.
    """
    msg = (
        f"Method not implemented on component {self}. "
        "Please implement if this component type can be a pitch defining component."
    )
    raise NotImplementedError(msg)
def getFuelMass(self) -> float:
    """Return this component's mass in grams, or zero when it is not fuel."""
    if self.hasFlags(flags.Flags.FUEL):
        return self.getMass()
    return 0.0
def finalizeLoadingFromDB(self):
    """Apply any final actions after creating the component from database.

    This should **only** be called internally by the database loader. Otherwise some properties
    could be doubly applied.

    This exists because the theoretical density is initially defined as a material modification,
    and then stored as a Material attribute. When reading from blueprints, the blueprint loader
    sets the theoretical density parameter from the Material attribute. Component parameters are
    also set when reading from the database. But, we need to set the Material attribute so
    routines that fetch a material's density property account for the theoretical density.
    """
    # Push the stored TD fraction back onto the material so density lookups agree with it.
    self.material.adjustTD(self.p.theoreticalDensityFrac)
class ShapedComponent(Component):
    """A component with well-defined dimensions.

    Serves as the base for concrete geometric components (spheres, cubes, radial
    segments, etc.) whose dimensions are declared via shape-specific parameters.
    """
================================================
FILE: armi/reactor/components/componentParameters.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Component parameter definitions."""
import numpy as np
from armi.reactor import parameters
from armi.reactor.parameters import ParamLocation
from armi.reactor.parameters.parameterDefinitions import isNumpyArray, isNumpyF32Array
from armi.utils import units
def getComponentParameterDefinitions():
    """Return the base Component parameters.

    These parameters apply to every component regardless of shape; shape-specific
    dimensions are defined by the other ``get*ParameterDefinitions`` helpers in
    this module.
    """
    pDefs = parameters.ParameterDefinitionCollection()
    with pDefs.createBuilder(location=ParamLocation.AVERAGE, saveToDB=True) as pb:
        pb.defParam("volume", units=f"{units.CM}^3", description="Volume of this object.")

        pb.defParam(
            "area",
            units=f"{units.CM}^2",
            description="Cross sectional area of this component.",
        )

        pb.defParam(
            "mult",
            units=units.UNITLESS,
            description="The multiplicity of this component, i.e. how many of them there are.",
            default=1,
        )

        pb.defParam(
            "mergeWith",
            units=units.UNITLESS,
            description="Label of other component to merge with",
        )

        pb.defParam(
            "type",
            units=units.UNITLESS,
            description="The name of this object as input on the blueprints",
        )

        pb.defParam(
            "temperatureInC",
            units=units.DEGC,
            # f-string for consistency with the rest of this module (was str.format).
            description=f"Component temperature in {units.DEGC}",
        )

        pb.defParam(
            "numberDensities",
            setter=isNumpyArray("numberDensities"),
            units=f"#/(bn*{units.CM})",
            description="Number densities of each nuclide.",
        )

        pb.defParam(
            "nuclides",
            setter=isNumpyArray("nuclides"),
            units=units.UNITLESS,
            description="Nuclide names corresponding to numberDensities array.",
        )

        pb.defParam(
            "detailedNDens",
            setter=isNumpyArray("detailedNDens"),
            units=f"atoms/(bn*{units.CM})",
            description=(
                "High-fidelity number density vector with up to thousands of nuclides. "
                "Used in high-fi depletion runs where low-fi depletion may also be occurring. "
                "This param keeps the hi-fi and low-fi depletion values from interfering."
            ),
            saveToDB=True,
            default=None,
        )

        pb.defParam(
            "pinNDens",
            setter=isNumpyF32Array("pinNDens"),
            units=f"atoms/(bn*{units.CM})",
            description="Pin-wise number densities of each nuclide.",
            location=ParamLocation.AVERAGE,
            saveToDB=True,
            categories=["depletion", parameters.Category.pinQuantities],
            default=None,
        )

        pb.defParam(
            "percentBu",
            # No interpolation needed; use the constant directly (matches pinPercentBu).
            units=units.PERCENT_FIMA,
            description="Burnup as a percentage of initial (heavy) metal atoms.",
            default=0.0,
        )

        pb.defParam(
            "pinPercentBu",
            setter=isNumpyArray("pinPercentBu"),
            units=units.PERCENT_FIMA,
            description="Pin-wise burnup as a percentage of initial (heavy) metal atoms.",
            default=None,
        )

        pb.defParam(
            "buRate",
            units=f"{units.PERCENT_FIMA}/{units.DAYS}",
            # This is very related to power, but normalized to %FIMA.
            description=(
                "Current rate of burnup accumulation. Useful for estimating times when burnup limits may be exceeded."
            ),
        )

        pb.defParam(
            "enrichmentBOL",
            units=units.UNITLESS,
            description="Enrichment during fabrication (mass fraction)",
            default=0.0,
        )

        pb.defParam(
            "massHmBOL",
            units=units.GRAMS,
            description="Mass of heavy metal at BOL",
            default=None,
        )

        pb.defParam(
            "customIsotopicsName",
            units=units.UNITLESS,
            description="Label of isotopics applied to this component.",
        )

        pb.defParam(
            "modArea",
            units=units.UNITLESS,
            description="A (component, operation) tuple used to add/subtract area (in "
            "cm^2) from another components area. See c.getArea()",
        )

        pb.defParam(
            "zrFrac",
            units=units.UNITLESS,
            description="Original Zr frac of this, used for material properties.",
        )

        pb.defParam(
            "pinNum",
            units=units.UNITLESS,
            description="Pin number of this component in some mesh. Starts at 1.",
            default=None,
        )

        def _assignTDFrac(self, val):
            # Reject unphysical theoretical density fractions before storing.
            if val > 1 or val < 0:
                raise ValueError(f"Theoretical density fraction must be in range [0,1], got {val}")
            self._p_theoreticalDensityFrac = val

        pb.defParam(
            "theoreticalDensityFrac",
            description=(
                "Fractional value between zero and one, inclusive, for the theoretical density "
                "of the material stored on this component."
            ),
            units=units.UNITLESS,
            default=1,
            setter=_assignTDFrac,
        )

        pb.defParam(
            "molesHmBOL",
            units=units.MOLES,
            default=0.0,
            description="Total number of moles of heavy metal at BOL.",
        )

        def _validatePinIndices(self, val):
            if val is not None:
                # uint16 holds [0, 65_535], so at most 65_535 pins per block
                self._p_pinIndices = np.array(val, dtype=np.uint16)
            else:
                self._p_pinIndices = None

        pb.defParam(
            "pinIndices",
            default=None,
            description=(
                "Indices within data arrays where values for this component are stored. "
                "The array is zero indexed and structured such that the j-th pin on this "
                "component can be found at ``Block.getPinLocations()[pinIndices[j]]``. "
            ),
            units=units.UNITLESS,
            setter=_validatePinIndices,
        )

    return pDefs
def getCircleParameterDefinitions():
    """Return parameters for Circle."""
    defs = parameters.ParameterDefinitionCollection()
    with defs.createBuilder(location=ParamLocation.AVERAGE, saveToDB=True) as builder:
        # All Circle dimensions are lengths in cm.
        builder.defParam("od", units=units.CM, description="Outer diameter")
        builder.defParam("id", units=units.CM, description="Inner diameter", default=0.0)
        builder.defParam("op", units=units.CM, description="Outer pitch")
    return defs
def getHexagonParameterDefinitions():
    """Return parameters for Hexagon."""
    defs = parameters.ParameterDefinitionCollection()
    with defs.createBuilder(location=ParamLocation.AVERAGE, saveToDB=True) as builder:
        # Hexagons are defined by flat-to-flat pitches, in cm.
        builder.defParam("ip", units=units.CM, description="Inner pitch", default=0.0)
        builder.defParam("op", units=units.CM, description="Outer pitch")
    return defs
def getHoledHexagonParameterDefinitions():
    """Return parameters for HoledHexagon."""
    defs = parameters.ParameterDefinitionCollection()
    # (name, units, description, extra keyword args) for each dimension, in definition order.
    specs = (
        ("holeOD", units.CM, "Diameter of interior hole(s)", {}),
        ("nHoles", units.UNITLESS, "Number of interior holes", {}),
        (
            "holeRadFromCenter",
            units.CM,
            "Distance from the center of the hexagon to the center of the holes assuming the hole centers "
            "all lie on a circle.",
            {"default": 0.0},
        ),
    )
    with defs.createBuilder(location=ParamLocation.AVERAGE, saveToDB=True) as builder:
        for name, unit, desc, extras in specs:
            builder.defParam(name, units=unit, description=desc, **extras)
    return defs
def getHexHoledCircleParameterDefinitions():
    """Return parameters for HexHoledCircle."""
    defs = parameters.ParameterDefinitionCollection()
    with defs.createBuilder(location=ParamLocation.AVERAGE, saveToDB=True) as builder:
        # Only one extra dimension beyond the base Circle set: the hexagonal hole's pitch.
        builder.defParam("holeOP", units=units.CM, description="Pitch of interior hole")
    return defs
def getFilletedHexagonParameterDefinitions():
    """Return parameters for FilletedHexagon."""
    defs = parameters.ParameterDefinitionCollection()
    with defs.createBuilder(location=ParamLocation.AVERAGE, saveToDB=True) as builder:
        # Inner and outer corner fillet radii share the same description template.
        for name, where in (("iR", "inner"), ("oR", "outer")):
            builder.defParam(name, units=units.CM, description=f"Radius of curvature of the {where} corners")
    return defs
def getHoledRectangleParameterDefinitions():
    """Return parameters for HoledRectangle."""
    defs = parameters.ParameterDefinitionCollection()
    with defs.createBuilder(location=ParamLocation.AVERAGE, saveToDB=True) as builder:
        # Single circular hole cut from the rectangle.
        builder.defParam("holeOD", units=units.CM, description="Diameter of interior hole")
    return defs
def getHelixParameterDefinitions():
    """Return parameters for Helix."""
    defs = parameters.ParameterDefinitionCollection()
    # (name, description, extra keyword args); every Helix dimension is a length in cm.
    cmDims = (
        ("od", "Outer diameter", {}),
        ("id", "Inner diameter", {"default": 0.0}),
        ("op", "Outer pitch", {}),
        ("axialPitch", "Axial pitch of helix in helical shapes.", {}),
        ("helixDiameter", "Diameter of helix", {}),
    )
    with defs.createBuilder(location=ParamLocation.AVERAGE, saveToDB=True) as builder:
        for name, desc, extras in cmDims:
            builder.defParam(name, units=units.CM, description=desc, **extras)
    return defs
def getRectangleParameterDefinitions():
    """Return parameters for Rectangle."""
    defs = parameters.ParameterDefinitionCollection()
    with defs.createBuilder(location=ParamLocation.AVERAGE, saveToDB=True) as builder:
        # Inner/outer length and width, all in cm.
        for name, desc in (
            ("lengthInner", "Inner length"),
            ("lengthOuter", "Outer length"),
            ("widthInner", "Inner width"),
            ("widthOuter", "Outer width"),
        ):
            builder.defParam(name, units=units.CM, description=desc)
    return defs
def getCubeParameterDefinitions():
    """Return parameters for Cube."""
    defs = parameters.ParameterDefinitionCollection()
    with defs.createBuilder(location=ParamLocation.AVERAGE, saveToDB=True) as builder:
        # Each axis gets an inner (optional hollow) and outer dimension, in cm.
        for axis in ("length", "width", "height"):
            builder.defParam(
                f"{axis}Inner",
                units=units.CM,
                default=0.0,
                description=f"Inner {axis} dimension (if the cube is hollow).",
            )
            builder.defParam(f"{axis}Outer", units=units.CM, description=f"Outermost {axis} dimension")
    return defs
def getTriangleParameterDefinitions():
    """Return parameters for Triangle."""
    defs = parameters.ParameterDefinitionCollection()
    with defs.createBuilder(location=ParamLocation.AVERAGE, saveToDB=True) as builder:
        builder.defParam("base", units=units.CM, description="Length of the base of the triangle")
        builder.defParam("height", units=units.CM, description="Height of the triangle")
    return defs
def getUnshapedParameterDefinitions():
    """Return parameters for UnshapedComponent."""
    defs = parameters.ParameterDefinitionCollection()
    with defs.createBuilder(location=ParamLocation.AVERAGE, saveToDB=True) as builder:
        builder.defParam("op", units=units.CM, description="Outer pitch")
        # Unshaped components cannot derive volume from dimensions; users supply it directly.
        builder.defParam(
            "userDefinedVolume",
            units=f"{units.CM}^3",
            description="Volume of this object.",
        )
    return defs
def getRadialSegmentParameterDefinitions():
    """Return parameters for RadialSegment.

    Note that ``inner_theta``/``outer_theta`` are azimuthal angles (the segment's angular
    extent); the axial extent is described by ``height`` and the ``*_axial`` perturbations.
    """
    pDefs = parameters.ParameterDefinitionCollection()
    with pDefs.createBuilder(location=ParamLocation.AVERAGE, saveToDB=True) as pb:
        pb.defParam(
            "inner_theta",
            units=units.RADIANS,
            # Fixed description: theta is azimuthal, not axial (see azimuthal_differential).
            description="Starting azimuthal position, in radians.",
        )
        pb.defParam(
            "outer_theta",
            units=units.RADIANS,
            description="Ending azimuthal position, in radians.",
        )
        pb.defParam(
            "inner_radius",
            units=units.CM,
            description="Starting radial position; this can be zero.",
        )
        pb.defParam("outer_radius", units=units.CM, description="Ending radial position.")
        pb.defParam("height", units=units.CM, description="Height of the 3D radial segment.")
        pb.defParam(
            "azimuthal_differential",
            units=units.RADIANS,
            description="Perturbation in the azimuthal dimension (see inner_theta and outer_theta).",
        )
        pb.defParam(
            "radius_differential",
            units=units.UNITLESS,
            description="Perturbation in the radial dimension (see inner_radius and outer_radius).",
        )
        pb.defParam(
            "inner_axial",
            units=units.UNITLESS,
            description="Perturbation in the axial dimension (picture outer_axial = inner_axial + height).",
        )
        pb.defParam(
            "outer_axial",
            units=units.UNITLESS,
            description="Perturbation result in the axial dimension (picture outer_axial = inner_axial + height).",
        )
    return pDefs
================================================
FILE: armi/reactor/components/tests/__init__.py
================================================
# Copyright 2025 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
================================================
FILE: armi/reactor/components/tests/test_basicShapes.py
================================================
# Copyright 2025 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit testing file for basic shapes."""
import math
import unittest
from armi.materials import resolveMaterialClassByName
from armi.reactor.components.basicShapes import (
Circle,
Hexagon,
Rectangle,
SolidRectangle,
Square,
Triangle,
)
class TestBasicShapes(unittest.TestCase):
    """Verify cold and hot cross-sectional areas for each basic 2D shape."""

    @classmethod
    def setUpClass(cls):
        cls.material = resolveMaterialClassByName("HT9")()

    def test_circleArea(self):
        outerDiam = 2.0
        innerDiam = 1.5
        comp = Circle("Test", material=self.material, Tinput=20, Thot=300, od=outerDiam, id=innerDiam, mult=2)
        expectedCold = math.pi * (outerDiam**2 / 4 - innerDiam**2 / 4) * 2
        self.assertAlmostEqual(expectedCold, comp.getComponentArea(cold=True))
        # Passing the input temperature explicitly should reproduce the cold area.
        self.assertAlmostEqual(comp.getComponentArea(Tc=20.0), comp.getComponentArea(cold=True))
        expectedHot = math.pi * (comp.getDimension("od") ** 2 / 4 - comp.getDimension("id") ** 2 / 4) * 2
        self.assertAlmostEqual(expectedHot, comp.getComponentArea(cold=False))
        self.assertAlmostEqual(comp.getComponentArea(Tc=300), comp.getComponentArea(cold=False))

    def test_hexagonArea(self):
        outerPitch = 2.0
        innerPitch = 1.5
        comp = Hexagon("Test", material=self.material, Tinput=20, Thot=300, op=outerPitch, ip=innerPitch, mult=2)
        expectedCold = math.sqrt(3.0) * (outerPitch**2 - innerPitch**2)
        self.assertAlmostEqual(expectedCold, comp.getComponentArea(cold=True))
        self.assertAlmostEqual(comp.getComponentArea(Tc=20.0), comp.getComponentArea(cold=True))
        expectedHot = math.sqrt(3.0) * (comp.getDimension("op") ** 2 - comp.getDimension("ip") ** 2)
        self.assertAlmostEqual(expectedHot, comp.getComponentArea(cold=False))
        self.assertAlmostEqual(comp.getComponentArea(Tc=300), comp.getComponentArea(cold=False))

    def test_rectangleArea(self):
        outerLen, innerLen = 2.0, 1.5
        outerWid, innerWid = 2.5, 1.25
        comp = Rectangle(
            "Test",
            material=self.material,
            Tinput=20,
            Thot=300,
            lengthOuter=outerLen,
            lengthInner=innerLen,
            widthOuter=outerWid,
            widthInner=innerWid,
            mult=2,
        )
        expectedCold = 2 * (outerLen * outerWid - innerLen * innerWid)
        self.assertAlmostEqual(expectedCold, comp.getComponentArea(cold=True))
        self.assertAlmostEqual(comp.getComponentArea(Tc=20.0), comp.getComponentArea(cold=True))
        expectedHot = 2 * (
            comp.getDimension("lengthOuter") * comp.getDimension("widthOuter")
            - comp.getDimension("lengthInner") * comp.getDimension("widthInner")
        )
        self.assertAlmostEqual(expectedHot, comp.getComponentArea(cold=False))
        self.assertAlmostEqual(comp.getComponentArea(Tc=300), comp.getComponentArea(cold=False))

    def test_solidRectangleArea(self):
        outerLen = 2.0
        outerWid = 2.5
        comp = SolidRectangle(
            "Test",
            material=self.material,
            Tinput=20,
            Thot=300,
            lengthOuter=outerLen,
            widthOuter=outerWid,
            mult=2,
        )
        self.assertAlmostEqual(2 * outerLen * outerWid, comp.getComponentArea(cold=True))
        self.assertAlmostEqual(comp.getComponentArea(Tc=20.0), comp.getComponentArea(cold=True))
        expectedHot = 2 * comp.getDimension("lengthOuter") * comp.getDimension("widthOuter")
        self.assertAlmostEqual(expectedHot, comp.getComponentArea(cold=False))
        self.assertAlmostEqual(comp.getComponentArea(Tc=300), comp.getComponentArea(cold=False))

    def test_squareArea(self):
        outerWid = 2.5
        innerWid = 1.25
        comp = Square(
            "Test",
            material=self.material,
            Tinput=20,
            Thot=300,
            widthOuter=outerWid,
            widthInner=innerWid,
            mult=2,
        )
        self.assertAlmostEqual(2 * (outerWid**2 - innerWid**2), comp.getComponentArea(cold=True))
        self.assertAlmostEqual(comp.getComponentArea(Tc=20.0), comp.getComponentArea(cold=True))
        expectedHot = 2 * (comp.getDimension("widthOuter") ** 2 - comp.getDimension("widthInner") ** 2)
        self.assertAlmostEqual(expectedHot, comp.getComponentArea(cold=False))
        self.assertAlmostEqual(comp.getComponentArea(Tc=300), comp.getComponentArea(cold=False))

    def test_triangleArea(self):
        baseLen = 2.5
        triHeight = 1.25
        comp = Triangle(
            "Test",
            material=self.material,
            Tinput=20,
            Thot=300,
            base=baseLen,
            height=triHeight,
            mult=2,
        )
        # mult=2 makes a pair of triangles, i.e. a full base*height parallelogram.
        self.assertAlmostEqual(baseLen * triHeight, comp.getComponentArea(cold=True))
        self.assertAlmostEqual(comp.getComponentArea(Tc=20.0), comp.getComponentArea(cold=True))
        expectedHot = comp.getDimension("base") * comp.getDimension("height")
        self.assertAlmostEqual(expectedHot, comp.getComponentArea(cold=False))
        self.assertAlmostEqual(comp.getComponentArea(Tc=300), comp.getComponentArea(cold=False))
================================================
FILE: armi/reactor/components/tests/test_complexShapes.py
================================================
# Copyright 2025 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit testing file for basic shapes."""
import math
import unittest
from armi.materials import resolveMaterialClassByName
from armi.reactor.components.complexShapes import (
HexHoledCircle,
HoledHexagon,
HoledRectangle,
HoledSquare,
)
class TestComplexShapes(unittest.TestCase):
    """Verify cold and hot cross-sectional areas for shapes with holes cut out."""

    @classmethod
    def setUpClass(cls):
        cls.material = resolveMaterialClassByName("HT9")()

    @staticmethod
    def circArea(diam):
        # Area of a circle given its diameter.
        return math.pi * (diam / 2) ** 2

    @staticmethod
    def hexArea(pitch):
        # Area of a hexagon given its flat-to-flat pitch.
        return math.sqrt(3.0) / 2.0 * pitch**2

    @staticmethod
    def rectArea(length, width):
        return length * width

    def test_holedHexagon(self):
        outerPitch = 2.0
        holeDiam = 0.5
        holeCount = 2
        comp = HoledHexagon(
            "TestHoledHexagon",
            material=self.material,
            Tinput=20,
            Thot=300,
            op=outerPitch,
            holeOD=holeDiam,
            nHoles=holeCount,
            mult=2,
        )
        expectedCold = (self.hexArea(outerPitch) - holeCount * self.circArea(holeDiam)) * 2
        self.assertAlmostEqual(expectedCold, comp.getComponentArea(cold=True))
        self.assertAlmostEqual(comp.getComponentArea(Tc=20.0), comp.getComponentArea(cold=True))
        expectedHot = (self.hexArea(comp.getDimension("op")) - holeCount * self.circArea(comp.getDimension("holeOD"))) * 2
        self.assertAlmostEqual(expectedHot, comp.getComponentArea(cold=False))
        self.assertAlmostEqual(comp.getComponentArea(Tc=300), comp.getComponentArea(cold=False))

        # Moving the holes off-center must not change the area.
        shifted = HoledHexagon(
            "TestHoledHexagonHoleRadFromCenter",
            material=self.material,
            Tinput=20,
            Thot=300,
            op=outerPitch,
            holeOD=holeDiam,
            nHoles=holeCount,
            holeRadFromCenter=(outerPitch + holeDiam) / 2,
            mult=2,
        )
        self.assertAlmostEqual(shifted.getComponentArea(cold=True), comp.getComponentArea(cold=True))
        self.assertAlmostEqual(shifted.getComponentArea(cold=False), comp.getComponentArea(cold=False))

        # The holeRadFromCenter dimension itself thermally expands when hot.
        offCenter = HoledHexagon(
            "TestHoledHexagon33",
            material=self.material,
            Tinput=20,
            Thot=300,
            op=outerPitch,
            holeOD=holeDiam,
            nHoles=holeCount,
            holeRadFromCenter=0.5,
            mult=2,
        )
        self.assertEqual(offCenter.getDimension("holeRadFromCenter", cold=True, Tc=500), 0.5)
        self.assertGreater(offCenter.getDimension("holeRadFromCenter", cold=False, Tc=500), 0.5)

    def test_holedRectangle(self):
        outerLen = 2.0
        outerWid = 3.0
        holeDiam = 0.5
        comp = HoledRectangle(
            "Test",
            material=self.material,
            Tinput=20,
            Thot=300,
            lengthOuter=outerLen,
            widthOuter=outerWid,
            holeOD=holeDiam,
            mult=2,
        )
        expectedCold = (self.rectArea(outerLen, outerWid) - self.circArea(holeDiam)) * 2
        self.assertAlmostEqual(expectedCold, comp.getComponentArea(cold=True))
        self.assertAlmostEqual(comp.getComponentArea(Tc=20.0), comp.getComponentArea(cold=True))
        expectedHot = (
            self.rectArea(comp.getDimension("lengthOuter"), comp.getDimension("widthOuter"))
            - self.circArea(comp.getDimension("holeOD"))
        ) * 2
        self.assertAlmostEqual(expectedHot, comp.getComponentArea(cold=False))
        self.assertAlmostEqual(comp.getComponentArea(Tc=300), comp.getComponentArea(cold=False))

    def test_holedSquare(self):
        outerWid = 3.0
        holeDiam = 0.5
        comp = HoledSquare(
            "Test",
            material=self.material,
            Tinput=20,
            Thot=300,
            widthOuter=outerWid,
            holeOD=holeDiam,
            mult=2,
        )
        expectedCold = (self.rectArea(outerWid, outerWid) - self.circArea(holeDiam)) * 2
        self.assertAlmostEqual(expectedCold, comp.getComponentArea(cold=True))
        self.assertAlmostEqual(comp.getComponentArea(Tc=20.0), comp.getComponentArea(cold=True))
        hotWid = comp.getDimension("widthOuter")
        expectedHot = (self.rectArea(hotWid, hotWid) - self.circArea(comp.getDimension("holeOD"))) * 2
        self.assertAlmostEqual(expectedHot, comp.getComponentArea(cold=False))
        self.assertAlmostEqual(comp.getComponentArea(Tc=300), comp.getComponentArea(cold=False))

    def test_hexHoledCircle(self):
        outerDiam = 3.0
        holePitch = 0.5
        comp = HexHoledCircle(
            "Test",
            material=self.material,
            Tinput=20,
            Thot=300,
            od=outerDiam,
            holeOP=holePitch,
            mult=2,
        )
        expectedCold = (self.circArea(outerDiam) - self.hexArea(holePitch)) * 2
        self.assertAlmostEqual(expectedCold, comp.getComponentArea(cold=True))
        self.assertAlmostEqual(comp.getComponentArea(Tc=20.0), comp.getComponentArea(cold=True))
        expectedHot = (self.circArea(comp.getDimension("od")) - self.hexArea(comp.getDimension("holeOP"))) * 2
        self.assertAlmostEqual(expectedHot, comp.getComponentArea(cold=False))
        self.assertAlmostEqual(comp.getComponentArea(Tc=300), comp.getComponentArea(cold=False))
================================================
FILE: armi/reactor/components/volumetricShapes.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Three-dimensional shapes."""
import math
from armi.reactor.components import ShapedComponent, componentParameters
class Sphere(ShapedComponent):
    """A spherical component."""

    is3D = True

    THERMAL_EXPANSION_DIMS = {}

    # Reuse the Circle parameters: a sphere needs the same od/id/mult dimensions.
    # This may cause friction someday in things like the DB interface, but for now
    # the parameter sets are identical.
    pDefs = componentParameters.getCircleParameterDefinitions()

    def __init__(
        self,
        name,
        material,
        Tinput,
        Thot,
        od=None,
        id=None,
        mult=None,
        modArea=None,
        isotopics=None,
        mergeWith=None,
        components=None,
    ):
        super().__init__(
            name,
            material,
            Tinput,
            Thot,
            isotopics=isotopics,
            mergeWith=mergeWith,
            components=components,
        )
        self._linkAndStoreDimensions(components, od=od, id=id, mult=mult, modArea=modArea)

    def getBoundingCircleOuterDiameter(self, Tc=None, cold=False):
        """Abstract bounding circle method that should be overwritten by each shape subclass."""
        return self.getDimension("od")

    def getComponentArea(self, cold=False, Tc=None):
        """Compute an average area over the height."""
        from armi.reactor.blocks import Block  # avoid circular import

        if Tc is not None:
            raise NotImplementedError(f"Cannot calculate area at specified temperature: {Tc}")
        enclosingBlock = self.getAncestor(lambda c: isinstance(c, Block))
        return self.getComponentVolume(cold) / enclosingBlock.getHeight()

    def getComponentVolume(self, cold=False):
        """Computes the volume of the sphere in cm^3."""
        outerDiam = self.getDimension("od", cold=cold)
        innerDiam = self.getDimension("id", cold=cold)
        nSpheres = self.getDimension("mult")
        # Shell volume: difference of the outer and (possibly zero) inner sphere volumes.
        return nSpheres * 4.0 / 3.0 * math.pi * ((outerDiam / 2.0) ** 3 - (innerDiam / 2.0) ** 3)
class Cube(ShapedComponent):
    """More correctly, a rectangular cuboid.

    Optionally, there may be a centric cuboid volume cut out of center of this shape.
    """

    is3D = True

    THERMAL_EXPANSION_DIMS = {}

    pDefs = componentParameters.getCubeParameterDefinitions()

    def __init__(
        self,
        name,
        material,
        Tinput,
        Thot,
        lengthOuter=None,
        lengthInner=None,
        widthOuter=None,
        widthInner=None,
        heightOuter=None,
        heightInner=None,
        mult=None,
        modArea=None,
        isotopics=None,
        mergeWith=None,
        components=None,
    ):
        super().__init__(
            name,
            material,
            Tinput,
            Thot,
            isotopics=isotopics,
            mergeWith=mergeWith,
            components=components,
        )
        self._linkAndStoreDimensions(
            components,
            lengthOuter=lengthOuter,
            lengthInner=lengthInner,
            widthOuter=widthOuter,
            widthInner=widthInner,
            heightOuter=heightOuter,
            heightInner=heightInner,
            mult=mult,
            modArea=modArea,
        )

    def getComponentArea(self, cold=False, Tc=None):
        """Cubes are fully 3D; a single representative cross-sectional area is undefined."""
        raise NotImplementedError("Cannot compute area of a cube component.")

    def getComponentVolume(self):
        """Computes the volume of the cube in cm^3."""
        outerL = self.getDimension("lengthOuter")
        outerW = self.getDimension("widthOuter")
        outerH = self.getDimension("heightOuter")
        innerL = self.getDimension("lengthInner")
        innerW = self.getDimension("widthInner")
        innerH = self.getDimension("heightInner")
        nCubes = self.getDimension("mult")
        # Outer cuboid minus the (possibly zero-sized) hollow inner cuboid.
        return nCubes * (outerL * outerW * outerH - innerL * innerW * innerH)
class RadialSegment(ShapedComponent):
    r"""A RadialSegement represents a volume element with thicknesses in the
    azimuthal, radial and axial directions.

    This a 3D projection of a 2D shape that is an angular slice of a ring or circle.
    The 2D shape is like the one below, with an inner and outer position for the
    theta and the radius:

    Image::

        Y
        ^ -
        |   -
        |  -XXXX\
        | -  \XXXXXXX\
        |  theta  |XXXXXXX|
        |-----------------------> radius, X
        |
        |
    """

    is3D = True

    THERMAL_EXPANSION_DIMS = {}

    pDefs = componentParameters.getRadialSegmentParameterDefinitions()

    def __init__(
        self,
        name,
        material,
        Tinput,
        Thot,
        inner_radius=None,
        outer_radius=None,
        height=None,
        mult=None,
        inner_theta=0,
        outer_theta=math.pi * 2,
        isotopics=None,
        mergeWith=None,
        components=None,
    ):
        # Default thetas span the full circle, i.e. a complete annular ring.
        ShapedComponent.__init__(
            self,
            name,
            material,
            Tinput,
            Thot,
            isotopics=isotopics,
            mergeWith=mergeWith,
            components=components,
        )
        self._linkAndStoreDimensions(
            components,
            inner_radius=inner_radius,
            outer_radius=outer_radius,
            height=height,
            mult=mult,
            inner_theta=inner_theta,
            outer_theta=outer_theta,
        )

    def getComponentArea(self, refVolume=None, refHeight=None, cold=False, Tc=None):
        """Return the component area in cm^2.

        When ``refHeight`` is given, the annular-sector area is scaled by
        ``height/refHeight``; when ``refVolume`` is given, the volume-derived area
        is scaled by ``1/refVolume``. Otherwise it is simply volume over height.
        Temperature-specific evaluation (``Tc``) is not supported.
        """
        if Tc is not None:
            raise NotImplementedError(f"Cannot calculate area at specified temperature: {Tc}")
        if refHeight:
            # (height/refHeight) * mult * (annulus area) * (azimuthal fraction of the circle)
            return (
                (self.getDimension("height", cold=cold) / refHeight)
                * self.getDimension("mult")
                * (
                    math.pi
                    * (
                        self.getDimension("outer_radius", cold=cold) ** 2
                        - self.getDimension("inner_radius", cold=cold) ** 2
                    )
                    * (
                        (self.getDimension("outer_theta", cold=cold) - self.getDimension("inner_theta", cold=cold))
                        / (math.pi * 2.0)
                    )
                )
            )
        if refVolume:
            return (self.getComponentVolume() / refVolume) / self.getDimension("height")
        else:
            return self.getComponentVolume() / self.getDimension("height")

    def getComponentVolume(self):
        """Return the segment volume in cm^3: annulus area x azimuthal fraction x height x mult."""
        mult = self.getDimension("mult")
        outerRad = self.getDimension("outer_radius")
        innerRad = self.getDimension("inner_radius")
        outerTheta = self.getDimension("outer_theta")
        innerTheta = self.getDimension("inner_theta")
        height = self.getDimension("height")
        radialArea = math.pi * (outerRad**2 - innerRad**2)
        aziFraction = (outerTheta - innerTheta) / (math.pi * 2.0)
        vol = mult * radialArea * aziFraction * height
        return vol

    def getBoundingCircleOuterDiameter(self, Tc=None, cold=False):
        """Return the outer diameter (2 x outer_radius) bounding this segment."""
        return 2.0 * self.getDimension("outer_radius", Tc, cold)

    def getCircleInnerDiameter(self, Tc=None, cold=False):
        """Return the inner diameter (2 x inner_radius) of this segment."""
        return 2.0 * self.getDimension("inner_radius", Tc, cold)
class DifferentialRadialSegment(RadialSegment):
    """
    A RadialSegment whose outer dimensions are derived from differentials.

    This component class represents a volume element with thicknesses in the
    azimuthal, radial and axial directions. Furthermore it has dependent
    dimensions (outer theta, outer radius, outer axial) that can be updated
    depending on the 'differential' in the corresponding directions.

    This component class is super useful for defining ThRZ reactors and
    perturbing its dimensions using the optimization modules.

    See Also
    --------
    geometry perturbation:
        armi.physics.optimize.OptimizationInterface.modifyCase (ThRZReflectorThickness,ThRZActiveHeight,ThRZActiveRadius)
    mesh updating:
        armi.reactor.reactors.Reactor.importGeom
    """

    is3D = True
    THERMAL_EXPANSION_DIMS = {}

    def __init__(
        self,
        name,
        material,
        Tinput,
        Thot,
        inner_radius=None,
        radius_differential=None,
        inner_axial=None,
        height=None,
        inner_theta=0,
        azimuthal_differential=2 * math.pi,
        mult=1,
        isotopics=None,
        mergeWith=None,
        components=None,
    ):
        ShapedComponent.__init__(
            self,
            name,
            material,
            Tinput,
            Thot,
            isotopics=isotopics,
            mergeWith=mergeWith,
            components=components,
        )
        self._linkAndStoreDimensions(
            components,
            inner_radius=inner_radius,
            radius_differential=radius_differential,
            inner_axial=inner_axial,
            height=height,
            inner_theta=inner_theta,
            azimuthal_differential=azimuthal_differential,
            mult=mult,
        )
        # Derive the dependent outer_* dimensions from the differentials.
        self.updateDims()

    def updateDims(self, key="", val=None):
        """
        Update the dimensions of this differential radial segment component.

        Sets ``key`` to ``val`` and then recomputes the dependent dimensions:

        * outer_radius = inner_radius + radius_differential
        * outer_axial = inner_axial + height
        * outer_theta = inner_theta + azimuthal_differential

        Notes
        -----
        Can be used to update any dimension on the component, but outer_radius,
        outer_axial, and outer_theta are always updated.

        See Also
        --------
        armi.reactor.blocks.Block.updateComponentDims
        """
        self.setDimension(key, val)
        self.setDimension(
            "outer_radius",
            self.getDimension("inner_radius") + self.getDimension("radius_differential"),
        )
        self.setDimension(
            "outer_axial",
            self.getDimension("inner_axial") + self.getDimension("height"),
        )
        self.setDimension(
            "outer_theta",
            self.getDimension("inner_theta") + self.getDimension("azimuthal_differential"),
        )

    def getComponentArea(self, refVolume=None, refHeight=None, cold=False, Tc=None):
        """
        Return the component area after refreshing the dependent dimensions.

        Parameters
        ----------
        refVolume, refHeight, cold
            Forwarded to :py:meth:`RadialSegment.getComponentArea`.
        Tc : float, optional
            Not supported; raises NotImplementedError if provided.
        """
        if Tc is not None:
            raise NotImplementedError(f"Cannot calculate area at specified temperature: {Tc}")
        self.updateDims()
        # BUGFIX: previously the caller-supplied refVolume/refHeight/cold were
        # discarded (hard-coded None/None/False), so callers never got the
        # normalization they asked for. Forward them to the base class.
        return RadialSegment.getComponentArea(self, refVolume=refVolume, refHeight=refHeight, cold=cold)

    def getComponentVolume(self):
        """Return the component volume after refreshing the dependent dimensions."""
        self.updateDims()
        return RadialSegment.getComponentVolume(self)
================================================
FILE: armi/reactor/composites.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the basic composite pattern underlying the reactor package.
This follows the principles of the `Composite Design Pattern
<https://en.wikipedia.org/wiki/Composite_pattern>`_ to allow the construction of a part/whole
hierarchy representing a physical nuclear reactor. The composite objects act somewhat like lists:
they can be indexed, iterated over, appended, extended, inserted, etc. Each member of the hierarchy
knows its children and its parent, so full access to the hierarchy is available from everywhere.
This design was chosen because of the close analogy of the model to the physical nature of nuclear
reactors.
Warning
-------
Because each member of the hierarchy is linked to the entire tree, it is often unsafe to save
references to individual members; it can cause large and unexpected memory inefficiencies.
See Also
--------
:doc:`/developer/index`.
"""
import collections
import itertools
import operator
import timeit
from typing import (
TYPE_CHECKING,
Callable,
Dict,
Iterator,
List,
Optional,
Tuple,
Type,
Union,
)
import numpy as np
from armi import context, runLog, utils
from armi.nucDirectory import nucDir, nuclideBases
from armi.physics.neutronics.fissionProductModel import fissionProductModel
from armi.reactor import grids, parameters
from armi.reactor.flags import Flags, TypeSpec
from armi.reactor.parameters import resolveCollections
from armi.utils import densityTools, tabulate, units
from armi.utils.densityTools import calculateNumberDensity
from armi.utils.flags import auto
if TYPE_CHECKING:
from armi.reactor.components.component import Component
class FlagSerializer(parameters.Serializer):
    """
    Serializer implementation for Flags.

    This operates by converting each set of Flags (too large to fit in a uint64) into a
    sequence of enough uint8 elements to represent all flags. These constitute a
    dimension of a 2-D numpy array containing all Flags for all objects provided to the
    ``pack()`` function.
    """

    # Schema version of the packed representation; verified on unpack.
    version = "1"

    @staticmethod
    def pack(data):
        """
        Flags are represented as a 2D numpy array of uint8 (single-byte, unsigned
        integers), where each row contains the bytes representing a single Flags
        instance. We also store the list of field names so that we can verify that the
        reader and the writer can agree on the meaning of each bit.

        Under the hood, this calls the private implementation providing the
        :py:class:`armi.reactor.flags.Flags` class as the target output class.
        """
        return FlagSerializer._packImpl(data, Flags)

    @staticmethod
    def _packImpl(data, flagCls: Type[utils.Flag]):
        """
        Implement the pack operation given a target output Flag class.

        This is kept separate from the public interface to permit testing of the
        functionality without having to do unholy things to ARMI's actual set of
        ``reactor.flags.Flags``.
        """
        # Flatten every Flags instance into its bytes, then reshape so each row
        # holds exactly one instance (flagCls.width() bytes wide).
        npa = np.array([b for f in data for b in f.to_bytes()], dtype=np.uint8).reshape((len(data), flagCls.width()))
        return npa, {"flag_order": flagCls.sortedFields()}

    @staticmethod
    def _remapBits(inp: int, mapping: Dict[int, int]):
        """
        Given an input bitfield, map each bit to the appropriate new bit position based
        on the passed mapping.

        Parameters
        ----------
        inp : int
            input bitfield
        mapping : dict
            dictionary mapping from old bit position -> new bit position
        """
        f = 0
        for bit in itertools.count():
            # Once 2**bit exceeds the input, no higher bits can be set; stop.
            if (1 << bit) > inp:
                break
            if (1 << bit) & inp:
                f = f | (1 << mapping[bit])
        return f

    @classmethod
    def unpack(cls, data, version, attrs):
        """
        Reverse the pack operation.

        This will allow for some degree of conversion from old flags to a new set of
        flags, as long as all of the source flags still exist in the current set of
        flags.

        Under the hood, this calls the private implementation providing the
        :py:class:`armi.reactor.flags.Flags` class as the target output class.
        """
        return cls._unpackImpl(data, version, attrs, Flags)

    @classmethod
    def _unpackImpl(cls, data, version, attrs, flagCls: Type[utils.Flag]):
        """
        Implement the unpack operation given a target output Flag class.

        This is kept separate from the public interface to permit testing of the
        functionality without having to do unholy things to ARMI's actual set of
        ``reactor.flags.Flags``.

        If the set of flags for the currently-configured App match the input set of
        flags, they are read in directly, which is good and cheap. However, if the set
        of flags differ from the input and the current App, we will try to convert them
        (as long as all of the input flags exist in the current App). Conversion is done
        by forming a map from all input bit positions to the current-App bit positions
        of the same meaning. E.g., if FUEL flag used to be the 3rd bit position, but now
        it is the 6th bit position, the map will contain ``map[3] = 6``. Then for each
        bitfield that is read in, each bit position is queried and if present, mapped to
        the proper corresponding new bit position. The result of this mapping is used to
        construct the Flags object.
        """
        flagOrderPassed = attrs["flag_order"]
        flagOrderNow = flagCls.sortedFields()

        if version != cls.version:
            raise ValueError(
                f"The FlagSerializer version used to pack the data ({version}) does not match "
                f"the current version ({cls.version})! This database either needs to be migrated, "
                "or on-the-fly inter-version conversion needs to be implemented."
            )

        flagSetIn = set(flagOrderPassed)
        flagSetNow = set(flagOrderNow)

        # Make sure that all of the old flags still exist
        if not flagSetIn.issubset(flagSetNow):
            # Flags present in the database but unknown to the current App are
            # registered on the fly so the data can still be read.
            missingFlags = flagSetIn - flagSetNow
            runLog.warning(
                "The set of flags in the database includes unknown flags. For convenience, we will "
                f"add these to the system: {missingFlags}"
            )
            flagCls.extend({k: auto() for k in missingFlags})
            flagOrderNow = flagCls.sortedFields()

        if all(i == j for i, j in zip(flagOrderPassed, flagOrderNow)):
            # Bit meanings agree between writer and reader: decode rows directly.
            out = [flagCls.from_bytes(row.tobytes()) for row in data]
        else:
            # Bit meanings differ: build the old-bit -> new-bit map and remap
            # each row's bitfield before constructing the Flags object.
            newFlags = {i: flagOrderNow.index(oldFlag) for (i, oldFlag) in enumerate(flagOrderPassed)}
            out = [flagCls(cls._remapBits(int.from_bytes(row.tobytes(), byteorder="little"), newFlags)) for row in data]

        return out
def _defineBaseParameters():
    """
    Return parameter definitions that all ArmiObjects must have to function properly.

    For now, this pretty much just includes ``flags``, since these are used throughout
    the composite model to filter which objects are considered when traversing the
    reactor model.

    Note also that the base ParameterCollection class also has a ``serialNum``
    parameter. These are defined in different locations, since serialNum is a guaranteed
    feature of a ParameterCollection (for serialization to the database and history
    tracking), while the ``flags`` parameter is more a feature of the composite model.

    .. important::
        The ``flags`` parameter IS written to the database (``saveToDB=True``), but it
        needs the custom :py:class:`FlagSerializer` rather than a plain integer
        encoding, for a couple of reasons:

        * It is easy to imagine a number of flags that is greater than the width of
          natively-supported integer types, requiring some extra tricks to store the
          flags in an HDF5 file.
        * Allowing flags to be modified by plugins further complicates things, in that
          it is important to ensure that the meaning of all bits in the flag value are
          consistent between a database state and the current ARMI environment. This
          requires encoding these meanings in to the database as some form of metadata
          (see ``FlagSerializer.pack``, which stores the flag field order).
    """
    pDefs = parameters.ParameterDefinitionCollection()
    pDefs.add(
        parameters.Parameter(
            "flags",
            units=units.UNITLESS,
            description="The type specification of this object",
            location=parameters.ParamLocation.AVERAGE,
            # Persisted via the custom FlagSerializer below.
            saveToDB=True,
            default=Flags(0),
            setter=parameters.NoDefault,
            categories=set(),
            serializer=FlagSerializer,
        )
    )
    return pDefs
class CompositeModelType(resolveCollections.ResolveParametersMeta):
    """
    Metaclass that records every class participating in the composite reactor model.

    Having an easily-accessible collection of all classes in the ARMI composite
    model is frequently useful, so each class created with this metaclass is
    registered, by name, in the ``TYPES`` mapping.
    """

    TYPES: Dict[str, Type] = dict()
    """
    Dictionary mapping class name to class object for all subclasses.

    :meta hide-value:
    """

    def __new__(cls, name, bases, attrs):
        # Let the parameter-resolving metaclass build the class first, then
        # register the result in the global registry.
        klass = resolveCollections.ResolveParametersMeta.__new__(cls, name, bases, attrs)
        CompositeModelType.TYPES[name] = klass
        return klass
class ArmiObject(metaclass=CompositeModelType):
"""
The abstract base class for all composites and leaves.
This:
* declares the interface for objects in the composition
* implements default behavior for the interface common to all classes
* Declares an interface for accessing and managing child objects
* Defines an interface for accessing parents.
Called "component" in gang of four, this is an ArmiObject here because the word component was
already taken in ARMI.
The :py:class:`armi.reactor.parameters.ResolveParametersMeta` metaclass is used to automatically
create ``ParameterCollection`` subclasses for storing parameters associated with any particular
subclass of ArmiObject. Defining a ``pDefs`` class attribute in the definition of a subclass of
ArmiObject will lead to the creation of a new subclass of
py:class:`armi.reactor.parameters.ParameterCollection`, which will contain the definitions from
that class's ``pDefs`` as well as the definitions for all of its parents. A new
``paramCollectionType`` class attribute will be added to the ArmiObject subclass to reflect
which type of parameter collection should be used.
Warning
-------
This class has far too many public methods. We are in the midst of a composite tree cleanup that
will likely break these out onto a number of separate functional classes grouping things like
composition, location, shape/dimensions, and various physics queries. Methods are being
collected here from the various specialized subclasses (Block, Assembly) in preparation for this
next step. As a result, the public API on this method should be considered unstable.
.. impl:: Parameters are accessible throughout the armi tree.
:id: I_ARMI_PARAM1
:implements: R_ARMI_PARAM
An ARMI reactor model is composed of collections of ARMIObject objects. These
objects are combined in a hierarchical manner. Each level of the composite tree
is able to be assigned parameters which define it, such as temperature, flux,
or keff values. This class defines an attribute of type ``ParameterCollection``,
which contains all the functionality of an ARMI ``Parameter`` object. Because
the entire model is composed of ARMIObjects at the most basic level, each level
of the Composite tree contains this parameter attribute and can thus be queried.
Attributes
----------
name : str
Object name
parent : ArmiObject
The object's parent in a hierarchical tree
cached : dict
Some cached values for performance
p : ParameterCollection
The state variables
spatialGrid : grids.Grid
The spatial grid that this object contains
spatialLocator : grids.LocationBase
The location of this object in its parent grid, or global space
See Also
--------
armi.reactor.parameters
"""
paramCollectionType: Optional[Type[parameters.ParameterCollection]] = None
pDefs = _defineBaseParameters()
def __init__(self, name):
    # Human-readable identifier for this object.
    self.name = name
    # Parent composite in the hierarchy; None until attached to one.
    self.parent = None
    # Scratch cache for expensive, frequently-read values (see _getCached/_setCache).
    self.cached = {}
    # Presumably a snapshot slot for ``cached`` during backup/restore — TODO confirm.
    self._backupCache = None
    # Parameter collection; concrete type is synthesized by the metaclass.
    self.p = self.paramCollectionType()
    # NOTE: LFPs are not serialized to the database, which could matter when loading an old DB.
    self._lumpedFissionProducts = None
    # Grid describing where this object's children live, if any.
    self.spatialGrid = None
    # This object's own location; defaults to a free coordinate at the origin.
    self.spatialLocator = grids.CoordinateLocation(0.0, 0.0, 0.0, None)
def __lt__(self, other):
    """
    Implement the less-than operator.

    Implementing this on the ArmiObject allows most objects, under most circumstances to be
    sorted. This is useful from the context of the Database classes, so that they can produce a
    stable layout of the serialized composite structure.

    By default, this sorts using the spatial locator in K, J, I order, which should give a
    relatively intuitive order. It also makes sure that the objects being sorted live in the
    same grid.

    Raises
    ------
    ValueError
        If either object has no spatialLocator, the locators live in different
        grids, or complete indices cannot be produced.
    """
    # Both objects need a locator to be comparable at all.
    if self.spatialLocator is None or other.spatialLocator is None:
        runLog.error(f"could not compare {self} and {other}")
        raise ValueError("One or more of the compared objects have no spatialLocator")

    # Comparing locations across different grids would be meaningless.
    if self.spatialLocator.grid is not other.spatialLocator.grid:
        runLog.error(f"could not compare {self} and {other}")
        raise ValueError(
            "Composite grids must be the same to compare:\n"
            f"This grid: {self.spatialGrid}\n"
            f"Other grid: {other.spatialGrid}"
        )

    try:
        # Reverse (i, j, k) -> (k, j, i) so ordering is K-major.
        t1 = tuple(reversed(self.spatialLocator.getCompleteIndices()))
        t2 = tuple(reversed(other.spatialLocator.getCompleteIndices()))
        return t1 < t2
    except ValueError:
        runLog.error(f"failed to compare {self.spatialLocator} and {other.spatialLocator}")
        raise
def __getstate__(self):
    """
    Reduce data before pickling by severing the parent link.

    Dropping ``parent`` allows, for example, pickling an assembly (or
    MPI-broadcasting it) without dragging the entire reactor along.

    Notes
    -----
    Special treatment of ``parent`` alone is not enough: the spatialGrid keeps
    a back-reference to this object, so ``__setstate__`` must reassign it.
    """
    shallow = dict(self.__dict__)
    shallow["parent"] = None
    if "r" in shallow:
        raise RuntimeError("An ArmiObject should never contain the entire Reactor.")
    return shallow
def __setstate__(self, state):
    """
    Sets the state of this ArmiObject.

    Notes
    -----
    This ArmiObject may have lost a reference to its parent. If the parent was also
    pickled (serialized), then the parent should update the ``.parent`` attribute
    during its own ``__setstate__``. That means within the context of
    ``__setstate__`` one should not rely upon ``self.parent``.
    """
    self.__dict__.update(state)
    # Restore the grid's back-reference to this object (dropped by __getstate__ semantics).
    if self.spatialGrid is not None:
        self.spatialGrid.armiObject = self
    # Spatial locators also get disassociated with their grids when detached;
    # make sure they get hooked back up
    for c in self:
        c.spatialLocator.associate(self.spatialGrid)
    # now "reattach" children
    for c in self:
        c.parent = self
def __repr__(self):
    """Return a short debug representation, e.g. ``<Block: fuel>``."""
    return "<{}: {}>".format(type(self).__name__, self.name)
def __format__(self, spec):
    """Apply format ``spec`` to this object's string form."""
    return str(self).__format__(spec)
def __bool__(self):
    """
    Always truthy, regardless of contents.

    Python's default for objects defining ``__len__`` is to treat zero length
    as falsy. Composites (e.g. an Assembly with no blocks yet) must still count
    as "present" for parent resolution and similar logic, so mere existence
    makes the object truthy.
    """
    return True
def __add__(self, other):
    """Concatenate this object's children with those of ``other``."""
    mine = self.getChildren()
    theirs = other.getChildren()
    return mine + theirs
@property
def nuclideBases(self):
    """Return the Reactor-scoped nuclide bases if attached to one, else the global set."""
    from armi.reactor.reactors import Reactor

    reactor = self.getAncestor(lambda c: isinstance(c, Reactor))
    return reactor.nuclideBases if reactor else nuclideBases.nuclideBases
def duplicate(self):
    """
    Make a clean copy of this object.

    Subclasses must provide the implementation; the base class only defines the
    interface.

    Warning
    -------
    Be careful with inter-object dependencies. If one object contains a reference to another
    object which contains links to the entire hierarchical tree, memory can fill up rather
    rapidly. Weak references are designed to help with this problem.
    """
    raise NotImplementedError
def clearCache(self):
    """Wipe cached values on this object and, recursively, on every child."""
    self.cached = {}
    for kid in self:
        kid.clearCache()
def _getCached(self, name):
    """
    Fetch a previously cached value, or None when absent.

    Caching temporarily stores frequently read but long-to-compute values. The
    practice is generally discouraged because correctly invalidating the cache
    on state changes is hard.
    """
    return self.cached.get(name)
def _setCache(self, name, val):
    """
    Store ``val`` in the cache under ``name``.

    See Also
    --------
    _getCached : returns a previously-cached value
    """
    self.cached[name] = val
def copyParamsFrom(self, other):
    """
    Replace this object's parameters with a copy of another object's values.

    Parameters
    ----------
    other : ArmiObject
        The object to copy params from
    """
    replacement = other.p.__class__()
    for key, value in other.p.items():
        replacement[key] = value
    self.p = replacement
def updateParamsFrom(self, new):
    """
    Overlay parameter values from ``new`` onto this object.

    Parameters
    ----------
    new : ArmiObject
        The object to copy params from
    """
    for key, value in new.p.items():
        self.p[key] = value
def iterChildren(
    self,
    deep=False,
    generationNum=1,
    predicate: Optional[Callable[["ArmiObject"], bool]] = None,
) -> Iterator["ArmiObject"]:
    """
    Iterate over children of this object.

    Parameters
    ----------
    deep : bool, optional
        Recurse through all generations rather than direct children only.
    generationNum : int, optional
        Which generation of descendants to yield (1 = direct children).
    predicate : callable, optional
        Filter; only children for which it returns True are yielded.

    Notes
    -----
    Abstract on the base class; concrete composites must override.
    """
    raise NotImplementedError()
def getChildren(self, deep=False, generationNum=1, includeMaterials=False) -> list["ArmiObject"]:
    """
    Return the children of this object.

    Notes
    -----
    Abstract on the base class; concrete composites must override. Parameters
    mirror ``iterChildren``, plus ``includeMaterials`` to also return material
    objects.
    """
    raise NotImplementedError()
def iterChildrenWithFlags(self, typeSpec: TypeSpec, exactMatch=False) -> Iterator["ArmiObject"]:
    """Iterate over children whose flags match ``typeSpec``."""

    def _matches(child: "ArmiObject") -> bool:
        return child.hasFlags(typeSpec, exactMatch)

    return self.iterChildren(predicate=_matches)
def getChildrenWithFlags(self, typeSpec: TypeSpec, exactMatch=False) -> list["ArmiObject"]:
    """Return a list of children whose flags match ``typeSpec``."""
    return [child for child in self.iterChildrenWithFlags(typeSpec, exactMatch)]
def iterChildrenOfType(self, typeName: str) -> Iterator["ArmiObject"]:
    """Iterate over children whose input type name equals ``typeName``."""

    def _sameType(child: "ArmiObject") -> bool:
        return child.getType() == typeName

    return self.iterChildren(predicate=_sameType)
def getChildrenOfType(self, typeName: str) -> list["ArmiObject"]:
    """Return a list of children that have the given input type name."""
    return [child for child in self.iterChildrenOfType(typeName)]
def getComponents(self, typeSpec: TypeSpec = None, exact=False):
    """
    Return all armi.reactor.component.Component within this Composite.

    Parameters
    ----------
    typeSpec : TypeSpec
        Component flags. Will restrict Components to specific ones matching the
        flags specified.
    exact : bool, optional
        Only match exact component labels (names). If True, 'coolant' will not match
        'interCoolant'. This has no impact if typeSpec is None.

    Returns
    -------
    list of Component
        items matching typeSpec and exact criteria

    Notes
    -----
    Abstract on the base class; concrete composites must override.
    """
    raise NotImplementedError()
def iterComponents(self, typeSpec: TypeSpec = None, exact=False):
    """
    Yield components one by one in a generator.

    Notes
    -----
    Abstract on the base class; parameters mirror ``getComponents``.
    """
    raise NotImplementedError()
def doChildrenHaveFlags(self, typeSpec: TypeSpec, deep=False):
    """
    Generator yielding one boolean per child: True when that child matches.

    Parameters
    ----------
    typeSpec : TypeSpec
        Requested type of the child
    deep : bool, optional
        Check all descendants rather than direct children only.
    """
    for child in self.getChildren(deep):
        yield child.hasFlags(typeSpec, exact=False)
def containsAtLeastOneChildWithFlags(self, typeSpec: TypeSpec):
    """
    Return True when at least one child matches the given type.

    Parameters
    ----------
    typeSpec : TypeSpec
        Requested type of the children

    See Also
    --------
    self.doChildrenHaveFlags
    self.containsOnlyChildrenWithFlags
    """
    return any(self.doChildrenHaveFlags(typeSpec))
def containsOnlyChildrenWithFlags(self, typeSpec: TypeSpec):
    """
    Return True when every child matches the given type.

    Parameters
    ----------
    typeSpec : TypeSpec
        Requested type of the children

    See Also
    --------
    self.doChildrenHaveFlags
    self.containsAtLeastOneChildWithFlags
    """
    return all(self.doChildrenHaveFlags(typeSpec))
def copyParamsToChildren(self, paramNames):
    """
    Push this object's value for each named parameter down onto every child.

    Parameters
    ----------
    paramNames : list
        List of param names to copy to children
    """
    for name in paramNames:
        value = self.p[name]
        for child in self:
            child.p[name] = value
@classmethod
def getParameterCollection(cls):
    """
    Return a new instance of the specific ParameterCollection type associated with this object.

    This has the same effect as ``obj.paramCollectionType()``. Getting a new
    instance through a class method like this is useful in situations where the
    ``paramCollectionType`` is not a top-level object and therefore cannot be
    trivially pickled. Since we know that by the time we want to make any instances
    of/unpickle a given ``ArmiObject``, such a class attribute will have been
    created and associated. So, we use this top-level method to dig
    dynamically down to the underlying parameter collection type.

    .. impl:: Composites (and all ARMI objects) have parameter collections.
        :id: I_ARMI_CMP_PARAMS
        :implements: R_ARMI_CMP_PARAMS

        This class method allows a user to obtain the
        ``paramCollection`` object, which is the object containing the interface for
        all parameters of an ARMI object.

    See Also
    --------
    :py:meth:`armi.reactor.parameters.parameterCollections.ParameterCollection.__reduce__`
    """
    return cls.paramCollectionType()
def getParamNames(self):
    """
    Get a sorted list of parameter keys available on this object.

    Tuple-valued keys (corner/edge/timenode-dependent) are excluded.
    """
    plain = [key for key in self.p.keys() if not isinstance(key, tuple)]
    return sorted(plain)
def nameContains(self, s):
    """
    True if ``s`` is in this object's name (eg. nameContains('fuel')==True for 'testfuel').

    Notes
    -----
    Case insensitive (everything is compared in lower case). A list argument
    matches if any element matches.
    """
    lowered = self.name.lower()
    if isinstance(s, list):
        return any(candidate.lower() in lowered for candidate in s)
    return s.lower() in lowered
def getName(self):
    """Return this composite's name."""
    return self.name
def setName(self, name):
    """Rename this composite."""
    self.name = name
def hasFlags(self, typeID: TypeSpec, exact=False):
    """
    Determine if this object is of a certain type.

    .. impl:: Composites have queryable flags.
        :id: I_ARMI_CMP_FLAG0
        :implements: R_ARMI_CMP_FLAG

        This method queries the flags (i.e. the ``typeID``) of the Composite for a
        given type, returning a boolean representing whether or not the candidate
        flag is present in this ArmiObject. Candidate flags cannot be passed as a
        ``string`` type and must be of a type ``Flag``. If no flags exist in the
        object then ``False`` is returned.

        If a list of flags is provided, then all input flags will be
        checked against the flags of the object. If exact is ``False``, then the
        object must have at least one of candidates exactly. If it is ``True`` then
        the object flags and candidates must match exactly.

    Parameters
    ----------
    typeID : TypeSpec
        Flags to test the object against, to see if it contains them. If a list is
        provided, each element is treated as a "candidate" set of flags. Return True
        if any of candidates match. When exact is True, the object must match one of
        the candidates exactly. If exact is False, the object must have at least the
        flags contained in a candidate for that candidate to be a match; extra flags
        on the object are permitted. None matches all objects if exact is False, or
        no objects if exact is True.
    exact : bool, optional
        Require the type of the object to fully match the provided typeID(s)

    Returns
    -------
    hasFlags : bool
        True if this object is in the typeID list.

    Notes
    -----
    Type comparisons use bitwise comparisons using valid flags.

    If you have an 'inner control' assembly, then this will evaluate True for the
    INNER | CONTROL flag combination. If you just want all FUEL, simply use FUEL
    with no additional qualifiers. For more complex comparisons, use bitwise
    operations.

    Always returns true if typeID is none and exact is False, allowing for default
    parameters to be passed in when the method does not care about the object type.
    If the typeID is none and exact is True, this will always return False.

    Examples
    --------
    If you have an object with the ``INNER``, ``DRIVER``, and ``FUEL`` flags, then

    >>> obj.getType()
    [some integer]

    >>> obj.hasFlags(Flags.FUEL)
    True

    >>> obj.hasFlags(Flags.INNER | Flags.DRIVER | Flags.FUEL)
    True

    >>> obj.hasFlags(Flags.OUTER | Flags.DRIVER | Flags.FUEL)
    False

    >>> obj.hasFlags(Flags.INNER | Flags.FUEL)
    True

    >>> obj.hasFlags(Flags.INNER | Flags.FUEL, exact=True)
    False

    >>> obj.hasFlags([Flags.INNER | Flags.DRIVER | Flags.FUEL, Flags.OUTER | Flags.DRIVER | Flags.FUEL], exact=True)
    False
    """
    if not typeID:
        # None/empty spec: match everything unless an exact match was demanded.
        return not exact
    if isinstance(typeID, str):
        raise TypeError("Must pass Flags, or an iterable of Flags; Strings are no longer supported")
    elif not isinstance(typeID, Flags):
        # list behavior gives a spec1 OR spec2 OR ... behavior.
        return any(self.hasFlags(typeIDi, exact=exact) for typeIDi in typeID)
    if not self.p.flags:
        # default still set, or null flag. Do down here so we get proper error
        # handling of invalid typeSpecs
        return False
    if exact:
        # all bits must be identical for exact match
        return self.p.flags == typeID
    # all bits that are 1s in the typeID must be present
    return self.p.flags & typeID == typeID
def getType(self):
    """Return this object's "type" string (typically the originating blueprint name)."""
    return self.p.type
def setType(self, typ, flags: Optional[Flags] = None):
    """
    Set the object type.

    .. impl:: Composites have modifiable flags.
        :id: I_ARMI_CMP_FLAG1
        :implements: R_ARMI_CMP_FLAG

        This method allows for the setting of flags parameter of the Composite.

    Parameters
    ----------
    typ : str
        The desired "type" for the object. Type describes the general class of the
        object, and typically corresponds to the name of the blueprint that created
        it.
    flags : Flags, optional
        The set of Flags to apply to the object. If these are omitted, then Flags
        will be derived from the ``typ``.

    Warning
    -------
    We are in the process of developing more robust definitions for things like
    "name" and "type". "type" will generally refer to the name of the blueprint that
    created a particular object. When present, a "name" will refer to a specific
    instance of an object of a particular "type". Think unique names for each
    assembly in a core, even if they are all created from the same blueprint and
    therefore have the same "type". When this work is complete, it will be strongly
    discouraged, or even disallowed to change the type of an object after it has
    been created, and ``setType()`` may be removed entirely.
    """
    # Explicit flags win; otherwise derive them from the type string.
    self.p.flags = flags or Flags.fromStringIgnoreErrors(typ)
    self.p.type = typ
def getVolume(self):
    """Return the total volume as the sum over all children."""
    total = 0
    for child in self:
        total += child.getVolume()
    return total
def getArea(self, cold=False):
    """Return the total area as the sum over all children."""
    total = 0
    for child in self:
        total += child.getArea(cold)
    return total
def _updateVolume(self):
    """Recompute and store volume on every child, handling derived shapes last."""
    from armi.reactor.components import DerivedShape

    children = self.getChildren()
    # DerivedShape volumes depend on their siblings, so process them after
    # all of the explicitly-shaped children.
    explicit = [c for c in children if not isinstance(c, DerivedShape)]
    derived = [c for c in children if isinstance(c, DerivedShape)]
    for child in explicit + derived:
        child._updateVolume()
def getVolumeFractions(self):
    """
    Return the volume fraction of each child as (child, fraction) tuples.

    Returns
    -------
    fracs : list
        list of (component, volFrac) tuples

    See Also
    --------
    test_block.Block_TestCase.test_consistentAreaWithOverlappingComponents

    Notes
    -----
    If the children have no volume, their areas are used as weights instead.
    Void areas can be negative in gaps between fuel/clad/liner(s); these
    negative areas deliberately offset overlapping positive areas so the total
    area inside the clad stays accurate. See
    test_block.Block_TestCase.test_consistentAreaWithOverlappingComponents
    """
    children = self.getChildren()
    weights = [child.getVolume() for child in children]
    total = sum(weights)
    if total == 0.0:
        # No volume at all (e.g. 2D components): fall back to areas.
        weights = [child.getArea() for child in children]
        total = sum(weights)
    return [(child, weight / total) for child, weight in zip(children, weights)]
def getVolumeFraction(self):
    """Return the volume fraction that this object takes up in its parent."""
    if self.parent is not None:
        for sibling, fraction in self.parent.getVolumeFractions():
            if sibling is self:
                return fraction

    # Reached when there is no parent (or this object is somehow missing
    # from the parent's children).
    raise ValueError(f"No parent is defined for {self}. Cannot compute its volume fraction.")
def getMaxArea(self):
    """
    The maximum area of this object if it were totally full.

    Notes
    -----
    Abstract on the base class; concrete composites must override.

    See Also
    --------
    armi.reactor.blocks.HexBlock.getMaxArea
    """
    raise NotImplementedError()
def getMass(self, nuclideNames: Union[None, str, list[str]] = None) -> float:
    """
    Determine the mass in grams of nuclide(s) and/or elements in this object.

    .. impl:: Return mass of composite.
        :id: I_ARMI_CMP_GET_MASS
        :implements: R_ARMI_CMP_GET_MASS

        This method allows for the querying of the mass of a Composite.
        If the ``nuclideNames`` argument is included, it will filter for the mass
        of those nuclide names and provide the sum of the mass of those nuclides.

    Parameters
    ----------
    nuclideNames
        The nuclide/element specifier to get the mass of in the object.
        If omitted, total mass is returned.

    Returns
    -------
    mass : float
        The mass in grams.
    """
    # Total mass is simply the sum over all children.
    return sum(c.getMass(nuclideNames=nuclideNames) for c in self)
def getMassFrac(self, nucName):
    """
    Get the mass fraction of a nuclide (summing isotopes if an element is given).

    Notes
    -----
    If you need multiple mass fractions, use ``getMassFracs``.
    """
    # Expand elements to their isotope names, then total those fractions.
    names = self._getNuclidesFromSpecifier(nucName)
    fracs = self.getMassFracs()
    return sum(fracs.get(name, 0.0) for name in names)
def getMicroSuffix(self):
    """Raise; only certain subclasses (e.g. Blocks, Components) define micro suffixes."""
    raise NotImplementedError(
        f"Cannot get the suffix on {type(self)} objects. Only certain subclasses"
        " of composite such as Blocks or Components have the concept of micro suffixes."
    )
def _getNuclidesFromSpecifier(self, nucSpec: Union[None, str, list[str]]):
"""
Convert a nuclide specification to a list of valid nuclide/element keys.
nucSpec : nuclide specifier
Can be a string name of a nuclide or element, or a list of such strings.
This might get Zr isotopes when ZR is passed in if they exist, or it will get elemental ZR if that exists. When
expanding elements, all known nuclides are returned, not just the natural ones.
"""
allNuclidesHere = self.getNuclides()
if nucSpec is None:
return allNuclidesHere
elif isinstance(nucSpec, (str)):
nuclideNames = [nucSpec]
elif isinstance(nucSpec, list):
nuclideNames = nucSpec
else:
raise TypeError(f"nucSpec={nucSpec} is an invalid specifier. It is a {type(nucSpec)}")
# expand elementals if appropriate.
convertedNucNames = []
for nucName in nuclideNames:
if nucName in allNuclidesHere:
convertedNucNames.append(nucName)
continue
try:
# Need all nuclide bases, not just natural isotopics because, e.g. PU
# has no natural isotopics!
nucs = [
nb.name
for nb in self.nuclideBases.elements.bySymbol[nucName].nuclides
if not isinstance(nb, nuclideBases.NaturalNuclideBase)
]
convertedNucNames.extend(nucs)
except KeyError:
convertedNucNames.append(nucName)
return sorted(set(convertedNucNames))
def getMassFracs(self):
    """
    Get mass fractions of all nuclides in object.

    Notes
    -----
    Ni [1/cm3] * Ai [g/mole] ~ mass
    """
    return densityTools.getMassFractions(self.getNumberDensities())
def setMassFrac(self, nucName, val):
    """
    Adjust the composition of this object so the mass fraction of nucName is val.

    See Also
    --------
    setMassFracs : efficiently set multiple mass fractions at the same time.
    """
    # Delegate to the batch setter with a single-entry dict.
    self.setMassFracs({nucName: val})
def setMassFracs(self, massFracs):
    r"""
    Apply one or more adjusted mass fractions.

    This will adjust the total mass of the object, as the mass of everything
    designated will change, while anything else will not.

    .. math::

        m_i = \frac{M_i}{\sum_j(M_j)}

        (M_{j \ne i} + M_i) m_i = M_i

        \frac{m_i M_{j \ne i}}{1-m_i} = M_i

        \frac{m_i M_{j \ne i}}{V(1-m_i)} = M_i/V = m_i \rho

        N_i = \frac{m_i \rho N_A}{A_i}

        N_i = \frac{m_i M_{j \ne i} N_A}{V (1-m_i) {A_i}}

        \frac{M_{j \ne i}}{V} = m_{j \ne i} \rho

        m_{j \ne i} = 1 - m_i

    Notes
    -----
    You can't just change one mass fraction though; you have to scale all
    others to fill the remaining fraction.

    Parameters
    ----------
    massFracs : dict
        nucName : new mass fraction pairs.

    Raises
    ------
    ValueError
        If the current mass density is zero (mass fractions are undefined then).
    """
    rho = self.density()
    if not rho:
        raise ValueError(f"Cannot set mass fractions on {self} because the mass density is zero.")
    oldMassFracs = self.getMassFracs()
    totalFracSet = 0.0
    for nucName, massFrac in massFracs.items():
        # mass fraction -> number density: N_i = m_i * rho * N_A / A_i
        self.setNumberDensity(
            nucName,
            (massFrac * rho * units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM / nucDir.getAtomicWeight(nucName)),
        )
        # Remove explicitly-set nuclides from the "others" pool.
        if nucName in oldMassFracs:
            del oldMassFracs[nucName]
        totalFracSet += massFrac
    totalOther = sum(oldMassFracs.values())
    if totalOther:
        # we normalize the remaining mass fractions so their concentrations relative
        # to each other stay constant.
        normalizedOtherMassFracs = {nucNameOther: val / totalOther for nucNameOther, val in oldMassFracs.items()}
        for nucNameOther, massFracOther in normalizedOtherMassFracs.items():
            self.setNumberDensity(
                nucNameOther,
                (
                    (1.0 - totalFracSet)
                    * massFracOther
                    * rho
                    * units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM
                    / nucDir.getAtomicWeight(nucNameOther)
                ),
            )
def adjustMassFrac(
    self,
    nuclideToAdjust=None,
    elementToAdjust=None,
    nuclideToHoldConstant=None,
    elementToHoldConstant=None,
    val=0.0,
):
    r"""
    Set the mass fraction of some nuclide(s)/element(s), rescaling the others to compensate.

    (E.g. set the initial Zr mass fraction while maintaining Uranium enrichment, but general purpose.)

    Parameters
    ----------
    nuclideToAdjust : str, optional
        The nuclide name to adjust
    elementToAdjust : str, optional
        The element to adjust. All isotopes in this element will adjust
    nuclideToHoldConstant : str, optional
        A nuclide to hold constant
    elementToHoldConstant : str, optional
        An element to hold constant

    val : float
        The value to set the adjusted mass fraction to be.

    Raises
    ------
    ValueError
        If ``val`` is outside [0, 1].
    TypeError
        If neither a nuclide nor an element to adjust is provided.
    RuntimeError
        If the adjusted fractions fail to sum to ``val`` (internal check).

    Notes
    -----
    If you use this for two elements one after the other, you will probably get
    something wrong. For instance, if you have U-10Zr and add Pu at 10% mass
    fraction, the Zr fraction will drop below 10% of the total. The U-Zr fractions
    will remain constant though. So this is mostly useful if you have U-10Zr and
    want to change it to U-5Zr.

    Theory:

    Mass fraction of each nuclide to be adjusted = Ai where A1+A2+A...+AI = A
    Mass fraction of nuclides to be held constant = Ci where sum = C
    Mass fraction of other nuclides is Oi, sum = O
    new value for A is v

    A+C+O = 1.0
    A'=v. If A>0, then A'=A*f1=v where f1 = v/A
    If A=0, then Ai' = v/len(A), distributing the value evenly among isotopes

    Now, to adjust the other nuclides, we know
    A'+C+O' = 1.0 , or v+C+O' = 1.0
    So, O'= 1.0-v-C
    We can scale each Oi evenly by multiplying by the factor f2
    Oi' = Oi * (1-C-v)/O = Oi * f2 where f2 = (1-C-v)/O

    See Also
    --------
    setMassFrac
    getMassFrac
    """
    self.clearCache()  # don't keep densities around or anything.
    if val > 1.0 or val < 0:
        raise ValueError(f"Invalid mass fraction {val} for {nuclideToAdjust}/{elementToAdjust} in {self.getName()}")
    if not nuclideToAdjust and not elementToAdjust:
        raise TypeError("Must provide a nuclide or element to adjust to adjustMassFrac")
    # sum of other nuclide mass fractions before change is Y
    # need Yx+newZr = 1.0 where x is a scaling factor
    # so x=(1-newZr)/Y
    # determine nuclides to hold constant
    nuclides = set(self.getNuclides())
    if nuclideToHoldConstant or elementToHoldConstant:
        # note that if these arguments are false, you'll get ALL nuclides in the
        # material use material.getNuclides to get only non-zero ones. use
        # nucDir.getNuclides to get all. Intersect with current nuclides to
        # eliminate double counting of element/isotopes
        constantNuclides = set(
            nucDir.getNuclideNames(nucName=nuclideToHoldConstant, elementSymbol=elementToHoldConstant)
        ).intersection(nuclides)
        constantSum = sum(self.getMassFrac(nucName) for nucName in constantNuclides)
    else:
        constantNuclides = []
        constantSum = 0.0
    # determine which nuclides we're adjusting.
    # Rather than calling this material's getNuclides method, we call the
    # nucDirectory to do this. this way, even zeroed-out nuclides will get in the mix
    adjustNuclides = set(
        nucDir.getNuclideNames(nucName=nuclideToAdjust, elementSymbol=elementToAdjust)
    ).intersection(nuclides)
    # get original mass frac A of those to be adjusted.
    A = sum(self.getMassFrac(ni) for ni in adjustNuclides)
    factor1 = val / A if A else None
    # set the ones we're adjusting to their given value.
    numNucs = len(adjustNuclides)
    newA = 0.0
    newMassFracs = {}
    for nuc in adjustNuclides:
        if factor1 is None:
            # this is for when adjust nuclides have zero mass fractions. Like Zr.
            # In this case, if there are multiple nuclides, we will distribute them
            # evenly because we have no other indication of how to adjust them.
            newMassFrac = val / numNucs
        else:
            # this is for when the nuclides we're adjusting already exist
            # with non-zero mass fractions could be Pu vector.
            newMassFrac = self.getMassFrac(nuc) * factor1
        newA += newMassFrac
        newMassFracs[nuc] = newMassFrac
        if nuc == "ZR":
            # custom parameter only set here to determine how to behave for UZr
            # density, linear expansion. Can't let it roam with each mass frac
            # 'cause then the density roams too and there are "oscillations"
            self.zrFrac = newMassFrac
    # error checking.
    if abs(newA - val) > 1e-10:
        runLog.error(f"Adjust Mass fraction did not adjust {adjustNuclides} from {A} to {val}. It got to {newA}")
        raise RuntimeError("Failed to adjust mass fraction.")
    # determine the mass fraction of the nuclides that will be adjusted to
    # accommodate the requested change
    othersSum = 1.0 - A - constantSum
    if not othersSum:
        # no others to be modified.
        factor2 = 1.0
    else:
        # use newA rather than val
        factor2 = (1.0 - newA - constantSum) / othersSum
    # change all the other nuclides using f2 factor
    for nuc in self.getNuclides():
        if nuc not in adjustNuclides and nuc not in constantNuclides:
            newMassFracs[nuc] = self.getMassFrac(nuc) * factor2
    self.setMassFracs(newMassFracs)
def adjustMassEnrichment(self, massFraction):
    """
    Adjust the enrichment of this object.

    If it's Uranium, enrichment means U-235 fraction.
    If it's Boron, enrichment means B-10 fraction, etc.

    Abstract here; subclasses that model enrichable compositions implement it.

    Parameters
    ----------
    massFraction : float
        The new enrichment as a mass fraction.
    """
    raise NotImplementedError
def getNumberDensity(self, nucName):
    """
    Return the number density of a nuclide in atoms/barn-cm.

    .. impl:: Get number density for a specific nuclide
        :id: I_ARMI_CMP_NUC0
        :implements: R_ARMI_CMP_NUC

        This method queries the number density
        of a specific nuclide within the Composite. It invokes the
        ``getNuclideNumberDensities`` method for just the requested nuclide.

    Parameters
    ----------
    nucName : str
        Nuclide name, e.g. "U235".

    Returns
    -------
    float
        Number density in atoms/barn-cm (0.0 if absent).

    Notes
    -----
    This can get called very frequently and has to do volume computations so should
    use some kind of caching that is invalidated by any temperature, composition,
    etc. changes. Even with caching the volume calls are still somewhat expensive so
    prefer the methods in see also.

    See Also
    --------
    ArmiObject.getNuclideNumberDensities: More efficient for >1 specific nuc density is needed.
    ArmiObject.getNumberDensities: More efficient for when all nucs in object is needed.
    """
    return self.getNuclideNumberDensities([nucName])[0]
def getNuclideNumberDensities(self, nucNames):
    """Return a list of number densities in atoms/barn-cm for the nuc names requested.

    .. impl:: Get number densities for specific nuclides.
        :id: I_ARMI_CMP_NUC1
        :implements: R_ARMI_CMP_NUC

        This method provides the capability to query the volume weighted number
        densities for a list of nuclides within a given Composite. It provides the
        result in units of atoms/barn-cm. The volume weighting is accomplished by
        multiplying the number densities within each child Composite by the volume
        of the child Composite and dividing by the total volume of the Composite.

    Notes
    -----
    With children present the result is a 1-D numpy array; with no children
    (zero total volume) a plain list of zeros is returned.
    """
    # Scale each child's volume down by its parent's symmetry factor (e.g. 1/3-core).
    volumes = np.array([c.getVolume() / (c.parent.getSymmetryFactor() if c.parent else 1.0) for c in self])  # c x 1
    totalVol = volumes.sum()
    if totalVol == 0.0:
        # there are no children so no volume or number density
        return [0.0] * len(nucNames)
    nucDensForEachComp = np.array([c.getNuclideNumberDensities(nucNames) for c in self])  # c x n
    # Volume-weighted average over children.
    return volumes.dot(nucDensForEachComp) / totalVol
def _getNdensHelper(self):
"""
Return a number densities dict with unexpanded lfps.
Notes
-----
This is implemented more simply on the component level.
"""
nucNames = self.getNuclides()
return dict(zip(nucNames, self.getNuclideNumberDensities(nucNames)))
def getNumberDensities(self, expandFissionProducts=False):
    """
    Retrieve the number densities in atoms/barn-cm of all nuclides (or those requested) in the object.

    .. impl:: Number density of composite is retrievable.
        :id: I_ARMI_CMP_GET_NDENS
        :implements: R_ARMI_CMP_GET_NDENS

        This method provides a way for retrieving the number densities
        of all nuclides within the Composite. It does this by leveraging the
        ``_getNdensHelper`` method, which invokes the ``getNuclideNumberDensities``
        method. This method considers the nuclides within each child Composite of
        this composite (if they exist). If the ``expandFissionProducts`` flag is
        ``True``, then the lumped fission products are expanded to include their
        constituent elements via the ``_expandLFPs`` method.

    Parameters
    ----------
    expandFissionProducts : bool (optional)
        expand the fission product number densities

    Returns
    -------
    numberDensities : dict
        nucName keys, number density values (atoms/bn-cm)
    """
    numberDensities = self._getNdensHelper()
    if expandFissionProducts:
        return self._expandLFPs(numberDensities)
    return numberDensities
def _expandLFPs(self, numberDensities):
    """
    Expand the LFPs on the numberDensities dictionary using this composite's
    lumpedFissionProductCollection.

    Returns the dict with individual fission-product densities merged in and
    the lumped entries removed. Raises RuntimeError if lump nuclides are
    present but no collection exists to expand them.
    """
    lfpCollection = self.getLumpedFissionProductCollection()
    if lfpCollection:  # may not have lfps in non-fuel
        # Merge the expanded fission-product densities into the input dict.
        lfpDensities = lfpCollection.getNumberDensities(self)
        numberDensities = {
            nucName: numberDensities.get(nucName, 0.0) + lfpDensities.get(nucName, 0.0)
            for nucName in set(numberDensities) | set(lfpDensities)
        }
        # remove LFPs from the result
        for lfpName in lfpCollection:
            numberDensities.pop(lfpName, None)
    else:
        # NOTE(review): this sums number *densities* of lump nuclides despite the
        # "Mass" name — it is only used as a nonzero check below.
        lfpMass = sum(
            dens
            for name, dens in numberDensities.items()
            if isinstance(self.nuclideBases.byName[name], nuclideBases.LumpNuclideBase)
        )
        if lfpMass:
            raise RuntimeError(
                f"Composite {self} is attempting to expand lumped fission products, but does not have "
                "an lfpCollection."
            )
    return numberDensities
def getChildrenWithNuclides(self, nucNames):
    """Return children that contain any nuclides in nucNames."""
    sought = set(nucNames)  # only convert to set once
    return [child for child in self if sought.intersection(child.getNuclides())]
def getAncestor(self, fn):
    """
    Return the first ancestor (starting with self) that satisfies the supplied predicate.

    Parameters
    ----------
    fn : Function-like object
        The predicate used to test the validity of an ancestor. Should return
        true if the ancestor satisfies the caller's requirements.
    """
    if fn(self):
        return self
    parent = self.parent
    # Recurse upward; a missing parent terminates the search with None.
    return None if parent is None else parent.getAncestor(fn)
def getAncestorAndDistance(self, fn, _distance=0) -> Optional[Tuple["ArmiObject", int]]:
    """
    Return the first ancestor (starting with self) that satisfies the supplied
    predicate, along with how many levels above self the ancestor lies.

    Parameters
    ----------
    fn : Function-like object
        The predicate used to test the validity of an ancestor. Should return
        true if the ancestor satisfies the caller's requirements.
    """
    if fn(self):
        return self, _distance
    parent = self.parent
    if parent is None:
        return None
    # Each recursion step moves one level up the tree.
    return parent.getAncestorAndDistance(fn, _distance + 1)
def getAncestorWithFlags(self, typeSpec: TypeSpec, exactMatch=False):
    """
    Return the first ancestor (starting with self) that matches the passed flags.

    Parameters
    ----------
    typeSpec : TypeSpec
        A collection of flags to match on candidate parents
    exactMatch : bool
        Whether the flags match should be exact

    Returns
    -------
    armi.composites.ArmiObject
        the first ancestor up the chain of parents that matches the passed flags

    See Also
    --------
    ArmiObject.hasFlags()
    """
    if self.hasFlags(typeSpec, exact=exactMatch):
        return self
    parent = self.parent
    if parent is None:
        return None
    return parent.getAncestorWithFlags(typeSpec, exactMatch=exactMatch)
def getTotalNDens(self):
    """
    Return the total number density of all atoms in this object.

    Returns
    -------
    nTot : float
        Total ndens of all nuclides in atoms/bn-cm. Not homogenized.
    """
    # LFPs count as two! Big deal in non BOL cases.
    lumpSize = fissionProductModel.NUM_FISSION_PRODUCTS_PER_LFP
    total = 0.0
    for name, dens in self.getNumberDensities().items():
        total += dens * (lumpSize if "LFP" in name else 1.0)
    return total
def setNumberDensity(self, nucName, val):
    """
    Set the number density of this nuclide to this value.

    This distributes atom density evenly across all children that contain nucName.
    If the nuclide doesn't exist in any of the children, then that's actually an
    error. This would only happen if some unnatural nuclide like Pu239 built up in
    fresh UZr. That should be anticipated and dealt with elsewhere.

    Raises
    ------
    ValueError
        If ``val`` is nonzero but no child contains ``nucName``.
    """
    activeChildren = self.getChildrenWithNuclides({nucName})
    if not activeChildren:
        # No child holds this nuclide; only a zero request is legal (the loop
        # below is then a no-op). activeVolumeFrac is unused in this branch.
        activeVolumeFrac = 1.0
        if val:
            raise ValueError(
                f"The nuclide {nucName} does not exist in any children of {self}; "
                f"cannot set its number density to {val}. The nuclides here are: {self.getNuclides()}"
            )
    else:
        activeVolumeFrac = sum(vf for ci, vf in self.getVolumeFractions() if ci in activeChildren)
    dehomogenizedNdens = val / activeVolumeFrac  # scale up to dehomogenize on children.
    for child in activeChildren:
        child.setNumberDensity(nucName, dehomogenizedNdens)
def setNumberDensities(self, numberDensities):
    """
    Set one or more multiple number densities. Reset any non-listed nuclides to 0.0.

    Parameters
    ----------
    numberDensities : dict
        nucName: ndens pairs.

    Notes
    -----
    We'd like to not have to call setNumberDensity for each nuclide because we don't
    want to call ``getVolumeFractions`` for each nuclide (it's inefficient).
    """
    # Any nuclide currently present but not requested gets zeroed out.
    for nuc in self.getNuclides():
        if nuc not in numberDensities:
            numberDensities[nuc] = 0.0
    self.updateNumberDensities(numberDensities)
def updateNumberDensities(self, numberDensities):
    """
    Set one or more multiple number densities. Leaves unlisted number densities alone.

    This changes a nuclide number density only on children that already have that
    nuclide, thereby allowing, for example, actinides to stay in the fuel component
    when setting block-level values.

    The complication is that various number densities are distributed among various
    components. This sets the number density for each nuclide evenly across all
    components that contain it.

    Parameters
    ----------
    numberDensities : dict
        nucName: ndens pairs.
    """
    children, volFracs = zip(*self.getVolumeFractions())
    childNucs = tuple(set(child.getNuclides()) for child in children)
    allDehomogenizedNDens = collections.defaultdict(dict)
    # compute potentially-different homogenization factors for each child. evenly
    # distribute entire number density over the subset of active children.
    for nuc, dens in numberDensities.items():
        # get "active" indices, i.e., indices of children containing nuc
        # NOTE: this is one of the rare instances in which (imo), using explicit
        # indexing clarifies subsequent code since it's not necessary to zip +
        # filter + extract individual components (just extract by filtered index).
        indiciesToSet = tuple(i for i, nucsInChild in enumerate(childNucs) if nuc in nucsInChild)
        if not indiciesToSet:
            if dens == 0:
                # density is zero, skip
                continue
            # This nuc doesn't exist in any children but is to be set.
            # Evenly distribute it everywhere.
            childrenToSet = children
            dehomogenizedNDens = dens / sum(volFracs)
        else:
            childrenToSet = tuple(children[i] for i in indiciesToSet)
            dehomogenizedNDens = dens / sum(volFracs[i] for i in indiciesToSet)
        for child in childrenToSet:
            allDehomogenizedNDens[child][nuc] = dehomogenizedNDens
    # apply the child-dependent ndens vectors to the children
    for child, ndens in allDehomogenizedNDens.items():
        child.updateNumberDensities(ndens)
def changeNDensByFactor(self, factor):
    """Change the number density of all nuclides within the object by a multiplicative factor."""
    scaled = {nuc: ndens * factor for nuc, ndens in self.getNumberDensities().items()}
    self.setNumberDensities(scaled)
    # Keep the detailed and pin-level density arrays consistent with the scaling.
    if self.p.detailedNDens is not None:
        self.p.detailedNDens *= factor
    if self.p.pinNDens is not None:
        self.p.pinNDens *= factor
def clearNumberDensities(self):
    """
    Reset all the number densities to nearly zero.

    Set to almost zero, so components remember which nuclides are where.
    """
    trace = units.TRACE_NUMBER_DENSITY
    self.setNumberDensities({nuc: trace for nuc in self.getNuclides()})
def density(self):
    """Returns the mass density of the object in g/cc."""
    total = 0.0
    for nuc in self.getNuclides():
        # ndens [atoms/bn-cm] * A [g/mol] / (mol/cc -> atoms/bn-cm) => g/cc
        total += self.getNumberDensity(nuc) * nucDir.getAtomicWeight(nuc) / units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM
    return total
def getNumberOfAtoms(self, nucName):
    """Return the number of atoms of nucName in this object."""
    ndens = self.getNumberDensity(nucName)  # atoms/bn-cm
    # atoms/bn-cm * cm^3 / (cm^2 per barn) -> atoms
    return ndens * self.getVolume() / units.CM2_PER_BARN
def getLumpedFissionProductCollection(self):
    """
    Get collection of LFP objects. Will work for global or block-level LFP models.

    Returns
    -------
    lfps : LumpedFissionProduct
        lfpName keys , lfp object values

    See Also
    --------
    armi.physics.neutronics.fissionProductModel.lumpedFissionProduct.LumpedFissionProduct : LFP object
    """
    return self._lumpedFissionProducts
def setLumpedFissionProducts(self, lfpCollection):
    """Assign the lumped fission product collection for this object."""
    self._lumpedFissionProducts = lfpCollection
def setChildrenLumpedFissionProducts(self, lfpCollection):
    """Assign the same lumped fission product collection to every direct child."""
    for c in self:
        c.setLumpedFissionProducts(lfpCollection)
def getFissileMassEnrich(self):
    """Returns the fissile mass enrichment (fissile mass over heavy-metal mass)."""
    hmMass = self.getHMMass()
    # Guard against division by zero when there is no heavy metal.
    return self.getFissileMass() / hmMass if hmMass > 0 else 0.0
def getUraniumNumEnrich(self):
    """Returns fissile uranium number fraction."""
    uraniumNames = self._getNuclidesFromSpecifier("U")
    uraniumTotal = sum(self.getNuclideNumberDensities(uraniumNames))
    # Treat vanishingly small uranium content as zero enrichment.
    if uraniumTotal < 1e-10:
        return 0.0
    fissileTotal = sum(self.getNuclideNumberDensities(["U233", "U235"]))
    return fissileTotal / uraniumTotal
def calcTotalParam(
    self,
    param,
    objs=None,
    volumeIntegrated=False,
    addSymmetricPositions=False,
    typeSpec: TypeSpec = None,
    generationNum=1,
    calcBasedOnFullObj=False,
):
    """
    Sums up a parameter throughout the object's children or list of objects.

    Parameters
    ----------
    param : str
        Name of the block parameter to sum
    objs : iterable, optional
        A list of objects to sum over. If none, all children in object will be used
    volumeIntegrated : bool, optional
        Integrate over volume
    addSymmetricPositions : bool, optional
        If True, will multiply by the symmetry factor of the core (3 for 1/3 models,
        1 for full core models)
    typeSpec : TypeSpec
        object types to restrict to
    generationNum : int, optional
        Which generation to consider. 1 means direct children, 2 means children of
        children. Default: Just return direct children.
    calcBasedOnFullObj : bool, optional
        Some assemblies or blocks, such as the center assembly in a third core
        model, are not modeled as full assemblies or blocks. In the third core model
        objects at these positions are modeled as having 1/3 the volume and thus 1/3
        the power. Setting this argument to True will apply the full value of the
        parameter as if it was a full block or assembly.

    Returns
    -------
    float
        The (optionally volume-integrated and symmetry-scaled) sum of the parameter.

    Raises
    ------
    ValueError
        If both symmetry options are requested (double counting), or if the
        power multiplier is falsy when symmetric positions are added.
    """
    tot = 0.0
    if objs is None:
        objs = self.getChildren(generationNum=generationNum)
    if addSymmetricPositions:
        if calcBasedOnFullObj:
            raise ValueError(
                "AddSymmetricPositions is Incompatible with calcBasedOnFullObj. Will result in double counting."
            )
        # The multiplier may live on this object or on its parent (e.g. Core).
        try:
            coreMult = self.powerMultiplier
        except AttributeError:
            coreMult = self.parent.powerMultiplier
        if not coreMult:
            raise ValueError(f"powerMultiplier is equal to {coreMult}")
    else:
        coreMult = 1.0
    for a in objs:
        if not a.hasFlags(typeSpec):
            continue
        mult = a.getVolume() if volumeIntegrated else 1.0
        if calcBasedOnFullObj:
            # Undo partial-object modeling (e.g. 1/3 blocks on symmetry lines).
            mult *= a.getSymmetryFactor()
        tot += a.p[param] * mult
    return tot * coreMult
def calcAvgParam(
    self,
    param,
    typeSpec: TypeSpec = None,
    weightingParam=None,
    volumeAveraged=True,
    absolute=True,
    generationNum=1,
):
    r"""
    Calculate the child-wide average of a parameter.

    Parameters
    ----------
    param : str
        The ARMI block parameter that you want the average from
    typeSpec : TypeSpec
        The child types that should be included in the calculation. Restrict average
        to a certain child type with this parameter.
    weightingParam : None or str, optional
        An optional block param that the average will be weighted against
    volumeAveraged : bool, optional
        volume (or height, or area) average this param
    absolute : bool, optional
        Returns the average of the absolute value of param
    generationNum : int, optional
        Which generation to average over (1 for children, 2 for grandchildren)

    The weighted sum is:

    .. math::

        \left<\text{x}\right> = \frac{\sum_{i} x_i w_i}{\sum_i w_i}

    where :math:`i` is each child, :math:`x_i` is the param value of the i-th child,
    and :math:`w_i` is the weighting param value of the i-th child.

    Warning
    -------
    If a param is unset/zero on any of the children, this will be included in the
    average and may significantly perturb results.

    Returns
    -------
    float
        The average parameter value.

    Raises
    ------
    ValueError
        If a weighting value is negative, or if the weights sum to zero.
    """
    total = 0.0
    weightSum = 0.0
    for child in self.getChildren(generationNum=generationNum):
        if child.hasFlags(typeSpec):
            if weightingParam:
                weight = child.p[weightingParam]
                if weight < 0:
                    # Just for conservatism, do not allow negative weights.
                    raise ValueError(f"Weighting value ({weightingParam},{weight}) cannot be negative.")
            else:
                weight = 1.0
            if volumeAveraged:
                weight *= child.getVolume()
            weightSum += weight
            if absolute:
                total += abs(child.p[param]) * weight
            else:
                total += child.p[param] * weight
    if not weightSum:
        raise ValueError(
            f"Cannot calculate {weightingParam}-weighted average of {param} in {self}. "
            f"Weights sum to zero. typeSpec is {typeSpec}"
        )
    return total / weightSum
def getMaxParam(
    self,
    param,
    typeSpec: TypeSpec = None,
    absolute=True,
    generationNum=1,
    returnObj=False,
):
    """
    Find the maximum value for the parameter in this container.

    Parameters
    ----------
    param : str
        block parameter that will be sought.
    typeSpec : TypeSpec
        restricts the search to cover a variety of block types.
    absolute : bool
        looks for the largest magnitude value, regardless of sign, default: true
    returnObj : bool, optional
        If true, returns the child object as well as the value.

    Returns
    -------
    maxVal : float
        The maximum value of the parameter asked for
    obj : child object
        The object that has the max (only returned if ``returnObj==True``)
    """

    def comparator(candidate, incumbent):
        # "greater than" ordering selects the maximum
        return candidate > incumbent

    return self._minMaxHelper(param, typeSpec, absolute, generationNum, returnObj, -float("inf"), comparator)
def getMinParam(
    self,
    param,
    typeSpec: TypeSpec = None,
    absolute=True,
    generationNum=1,
    returnObj=False,
):
    """
    Find the minimum value for the parameter in this container.

    See Also
    --------
    getMaxParam : details
    """

    def comparator(candidate, incumbent):
        # "less than" ordering selects the minimum
        return candidate < incumbent

    return self._minMaxHelper(param, typeSpec, absolute, generationNum, returnObj, float("inf"), comparator)
def _minMaxHelper(
    self,
    param,
    typeSpec: TypeSpec,
    absolute,
    generationNum,
    returnObj,
    startingNum,
    compartor,
):
    """Helper for getMinParam and getMaxParam.

    ``startingNum`` seeds the search (+/- inf) and ``compartor`` decides
    whether a candidate replaces the incumbent. Returns the real (non-absolute)
    value of the extremum, optionally with the child that holds it.
    """
    # (comparison value, owning child); comparison may be |value| when absolute.
    maxP = (startingNum, None)
    realVal = 0.0
    objs = self.getChildren(generationNum=generationNum)
    for b in objs:
        if b.hasFlags(typeSpec):
            try:
                val = b.p[param]
            except parameters.UnknownParameterError:
                # No worries; not all Composite types are guaranteed to have the
                # relevant parameter. It might be a good idea to more strongly
                # type-check this, perhaps by passing the paramDef,
                # rather than its name?
                continue
            if val is None:
                # Neither bigger or smaller than anything (also illegal in Python3)
                continue
            if absolute:
                absVal = abs(val)
            else:
                absVal = val
            if compartor(absVal, maxP[0]):
                maxP = (absVal, b)
                realVal = val
    if returnObj:
        return realVal, maxP[1]
    else:
        return realVal
def getChildParamValues(self, param):
    """Get the child parameter values in a numpy array."""
    values = [child.p[param] for child in self]
    return np.array(values)
def isFuel(self):
    """True if this is a fuel block (carries the FUEL flag)."""
    return self.hasFlags(Flags.FUEL)
def containsHeavyMetal(self):
    """True if this has HM."""
    for nucName in self.getNuclides():
        if nucDir.isHeavyMetal(nucName):
            return True
    return False
def getNuclides(self):
    """
    Determine which nuclides are present in this armi object.

    Returns
    -------
    set
        Set of nuclide names that exist in this object (union over children).
    """
    nucs = set()
    for child in self:
        nucs.update(child.getNuclides())
    return nucs
def getFissileMass(self):
    """Returns fissile mass in grams (mass of the standard fissile nuclide list)."""
    return self.getMass(nuclideBases.NuclideBase.fissile)
def getHMMass(self):
    """Returns heavy metal mass in grams."""
    heavyMetals = [nucName for nucName in self.getNuclides() if nucDir.isHeavyMetal(nucName)]
    return self.getMass(heavyMetals)
def getHMMoles(self):
    """
    Get the number of moles of heavy metal in this object.

    Returns
    -------
    float
        Moles of heavy metal (HM ndens converted to mol/cc, times volume).

    Notes
    -----
    If an object is on a symmetry line, the volume reported by getVolume
    is reduced to reflect that the block is not wholly within the reactor. This
    reduction in volume reduces the reported HM moles.
    """
    return self.getHMDens() / units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM * self.getVolume()
def getHMDens(self):
    """
    Compute the total heavy metal density of this object.

    Returns
    -------
    hmDens : float
        The total heavy metal number (atom) density in atoms/bn-cm.
    """
    heavyMetals = [nuc for nuc in self.getNuclides() if nucDir.isHeavyMetal(nuc)]
    return np.sum(self.getNuclideNumberDensities(heavyMetals))
def getFPMass(self):
    """Returns mass of fission products in this block in grams."""
    # Lumped fission product nuclides are identified by "LFP" in the name.
    lfpNames = [nucName for nucName in self.getNuclides() if "LFP" in nucName]
    return self.getMass(lfpNames)
def getFuelMass(self):
    """Returns mass of fuel in grams, summed over all children."""
    return sum(child.getFuelMass() for child in self)
def constituentReport(self):
    """A print out of some pertinent constituent information.

    Returns a fixed-width table (string) of U, PU, FP, and MA masses expressed
    as percentages of the HM and fuel masses.
    """
    from armi.utils import iterables

    elementz = self.nuclideBases.elements
    rows = [["Constituent", "HMFrac", "FuelFrac"]]
    # Column normalizers: -1 is a placeholder for the name column (never a
    # valid divisor; names hit the TypeError branch below anyway).
    columns = [-1, self.getHMMass(), self.getFuelMass()]
    for base_ele in ["U", "PU"]:
        total = sum([self.getMass(nuclide.name) for nuclide in elementz.bySymbol[base_ele]])
        rows.append([base_ele, total, total])
    fp_total = self.getFPMass()
    rows.append(["FP", fp_total, fp_total])
    # Minor actinides: every nuclide of elements with Z > 94.
    ma_nuclides = iterables.flatten(
        [ele.nuclides for ele in [elementz.byZ[key] for key in elementz.byZ.keys() if key > 94]]
    )
    ma_total = sum([self.getMass(nuclide.name) for nuclide in ma_nuclides])
    rows.append(["MA", ma_total, ma_total])
    # Convert masses to percents in place; "-" for zero, "NaN" for 0-mass columns.
    for i, row in enumerate(rows):
        for j, entry in enumerate(row):
            try:
                percent = entry / columns[j] * 100.0
                rows[i][j] = percent or "-"
            except ZeroDivisionError:
                rows[i][j] = "NaN"
            except TypeError:
                pass  # trying to divide the string name
    return "\n".join(["{:<14}{:<10}{:<10}".format(*row) for row in rows])
def getAtomicWeight(self):
    r"""
    Calculate the atomic weight of this object in g/mole of atoms.

    .. warning:: This is not the molecular weight, which is grams per mole of
        molecules (grams/gram-molecule). That requires knowledge of the chemical
        formula. Don't be surprised when you run this on UO2 and find it to be 90;
        there are a lot of Oxygen atoms in UO2.

    .. math::

        A =  \frac{\sum_i N_i A_i }{\sum_i N_i}
    """
    weightedSum = 0.0
    totalNDens = 0.0
    for nucName, nDen in self.getNumberDensities().items():
        weightedSum += self.nuclideBases.byName[nucName].weight * nDen
        totalNDens += nDen
    # number-density-weighted average of the nuclide atomic weights
    return weightedSum / totalNDens
def getMasses(self):
    """
    Return a dictionary of masses indexed by their nuclide names.

    Returns
    -------
    dict
        nucName keys, mass values in grams.

    Notes
    -----
    Implemented to get number densities and then convert to mass
    because getMass is too slow on a large tree.
    """
    numDensities = self.getNumberDensities()
    vol = self.getVolume()
    return {nucName: densityTools.getMassInGrams(nucName, vol, ndens) for nucName, ndens in numDensities.items()}
def getIntegratedMgFlux(self, adjoint=False, gamma=False):
    """Return volume-integrated multigroup flux; abstract here, implemented by subclasses."""
    raise NotImplementedError
def getMgFlux(self, adjoint=False, average=False, gamma=False):
    """
    Return the multigroup neutron flux in [n/cm^2/s].

    The first entry is the first energy group (fastest neutrons). Each additional group is the next energy group, as
    set in the ISOTXS library.

    On blocks, it is stored integrated over volume on .p.mgFlux

    Parameters
    ----------
    adjoint : bool, optional
        Return adjoint flux instead of real
    average : bool, optional
        If true, will return average flux between latest and previous. Doesn't work
        for pin detailed yet
    gamma : bool, optional
        Whether to return the neutron flux or the gamma flux.

    Returns
    -------
    flux : np.ndarray
        multigroup neutron flux in [n/cm^2/s]

    Raises
    ------
    NotImplementedError
        If ``average=True``; averaging is only supported on blocks.
    """
    if average:
        # Fixed message typo: "tryusing" -> "try using".
        raise NotImplementedError(
            f"{self.__class__} class has no method for producing average MG flux -- try using blocks"
        )
    # Convert volume-integrated flux to a flux density.
    volume = self.getVolume()
    return self.getIntegratedMgFlux(adjoint=adjoint, gamma=gamma) / volume
def removeMass(self, nucName, mass):
    """Remove ``mass`` grams of ``nucName`` by adding a negative mass."""
    self.addMass(nucName, -mass)
def addMass(self, nucName, mass):
    """Add mass to a particular nuclide.

    Parameters
    ----------
    nucName : str
        nuclide name e.g. 'U235'
    mass : float
        mass in grams of nuclide to be added to this armi Object
    """
    vol = self.getVolume()
    # Convert the mass delta to a number-density delta and add it on.
    deltaNDens = densityTools.calculateNumberDensity(nucName, mass, vol)
    self.setNumberDensity(nucName, self.getNumberDensity(nucName) + deltaNDens)
def addMasses(self, masses):
    """
    Adds a vector of masses.

    Parameters
    ----------
    masses : dict
        a dictionary of masses (g) indexed by nucNames (string)
    """
    for nucName, grams in masses.items():
        # Skip zero entries to avoid pointless density updates.
        if grams:
            self.addMass(nucName, grams)
def setMass(self, nucName, mass):
    """
    Set the mass in an object by adjusting the ndens of the nuclides.

    Parameters
    ----------
    nucName : str
        Nuclide name to set mass of
    mass : float
        Mass in grams to set.
    """
    # Use the namespaced helper for consistency with addMass/getMasses, which
    # call densityTools.calculateNumberDensity/getMassInGrams.
    d = densityTools.calculateNumberDensity(nucName, mass, self.getVolume())
    self.setNumberDensity(nucName, d)
def setMasses(self, masses):
    """
    Set a vector of masses.

    Parameters
    ----------
    masses : dict
        a dictionary of masses (g) indexed by nucNames (string)
    """
    # Wipe existing densities first so the object ends up with exactly `masses`.
    self.clearNumberDensities()
    for nucName, grams in masses.items():
        self.setMass(nucName, grams)
def getSymmetryFactor(self):
    """
    Return a scaling factor due to symmetry on the area of the object or its children.

    The base implementation is the identity factor (1.0); subclasses with partial-core
    symmetry override it.

    See Also
    --------
    armi.reactor.blocks.HexBlock.getSymmetryFactor : concrete implementation
    """
    return 1.0
def getBoundingIndices(self):
    """
    Find the 3-D index bounds (min, max) of all children in the spatial grid of this object.

    Returns
    -------
    bounds : tuple
        ((minI, maxI), (minJ, maxJ), (minK, maxK))
    """
    inf = float("inf")
    allIndices = [child.spatialLocator.getCompleteIndices() for child in self]
    if not allIndices:
        # No children: preserve the historical "empty" result of (inf, -inf) bounds.
        return ((inf, -inf), (inf, -inf), (inf, -inf))
    iVals = [ijk[0] for ijk in allIndices]
    jVals = [ijk[1] for ijk in allIndices]
    kVals = [ijk[2] for ijk in allIndices]
    return (
        (min(iVals), max(iVals)),
        (min(jVals), max(jVals)),
        (min(kVals), max(kVals)),
    )
def getComponentNames(self):
    """
    Get all unique component names of this Composite.

    Returns
    -------
    set
        A set of all unique component names found in this Composite.
    """
    return {comp.getName() for comp in self.iterComponents()}
def getComponentsOfShape(self, shapeClass):
    """
    Return list of components in this block of a particular shape.

    Parameters
    ----------
    shapeClass : Component
        The class of component, e.g. Circle, Helix, Hexagon, etc.

    Returns
    -------
    list
        List of components in this block that are of the given shape.
    """
    matches = []
    for comp in self.iterComponents():
        if isinstance(comp, shapeClass):
            matches.append(comp)
    return matches
def getComponentsOfMaterial(self, material=None, materialName=None):
    """
    Return list of components in this block that are made of a particular material.

    Only one of the selectors may be used.

    Parameters
    ----------
    material : armi.materials.material.Material, optional
        The material to match
    materialName : str, optional
        The material name to match.

    Returns
    -------
    componentsWithThisMat : list

    Raises
    ------
    ValueError
        If both selectors are given, or neither is. (Previously an ``assert`` --
        which is stripped under ``python -O`` -- and an opaque AttributeError.)
    """
    if material is not None and materialName is not None:
        raise ValueError("Cannot call with more than one selector. Choose one or the other.")
    if materialName is None:
        if material is None:
            raise ValueError("Must provide either a material or a materialName.")
        materialName = material.getName()

    componentsWithThisMat = []
    for c in self.iterComponents():
        if c.getProperties().getName() == materialName:
            componentsWithThisMat.append(c)
    return componentsWithThisMat
def hasComponents(self, typeSpec: Union[TypeSpec, List[TypeSpec]], exact=False):
    """
    Return True only if components matching every given TypeSpec exist in this object.

    Parameters
    ----------
    typeSpec : Flags or iterable of Flags
        Component flags to check for
    exact : bool, optional
        Forwarded to :meth:`getComponents`.
    """
    try:
        specs = tuple(typeSpec)
    except TypeError:
        # A single, non-iterable spec was given; treat it as a one-element collection.
        specs = (typeSpec,)
    return all(self.getComponents(spec, exact) for spec in specs)
def getComponentByName(self, name: str) -> "Component":
    """
    Get a particular component from this object, based on its name.

    Parameters
    ----------
    name
        The blueprint name of the component to return

    Returns
    -------
    Component whose ``name`` matches, or None when there is no match.

    Raises
    ------
    ValueError
        If several components share the requested name.
    """
    matches = [c for c in self.iterComponents() if c.name == name]
    if not matches:
        return None
    if len(matches) > 1:
        raise ValueError(f"More than one component named '{name}' in {self}")
    return matches[0]
def getComponent(self, typeSpec: TypeSpec, exact: bool = False, quiet: bool = True) -> Optional["Component"]:
    """
    Get a particular component from this object.

    Be careful with multiple similar names in one object.

    Parameters
    ----------
    typeSpec : flags.Flags or list of Flags
        The type specification of the component to return
    exact : boolean, optional
        Demand that the component flags be exactly equal to the typespec. Default: False
    quiet : boolean, optional
        Log if the component is not found. Default: True

    Returns
    -------
    Component : The component that matches the criteria or None

    Raises
    ------
    ValueError: more than one Component matches the typeSpec
    """
    matches = self.getComponents(typeSpec, exact=exact)
    if len(matches) > 1:
        raise ValueError(f"Multiple components match in {self} match typeSpec {typeSpec}: {matches}")
    if matches:
        return matches[0]
    if not quiet:
        runLog.debug(
            f"No component matched {typeSpec} in {self}. Returning None",
            single=True,
            label=f"None component returned instead of {typeSpec}",
        )
    return None
def getNumComponents(self, typeSpec: TypeSpec, exact=False):
    """
    Get the number of components that have these flags, taking into account multiplicity. Useful
    for getting nPins even when there are pin detailed cases.

    Parameters
    ----------
    typeSpec : Flags
        Expected flags of the component to get. e.g. Flags.FUEL
    exact : bool, optional
        Require an exact flag match.

    Returns
    -------
    total : int
        the number of components of this type in this object, including multiplicity.
    """
    return sum(int(c.getDimension("mult")) for c in self.iterComponents(typeSpec, exact))
def setComponentDimensionsReport(self):
    """Make a summary of the dimensions of the components in this object.

    Returns
    -------
    list
        One dimension report per component, in iteration order.
    """
    return [c.setDimensionReport() for c in self.iterComponents()]
def expandAllElementalsToIsotopics(self):
    """Expand every natural-element nuclide present in this object into its natural isotopes."""
    reactorNucs = self.getNuclides()
    # Only expand elementals that actually appear somewhere in this object.
    for elemental in self.nuclideBases.where(
        lambda nb: isinstance(nb, nuclideBases.NaturalNuclideBase) and nb.name in reactorNucs
    ):
        self.expandElementalToIsotopics(elemental)
def expandElementalToIsotopics(self, elementalNuclide):
    """
    Expands the density of a specific elemental nuclides to its natural isotopics.

    Parameters
    ----------
    elementalNuclide : :class:`armi.nucDirectory.nuclideBases.NaturalNuclide` natural nuclide to
        replace.
    """
    natName = elementalNuclide.name
    for component in self.iterComponents():
        elementalDensity = component.getNumberDensity(natName)
        if elementalDensity == 0.0:
            continue
        # Keep every nuclide except the elemental being expanded (p.nuclides holds bytes).
        keepIndex = np.where(component.p.nuclides != natName.encode())[0]
        newNuclides = [nuc.decode() for nuc in component.p.nuclides[keepIndex]]
        newNDens = component.p.numberDensities[keepIndex]
        # wipe=True replaces the component's densities with the filtered set.
        component.updateNumberDensities(dict(zip(newNuclides, newNDens)), wipe=True)
        # add in isotopics
        for natNuc in elementalNuclide.getNaturalIsotopics():
            component.setNumberDensity(natNuc.name, elementalDensity * natNuc.abundance)
def getAverageTempInC(self, typeSpec: TypeSpec = None, exact=False):
    """Return the average temperature of the ArmiObject in C by averaging all components."""
    weightedSum = 0.0
    volumeSum = 0.0
    for comp in self.iterComponents(typeSpec, exact):
        compVol = comp.getVolume()
        weightedSum += comp.temperatureInC * compVol
        volumeSum += compVol
    # Raises ZeroDivisionError when no components match, same as the prior behavior.
    return weightedSum / volumeSum
def resolveLinkedDims(self, components):
    """Resolve link strings to links on all child components.

    Parameters
    ----------
    components
        Candidate link targets forwarded verbatim to each child component's
        ``resolveLinkedDims``; presumably a name-keyed mapping -- confirm there.
    """
    for component in self.iterComponents():
        component.resolveLinkedDims(components)
def getDominantMaterial(self, typeSpec: TypeSpec = None, exact=False):
    """
    Return the first sample of the most dominant material (by volume) in this object.

    Parameters
    ----------
    typeSpec : Flags or iterable of Flags, optional
        The types of components to consider (e.g. ``[Flags.FUEL, Flags.CONTROL]``)
    exact : bool, optional
        Whether or not the TypeSpec is exact

    Returns
    -------
    mat : armi.materials.material.Material
        the first instance of the most dominant material (by volume) in this object.

    See Also
    --------
    getComponentsOfMaterial
        Gets components that are made of a particular material
    gatherMaterialsByVolume
        Classifies all materials by volume
    """
    # Delegate to the module-level helper, wrapping self in a one-element list.
    return getDominantMaterial([self], typeSpec, exact)
class Composite(ArmiObject):
"""
An ArmiObject that has children.
This is a fundamental ARMI state object that generally represents some piece of the
nuclear reactor that is made up of other smaller pieces. This object can cache
information about its children to help performance.
**Details about spatial representation**
Spatial representation of a ``Composite`` is handled through a combination of the
``spatialLocator`` and ``spatialGrid`` parameters. The ``spatialLocator`` is a numpy
triple representing either:
1. Indices in the parent's ``spatialGrid`` (for lattices, etc.), used when the dtype is int.
2. Coordinates in the parent's universe in cm, used when the dtype is float.
The top parent of any composite must have a coordinate-based ``spatialLocator``. For
example, a Reactor and a Pump should both have coordinates based on how far apart
they are.
The traversal of indices and grids is recursive. The Reactor/Core/Assembly/Block
model is handled by putting a 2-D grid (either Theta-R, Hex, or Cartesian) on the
Core and individual 1-D Z-meshes on the assemblies. Then, Assemblies have 2-D
spatialLocators (i,j,0) and Blocks have 1-D spatialLocators (0,0,k). These get added
to form the global indices. This way, if an assembly is moved, all the blocks
immediately and naturally move with it. Individual children may have
coordinate-based spatialLocators mixed with siblings in a grid. This allows mixing
grid-representation with explicit representation, often useful in advanced
assemblies and thermal reactors.
.. impl:: Composites are a physical part of the reactor in a hierarchical data model.
:id: I_ARMI_CMP0
:implements: R_ARMI_CMP
An ARMI reactor model is composed of collections of ARMIObject objects. This
class is a child-class of the ARMIObject class and provides a structure
allowing a reactor model to be composed of Composites.
This class provides various methods to query and modify the hierarchical ARMI
reactor model, including but not limited to, iterating, sorting, and adding or
removing child Composites.
"""
_children: list["Composite"]
def __init__(self, name):
    ArmiObject.__init__(self, name)
    # Cache mapping spatialLocator -> child; rebuilt by _genChildByLocationLookupTable.
    self.childrenByLocator = {}
    self._children = []

def __getitem__(self, index):
    """Return the child at ``index`` in insertion order."""
    return self._children[index]

def __setitem__(self, index, obj):
    """Disallowed; use :meth:`add`, :meth:`insert`, or :meth:`setChildren` instead."""
    raise NotImplementedError("Unsafe to insert elements directly")

def __iter__(self):
    """Iterate over the direct children of this object."""
    return iter(self._children)

def __len__(self):
    """Number of direct children."""
    return len(self._children)
def __contains__(self, item):
    """
    Membership check.

    This does not use equality checks for membership because equality
    operations can be fairly heavy. Rather, this only checks direct identity
    matches.
    """
    # Short-circuiting identity scan; avoids building a throwaway set of ids
    # on every membership test as the previous implementation did.
    return any(child is item for child in self._children)
def sort(self):
    """Sort the children of this object in place, then recurse down the tree."""
    # sort the top-level children of this Composite
    self._children.sort()
    # recursively sort the children below it.
    for c in self._children:
        if issubclass(c.__class__, Composite):
            c.sort()
def index(self, obj):
    """Obtain the list index of a particular child."""
    return self._children.index(obj)

def append(self, obj):
    """Append a child to this object (no parent assignment; see :meth:`add`)."""
    self._children.append(obj)

def extend(self, seq):
    """Add a list of children to this object."""
    # Route through add() so parent assignment and duplicate checks still apply.
    for item in seq:
        self.add(item)
def add(self, obj):
    """Add one new child, adopting it as this object's child.

    Raises
    ------
    RuntimeError
        If ``obj`` is already a child (identity check via ``__contains__``).
    """
    if obj in self:
        raise RuntimeError(f"Cannot add {obj} because it has already been added to {self}.")
    obj.parent = self
    self._children.append(obj)
def remove(self, obj):
    """Remove a particular child, detaching it from its parent and this object's grid."""
    obj.parent = None
    # Detached copy so the removed child no longer references this object's grid.
    obj.spatialLocator = obj.spatialLocator.detachedCopy()
    self._children.remove(obj)
def moveTo(self, locator):
    """Move to specific location in parent. Often in a grid.

    Parameters
    ----------
    locator
        A spatial locator whose grid must be attached to this object's parent.

    Raises
    ------
    ValueError
        If ``locator`` belongs to a grid on some other object.
    """
    if locator.grid.armiObject is not self.parent:
        # The second string was missing its f-prefix, so "{self.parent}"
        # appeared literally in the error message.
        raise ValueError(
            f"Cannot move {self} to a location in {locator.grid.armiObject}"
            f", which is not its parent ({self.parent})."
        )
    self.spatialLocator = locator
def insert(self, index, obj):
    """Insert an object into the list of children at a particular index.

    Raises
    ------
    RuntimeError
        If ``obj`` is already a child of this object.
    """
    # Use the identity-based membership check (``obj in self``) for consistency
    # with ``add``; the previous ``obj in self._children`` used equality, which
    # __contains__ documents as too heavy for composites.
    if obj in self:
        raise RuntimeError(f"Cannot insert {obj} because it has already been added to {self}.")
    obj.parent = self
    self._children.insert(index, obj)
def removeAll(self):
    """Remove all children."""
    # Iterate over a copy since remove() mutates the underlying child list.
    for c in self.getChildren()[:]:
        self.remove(c)

def setChildren(self, items):
    """Clear this container and fill it with new children."""
    self.removeAll()
    for c in items:
        self.add(c)
def iterChildren(
    self,
    deep=False,
    generationNum=1,
    predicate: Optional[Callable[["Composite"], bool]] = None,
) -> Iterator["Composite"]:
    """Iterate over children objects of this composite.

    Parameters
    ----------
    deep : bool, optional
        If true, traverse the entire composite tree. Otherwise, go as far as ``generationNum``.
    generationNum : int, optional
        Produce composites at this depth. A depth of ``1`` includes children of ``self``, ``2``
        is children of children, and so on.
    predicate : f(Composite) -> bool, optional
        Function to check on a composite before producing it. All items in the iteration
        will pass this check.

    Returns
    -------
    iterator of Composite

    Raises
    ------
    RuntimeError
        If ``deep`` is combined with a ``generationNum`` above one; the options
        are mutually exclusive.

    See Also
    --------
    getChildren : list-producing counterpart for repeated iteration, indexing,
        sorting, or other list operations. Composites are also naturally iterable,
        so ``for child in c`` is equivalent to ``for child in c.iterChildren()``,
        and :func:`filter` covers simple predicate-only uses.
    """
    if deep and generationNum > 1:
        raise RuntimeError("Cannot get children with a generation number set and the deep flag set")
    checker = predicate if predicate is not None else (lambda _: True)
    yield from self._iterChildren(deep, generationNum, checker)
def _iterChildren(
    self, deep: bool, generationNum: int, checker: Callable[["Composite"], bool]
) -> Iterator["Composite"]:
    # Produce this generation's children when requested (always when deep).
    if deep or generationNum == 1:
        for child in self:
            if checker(child):
                yield child
    # Recurse further down when deeper generations are requested.
    if deep or generationNum > 1:
        for child in self:
            yield from child._iterChildren(deep, generationNum - 1, checker)
def iterChildrenWithMaterials(self, *args, **kwargs) -> Iterator:
    """Produce an iterator over children that also yields each child's material.

    Arguments are forwarded to :meth:`iterChildren` and control the depth of traversal
    and filtering of objects. Each child is produced first, immediately followed by
    its ``material`` attribute when one is present. This is useful for sending state
    across MPI tasks where a fuller representation of the composite tree is needed.
    """
    for child in self.iterChildren(*args, **kwargs):
        yield child
        material = getattr(child, "material", None)
        if material is not None:
            yield material
def getChildren(
    self,
    deep=False,
    generationNum=1,
    includeMaterials=False,
    predicate: Optional[Callable[["Composite"], bool]] = None,
) -> list["Composite"]:
    """
    Return the children objects of this composite.

    .. impl:: Composites have children in the hierarchical data model.
        :id: I_ARMI_CMP1
        :implements: R_ARMI_CMP

        This method retrieves all children within a given Composite object. Children of any
        generation can be retrieved. This is achieved by visiting all children and calling this
        method recursively for each generation requested.

        If the method is called with ``includeMaterials``, it will additionally include
        information about the material for each child. If a function is supplied as the
        ``predicate`` argument, then this method will be used to evaluate all children as a
        filter to include or not. For example, if the caller of this method only desires
        children with a certain flag, or children which only contain a certain material, then
        the ``predicate`` function can be used to perform this filtering.

    Parameters
    ----------
    deep : boolean, optional
        Return all children of all levels.
    generationNum : int, optional
        Which generation to return. 1 means direct children, 2 means children of children.
        Setting this parameter will only return children of this generation, not their parents.
        Default: Just return direct children.
    includeMaterials : bool, optional
        Include the material properties
    predicate : callable, optional
        An optional unary predicate to use for filtering results. This can be used to request
        children of specific types, or with desired attributes. Not all ArmiObjects have the
        same methods and members, so care should be taken to make sure that the predicate
        executes gracefully in all cases (e.g., use ``getattr(obj, "attribute", None)`` to
        access instance attributes). Failure to meet the predicate only affects the object in
        question; children will still be considered.

    Returns
    -------
    list of Composite
        The matching children (with materials interleaved when ``includeMaterials``).

    See Also
    --------
    :meth:`iterChildren` if you do not need to produce a full list, e.g., just iterating
    over objects.

    Examples
    --------
    >>> obj.getChildren()
    [child1, child2, child3]

    >>> obj.getChildren(generationNum=2)
    [grandchild1, grandchild2, grandchild3]

    >>> obj.getChildren(deep=True)
    [child1, child2, child3, grandchild1, grandchild2, grandchild3]

    # Assuming that grandchild1 and grandchild3 are Component objects
    >>> obj.getChildren(deep=True, predicate=lambda o: isinstance(o, Component))
    [grandchild1, grandchild3]
    """
    if not includeMaterials:
        items = self.iterChildren(deep=deep, generationNum=generationNum, predicate=predicate)
    else:
        items = self.iterChildrenWithMaterials(deep=deep, generationNum=generationNum, predicate=predicate)

    return list(items)
def getComponents(self, typeSpec: TypeSpec = None, exact=False):
    """
    Return a list of Component objects within this Composite.

    Parameters
    ----------
    typeSpec : TypeSpec
        Component flags. Will restrict Components to specific ones matching the flags specified.
    exact : bool, optional
        Only match exact component labels (names). If True, 'coolant' will not match 'interCoolant'. This has no
        impact if typeSpec is None.

    Returns
    -------
    list of Component
        items matching typeSpec and exact criteria

    See Also
    --------
    iterComponents : lazy counterpart used to build this list.
    """
    return list(self.iterComponents(typeSpec, exact))
def getFirstComponent(self, typeSpec: TypeSpec = None, exact=False):
    """
    Return a single Component object within this Composite.

    Parameters
    ----------
    typeSpec : TypeSpec
        Component flags. Will restrict Components to specific ones matching the flags specified.
    exact : bool, optional
        Only match exact component labels (names). If True, 'coolant' will not match 'interCoolant'. This has no
        impact if typeSpec is None.

    Returns
    -------
    Component
        The first item matching typeSpec and exact criteria

    Raises
    ------
    ValueError
        If no component matches.
    """
    try:
        return next(self.iterComponents(typeSpec, exact))
    except StopIteration:
        # `from None` keeps the internal StopIteration out of the caller's traceback.
        raise ValueError(f"No component matches {typeSpec} {exact}") from None
def iterComponents(self, typeSpec: TypeSpec = None, exact: bool = False) -> Iterator["Component"]:
    """
    Iterate over armi.reactor.component.Component objects within this Composite.

    Parameters
    ----------
    typeSpec : TypeSpec
        Component flags. Will restrict Components to specific ones matching the flags specified.
    exact : bool, optional
        Only match exact component labels (names). If True, 'coolant' will not match
        'interCoolant'. This has no impact if typeSpec is None.

    Returns
    -------
    iterator of Component
        items matching typeSpec and exact criteria
    """
    # Delegate recursively; leaves (Components) terminate the recursion by
    # producing themselves.
    for child in self:
        yield from child.iterComponents(typeSpec, exact)
def syncMpiState(self):
    """
    Synchronize all parameters of this object and all children to all worker nodes over the
    network using MPI.

    In parallelized runs, if each process has its own copy of the entire reactor hierarchy, this
    method synchronizes the state of all parameters on all objects.

    .. impl:: Composites can be synchronized across MPI threads.
        :id: I_ARMI_CMP_MPI
        :implements: R_ARMI_CMP_MPI

        Parameters need to be handled properly during parallel code execution. This method
        synchronizes all parameters of the composite object across all processes by cycling
        through all the children of the Composite and ensuring that their parameters are
        properly synchronized. If it fails to synchronize, an error message is displayed which
        alerts the user to which Composite has inconsistent data across the processes.

    Returns
    -------
    int
        number of parameters synchronized over all components
    """
    if context.MPI_SIZE == 1:
        return 0

    startTime = timeit.default_timer()
    # sync parameters...
    genItems = itertools.chain(
        [self],
        self.iterChildrenWithMaterials(deep=True),
    )
    allComps = [c for c in genItems if hasattr(c, "p")]
    sendBuf = [c.p.getSyncData() for c in allComps]
    runLog.debug(f"syncMpiState has {len(allComps)} comps")

    try:
        context.MPI_COMM.barrier()  # sync up
        allGatherTime = -timeit.default_timer()
        allSyncData = context.MPI_COMM.allgather(sendBuf)
        allGatherTime += timeit.default_timer()
    except BaseException:
        # Was a bare `except:`; BaseException keeps identical semantics while
        # satisfying lint. The failure is logged with context, then re-raised.
        msg = ["Failure while trying to allgather."]
        for ci, compData in enumerate(sendBuf):
            if compData is not None:
                msg += [f"sendBuf[{ci}]: {compData}"]
        runLog.error("\n".join(msg))
        raise

    # key is (comp, paramName) value is conflicting nodes
    errors = collections.defaultdict(list)
    syncCount = 0
    compsPerNode = {len(nodeSyncData) for nodeSyncData in allSyncData}
    if len(compsPerNode) != 1:
        raise ValueError(f"The workers have different reactor sizes! comp lengths: {compsPerNode}")

    for ci, comp in enumerate(allComps):
        if not hasattr(comp, "_syncParameters"):
            # materials don't have Parameters to sync
            continue
        data = (nodeSyncData[ci] for nodeSyncData in allSyncData)
        syncCount += comp._syncParameters(data, errors)

    if errors:
        errorData = sorted(
            (str(comp), comp.__class__.__name__, str(comp.parent), paramName, nodes)
            for (comp, paramName), nodes in errors.items()
        )
        message = "Synchronization failed due to overlapping data. Only the first duplicates are listed\n{}".format(
            tabulate.tabulate(
                errorData,
                headers=[
                    "Composite",
                    "Composite Type",
                    "Composite Parent",
                    "ParameterName",
                    "NodeRanks",
                ],
            )
        )
        raise ValueError(message)

    self._markSynchronized()
    runLog.extra(
        f"Synchronized reactor over MPI in {timeit.default_timer() - startTime:.4f} seconds"
        f", {allGatherTime:.4f} seconds in MPI allgather. count:{syncCount}"
    )

    return syncCount
def _syncParameters(self, allSyncData, errors):
    """Ensure no overlap with syncedKeys, use errors to report overlapping data.

    Parameters
    ----------
    allSyncData : iterable
        One sync-data mapping (or None) per MPI node, for this composite.
    errors : dict
        (comp, paramName) -> list of conflicting node ranks; mutated in place.

    Returns
    -------
    int
        Number of unique parameter keys applied to this composite.
    """
    syncedKeys = set()
    for nodeRank, nodeSyncData in enumerate(allSyncData):
        if nodeSyncData is None:
            continue
        for key, val in nodeSyncData.items():
            if key in syncedKeys:
                # Edge Case: a Composite object is flagged as out of sync, and this parameter
                # was also globally modified and readjusted to the original value.
                curVal = self.p[key]
                # ndarray comparisons produce elementwise arrays, so they need .any().
                if isinstance(val, np.ndarray) or isinstance(curVal, np.ndarray):
                    if (val != curVal).any():
                        errors[self, key].append(nodeRank)
                elif curVal != val:
                    errors[self, key].append(nodeRank)
                    runLog.error(f"in {self}, {key} differ ({curVal} != {val})")
                continue
            syncedKeys.add(key)
            self.p[key] = val
    # Parameter state changed; any derived cached values are now stale.
    self.clearCache()
    return len(syncedKeys)
def _markSynchronized(self):
    """
    Mark the composite and child parameters as synchronized across MPI.

    We clear SINCE_LAST_DISTRIBUTE_STATE so that anything after this point will set the
    SINCE_LAST_DISTRIBUTE_STATE flag, indicating it has been modified
    SINCE_LAST_DISTRIBUTE_STATE.
    """
    paramDefs = set()
    items = itertools.chain(
        [self],
        self.iterChildrenWithMaterials(deep=True),
    )
    for child in items:
        # Materials don't have a "p" / Parameter attribute to sync
        if hasattr(child, "p"):
            # below reads as: assigned & everything_but(SINCE_LAST_DISTRIBUTE_STATE)
            child.p.assigned &= ~parameters.SINCE_LAST_DISTRIBUTE_STATE
            paramDefs.add(child.p.paramDefs)
    # Reset the flag once per distinct parameter-definition collection.
    for paramDef in paramDefs:
        paramDef.resetAssignmentFlag(parameters.SINCE_LAST_DISTRIBUTE_STATE)
def retainState(self, paramsToApply=None):
    """
    Restores a state before and after some operation.

    Parameters
    ----------
    paramsToApply : iterable
        Parameters that should be applied to the state after exiting the state retainer. All
        others will be reverted to their values upon entering.

    Notes
    -----
    This should be used in a `with` statement.
    """
    return StateRetainer(self, paramsToApply)
def backUp(self):
    """
    Create and store a backup of the state.

    This needed to be overridden due to linked components which actually have a parameter value
    of another ARMI component.
    """
    # Stack the previous backup so nested backUp/restore pairs work.
    self._backupCache = (self.cached, self._backupCache)
    self.cached = {}  # don't .clear(), using reference above!
    self.p.backUp()
    if self.spatialGrid:
        self.spatialGrid.backUp()

def restoreBackup(self, paramsToApply):
    """
    Restore the parameters from previously created backup.

    Parameters
    ----------
    paramsToApply : list of ParameterDefinitions
        restores the state of all parameters not in `paramsToApply`
    """
    self.p.restoreBackup(paramsToApply)
    # Pop the cache stack pushed by backUp().
    self.cached, self._backupCache = self._backupCache
    if self.spatialGrid:
        self.spatialGrid.restoreBackup()
def getLumpedFissionProductsIfNecessary(self, nuclides=None):
    """Return Lumped Fission Product objects that belong to this object or any of its children."""
    if self.requiresLumpedFissionProducts(nuclides=nuclides):
        lfps = self.getLumpedFissionProductCollection()
        if lfps is None:
            # NOTE(review): returns the result from the *first* child only rather
            # than scanning all children -- confirm this is the intended behavior.
            for c in self:
                return c.getLumpedFissionProductsIfNecessary(nuclides=nuclides)
        else:
            return lfps

    # There are no lumped fission products in the batch so if you use a
    # dictionary no one will know the difference
    return {}
def getLumpedFissionProductCollection(self):
    """
    Get collection of LFP objects. Will work for global or block-level LFP models.

    Returns
    -------
    lfps : object
        lfpName keys, lfp object values; None when neither this object nor any
        child carries a collection.

    See Also
    --------
    armi.physics.neutronics.fissionProductModel.lumpedFissionProduct.LumpedFissionProduct
    """
    # Global model: the base class may already provide a collection.
    lfps = ArmiObject.getLumpedFissionProductCollection(self)
    if lfps is not None:
        return lfps
    # Otherwise return the first collection found among the children.
    for child in self:
        childLfps = child.getLumpedFissionProductCollection()
        if childLfps is not None:
            return childLfps
    return None
def requiresLumpedFissionProducts(self, nuclides=None):
    """True if any of the nuclides in this object are Lumped nuclides."""
    if nuclides is None:
        nuclides = self.getNuclides()

    # Explicit loop retained deliberately (see noqa) rather than any().
    # ruff: noqa: SIM110
    for nucName in nuclides:
        if isinstance(self.nuclideBases.byName[nucName], nuclideBases.LumpNuclideBase):
            return True

    return False
def getIntegratedMgFlux(self, adjoint=False, gamma=False):
    """
    Return the multigroup neutron tracklength in [n-cm/s].

    The first entry is the first energy group (fastest neutrons). Each additional group is the
    next energy group, as set in the ISOTXS library.

    Parameters
    ----------
    adjoint : bool, optional
        Return adjoint flux instead of real
    gamma : bool, optional
        Whether to return the neutron flux or the gamma flux.

    Returns
    -------
    integratedFlux : np.ndarray
        multigroup neutron tracklength in [n-cm/s]
    """
    # Start from a length-1 zero array; broadcasting grows it to the first
    # child's group structure on the initial addition.
    total = np.zeros(1)
    for child in self:
        contribution = child.getIntegratedMgFlux(adjoint=adjoint, gamma=gamma)
        if contribution is None:
            continue
        total = total + contribution
    return total
def _getReactionRates(self, nucName, nDensity=None):
    """
    Wrapper around logic to get reaction rates for a certain nuclide, to handle any errors.

    Parameters
    ----------
    nucName : str
        nuclide name -- e.g. 'U235'
    nDensity : float
        number density

    Returns
    -------
    rxnRates : dict
        dictionary of reaction rates (rxn/s) for nG, nF, n2n, nA and nP

    Notes
    -----
    If you set nDensity to 1/CM2_PER_BARN this makes 1 group cross section generation easier.

    This method is not designed to work on ``Assembly``, ``Core``, or anything higher on the
    hierarchy than ``Block``.
    """
    # Imported here rather than at module top -- presumably to avoid an import
    # cycle with the reactor modules; confirm before hoisting.
    from armi.reactor.blocks import Block
    from armi.reactor.reactors import Core

    if nDensity is None:
        nDensity = self.getNumberDensity(nucName)

    try:
        return self._getReactionRateDict(
            nucName,
            self.getAncestor(lambda c: isinstance(c, Core)).lib,
            self.getAncestor(lambda x: isinstance(x, Block)).getMicroSuffix(),
            self.getIntegratedMgFlux(),
            nDensity,
        )
    except AttributeError:
        # getAncestor() returned None (no Core/Block ancestor), so attribute
        # access on it failed: this object is not attached to a core.
        runLog.warning(
            f"Object {self} does not belong to a core and so has no reaction rates.",
            single=True,
        )
        return {"nG": 0, "nF": 0, "n2n": 0, "nA": 0, "nP": 0}
    except KeyError:
        # The nuclide's label is not present in the cross-section library.
        runLog.warning(
            f"Attempting to get a reaction rate on an isotope not in the lib {nucName}.",
            single=True,
        )
        return {"nG": 0, "nF": 0, "n2n": 0, "nA": 0, "nP": 0}
def _getReactionRateDict(self, nucName, lib, xsSuffix, mgFlux, nDens):
    """
    Helper to get the reaction rates of a certain nuclide on one ArmiObject.

    Parameters
    ----------
    nucName : str
        nuclide name -- e.g. 'U235', 'PU239', etc. Not to be confused with the nuclide _label_, see
        the nucDirectory module for a description of the difference.
    lib : isotxs
        cross section library
    xsSuffix : str
        cross section suffix, consisting of the type followed by the burnup group, e.g. 'AB' for the
        second burnup group of type A
    mgFlux : np.ndarray
        integrated mgFlux (n-cm/s)
    nDens : float
        number density (atom/bn-cm)

    Returns
    -------
    rxnRates - dict
        dictionary of reaction rates (rxn/s) for nG, nF, n2n, nA and nP

    Notes
    -----
    Assume there is no n3n cross section in ISOTXS
    """
    nucLabel = self.nuclideBases.byName[nucName].label
    key = f"{nucLabel}{xsSuffix}"
    libNuc = lib[key]
    # n3n is assumed absent from the library (see Notes), so report it as zero.
    rxnRates = {"n3n": 0}
    for rxName, mgXSs in [
        ("nG", libNuc.micros.nGamma),
        ("nF", libNuc.micros.fission),
        ("n2n", libNuc.micros.n2n),
        ("nA", libNuc.micros.nalph),
        ("nP", libNuc.micros.np),
    ]:
        # rate = N * sum_g(sigma_g * phi_g); mgFlux is already volume-integrated.
        rxnRates[rxName] = nDens * sum(mgXSs * mgFlux)

    return rxnRates
def getReactionRates(self, nucName, nDensity=None):
    """
    Get the reaction rates of a certain nuclide on this ArmiObject.

    Parameters
    ----------
    nucName : str
        nuclide name -- e.g. 'U235'
    nDensity : float
        number Density

    Returns
    -------
    rxnRates : dict
        reaction rates (1/s) for nG, nF, n2n, nA and nP

    Notes
    -----
    This is volume integrated NOT (1/cm3-s).

    If you set nDensity to 1 this makes 1-group cross section generation easier.
    """
    # Local import -- presumably avoids an import cycle; confirm before hoisting.
    from armi.reactor.components import Component

    # find child objects
    objects = self.getChildren(deep=True, predicate=lambda x: isinstance(x, Component))
    if not len(objects):
        # No Component descendants: compute directly on this object instead.
        objects = [self]

    # The reaction rates for this object is the sum of its children
    rxnRates = {"nG": 0, "nF": 0, "n2n": 0, "nA": 0, "nP": 0, "n3n": 0}
    for armiObject in objects:
        for rxName, val in armiObject._getReactionRates(nucName, nDensity).items():
            rxnRates[rxName] += val

    return rxnRates
def printContents(self, includeNuclides=True):
    """Display information about all the comprising children in this object.

    Parameters
    ----------
    includeNuclides : bool, optional
        Forwarded to each child's ``printContents``.
    """
    runLog.important(self)
    for c in self:
        c.printContents(includeNuclides=includeNuclides)
def _genChildByLocationLookupTable(self):
    """Update the childByLocation lookup table."""
    runLog.extra("Generating location-to-child lookup table.")
    # Later children at the same locator win, matching the previous loop's behavior.
    self.childrenByLocator = {child.spatialLocator: child for child in self}
def getBoundingCircleOuterDiameter(self, Tc=None, cold=False):
    """
    Get the sum of the children's bounding-circle outer diameters.

    Used to roughly approximate relative size vs. other objects.
    """
    return sum(child.getBoundingCircleOuterDiameter(Tc, cold) for child in self)
def getPuMoles(self):
    """Returns total number of moles of Pu isotopes."""
    # All Pu (Z=94) isotope names known to the nuclide directory.
    nucNames = [nuc.name for nuc in self.nuclideBases.elements.byZ[94].nuclides]
    puN = np.sum(self.getNuclideNumberDensities(nucNames))
    # Convert atom density to moles using this object's volume.
    return puN / units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM * self.getVolume()
class StateRetainer:
    """
    Temporarily cache composite state, then restore it.

    Use this as a context manager to snapshot parameter state, do some work, extract
    whatever information is needed, and then roll everything back to the original state.

    * A state retainer is faster than restoring state from a database as it reduces the number of IO
      reads; however, it does use more memory.
    * This can be used on any object within the composite pattern via with
      ``[rabc].retainState([list], [of], [parameters], [to], [retain]):``.
      Use on an object up in the hierarchy applies to all objects below as well.
    * This is intended to work across MPI, so that if you were to broadcast the reactor the state
      would be correct; however the exact implication on ``parameters`` may be unclear.
    """

    def __init__(self, composite: Composite, paramsToApply=None):
        """
        Create an instance of a StateRetainer.

        Parameters
        ----------
        composite: Composite
            composite object to retain state (recursively)
        paramsToApply: iterable of parameters.Parameter
            Iterable of parameters.Parameter whose updated values survive ``__exit__``.
            Every other parameter is reverted to (i.e. retained at) its original value.
        """
        self.composite = composite
        self.paramsToApply = set() if paramsToApply is None else set(paramsToApply)

    def __enter__(self):
        """Back up state across the tree and return self."""
        self._applyAcrossTree(lambda node: node.backUp())
        return self

    def __exit__(self, *args):
        """Restore the backed-up state, keeping any values for ``paramsToApply``."""
        self._applyAcrossTree(lambda node: node.restoreBackup(self.paramsToApply))

    def _applyAcrossTree(self, func):
        """Apply ``func`` (a backup or restore lambda) to every node in the tree and to
        every parameter definition those nodes carry.
        """
        touchedParamDefs = set()
        everything = itertools.chain(
            (self.composite,),
            self.composite.iterChildrenWithMaterials(deep=True),
        )
        for node in everything:
            # materials don't have Parameters
            if hasattr(node, "p"):
                touchedParamDefs.update(node.p.paramDefs)
            func(node)
        for paramDef in touchedParamDefs:
            func(paramDef)
def gatherMaterialsByVolume(objects: List[ArmiObject], typeSpec: TypeSpec = None, exact=False):
    """
    Compute the total volume of each material in a set of objects and give samples.

    Parameters
    ----------
    objects : list of ArmiObject
        Objects to look within. This argument allows clients to search through some subset of the
        tree (e.g. when you're looking for all CLADDING components within FUEL blocks)
    typeSpec : TypeSpec
        Flags for the components to look at
    exact : bool
        Whether or not the TypeSpec is exact

    Returns
    -------
    volumes : dict
        Total component volume keyed by material name.
    samples : dict
        The first material instance encountered for each material name.

    Notes
    -----
    This helper method is outside the main ArmiObject tree for the special clients that need to
    filter both by container type (e.g. Block type) with one set of flags, and Components with
    another set of flags.

    .. warning:: This is a **composition** related helper method that will likely be filed into
        classes/modules that deal specifically with the composition of things in the data model.
        Thus clients that use it from here should expect to need updates soon.
    """
    volumes = {}
    samples = {}
    for containerObj in objects:
        for component in containerObj.iterComponents(typeSpec, exact):
            materialName = component.material.getName()
            volumes[materialName] = volumes.get(materialName, 0.0) + component.getVolume()
            # Only the first component seen for each material name becomes the sample.
            samples.setdefault(materialName, component.material)
    return volumes, samples
def getDominantMaterial(objects: List[ArmiObject], typeSpec: TypeSpec = None, exact=False):
    """
    Return the first sample of the most dominant material (by volume) in a set of objects.

    Parameters
    ----------
    objects : list of ArmiObject
        Objects to search for components and their materials.
    typeSpec : TypeSpec
        Flags for the components to consider.
    exact : bool
        Whether or not the TypeSpec is exact.

    Returns
    -------
    Material or None
        One sample of the material with the largest total volume, or None when no
        components match. Note that if this material has properties like Zr-frac,
        enrichment, etc. then this is just one sample from the batch, not an average.

    Warning
    -------
    This is a **composition** related helper method that will likely be filed into classes/modules
    that deal specifically with the composition of things in the data model. Thus clients that use
    it from here should expect to need updates soon.
    """
    volumes, samples = gatherMaterialsByVolume(objects, typeSpec, exact)
    if not volumes:
        return None
    # Find the material name with the max volume directly (O(n)) instead of sorting the
    # whole mapping and popping the tail. Ties are broken arbitrarily, as before the
    # tie-breaker was just an artifact of sort stability.
    maxMatName = max(volumes, key=volumes.get)
    return samples[maxMatName]
================================================
FILE: armi/reactor/converters/__init__.py
================================================
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Contains code that can convert reactor models from one geometry to another.
Conversions between geometries are often needed in advance of a certain type of physics
calculation that cannot be done on the full 3-D detailed geometry. For example, sometimes
an analyst wants to convert a reactor from 3-D to R-Z in advance of a very fast running
neutronics solution.
Manually converting from one geometry to another while properly conserving mass or some
other parameter is tedious and error-prone, so it's well-suited for automation with
ARMI.
This subpackage contains code that does a certain subset of conversions along those lines.
.. warning::
Geometry conversions are relatively design-specific, so the converters in this
subpackage are relatively limited in scope as to what they can convert, largely
targeting hexagonal pin-type assemblies. If your geometry is different from this, this
code is best considered as examples and starting points, as you will likely need to
write your own converters in your own plugin. Of course, if your converter is
sufficiently generic, we welcome it here.
In other words, some of these converters may at some point migrate to a more
design-specific plugin.
See Also
--------
armi.cases.inputModifiers
Modify input files and re-write them.
"""
================================================
FILE: armi/reactor/converters/axialExpansionChanger/__init__.py
================================================
# Copyright 2024 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Enable component-wise axial expansion for assemblies and/or a reactor."""
# ruff: noqa: F401
from armi.reactor.converters.axialExpansionChanger.assemblyAxialLinkage import (
AssemblyAxialLinkage,
)
from armi.reactor.converters.axialExpansionChanger.axialExpansionChanger import (
AxialExpansionChanger,
makeAssemsAbleToSnapToUniformMesh,
)
from armi.reactor.converters.axialExpansionChanger.expansionData import (
ExpansionData,
getSolidComponents,
iterSolidComponents,
)
================================================
FILE: armi/reactor/converters/axialExpansionChanger/assemblyAxialLinkage.py
================================================
# Copyright 2024 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import functools
import itertools
import typing
from textwrap import dedent
from armi import runLog
from armi.reactor.blocks import Block
from armi.reactor.components import Component, UnshapedComponent
from armi.reactor.converters.axialExpansionChanger.expansionData import (
iterSolidComponents,
)
from armi.reactor.grids import MultiIndexLocation
if typing.TYPE_CHECKING:
from armi.reactor.assemblies import Assembly
def areAxiallyLinked(componentA: Component, componentB: Component) -> bool:
    """Determine axial component linkage for two components.

    Parameters
    ----------
    componentA : :py:class:`Component `
        component of interest
    componentB : :py:class:`Component `
        component to compare and see if is linked to componentA

    Notes
    -----
    If componentA and componentB are both solids and the same type, geometric overlap can be checked via
    getCircleInnerDiameter and getBoundingCircleOuterDiameter. Four different cases are accounted for.
    If they do not meet these initial criteria, linkage is assumed to be False.

    Case #1: Unshaped Components. There is no way to determine overlap so they're assumed to be not linked.
    Case #2: Blocks with specified grids. If componentA and componentB have identical grid indices (cannot be a partial
    case, ALL of the indices must be contained by one or the other), then overlap can be checked.
    Case #3: If Component position is not specified via a grid, the multiplicity is checked. If consistent, they are
    assumed to be in the same positions and their overlap is checked.
    Case #4: Components are either not both solids, are not the same type, or Cases 1-3 are not True.

    Returns
    -------
    linked : bool
        status is componentA and componentB are axially linked to one another
    """
    # Case 4: differing types or non-solid materials can never be linked.
    if not isinstance(componentA, type(componentB)):
        return False
    if not (componentA.containsSolidMaterial() and componentB.containsSolidMaterial()):
        return False

    if isinstance(componentA, UnshapedComponent):
        # Case 1: no geometric info is available, so assume unlinked rather than crash.
        runLog.warning(
            f"Components {componentA} and {componentB} are UnshapedComponents "
            "and do not have 'getCircleInnerDiameter' or getBoundingCircleOuterDiameter methods; "
            "nor is it physical to do so. Instead of crashing and raising an error, "
            "they are going to be assumed to not be linked.",
            single=True,
        )
        return False

    locatorA = componentA.spatialLocator
    locatorB = componentB.spatialLocator
    if isinstance(locatorA, MultiIndexLocation) and isinstance(locatorB, MultiIndexLocation):
        # Case 2: grid-placed components are comparable only when their index sets match exactly.
        indicesA = {tuple(ijk) for ijk in locatorA.indices}
        indicesB = {tuple(ijk) for ijk in locatorB.indices}
        return indicesA == indicesB and _checkOverlap(componentA, componentB)

    if componentA.getDimension("mult") == componentB.getDimension("mult"):
        # Case 3: no grid info; matching multiplicity implies matching positions.
        return _checkOverlap(componentA, componentB)

    return False
def _checkOverlap(componentA: Component, componentB: Component) -> bool:
    """Check two components for geometric overlap by seeing if one can fit within the other.

    Notes
    -----
    Dimensions are retrieved with cold=True to ensure they are evaluated at cold/input
    temperatures. At temperature, solid-solid interfaces in ARMI may produce slight
    overlaps due to thermal expansion. Handling these potential overlaps are out of scope.
    """
    innerDiameters = (
        componentA.getCircleInnerDiameter(cold=True),
        componentB.getCircleInnerDiameter(cold=True),
    )
    outerDiameters = (
        componentA.getBoundingCircleOuterDiameter(cold=True),
        componentB.getBoundingCircleOuterDiameter(cold=True),
    )
    # They overlap when the larger inner bore still fits inside the smaller outer envelope.
    return max(innerDiameters) < min(outerDiameters)
# Make a generic type so we can "template" the axial link class based on what could be above/below a thing.
# These are TypeVar *constraints* (not a bound): Comp resolves to exactly Block or exactly Component.
Comp = typing.TypeVar("Comp", Block, Component)
@dataclasses.dataclass
class AxialLink(typing.Generic[Comp]):
    """Named references to the objects directly above and below a specific object.

    Axial expansion in ARMI works by identifying what objects occupy the same axial space.
    For components in blocks, identify which above and below axially align. This is used
    to determine what, if any, mass needs to be re-assigned across blocks during expansion.
    For blocks, the linking determines what blocks need to move as a result of a specific
    block's axial expansion.

    Attributes
    ----------
    lower : Composite or None
        Object below, if any.
    upper : Composite or None
        Object above, if any.

    Notes
    -----
    This class is "templated" by the type of composite that could be assigned and fetched:
    hint a block-to-block link as ``AxialLink[Block]`` and a component-to-component link
    as ``AxialLink[Component]``.

    See Also
    --------
    * :attr:`AxialAssemblyLinkage.linkedBlocks`
    * :attr:`AxialAssemblyLinkage.linkedComponents`
    """

    # Plain `= None` defaults are equivalent to dataclasses.field(default=None).
    lower: typing.Optional[Comp] = None
    upper: typing.Optional[Comp] = None
class AssemblyAxialLinkage:
"""Determines and stores the block- and component-wise axial linkage for an assembly.
Parameters
----------
assem : armi.reactor.assemblies.Assembly
Assembly to be linked
Attributes
----------
a : :py:class:`Assembly