Repository: binary-husky/unreal-map Branch: master Commit: 6454c72cc242 Files: 402 Total size: 96.2 MB Directory structure: gitextract_cudsbdvr/ ├── .gitignore ├── BuildLinuxRender.py ├── BuildLinuxServer.py ├── BuildWindowsRender.py ├── BuildWindowsServer.py ├── Config/ │ ├── DefaultEditor.ini │ ├── DefaultEditorPerProjectUserSettings.ini │ ├── DefaultEngine.ini │ ├── DefaultGame.ini │ └── DefaultInput.ini ├── Content/ │ ├── Assets/ │ │ ├── AbstractActor/ │ │ │ ├── Agent.uasset │ │ │ ├── DeleteOnResetActor.uasset │ │ │ ├── HmapDefaultPawn.uasset │ │ │ ├── HmapDefaultPawnLegacy.uasset │ │ │ ├── HmapDefaultPawnMinimap.uasset │ │ │ └── ReqResetActor.uasset │ │ ├── CoreSystem/ │ │ │ ├── CoreControl.uasset │ │ │ ├── CoreFunctionLib.uasset │ │ │ ├── HmapGameMode.uasset │ │ │ ├── HmapHud.uasset │ │ │ ├── PerceptionComponentEfficient.uasset │ │ │ └── PerceptionInterface.uasset │ │ ├── DefAction/ │ │ │ └── ParseAction.uasset │ │ ├── DefAgent/ │ │ │ ├── AgentTypeManifest.uasset │ │ │ ├── DefAirAgent/ │ │ │ │ ├── RLA_UAV.uasset │ │ │ │ ├── RLA_UAV_Support.uasset │ │ │ │ ├── RLA_UAV_Support_V2.uasset │ │ │ │ └── RLA_UAV_VIP.uasset │ │ │ ├── DefAttackPost/ │ │ │ │ ├── Attacker.uasset │ │ │ │ ├── Lv3_DefenceTank.uasset │ │ │ │ ├── Lv3_DefenceTower.uasset │ │ │ │ └── PosAttacker.uasset │ │ │ ├── DefCarAgent/ │ │ │ │ ├── RLA_CAR.uasset │ │ │ │ ├── RLA_CAR_RED.uasset │ │ │ │ └── RLA_LaserLauncher.uasset │ │ │ ├── DefCarrierAgent/ │ │ │ │ ├── Carrier.uasset │ │ │ │ ├── DroneMesh.uasset │ │ │ │ └── SmallDrone.uasset │ │ │ ├── DefControl/ │ │ │ │ ├── AC_RL.uasset │ │ │ │ ├── AC_RL_ACCESS.uasset │ │ │ │ ├── AC_RL_ACCESS_LaserWeaponV2.uasset │ │ │ │ └── AC_RL_ACCESS_NoWeapon.uasset │ │ │ ├── DefFormation/ │ │ │ │ ├── Lv2_MomentumTestAgentSensor.uasset │ │ │ │ └── Lv3_MomentumAgentWithHp.uasset │ │ │ ├── DefIntercept/ │ │ │ │ ├── Attacker.uasset │ │ │ │ ├── Defender.uasset │ │ │ │ └── Landmark.uasset │ │ │ ├── DefPlane/ │ │ │ │ ├── PlaneAgent.uasset │ │ │ │ ├── UAV_Dragon.uasset │ │ │ │ 
└── UAV_Rainbow.uasset │ │ │ ├── DefPreyPredator/ │ │ │ │ ├── Predator.uasset │ │ │ │ └── Prey.uasset │ │ │ ├── DefReproduce/ │ │ │ │ └── DummyAgent.uasset │ │ │ ├── DefTarget/ │ │ │ │ ├── AirDefense.uasset │ │ │ │ ├── Commander.uasset │ │ │ │ ├── Missile.uasset │ │ │ │ ├── Target_Agent.uasset │ │ │ │ ├── air_defense.uasset │ │ │ │ └── fleet.uasset │ │ │ ├── DefWaterdrop/ │ │ │ │ ├── Ship.uasset │ │ │ │ └── Waterdrop.uasset │ │ │ ├── Lv1_AgentCanMoveLegacy.uasset │ │ │ ├── Lv1_MomentumAgent.uasset │ │ │ ├── Lv1_MoveAgent.uasset │ │ │ ├── Lv1_TestFlyingAgent.uasset │ │ │ ├── Lv1_TestWalkingAgent.uasset │ │ │ ├── Lv2_AgentCanControlLegacy.uasset │ │ │ ├── Lv2_TestAgentSensor.uasset │ │ │ ├── Lv3_AgentWithHp.uasset │ │ │ ├── SharedBehaviorTree/ │ │ │ │ ├── ATTACK_MICRO_MANAGE.uasset │ │ │ │ ├── BB_RL.uasset │ │ │ │ ├── BT_RL_V2.uasset │ │ │ │ ├── RL_CMD_IDLE_SETTING.uasset │ │ │ │ └── RL_CMD_STATE.uasset │ │ │ └── SharedTask/ │ │ │ ├── ArgKeepDistanceToTargetOrLocationV3.uasset │ │ │ ├── BTService_CallingGuardLocationUpdateV2.uasset │ │ │ ├── CallChangeCmdStateToIdleV2.uasset │ │ │ ├── CallGuardLocationUpdateV2.uasset │ │ │ ├── SetFocusToTargetOrLocation.uasset │ │ │ ├── WeaponFiringNoFinishV2.uasset │ │ │ └── cancel_movementV2.uasset │ │ ├── DefNotAgent/ │ │ │ ├── BP_Explosion.uasset │ │ │ ├── BP_Explosion_TypeB.uasset │ │ │ ├── DefReqResetActor/ │ │ │ │ ├── AgentKillerWall/ │ │ │ │ │ ├── Cylinder_Brush_StaticMesh.uasset │ │ │ │ │ ├── KillZoneInvisible.uasset │ │ │ │ │ ├── KillZoneInvisible_Circle.uasset │ │ │ │ │ └── KillZoneInvisible_Dynamic.uasset │ │ │ │ ├── FlagToCapture.uasset │ │ │ │ ├── KeyObjDecoration.uasset │ │ │ │ ├── RecordWhoGetInside/ │ │ │ │ │ ├── InvisibleTrigger.uasset │ │ │ │ │ └── InvisibleTrigger_CanEndEpisode.uasset │ │ │ │ └── VipTargetToReach/ │ │ │ │ └── KeyObjExample.uasset │ │ │ ├── General_Laser.uasset │ │ │ ├── LaserForIntercept.uasset │ │ │ ├── Weapon_Laser.uasset │ │ │ ├── Weapon_MissileTypeC.uasset │ │ │ └── toy.uasset │ │ ├── GUI/ │ │ 
│ ├── ChildMap.uasset │ │ │ ├── ChildMapWidget.uasset │ │ │ ├── HpBar.uasset │ │ │ ├── Icon_Cmd.uasset │ │ │ ├── Icons.uasset │ │ │ ├── Icons_airport.uasset │ │ │ ├── Icons_cmd.uasset │ │ │ ├── Icons_missile.uasset │ │ │ ├── Icons_target.uasset │ │ │ ├── MouseTraceSelector.uasset │ │ │ ├── PlaneIcon.uasset │ │ │ ├── RingMark.uasset │ │ │ ├── SelectEffect.uasset │ │ │ ├── airport.uasset │ │ │ ├── house.uasset │ │ │ ├── missile.uasset │ │ │ └── 飞机2.uasset │ │ └── Trashbin/ │ │ └── DroneAgentBlueprint/ │ │ ├── AC_Drone.uasset │ │ ├── AC_Drone_Opp.uasset │ │ ├── BB_Drone.uasset │ │ ├── BT_Drone.uasset │ │ ├── BT_Drone_Opp.uasset │ │ ├── Drone.uasset │ │ ├── Drone_Ground.uasset │ │ ├── Drone_Ground_Opp.uasset │ │ ├── EndlessPatrol.uasset │ │ ├── KeepDistanceToTargetOrLocation.uasset │ │ ├── KeepMsFiringToTarget.uasset │ │ ├── LinkingPoint.uasset │ │ ├── LinkingPointGroup.uasset │ │ ├── MaintainDistanceToTargetOrLocation.uasset │ │ └── MissileTypeB.uasset │ └── Maps/ │ ├── AirportSwarmTraining.umap │ ├── AirportSwarmTraining_BuiltData.uasset │ ├── AutoEntry.umap │ ├── Entry.umap │ ├── TestCradle.umap │ ├── TestCradle_BuiltData.uasset │ ├── UhmapAttackPost.umap │ ├── UhmapAttackPost_BuiltData.uasset │ ├── UhmapBreakingBad.umap │ ├── UhmapBreakingBad_BuiltData.uasset │ ├── UhmapCarrier.umap │ ├── UhmapCarrier_BuiltData.uasset │ ├── UhmapFlagCapture.umap │ ├── UhmapFormation.umap │ ├── UhmapIntercept.umap │ ├── UhmapJustAnIsland_BuiltData.uasset │ ├── UhmapLargeScale.umap │ ├── UhmapLargeScaleLegacy.umap │ ├── UhmapLargeScale_BuiltData.uasset │ ├── UhmapPreyPredator.umap │ ├── UhmapReproduce.umap │ ├── UhmapReproduce_BuiltData.uasset │ ├── UhmapTemplate.umap │ ├── UhmapTemplate_BuiltData.uasset │ ├── UhmapWaterdrop.umap │ └── UhmapWaterdrop_BuiltData.uasset ├── Docs/ │ ├── git_coop.md │ └── old_install_method/ │ └── README.md ├── Please_Run_This_First_To_Fetch_Big_Files.py ├── PythonExample/ │ ├── README.md │ └── hmp_minimal_modules/ │ ├── .gitattributes │ ├── .gitignore │ 
├── .gitmodules │ ├── ALGORITHM/ │ │ ├── common/ │ │ │ ├── alg_base.py │ │ │ ├── attention.py │ │ │ ├── conc.py │ │ │ ├── dl_pool.py │ │ │ ├── his.py │ │ │ ├── hyper_net.py │ │ │ ├── logit2act.py │ │ │ ├── mlp.py │ │ │ ├── net_manifest.py │ │ │ ├── norm.py │ │ │ ├── pca.py │ │ │ ├── ppo_sampler.py │ │ │ ├── rl_alg_base.py │ │ │ ├── traj.py │ │ │ ├── traj_gae.py │ │ │ └── traj_manager.py │ │ ├── example_foundation.py │ │ ├── hete_league_onenet_fix/ │ │ │ ├── ccategorical.py │ │ │ ├── cython_func.pyx │ │ │ ├── div_tree.py │ │ │ ├── foundation.py │ │ │ ├── hete_assignment.py │ │ │ ├── hete_net.py │ │ │ ├── net.py │ │ │ ├── ppo.py │ │ │ ├── ppo_sampler.py │ │ │ ├── shell_env.py │ │ │ ├── stage_planner.py │ │ │ └── trajectory.py │ │ ├── my_ai/ │ │ │ └── foundation.py │ │ ├── ppo_ma/ │ │ │ ├── ccategorical.py │ │ │ ├── cython_func.pyx │ │ │ ├── div_tree.py │ │ │ ├── foundation.py │ │ │ ├── net.py │ │ │ ├── ppo.py │ │ │ ├── ppo_sampler.py │ │ │ ├── shell_env.py │ │ │ ├── stage_planner.py │ │ │ └── trajectory.py │ │ ├── random/ │ │ │ ├── actionset.py │ │ │ └── foundation.py │ │ └── script_ai/ │ │ ├── a_attackpost.py │ │ ├── a_escape.py │ │ ├── a_test_reproduce.py │ │ ├── assignment.py │ │ ├── blue_strategy.py │ │ ├── decision.py │ │ ├── dummy.py │ │ ├── dummy_uhmap.py │ │ ├── global_params.py │ │ ├── manual.py │ │ ├── module_evaluation.py │ │ ├── red_strategy.py │ │ ├── stance.py │ │ ├── uhmap_bb.py │ │ ├── uhmap_island.py │ │ ├── uhmap_ls.py │ │ └── uhmap_ls_mp.py │ ├── LICENSE │ ├── MISSION/ │ │ ├── common/ │ │ │ └── base_env.py │ │ ├── env_router.py │ │ ├── readme.md │ │ └── uhmap/ │ │ ├── SubTasks/ │ │ │ ├── SubtaskCommonFn.py │ │ │ ├── UhmapAdversial.py │ │ │ ├── UhmapAdversialConf.py │ │ │ ├── UhmapAttackPost.py │ │ │ ├── UhmapAttackPostConf.py │ │ │ ├── UhmapBreakingBad.py │ │ │ ├── UhmapBreakingBadConf.py │ │ │ ├── UhmapCarrier.py │ │ │ ├── UhmapCarrierConf.py │ │ │ ├── UhmapEscape.py │ │ │ ├── UhmapEscapeConf.py │ │ │ ├── UhmapFormation.py │ │ │ ├── 
UhmapFormationConf.py │ │ │ ├── UhmapHuge.py │ │ │ ├── UhmapHugeConf.py │ │ │ ├── UhmapIntercept.py │ │ │ ├── UhmapInterceptConf.py │ │ │ ├── UhmapJustAnIsland.py │ │ │ ├── UhmapJustAnIslandConf.py │ │ │ ├── UhmapLargeScale.py │ │ │ ├── UhmapLargeScaleConf.py │ │ │ ├── UhmapPreyPredator.py │ │ │ ├── UhmapPreyPredatorConf.py │ │ │ ├── UhmapReproduce.py │ │ │ ├── UhmapReproduceConf.py │ │ │ ├── UhmapWaterdrop.py │ │ │ ├── UhmapWaterdropConf.py │ │ │ └── cython_func.pyx │ │ ├── actionset.py │ │ ├── actionset_v3.py │ │ ├── actset_lookup.py │ │ ├── agent.py │ │ ├── auto_download.py │ │ ├── struct.cpp │ │ ├── uhmap.md │ │ └── uhmap_env_wrapper.py │ ├── README.md │ ├── UTIL/ │ │ ├── __init__.py │ │ ├── auto_gpu.py │ │ ├── batch_exp.py │ │ ├── colorful.py │ │ ├── config_args.py │ │ ├── data_struct.py │ │ ├── exp_helper.py │ │ ├── fetch_multiserver.py │ │ ├── file_lock.py │ │ ├── gpu_eater.py │ │ ├── gpu_share.py │ │ ├── hidden_print.py │ │ ├── hmp_daemon.py │ │ ├── legacy/ │ │ │ └── gpu_share_unfin.py │ │ ├── mem_watcher_ue.py │ │ ├── memleak_finder.py │ │ ├── mprofile.py │ │ ├── mserver_launcher.sh │ │ ├── network.py │ │ ├── pip_find_missing.py │ │ ├── shm_env.py │ │ ├── shm_pool.pyx │ │ ├── sync_exp.py │ │ ├── tensor_ops.py │ │ ├── tensor_ops_c.pyx │ │ └── win_pool.py │ ├── VISUALIZE/ │ │ ├── README.md │ │ ├── __init__.py │ │ ├── color.html │ │ ├── mcom.py │ │ ├── mcom_def.py │ │ ├── mcom_rec.py │ │ ├── mcom_replay.py │ │ ├── mcom_rt.py │ │ ├── mcom_test.py │ │ ├── mcom_v2d.py │ │ ├── read_group_replay.ipynb │ │ ├── seaborn_defaults.py │ │ └── threejs_replay.py │ ├── ZDOCS/ │ │ ├── Dockerfile │ │ ├── DockerfilePython311 │ │ ├── bashrc_suffix │ │ ├── examples/ │ │ │ └── uhmap/ │ │ │ ├── AirAttack.jsonc │ │ │ ├── AirShow.jsonc │ │ │ ├── hlt+50vs50.jsonc │ │ │ ├── ppoma+50vs50.jsonc │ │ │ ├── ppoma+intercept.jsonc │ │ │ ├── ppoma+predatorprey.jsonc │ │ │ ├── ppoma+uhmap10vs10hete.jsonc │ │ │ ├── ppoma_waterdrop.jsonc │ │ │ ├── qmix+uhmap10vs10hete.jsonc │ │ │ ├── 
qmix+uhmap20vs20.jsonc │ │ │ ├── qmix+uhmap50vs50+debug.jsonc │ │ │ ├── qmix+uhmap50vs50.jsonc │ │ │ ├── qplex+uhmap10vs10hete.jsonc │ │ │ ├── qtran+uhmap10vs10hete.jsonc │ │ │ └── random_waterdrop.jsonc │ │ ├── pip_requirement.md │ │ ├── sc2checkversion │ │ ├── setup_docker.md │ │ ├── setup_no_docker.md │ │ ├── setup_ubuntu.md │ │ ├── setup_ue_docker.md │ │ ├── ssh_pubkey.sh │ │ ├── test_examples.py │ │ ├── use_pymarl2.md │ │ └── use_unreal_hmap.md │ ├── ZHECKPOINT/ │ │ └── uhmap_hete10vs10/ │ │ ├── experiment_test.jsonc │ │ ├── render_result.jsonc │ │ ├── render_result_editor.jsonc │ │ └── render_result_editor2.jsonc │ ├── agent_with_sensor.jsonc │ ├── attack_post.jsonc │ ├── carrier.jsonc │ ├── config.py │ ├── cradle.ipynb │ ├── cradle.py │ ├── escape.jsonc │ ├── formation.jsonc │ ├── main.py │ ├── multi_server.py │ ├── multi_team.py │ ├── multi_team_parallel.py │ ├── reproduce.jsonc │ └── task_runner.py ├── README.md ├── README_CN.md ├── Source/ │ ├── Jsonx/ │ │ ├── Jsonx.Build.cs │ │ ├── Private/ │ │ │ ├── Dom/ │ │ │ │ ├── JsonxObject.cpp │ │ │ │ └── JsonxValue.cpp │ │ │ ├── JsonxModule.cpp │ │ │ └── Tests/ │ │ │ └── JsonxTests.cpp │ │ └── Public/ │ │ ├── Dom/ │ │ │ ├── JsonxObject.h │ │ │ └── JsonxValue.h │ │ ├── Jsonx.h │ │ ├── JsonxGlobals.h │ │ ├── JsonxUtils/ │ │ │ └── JsonxObjectArrayUpdater.h │ │ ├── Policies/ │ │ │ ├── CondensedJsonxPrintPolicy.h │ │ │ ├── JsonxPrintPolicy.h │ │ │ └── PrettyJsonxPrintPolicy.h │ │ └── Serialization/ │ │ ├── JsonxReader.h │ │ ├── JsonxSerializer.h │ │ ├── JsonxSerializerMacros.h │ │ ├── JsonxTypes.h │ │ └── JsonxWriter.h │ ├── JsonxUtilities/ │ │ ├── JsonxUtilities.Build.cs │ │ ├── Private/ │ │ │ ├── JsonxObjectConverter.cpp │ │ │ ├── JsonxObjectWrapper.cpp │ │ │ └── JsonxUtilitiesModule.cpp │ │ └── Public/ │ │ ├── JsonxDomBuilder.h │ │ ├── JsonxObjectConverter.h │ │ ├── JsonxObjectWrapper.h │ │ └── JsonxUtilities.h │ ├── UHMP/ │ │ ├── AgentBaseCpp.cpp │ │ ├── AgentBaseCpp.h │ │ ├── DataStruct.h │ │ ├── 
HMPAIController.cpp │ │ ├── HMPAIController.h │ │ ├── HMPLevelScriptActor.cpp │ │ ├── HMPLevelScriptActor.h │ │ ├── HmpCrowdFollowingComponent.cpp │ │ ├── HmpCrowdFollowingComponent.h │ │ ├── HmpPythonIO.cpp │ │ ├── HmpPythonIO.h │ │ ├── IOCompress/ │ │ │ ├── lz4.c │ │ │ └── lz4.h │ │ ├── UHMP.Build.cs │ │ ├── UHMP.cpp │ │ ├── UHMP.h │ │ ├── UHMPBlueprintFunctionLibrary.cpp │ │ └── UHMPBlueprintFunctionLibrary.h │ ├── UHMP.Target.cs │ ├── UHMPEditor.Target.cs │ └── UHMPServer.Target.cs ├── build.py ├── compress.txt ├── current_version ├── onedrive_util.py ├── test_memory_leak.py └── upload_big_file.py ================================================ FILE CONTENTS ================================================ ================================================ FILE: .gitignore ================================================ Binaries Build DerivedDataCache Intermediate Saved TEMP Temp Py .vscode .vs *.VC.db *.opensdf *.opendb *.sdf *.sln *.suo *.xcodeproj *.xcworkspace Python __pycache__ *-datas # private* private* Content/Model3D Plugins 7-Zip UHMP.uproject EnvDesignTutorial.pptx # f'.\\7-Zip\\7z.exe a -tzip -mx4 ./Build/uhmp-big-file-v{desired_version}.zip ./Content/Model3D ./Plugins ./7-Zip ./UHMP.uproject ./EnvDesignTutorial.pptx' # ./7-Zip/7z.exe a -tzip -mx4 ./Build/tt.zip -ir!Content\Model3D\Flying\Meshes 7-Zip Content/StarterContent ================================================ FILE: BuildLinuxRender.py ================================================ """ After you have installed the toolchain for cross-compilation, you must open the solution for the engine in Rider (or VisualStudio) and build (rebuild is not necessary) the editor for Win64 (yes, that is Win64 and not Linux). This will create the needed folder “Engine/Binaries/Win64/Linux”. Then restart your editor, and packaging LinuxServer will work. 
""" import subprocess, sys, shutil, time, os EnginePath = "F:/UnrealEngine-4.27.2-release/Engine" assert os.path.exists(EnginePath), f"Cannot find Unreal Engine at this path {EnginePath}" Windows_Only = False Build = 'Test' # Development/Test/shipping Platform = 'Linux' # Win64/Linux def print亮绿(*kw,**kargs): print("\033[1;32m",*kw,"\033[0m",**kargs) time_mark = time.strftime("%Y-%m-%d-%H-%M", time.localtime()) try: shutil.rmtree('Build/LinuxNoEditor') except: pass print亮绿(f'********* Begin Build: {Build} On {Platform} ***********') # build server path = os.path.abspath('./').replace(r'\\', '/') process = subprocess.Popen([ f"{EnginePath}/Build/BatchFiles/RunUAT.bat", f"-ScriptsForProject={path}/UHMP.uproject", "BuildCookRun", "-nocompileeditor", "-nop4", f"-project={path}/UHMP.uproject", "-cook", "-stage", "-archive", f"-archivedirectory={path}/Build", "-package ", f"-ue4exe={EnginePath}/Binaries/Win64/UE4Editor-Cmd.exe", "-compressed", "-ddc=DerivedDataBackendGraph", "-pak", "-prereqs", "-nodebuginfo", f"-targetplatform={Platform}", "-build", "-target=UHMP", "-serverconfig=%s"%Build, "-utf8output", "-compile" ]) return_code = process.wait() print亮绿('********* ********************** ***********') print亮绿('********* ********************** ***********') print亮绿('********* ********************** ***********') print亮绿('********* ********************** ***********') if (return_code!=0): print('fail') sys.exit() ================================================ FILE: BuildLinuxServer.py ================================================ """ After you have installed the toolchain for cross-compilation, you must open the solution for the engine in Rider (or VisualStudio) and build (rebuild is not necessary) the editor for Win64 (yes, that is Win64 and not Linux). This will create the needed folder “Engine/Binaries/Win64/Linux”. Then restart your editor, and packaging LinuxServer will work. 
""" import subprocess, sys, shutil, time, os EnginePath = "F:/UnrealEngine-4.27.2-release/Engine" assert os.path.exists(EnginePath), f"Cannot find Unreal Engine at this path {EnginePath}" Windows_Only = False Build = 'Test' # Development/Test/shipping Platform = 'Linux' # Win64/Linux def print亮绿(*kw,**kargs): print("\033[1;32m",*kw,"\033[0m",**kargs) time_mark = time.strftime("%Y-%m-%d-%H-%M", time.localtime()) try: shutil.rmtree('Build/LinuxServer') except: pass print亮绿(f'********* Begin Build: {Build} On {Platform} ***********') # build server path = os.path.abspath('./').replace(r'\\', '/') process = subprocess.Popen([ f"{EnginePath}/Build/BatchFiles/RunUAT.bat", f"-ScriptsForProject={path}/UHMP.uproject", "BuildCookRun", "-nocompileeditor", "-nop4", f"-project={path}/UHMP.uproject", "-cook", "-stage", "-archive", f"-archivedirectory={path}/Build", "-package ", f"-ue4exe={EnginePath}/Binaries/Win64/UE4Editor-Cmd.exe", "-compressed", "-ddc=DerivedDataBackendGraph", "-pak", "-prereqs", "-nodebuginfo", f"-targetplatform={Platform}", "-build", "-target=UHMPServer", "-serverconfig=%s"%Build, "-utf8output", "-compile" ]) return_code = process.wait() print亮绿('********* ********************** ***********') print亮绿('********* ********************** ***********') print亮绿('********* ********************** ***********') print亮绿('********* ********************** ***********') if (return_code!=0): print('fail') sys.exit() ================================================ FILE: BuildWindowsRender.py ================================================ import subprocess, sys, shutil, time, os EnginePath = "F:/UnrealEngine-4.27.2-release/Engine" assert os.path.exists(EnginePath), f"Cannot find Unreal Engine at this path {EnginePath}" Windows_Only = False Build = 'Test' # Development/Test/shipping Platform = 'Win64' # Win64/Linux def print亮绿(*kw,**kargs): print("\033[1;32m",*kw,"\033[0m",**kargs) time_mark = time.strftime("%Y-%m-%d-%H-%M", time.localtime()) try: 
shutil.rmtree('Build/WindowsNoEditor') except: pass print亮绿(f'********* Begin Build: {Build} On {Platform} ***********') # build server path = os.path.abspath('./').replace(r'\\', '/') process = subprocess.Popen([ f"{EnginePath}/Build/BatchFiles/RunUAT.bat", f"-ScriptsForProject={path}/UHMP.uproject", "BuildCookRun", "-nocompileeditor", "-nop4", f"-project={path}/UHMP.uproject", "-cook", "-stage", "-archive", f"-archivedirectory={path}/Build", "-package ", f"-ue4exe={EnginePath}/Binaries/Win64/UE4Editor-Cmd.exe", "-compressed", "-ddc=DerivedDataBackendGraph", "-pak", "-prereqs", "-nodebuginfo", f"-targetplatform={Platform}", "-build", "-target=UHMP", "-serverconfig=%s"%Build, "-utf8output", "-compile" ]) return_code = process.wait() print亮绿('********* ********************** ***********') print亮绿('********* ********************** ***********') print亮绿('********* ********************** ***********') print亮绿('********* ********************** ***********') if (return_code!=0): print('fail') sys.exit() ================================================ FILE: BuildWindowsServer.py ================================================ import subprocess, sys, shutil, time, os EnginePath = "F:/UnrealEngine-4.27.2-release/Engine" assert os.path.exists(EnginePath), f"Cannot find Unreal Engine at this path {EnginePath}" Windows_Only = False Build = 'Test' # Development/Test/shipping Platform = 'Win64' # Win64/Linux def print亮绿(*kw,**kargs): print("\033[1;32m",*kw,"\033[0m",**kargs) time_mark = time.strftime("%Y-%m-%d-%H-%M", time.localtime()) try: shutil.rmtree('Build/WindowsServer') except: pass print亮绿(f'********* Begin Build: {Build} On {Platform} ***********') # build server path = os.path.abspath('./').replace(r'\\', '/') process = subprocess.Popen([ f"{EnginePath}/Build/BatchFiles/RunUAT.bat", f"-ScriptsForProject={path}/UHMP.uproject", "BuildCookRun", "-nocompileeditor", "-nop4", f"-project={path}/UHMP.uproject", "-cook", "-stage", "-archive", 
f"-archivedirectory={path}/Build", "-package ", f"-ue4exe={EnginePath}/Binaries/Win64/UE4Editor-Cmd.exe", "-compressed", "-ddc=DerivedDataBackendGraph", "-pak", "-prereqs", "-nodebuginfo", f"-targetplatform={Platform}", "-build", "-target=UHMPServer", "-serverconfig=%s"%Build, "-utf8output", "-compile" ]) return_code = process.wait() print亮绿('********* ********************** ***********') print亮绿('********* ********************** ***********') print亮绿('********* ********************** ***********') print亮绿('********* ********************** ***********') if (return_code!=0): print('fail') sys.exit() ================================================ FILE: Config/DefaultEditor.ini ================================================ ================================================ FILE: Config/DefaultEditorPerProjectUserSettings.ini ================================================ [/Script/BlueprintGraph.BlueprintEditorSettings] bDrawMidpointArrowsInBlueprints=False bShowGraphInstructionText=True bHideUnrelatedNodes=False bShowShortTooltips=True bSplitContextTargetSettings=True bExposeAllMemberComponentFunctions=True bShowContextualFavorites=False bExposeDeprecatedFunctions=False bCompactCallOnMemberNodes=False bFlattenFavoritesMenus=True bFavorPureCastNodes=False bAutoCastObjectConnections=False bShowViewportOnSimulate=False bShowInheritedVariables=False bAlwaysShowInterfacesInOverrides=True bShowParentClassInOverrides=True bShowEmptySections=True bShowAccessSpecifier=False bSpawnDefaultBlueprintNodes=True bHideConstructionScriptComponentsInDetailsView=True bHostFindInBlueprintsInGlobalTab=True bNavigateToNativeFunctionsFromCallNodes=True Bookmarks=() bIncludeCommentNodesInBookmarksTab=True bShowBookmarksForCurrentDocumentOnlyInTab=False GraphEditorQuickJumps=() SaveOnCompile=SoC_Never bJumpToNodeErrors=False bAllowExplicitImpureNodeDisabling=True bShowActionMenuItemSignatures=False bBlueprintNodeUniqueNames=False bShowDetailedCompileResults=False 
CompileEventDisplayThresholdMs=5 NodeTemplateCacheCapMB=20.000000 ================================================ FILE: Config/DefaultEngine.ini ================================================ [/Script/EngineSettings.GameMapsSettings] GameDefaultMap=/Game/Maps/AutoEntry.AutoEntry EditorStartupMap=/Game/Maps/UhmapLargeScale.UhmapLargeScale ServerDefaultMap=/Game/Maps/AutoEntry.AutoEntry [/Script/HardwareTargeting.HardwareTargetingSettings] TargetedHardwareClass=Desktop AppliedTargetedHardwareClass=Desktop DefaultGraphicsPerformance=Maximum AppliedDefaultGraphicsPerformance=Maximum [/Script/Engine.Engine] +ActiveGameNameRedirects=(OldGameName="TP_BlankBP",NewGameName="/Script/UHMP") +ActiveGameNameRedirects=(OldGameName="/Script/TP_BlankBP",NewGameName="/Script/UHMP") +ActiveGameNameRedirects=(OldGameName="/Script/MyProject2", NewGameName="/Script/UHMP") CustomTimeStepClassName=None bUseFixedFrameRate=True FixedFrameRate=64.000000 [/Script/Engine.RendererSettings] r.DefaultFeature.AutoExposure=False r.DefaultFeature.AutoExposure.Method=0 r.DefaultFeature.MotionBlur=False [/Script/Engine.CollisionProfile] -Profiles=(Name="NoCollision",CollisionEnabled=NoCollision,ObjectTypeName="WorldStatic",CustomResponses=((Channel="Visibility",Response=ECR_Ignore),(Channel="Camera",Response=ECR_Ignore)),HelpMessage="No collision",bCanModify=False) -Profiles=(Name="BlockAll",CollisionEnabled=QueryAndPhysics,ObjectTypeName="WorldStatic",CustomResponses=,HelpMessage="WorldStatic object that blocks all actors by default. All new custom channels will use its own default response. 
",bCanModify=False) -Profiles=(Name="OverlapAll",CollisionEnabled=QueryOnly,ObjectTypeName="WorldStatic",CustomResponses=((Channel="WorldStatic",Response=ECR_Overlap),(Channel="Pawn",Response=ECR_Overlap),(Channel="Visibility",Response=ECR_Overlap),(Channel="WorldDynamic",Response=ECR_Overlap),(Channel="Camera",Response=ECR_Overlap),(Channel="PhysicsBody",Response=ECR_Overlap),(Channel="Vehicle",Response=ECR_Overlap),(Channel="Destructible",Response=ECR_Overlap)),HelpMessage="WorldStatic object that overlaps all actors by default. All new custom channels will use its own default response. ",bCanModify=False) -Profiles=(Name="BlockAllDynamic",CollisionEnabled=QueryAndPhysics,ObjectTypeName="WorldDynamic",CustomResponses=,HelpMessage="WorldDynamic object that blocks all actors by default. All new custom channels will use its own default response. ",bCanModify=False) -Profiles=(Name="OverlapAllDynamic",CollisionEnabled=QueryOnly,ObjectTypeName="WorldDynamic",CustomResponses=((Channel="WorldStatic",Response=ECR_Overlap),(Channel="Pawn",Response=ECR_Overlap),(Channel="Visibility",Response=ECR_Overlap),(Channel="WorldDynamic",Response=ECR_Overlap),(Channel="Camera",Response=ECR_Overlap),(Channel="PhysicsBody",Response=ECR_Overlap),(Channel="Vehicle",Response=ECR_Overlap),(Channel="Destructible",Response=ECR_Overlap)),HelpMessage="WorldDynamic object that overlaps all actors by default. All new custom channels will use its own default response. ",bCanModify=False) -Profiles=(Name="IgnoreOnlyPawn",CollisionEnabled=QueryOnly,ObjectTypeName="WorldDynamic",CustomResponses=((Channel="Pawn",Response=ECR_Ignore),(Channel="Vehicle",Response=ECR_Ignore)),HelpMessage="WorldDynamic object that ignores Pawn and Vehicle. 
All other channels will be set to default.",bCanModify=False) -Profiles=(Name="OverlapOnlyPawn",CollisionEnabled=QueryOnly,ObjectTypeName="WorldDynamic",CustomResponses=((Channel="Pawn",Response=ECR_Overlap),(Channel="Vehicle",Response=ECR_Overlap),(Channel="Camera",Response=ECR_Ignore)),HelpMessage="WorldDynamic object that overlaps Pawn, Camera, and Vehicle. All other channels will be set to default. ",bCanModify=False) -Profiles=(Name="Pawn",CollisionEnabled=QueryAndPhysics,ObjectTypeName="Pawn",CustomResponses=((Channel="Visibility",Response=ECR_Ignore)),HelpMessage="Pawn object. Can be used for capsule of any playerable character or AI. ",bCanModify=False) -Profiles=(Name="Spectator",CollisionEnabled=QueryOnly,ObjectTypeName="Pawn",CustomResponses=((Channel="WorldStatic",Response=ECR_Block),(Channel="Pawn",Response=ECR_Ignore),(Channel="Visibility",Response=ECR_Ignore),(Channel="WorldDynamic",Response=ECR_Ignore),(Channel="Camera",Response=ECR_Ignore),(Channel="PhysicsBody",Response=ECR_Ignore),(Channel="Vehicle",Response=ECR_Ignore),(Channel="Destructible",Response=ECR_Ignore)),HelpMessage="Pawn object that ignores all other actors except WorldStatic.",bCanModify=False) -Profiles=(Name="CharacterMesh",CollisionEnabled=QueryOnly,ObjectTypeName="Pawn",CustomResponses=((Channel="Pawn",Response=ECR_Ignore),(Channel="Vehicle",Response=ECR_Ignore),(Channel="Visibility",Response=ECR_Ignore)),HelpMessage="Pawn object that is used for Character Mesh. 
All other channels will be set to default.",bCanModify=False) -Profiles=(Name="PhysicsActor",CollisionEnabled=QueryAndPhysics,ObjectTypeName="PhysicsBody",CustomResponses=,HelpMessage="Simulating actors",bCanModify=False) -Profiles=(Name="Destructible",CollisionEnabled=QueryAndPhysics,ObjectTypeName="Destructible",CustomResponses=,HelpMessage="Destructible actors",bCanModify=False) -Profiles=(Name="InvisibleWall",CollisionEnabled=QueryAndPhysics,ObjectTypeName="WorldStatic",CustomResponses=((Channel="Visibility",Response=ECR_Ignore)),HelpMessage="WorldStatic object that is invisible.",bCanModify=False) -Profiles=(Name="InvisibleWallDynamic",CollisionEnabled=QueryAndPhysics,ObjectTypeName="WorldDynamic",CustomResponses=((Channel="Visibility",Response=ECR_Ignore)),HelpMessage="WorldDynamic object that is invisible.",bCanModify=False) -Profiles=(Name="Trigger",CollisionEnabled=QueryOnly,ObjectTypeName="WorldDynamic",CustomResponses=((Channel="WorldStatic",Response=ECR_Overlap),(Channel="Pawn",Response=ECR_Overlap),(Channel="Visibility",Response=ECR_Ignore),(Channel="WorldDynamic",Response=ECR_Overlap),(Channel="Camera",Response=ECR_Overlap),(Channel="PhysicsBody",Response=ECR_Overlap),(Channel="Vehicle",Response=ECR_Overlap),(Channel="Destructible",Response=ECR_Overlap)),HelpMessage="WorldDynamic object that is used for trigger. All other channels will be set to default.",bCanModify=False) -Profiles=(Name="Ragdoll",CollisionEnabled=QueryAndPhysics,ObjectTypeName="PhysicsBody",CustomResponses=((Channel="Pawn",Response=ECR_Ignore),(Channel="Visibility",Response=ECR_Ignore)),HelpMessage="Simulating Skeletal Mesh Component. All other channels will be set to default.",bCanModify=False) -Profiles=(Name="Vehicle",CollisionEnabled=QueryAndPhysics,ObjectTypeName="Vehicle",CustomResponses=,HelpMessage="Vehicle object that blocks Vehicle, WorldStatic, and WorldDynamic. 
All other channels will be set to default.",bCanModify=False) -Profiles=(Name="UI",CollisionEnabled=QueryOnly,ObjectTypeName="WorldDynamic",CustomResponses=((Channel="WorldStatic",Response=ECR_Overlap),(Channel="Pawn",Response=ECR_Overlap),(Channel="Visibility",Response=ECR_Block),(Channel="WorldDynamic",Response=ECR_Overlap),(Channel="Camera",Response=ECR_Overlap),(Channel="PhysicsBody",Response=ECR_Overlap),(Channel="Vehicle",Response=ECR_Overlap),(Channel="Destructible",Response=ECR_Overlap)),HelpMessage="WorldStatic object that overlaps all actors by default. All new custom channels will use its own default response. ",bCanModify=False) +Profiles=(Name="NoCollision",CollisionEnabled=NoCollision,bCanModify=False,ObjectTypeName="WorldStatic",CustomResponses=((Channel="Visibility",Response=ECR_Ignore),(Channel="Camera",Response=ECR_Ignore)),HelpMessage="No collision") +Profiles=(Name="BlockAll",CollisionEnabled=QueryAndPhysics,bCanModify=False,ObjectTypeName="WorldStatic",CustomResponses=,HelpMessage="WorldStatic object that blocks all actors by default. All new custom channels will use its own default response. ") +Profiles=(Name="OverlapAll",CollisionEnabled=QueryOnly,bCanModify=False,ObjectTypeName="WorldStatic",CustomResponses=((Channel="WorldStatic",Response=ECR_Overlap),(Channel="Pawn",Response=ECR_Overlap),(Channel="Visibility",Response=ECR_Overlap),(Channel="WorldDynamic",Response=ECR_Overlap),(Channel="Camera",Response=ECR_Overlap),(Channel="PhysicsBody",Response=ECR_Overlap),(Channel="Vehicle",Response=ECR_Overlap),(Channel="Destructible",Response=ECR_Overlap)),HelpMessage="WorldStatic object that overlaps all actors by default. All new custom channels will use its own default response. ") +Profiles=(Name="BlockAllDynamic",CollisionEnabled=QueryAndPhysics,bCanModify=False,ObjectTypeName="WorldDynamic",CustomResponses=,HelpMessage="WorldDynamic object that blocks all actors by default. All new custom channels will use its own default response. 
") +Profiles=(Name="OverlapAllDynamic",CollisionEnabled=QueryOnly,bCanModify=False,ObjectTypeName="WorldDynamic",CustomResponses=((Channel="WorldStatic",Response=ECR_Overlap),(Channel="Pawn",Response=ECR_Overlap),(Channel="Visibility",Response=ECR_Overlap),(Channel="WorldDynamic",Response=ECR_Overlap),(Channel="Camera",Response=ECR_Overlap),(Channel="PhysicsBody",Response=ECR_Overlap),(Channel="Vehicle",Response=ECR_Overlap),(Channel="Destructible",Response=ECR_Overlap)),HelpMessage="WorldDynamic object that overlaps all actors by default. All new custom channels will use its own default response. ") +Profiles=(Name="IgnoreOnlyPawn",CollisionEnabled=QueryOnly,bCanModify=False,ObjectTypeName="WorldDynamic",CustomResponses=((Channel="Pawn",Response=ECR_Ignore),(Channel="Vehicle",Response=ECR_Ignore)),HelpMessage="WorldDynamic object that ignores Pawn and Vehicle. All other channels will be set to default.") +Profiles=(Name="OverlapOnlyPawn",CollisionEnabled=QueryOnly,bCanModify=False,ObjectTypeName="WorldDynamic",CustomResponses=((Channel="Pawn",Response=ECR_Overlap),(Channel="Vehicle",Response=ECR_Overlap),(Channel="Camera",Response=ECR_Ignore)),HelpMessage="WorldDynamic object that overlaps Pawn, Camera, and Vehicle. All other channels will be set to default. ") +Profiles=(Name="Pawn",CollisionEnabled=QueryAndPhysics,bCanModify=False,ObjectTypeName="Pawn",CustomResponses=((Channel="Visibility",Response=ECR_Ignore)),HelpMessage="Pawn object. Can be used for capsule of any playerable character or AI. 
") +Profiles=(Name="Spectator",CollisionEnabled=QueryOnly,bCanModify=False,ObjectTypeName="Pawn",CustomResponses=((Channel="WorldStatic"),(Channel="Pawn",Response=ECR_Ignore),(Channel="Visibility",Response=ECR_Ignore),(Channel="WorldDynamic",Response=ECR_Ignore),(Channel="Camera",Response=ECR_Ignore),(Channel="PhysicsBody",Response=ECR_Ignore),(Channel="Vehicle",Response=ECR_Ignore),(Channel="Destructible",Response=ECR_Ignore)),HelpMessage="Pawn object that ignores all other actors except WorldStatic.") +Profiles=(Name="CharacterMesh",CollisionEnabled=QueryOnly,bCanModify=False,ObjectTypeName="Pawn",CustomResponses=((Channel="Pawn",Response=ECR_Ignore),(Channel="Vehicle",Response=ECR_Ignore),(Channel="Visibility",Response=ECR_Ignore)),HelpMessage="Pawn object that is used for Character Mesh. All other channels will be set to default.") +Profiles=(Name="PhysicsActor",CollisionEnabled=QueryAndPhysics,bCanModify=False,ObjectTypeName="PhysicsBody",CustomResponses=,HelpMessage="Simulating actors") +Profiles=(Name="Destructible",CollisionEnabled=QueryAndPhysics,bCanModify=False,ObjectTypeName="Destructible",CustomResponses=,HelpMessage="Destructible actors") +Profiles=(Name="InvisibleWall",CollisionEnabled=QueryAndPhysics,bCanModify=False,ObjectTypeName="WorldStatic",CustomResponses=((Channel="Visibility",Response=ECR_Ignore)),HelpMessage="WorldStatic object that is invisible.") +Profiles=(Name="InvisibleWallDynamic",CollisionEnabled=QueryAndPhysics,bCanModify=False,ObjectTypeName="WorldDynamic",CustomResponses=((Channel="Visibility",Response=ECR_Ignore)),HelpMessage="WorldDynamic object that is invisible.") 
+Profiles=(Name="Trigger",CollisionEnabled=QueryOnly,bCanModify=False,ObjectTypeName="WorldDynamic",CustomResponses=((Channel="WorldStatic",Response=ECR_Overlap),(Channel="Pawn",Response=ECR_Overlap),(Channel="Visibility",Response=ECR_Ignore),(Channel="WorldDynamic",Response=ECR_Overlap),(Channel="Camera",Response=ECR_Overlap),(Channel="PhysicsBody",Response=ECR_Overlap),(Channel="Vehicle",Response=ECR_Overlap),(Channel="Destructible",Response=ECR_Overlap)),HelpMessage="WorldDynamic object that is used for trigger. All other channels will be set to default.") +Profiles=(Name="Ragdoll",CollisionEnabled=QueryAndPhysics,bCanModify=False,ObjectTypeName="PhysicsBody",CustomResponses=((Channel="Pawn",Response=ECR_Ignore),(Channel="Visibility",Response=ECR_Ignore)),HelpMessage="Simulating Skeletal Mesh Component. All other channels will be set to default.") +Profiles=(Name="Vehicle",CollisionEnabled=QueryAndPhysics,bCanModify=False,ObjectTypeName="Vehicle",CustomResponses=,HelpMessage="Vehicle object that blocks Vehicle, WorldStatic, and WorldDynamic. All other channels will be set to default.") +Profiles=(Name="UI",CollisionEnabled=QueryOnly,bCanModify=False,ObjectTypeName="WorldDynamic",CustomResponses=((Channel="WorldStatic",Response=ECR_Overlap),(Channel="Pawn",Response=ECR_Overlap),(Channel="Visibility"),(Channel="WorldDynamic",Response=ECR_Overlap),(Channel="Camera",Response=ECR_Overlap),(Channel="PhysicsBody",Response=ECR_Overlap),(Channel="Vehicle",Response=ECR_Overlap),(Channel="Destructible",Response=ECR_Overlap)),HelpMessage="WorldStatic object that overlaps all actors by default. All new custom channels will use its own default response. 
") +DefaultChannelResponses=(Channel=ECC_GameTraceChannel1,DefaultResponse=ECR_Block,bTraceType=False,bStaticObject=False,Name="Missile") +DefaultChannelResponses=(Channel=ECC_GameTraceChannel2,DefaultResponse=ECR_Block,bTraceType=False,bStaticObject=False,Name="Agent") +DefaultChannelResponses=(Channel=ECC_GameTraceChannel3,DefaultResponse=ECR_Block,bTraceType=False,bStaticObject=False,Name="Env") -ProfileRedirects=(OldName="BlockingVolume",NewName="InvisibleWall") -ProfileRedirects=(OldName="InterpActor",NewName="IgnoreOnlyPawn") -ProfileRedirects=(OldName="StaticMeshComponent",NewName="BlockAllDynamic") -ProfileRedirects=(OldName="SkeletalMeshActor",NewName="PhysicsActor") -ProfileRedirects=(OldName="InvisibleActor",NewName="InvisibleWallDynamic") +ProfileRedirects=(OldName="BlockingVolume",NewName="InvisibleWall") +ProfileRedirects=(OldName="InterpActor",NewName="IgnoreOnlyPawn") +ProfileRedirects=(OldName="StaticMeshComponent",NewName="BlockAllDynamic") +ProfileRedirects=(OldName="SkeletalMeshActor",NewName="PhysicsActor") +ProfileRedirects=(OldName="InvisibleActor",NewName="InvisibleWallDynamic") -CollisionChannelRedirects=(OldName="Static",NewName="WorldStatic") -CollisionChannelRedirects=(OldName="Dynamic",NewName="WorldDynamic") -CollisionChannelRedirects=(OldName="VehicleMovement",NewName="Vehicle") -CollisionChannelRedirects=(OldName="PawnMovement",NewName="Pawn") +CollisionChannelRedirects=(OldName="Static",NewName="WorldStatic") +CollisionChannelRedirects=(OldName="Dynamic",NewName="WorldDynamic") +CollisionChannelRedirects=(OldName="VehicleMovement",NewName="Vehicle") +CollisionChannelRedirects=(OldName="PawnMovement",NewName="Pawn") [/Script/NavigationSystem.RecastNavMesh] CellSize=5.000000 CellHeight=10.000000 AgentRadius=106.453094 AgentHeight=74.311516 TileSizeUU=1000.000000 AgentMaxStepHeight=100.000000 AgentMaxSlope=44.000000 RuntimeGeneration=Static bDrawPolyEdges=False [URL] GameName=UHMP [/Script/NavigationSystem.NavigationSystemV1] 
DefaultAgentName=None CrowdManagerClass=/Script/AIModule.CrowdManager bAutoCreateNavigationData=True bSpawnNavDataInNavBoundsLevel=False bAllowClientSideNavigation=False bShouldDiscardSubLevelNavData=True bTickWhilePaused=False bInitialBuildingLocked=False bSkipAgentHeightCheckWhenPickingNavData=False bGenerateNavigationOnlyAroundNavigationInvokers=False ActiveTilesUpdateInterval=1.000000 DataGatheringMode=Instant DirtyAreaWarningSizeThreshold=-1.000000 +SupportedAgents=(Name="AirportAgents",Color=(B=0,G=255,R=140,A=164),DefaultQueryExtent=(X=500.000000,Y=500.000000,Z=500.000000),NavDataClass=/Script/NavigationSystem.RecastNavMesh,AgentRadius=130.000000,AgentHeight=100.000000,AgentStepHeight=50.000000,NavWalkingSearchHeightScale=0.500000,PreferredNavData=/Script/NavigationSystem.RecastNavMesh,bCanCrouch=False,bCanJump=False,bCanWalk=True,bCanSwim=False,bCanFly=True) +SupportedAgents=(Name="RLA",Color=(B=0,G=255,R=140,A=164),DefaultQueryExtent=(X=500.000000,Y=500.000000,Z=500.000000),NavDataClass=/Script/NavigationSystem.RecastNavMesh,AgentRadius=130.000000,AgentHeight=100.000000,AgentStepHeight=-1.000000,NavWalkingSearchHeightScale=0.500000,PreferredNavData=None,bCanCrouch=False,bCanJump=False,bCanWalk=True,bCanSwim=False,bCanFly=True) SupportedAgentsMask=(bSupportsAgent0=True,bSupportsAgent1=True,bSupportsAgent2=True,bSupportsAgent3=True,bSupportsAgent4=True,bSupportsAgent5=True,bSupportsAgent6=True,bSupportsAgent7=True,bSupportsAgent8=True,bSupportsAgent9=True,bSupportsAgent10=True,bSupportsAgent11=True,bSupportsAgent12=True,bSupportsAgent13=True,bSupportsAgent14=True,bSupportsAgent15=True) DirtyAreasUpdateFreq=60.000000 [/Script/Engine.PhysicsSettings] MaxPhysicsDeltaTime=0.500000 InitialAverageFrameRate=0.041667 [/Script/Engine.GarbageCollectionSettings] gc.TimeBetweenPurgingPendingKillObjects=10.000000 gc.ActorClusteringEnabled=True ================================================ FILE: Config/DefaultGame.ini ================================================ 
[/Script/EngineSettings.GeneralProjectSettings] ProjectID=2433E6A446E19AAC82A379AF7E021098 [StartupActions] bAddPacks=True InsertPack=(PackSource="StarterContent.upack",PackName="StarterContent") [/Script/UnrealEd.ProjectPackagingSettings] Build=IfProjectHasCode BuildConfiguration=PPBC_Development BuildTarget=UHMP StagingDirectory=(Path="F:/2/git_test/uhmap/Build") FullRebuild=False ForDistribution=False IncludeDebugFiles=False BlueprintNativizationMethod=Disabled +NativizeBlueprintAssets=(FilePath="/Game/Assets/BlueprintLib/Agent") +NativizeBlueprintAssets=(FilePath="/Game/Assets/BlueprintLib/BpHmpPythonBridgeV2") +NativizeBlueprintAssets=(FilePath="/Game/Assets/Core/BpHmpPythonBridgeV2") +NativizeBlueprintAssets=(FilePath="/Game/Assets/Core/Main") +NativizeBlueprintAssets=(FilePath="/Game/Assets/Core/CoreControl") +NativizeBlueprintAssets=(FilePath="/Game/Assets/CoreSystem/CoreControl") +NativizeBlueprintAssets=(FilePath="/Game/Assets/CoreAgent/Agent") +NativizeBlueprintAssets=(FilePath="/Game/Assets/CoreActors/Agent") +NativizeBlueprintAssets=(FilePath="/Game/Assets/AbstractActors/Agent") +NativizeBlueprintAssets=(FilePath="/Game/Assets/AbstractActor/Agent") bIncludeNativizedAssetsInProjectGeneration=False bExcludeMonolithicEngineHeadersInNativizedCode=False UsePakFile=True bUseIoStore=False bMakeBinaryConfig=False bGenerateChunks=False bGenerateNoChunks=False bChunkHardReferencesOnly=False bForceOneChunkPerFile=False MaxChunkSize=0 bBuildHttpChunkInstallData=False HttpChunkInstallDataDirectory=(Path="") bCompressed=True PakFileCompressionFormats=Oodle bForceUseProjectCompressionFormatIgnoreHardwareOverride=False PakFileAdditionalCompressionOptions=-compressionblocksize=256KB PakFileCompressionMethod=Kraken PakFileCompressionLevel_DebugDevelopment=3 PakFileCompressionLevel_TestShipping=5 PakFileCompressionLevel_Distribution=7 HttpChunkInstallDataVersion= IncludePrerequisites=True IncludeAppLocalPrerequisites=False bShareMaterialShaderCode=True 
bDeterministicShaderCodeOrder=False bSharedMaterialNativeLibraries=True ApplocalPrerequisitesDirectory=(Path="") IncludeCrashReporter=False InternationalizationPreset=English -CulturesToStage=en +CulturesToStage=en LocalizationTargetCatchAllChunkId=0 bCookAll=False bCookMapsOnly=False bSkipEditorContent=False bSkipMovies=False -IniKeyBlacklist=KeyStorePassword -IniKeyBlacklist=KeyPassword -IniKeyBlacklist=rsa.privateexp -IniKeyBlacklist=rsa.modulus -IniKeyBlacklist=rsa.publicexp -IniKeyBlacklist=aes.key -IniKeyBlacklist=SigningPublicExponent -IniKeyBlacklist=SigningModulus -IniKeyBlacklist=SigningPrivateExponent -IniKeyBlacklist=EncryptionKey -IniKeyBlacklist=IniKeyBlacklist -IniKeyBlacklist=IniSectionBlacklist +IniKeyBlacklist=KeyStorePassword +IniKeyBlacklist=KeyPassword +IniKeyBlacklist=rsa.privateexp +IniKeyBlacklist=rsa.modulus +IniKeyBlacklist=rsa.publicexp +IniKeyBlacklist=aes.key +IniKeyBlacklist=SigningPublicExponent +IniKeyBlacklist=SigningModulus +IniKeyBlacklist=SigningPrivateExponent +IniKeyBlacklist=EncryptionKey +IniKeyBlacklist=IniKeyBlacklist +IniKeyBlacklist=IniSectionBlacklist +MapsToCook=(FilePath="/Game/Maps/AutoEntry") +MapsToCook=(FilePath="/Game/Maps/UhmapBreakingBad") +MapsToCook=(FilePath="/Game/Maps/UhmapLargeScale") +MapsToCook=(FilePath="/Game/Maps/UhmapIntercept") +MapsToCook=(FilePath="/Game/Maps/UhmapPreyPredator") +MapsToCook=(FilePath="/Game/Maps/UhmapWaterdrop") +MapsToCook=(FilePath="/Game/Maps/UhmapAttackPost") +MapsToCook=(FilePath="/Game/Maps/UhmapCarrier") +MapsToCook=(FilePath="/Game/Maps/UhmapReproduce") +MapsToCook=(FilePath="/Game/Maps/UhmapFormation") +MapsToCook=(FilePath="/Game/Maps/UhmapFlagCapture") ================================================ FILE: Config/DefaultInput.ini ================================================ [/Script/Engine.InputSettings] -AxisConfig=(AxisKeyName="Gamepad_LeftX",AxisProperties=(DeadZone=0.25,Exponent=1.f,Sensitivity=1.f)) 
-AxisConfig=(AxisKeyName="Gamepad_LeftY",AxisProperties=(DeadZone=0.25,Exponent=1.f,Sensitivity=1.f)) -AxisConfig=(AxisKeyName="Gamepad_RightX",AxisProperties=(DeadZone=0.25,Exponent=1.f,Sensitivity=1.f)) -AxisConfig=(AxisKeyName="Gamepad_RightY",AxisProperties=(DeadZone=0.25,Exponent=1.f,Sensitivity=1.f)) -AxisConfig=(AxisKeyName="MouseX",AxisProperties=(DeadZone=0.f,Exponent=1.f,Sensitivity=0.07f)) -AxisConfig=(AxisKeyName="MouseY",AxisProperties=(DeadZone=0.f,Exponent=1.f,Sensitivity=0.07f)) -AxisConfig=(AxisKeyName="Mouse2D",AxisProperties=(DeadZone=0.f,Exponent=1.f,Sensitivity=0.07f)) +AxisConfig=(AxisKeyName="Gamepad_LeftX",AxisProperties=(DeadZone=0.250000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="Gamepad_LeftY",AxisProperties=(DeadZone=0.250000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="Gamepad_RightX",AxisProperties=(DeadZone=0.250000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="Gamepad_RightY",AxisProperties=(DeadZone=0.250000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="MouseX",AxisProperties=(DeadZone=0.000000,Sensitivity=0.070000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="MouseY",AxisProperties=(DeadZone=0.000000,Sensitivity=0.070000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="Mouse2D",AxisProperties=(DeadZone=0.000000,Sensitivity=0.070000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="MouseWheelAxis",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="Gamepad_LeftTriggerAxis",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="Gamepad_RightTriggerAxis",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) 
+AxisConfig=(AxisKeyName="Gamepad_Special_Left_X",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="Gamepad_Special_Left_Y",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="Daydream_Left_Trackpad_X",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="Daydream_Left_Trackpad_Y",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="Daydream_Right_Trackpad_X",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="Daydream_Right_Trackpad_Y",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="Vive_Left_Trigger_Axis",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="Vive_Left_Trackpad_X",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="Vive_Left_Trackpad_Y",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="Vive_Right_Trigger_Axis",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="Vive_Right_Trackpad_X",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="Vive_Right_Trackpad_Y",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="MixedReality_Left_Trigger_Axis",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="MixedReality_Left_Thumbstick_X",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) 
+AxisConfig=(AxisKeyName="MixedReality_Left_Thumbstick_Y",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="MixedReality_Left_Trackpad_X",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="MixedReality_Left_Trackpad_Y",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="MixedReality_Right_Trigger_Axis",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="MixedReality_Right_Thumbstick_X",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="MixedReality_Right_Thumbstick_Y",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="MixedReality_Right_Trackpad_X",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="MixedReality_Right_Trackpad_Y",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="OculusTouch_Left_Grip_Axis",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="OculusTouch_Left_Trigger_Axis",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="OculusTouch_Left_Thumbstick_X",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="OculusTouch_Left_Thumbstick_Y",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="OculusTouch_Right_Grip_Axis",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) 
+AxisConfig=(AxisKeyName="OculusTouch_Right_Trigger_Axis",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="OculusTouch_Right_Thumbstick_X",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="OculusTouch_Right_Thumbstick_Y",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="ValveIndex_Left_Grip_Axis",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="ValveIndex_Left_Grip_Force",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="ValveIndex_Left_Trigger_Axis",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="ValveIndex_Left_Thumbstick_X",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="ValveIndex_Left_Thumbstick_Y",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="ValveIndex_Left_Trackpad_X",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="ValveIndex_Left_Trackpad_Y",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="ValveIndex_Left_Trackpad_Force",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="ValveIndex_Left_Trackpad_Touch",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="ValveIndex_Right_Grip_Axis",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) 
+AxisConfig=(AxisKeyName="ValveIndex_Right_Grip_Force",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="ValveIndex_Right_Trigger_Axis",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="ValveIndex_Right_Thumbstick_X",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="ValveIndex_Right_Thumbstick_Y",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="ValveIndex_Right_Trackpad_X",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="ValveIndex_Right_Trackpad_Y",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) +AxisConfig=(AxisKeyName="ValveIndex_Right_Trackpad_Force",AxisProperties=(DeadZone=0.000000,Sensitivity=1.000000,Exponent=1.000000,bInvert=False)) bAltEnterTogglesFullscreen=True bF11TogglesFullscreen=True bUseMouseForTouch=False bEnableMouseSmoothing=True bEnableFOVScaling=True bCaptureMouseOnLaunch=True bAlwaysShowTouchInterface=False bShowConsoleOnFourFingerTap=True bEnableGestureRecognizer=False bUseAutocorrect=False DefaultViewportMouseCaptureMode=CapturePermanently_IncludingInitialMouseDown DefaultViewportMouseLockMode=LockOnCapture FOVScale=0.011110 DoubleClickTime=0.200000 +ActionMappings=(ActionName="ResumControl",bShift=False,bCtrl=False,bAlt=False,bCmd=False,Key=Tab) +ActionMappings=(ActionName="OnMouseSelect",bShift=False,bCtrl=False,bAlt=False,bCmd=False,Key=LeftMouseButton) +ActionMappings=(ActionName="OnRightMouseSelect",bShift=False,bCtrl=False,bAlt=False,bCmd=False,Key=RightMouseButton) +ActionMappings=(ActionName="OnMidMousePressed",bShift=False,bCtrl=False,bAlt=False,bCmd=False,Key=MiddleMouseButton) 
+ActionMappings=(ActionName="On=Pressed",bShift=False,bCtrl=False,bAlt=False,bCmd=False,Key=Equals) DefaultPlayerInputClass=/Script/Engine.PlayerInput DefaultInputComponentClass=/Script/Engine.InputComponent DefaultTouchInterface=/Engine/MobileResources/HUD/DefaultVirtualJoysticks.DefaultVirtualJoysticks -ConsoleKeys=Tilde +ConsoleKeys=Tilde ================================================ FILE: Content/Maps/AirportSwarmTraining.umap ================================================ [File too large to display: 26.3 MB] ================================================ FILE: Content/Maps/TestCradle_BuiltData.uasset ================================================ [File too large to display: 21.0 MB] ================================================ FILE: Content/Maps/UhmapBreakingBad.umap ================================================ [File too large to display: 25.9 MB] ================================================ FILE: Content/Maps/UhmapLargeScale_BuiltData.uasset ================================================ [File too large to display: 21.0 MB] ================================================ FILE: Docs/git_coop.md ================================================ # 如何将修改同步到远程 ### 0. 首先关闭虚幻引擎! ### 1. 打开vscode,切换到git页面
### 2. 填写message,然后点击commit ### 3. 再点击sync changes,然后出现下面界面(也有可能直接顺利完成,忽略以下步骤即可)
### 4. 如果没有“Merge Changes”,再次点击提交即可,但一般都会有若干项“Merge Changes”,即和其他人发生冲突的文件,需要进行处理 ### 5. 如有“Merge Changes”,打开终端 #### 5-1 (确实对该文件做出过有意义的修改)如果想保存本地,无视远程文件,强行把本地文件汇入远程: 终端命令: ``` git checkout --ours /path/to/file git add /path/to/file ``` 在此例子中, ``` git checkout --ours Content\Assets\DefAction\ParseAction.uasset git add Content\Assets\DefAction\ParseAction.uasset ``` #### 5-2 (无意修改此文件,或该文件属于其他人的管辖范围)如果想覆盖本地文件,采纳远程文件: 终端命令: ``` git checkout --theirs /path/to/file git add /path/to/file ``` 在此例子中, ``` git checkout --theirs Content\Assets\DefAction\ParseAction.uasset git add Content\Assets\DefAction\ParseAction.uasset ``` ### 6. 再次点击Commit,完成流程 ================================================ FILE: Docs/old_install_method/README.md ================================================ # UHMAP Developed with Unreal Engine 4 # How to install | 如何安装自定义版本的虚幻引擎 另见视频: https://ageasga-my.sharepoint.com/:v:/g/personal/fuqingxu_yiteam_tech/EawfqsV2jF5Nsv3KF7X1-woBH-VTvELL6FSRX4cIgUboLg?e=Vmp67E ## 1. 下载 Visual Studio Community https://visualstudio.microsoft.com/zh-hans/ ## 2. 安装 Visual Studio Community
需要至少安装C++的桌面开发和C++的游戏开发两部分,安装版本见截图;另需安装.NET框架,版本为4.6.2 ## 3. 下载虚幻引擎源代码 (非官方,修改过源码) https://ageasga-my.sharepoint.com/:u:/g/personal/fuqingxu_yiteam_tech/Ee3lQrUjKNFMjPITm5G-hEgBbeEN6dMOPtKP9ssgONKJcA?e=BavOoJ ## 4. 编译虚幻引擎 1. 解压源代码(到至少150GB空间的磁盘) 1. Open your source folder in Explorer and run **Setup.bat**. This will download binary content for the engine, as well as installing prerequisites and setting up Unreal file associations. On Windows 8, a warning from SmartScreen may appear. Click "More info", then "Run anyway" to continue. 运行**Setup.bat** A clean download of the engine binaries is currently 3-4gb, which may take some time to complete. Subsequent checkouts only require incremental downloads and will be much quicker. 需要一段时间 1. Run **GenerateProjectFiles.bat** to create project files for the engine. It should take less than a minute to complete. 运行 **GenerateProjectFiles.bat** 1. Load the project into Visual Studio by double-clicking on the **UE4.sln** file. Set your solution configuration to **Development Editor** and your solution platform to **Win64**, then right click on the **UE4** target and select **Build**. It may take anywhere between 10 and 40 minutes to finish compiling, depending on your system specs. 打开**UE4.sln**,界面顶部的项目配置为**Development Editor** 和 **Win64**, 右击界面右侧菜单的**UE4**,点击**Build**,需要20分钟~1小时时间编译(配置好后,点选界面上面栏目中的生成Build,点击生成UE4) 1. 右键点击项目工程文件夹中的UHMP.uproject,按照图示选择虚幻引擎版本为4.27.2-release,进行初步生成项目的操作;之后打开UHMP.sln,将界面顶部的项目配置为**Development Editor** 和 **Win64**,点选右侧资源管理器中的UHMP,再点击界面顶部的生成-->生成UHMP;完成后双击UHMP.uproject打开工程项目
6. 成功打开的虚幻编辑器加载界面应当如下图所示
================================================ FILE: Please_Run_This_First_To_Fetch_Big_Files.py ================================================ import os, commentjson, shutil, subprocess, tqdm, shutil import zipfile from modelscope import snapshot_download try: os.makedirs('./TEMP') except: pass version = 'unreal-map-v3.4' model_dir = snapshot_download(f'BinaryHusky/{version}') zip_file_path = f'./TEMP/{version}.zip' def combine_file(model_dir, output_file_path, num_parts): with open(output_file_path, 'wb') as output_file: for i in range(0, num_parts): part_file_path = os.path.join(model_dir, "tensor", f"safetensor_{i+1}.pt") with open(part_file_path, 'rb') as part_file: output_file.write(part_file.read()) extract_to_path = './' combine_file(model_dir, output_file_path=zip_file_path, num_parts=5) # 打开 ZIP 文件 with zipfile.ZipFile(zip_file_path, 'r') as zip_ref: # 解压所有文件到指定目录 zip_ref.extractall(extract_to_path) print(f"files unzipped {extract_to_path}") print("everything is ready!") ================================================ FILE: PythonExample/README.md ================================================ This demo program can connect to developing/compiled U-MAP environment to debug your simulation. 
This program is a copy of another resp: https://github.com/binary-husky/hmp2g ================================================ FILE: PythonExample/hmp_minimal_modules/.gitattributes ================================================ *.js linguist-detectable=false ================================================ FILE: PythonExample/hmp_minimal_modules/.gitignore ================================================ # Build and Release Folders bin-debug/ bin-release/ */__pycache__ [Oo]bj/ [Bb]in/ # Other files and folders .settings/ # Executables *.swf *.air *.ipa *.apk *.so *.pyc *.pyd *.so # gpu lock *.glock *.mp3 *.png # pytorch model *.pt core !MISSION/collective_assult/malib/core TODO __pycache__/ ./build/ ALGORITHM/Starcraft/result/ ALGORITHM/Starcraft/model/ ZipResults/ VISUALIZE/train no aWiseAttn VISUALIZE/train-half-death-reward ZipResults/Starcraft/ result # .vscode/ forattack-train/ full-cargo/ T*/ checkpoint/ PROFILE/ RECYCLE/ TEMP/ test_only_*.py test_only_log test_only_profile.txt test_only_profilex.txt debug_change_self_n_agent.json.profile.txt debug2-2500pt-test_only_profile.txt test_only_logdebug_change_self_n_agent.json.log test_only_profiledebug_change_self_n_agent.json.txt xx_profile_n_agents2.py xx_profile_n_agents3.py xx_profile_n_agents4.py UTIL/keys.py private* fqx*.jsonc my_*.jsonc result.prof ignore bvrAI.log ZHECKPOINT/* THIRDPARTY/pymarl2/test !ZHECKPOINT/test-50+50 ZHECKPOINT/test-50+50/* !ZHECKPOINT/test-50+50/model.pt !ZHECKPOINT/test-50+50/experiment.json !ZHECKPOINT/test-50+50/test-50+50.jsonc !ZHECKPOINT/test-50+50/test50.gif !ZHECKPOINT/test-100+100 ZHECKPOINT/test-100+100/* !ZHECKPOINT/test-100+100/model.pt !ZHECKPOINT/test-100+100/experiment.json !ZHECKPOINT/test-100+100/test-100+100.jsonc !ZHECKPOINT/50RL-55opp ZHECKPOINT/50RL-55opp/* !ZHECKPOINT/50RL-55opp/test-50RL-55opp.jsonc !ZHECKPOINT/50RL-55opp/model.pt ZHECKPOINT/50RL-55opp/experiment.json !ZHECKPOINT/test-cargo50 ZHECKPOINT/test-cargo50/* !ZHECKPOINT/test-cargo50/model.pt 
!ZHECKPOINT/test-cargo50/experiment.json !ZHECKPOINT/test-cargo50/test-cargo50.jsonc !ZHECKPOINT/test-cargo50/cargo50.jpg !ZHECKPOINT/test-cargo50/history_cpt ZHECKPOINT/test-cargo50/history_cpt/* !ZHECKPOINT/test-cargo50/history_cpt/init.pkl !ZHECKPOINT/test-50+50/butterfly.webp !ZHECKPOINT/test-aii515 ZHECKPOINT/test-aii515/* !ZHECKPOINT/test-aii515/model.pt !ZHECKPOINT/test-aii515/experiment.json !ZHECKPOINT/test-aii515/test-aii515.jsonc !ZHECKPOINT/test-aii515/aii.jpg !ZHECKPOINT/test-aii515/history_cpt ZHECKPOINT/test-aii515/history_cpt/* !ZHECKPOINT/test-aii515/history_cpt/init.pkl !ZHECKPOINT/basic-ma-40-demo ZHECKPOINT/basic-ma-40-demo/* !ZHECKPOINT/basic-ma-40-demo/trained_model.pt !ZHECKPOINT/basic-ma-40-demo/train.json !ZHECKPOINT/basic-ma-40-demo/test.json !ZHECKPOINT/adca-demo ZHECKPOINT/adca-demo/* !ZHECKPOINT/adca-demo/model_trained.pt !ZHECKPOINT/adca-demo/train.json !ZHECKPOINT/adca-demo/test.json !ZHECKPOINT/uhmap_hete10vs10 ZHECKPOINT/uhmap_hete10vs10/backup_files ZHECKPOINT/uhmap_hete10vs10/logger !ZHECKPOINT/uhmap_hete10vs10/model_trained.pt ZHECKPOINT/uhmap_hete10vs10/experiment.jsonc cmd_io.txt rec.jpg detail_reward.jpg z_* ALGORITHM/conc_4hist_divtree3 ALGORITHM/conc_4hist_divtree2 example_dca_cs* PersonalityDevelop.pdf 6vs7Pr-continue-train.jsonc 6vs7Pr.jsonc 6vs7PrTry2-Link.jsonc 6vs7PrTry2.jsonc 7vs7Pr.json 7vs7Pr.jsonc batch_experiment_backup.py mcom_buffer_0____starting_session.txt temp.jpg temp.jpg.jpg x.txt debug*.jsonc HLT_eval.py qplex-pad.jsonc raw_exp.jsonc info.json UTIL/mem_watcher.py ALGORITHM/mirror* ================================================ FILE: PythonExample/hmp_minimal_modules/.gitmodules ================================================ [submodule "THIRDPARTY/pymarl2/pymarl2src"] path = THIRDPARTY/pymarl2/pymarl2src url = https://github.com/binary-husky/pymarl-hmap-compat.git branch = master ================================================ FILE: PythonExample/hmp_minimal_modules/ALGORITHM/common/alg_base.py 
================================================
import os, time, torch, traceback
import numpy as np
from config import GlobalConfig
from UTIL.colorful import *

class AlgorithmBase():
    # Base class for HMP algorithm controllers: stores thread/agent counts and
    # the RL act/obs spaces; subclasses implement the env-interaction step.
    def __init__(self, n_agent, n_thread, space, mcv=None, team=None):
        # n_agent: number of agents this controller commands
        # n_thread: number of parallel environment threads
        # space: dict with 'act_space' and 'obs_space' entries
        # mcv: optional logging/visualization bridge
        # team: optional team index this controller is responsible for
        self.n_thread = n_thread
        self.n_agent = n_agent
        self.team = team
        self.act_space = space['act_space']
        self.obs_space = space['obs_space']
        self.ScenarioConfig = GlobalConfig.ScenarioConfig
        self.mcv = mcv
        self.device = GlobalConfig.device

    def interact_with_env(self, team_intel):
        # Subclasses must override: consume team intel, return actions.
        raise NotImplementedError
================================================
FILE: PythonExample/hmp_minimal_modules/ALGORITHM/common/attention.py
================================================
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions.categorical import Categorical
from torch.distributions.multivariate_normal import MultivariateNormal
from UTIL.tensor_ops import my_view

class MultiHeadAttention(nn.Module):
    # taken from https://github.com/wouterkool/attention-tsp/blob/master/graph_encoder.py
    def __init__(
            self,
            n_heads,
            input_dim,
            embed_dim=None,
            val_dim=None,
            key_dim=None
    ):
        # n_heads: number of attention heads
        # input_dim: feature dim of q/k/v inputs
        # embed_dim: output feature dim (required unless val_dim is given)
        # val_dim/key_dim: per-head value/key dims, derived from embed_dim when omitted
        super(MultiHeadAttention, self).__init__()
        if val_dim is None:
            assert embed_dim is not None, "Provide either embed_dim or val_dim"
            val_dim = embed_dim // n_heads
        if key_dim is None:
            key_dim = val_dim
        self.n_heads = n_heads
        self.input_dim = input_dim
        self.embed_dim = embed_dim
        self.val_dim = val_dim
        self.key_dim = key_dim
        self.norm_factor = 1 / math.sqrt(key_dim)  # See Attention is all you need
        self.W_query = nn.Parameter(torch.Tensor(n_heads, input_dim, key_dim))
        self.W_key = nn.Parameter(torch.Tensor(n_heads, input_dim, key_dim))
        self.W_val = nn.Parameter(torch.Tensor(n_heads, input_dim, val_dim))
        if embed_dim is not None:
            self.W_out = nn.Parameter(torch.Tensor(n_heads, key_dim, embed_dim))
        self.init_parameters()

    def init_parameters(self):
        # Uniform(-1/sqrt(d), 1/sqrt(d)) init per parameter tensor.
        for param in self.parameters():
            stdv = 1. / math.sqrt(param.size(-1))
            param.data.uniform_(-stdv, stdv)

    def forward(self, q, k=None, v=None, mask=None, return_attn=False, return_attn_weight=False):
        # Wrapper around forward_: flattens extra leading ("hyper") dims of
        # >3-D inputs down to 3-D, runs attention, then restores the shape.
        if q.dim()<=3:
            out = self.forward_(q, k, v, mask, return_attn, return_attn_weight)
            if return_attn:
                out, attn = out
                assert attn.shape[0]==1
                attn = attn.squeeze(0)
                return out, attn
            return out
        hyper_dim = q.shape[:-2]
        q = my_view(q, [-1, *q.shape[-2:]])
        if k is not None: k = my_view(k, [-1, *k.shape[-2:]])
        if v is not None: v = my_view(v, [-1, *v.shape[-2:]])
        if mask is not None: mask = my_view(mask, [-1, *mask.shape[-2:]])
        out = self.forward_(q, k, v, mask, return_attn, return_attn_weight)
        # NOTE(review): hyper_dim is a torch.Size, never None — these guards
        # always take the True branch; confirm whether None was ever intended.
        if return_attn:
            out, attn = out
            if hyper_dim is not None:
                out = out.view(*hyper_dim, *out.shape[-2:])
                attn = attn.view(*hyper_dim, *attn.shape[-2:]) #??
            return out, attn
        else:
            if hyper_dim is not None:
                out = out.view(*hyper_dim, *q.shape[-2:])
            return out

    def forward_(self, q, k=None, v=None, mask=None, return_attn=False, return_attn_weight=False):
        """
        :param q: queries (batch_size, n_query, input_dim)
        :param k: data (batch_size, n_key/graph_size, input_dim)
        :param mask: mask (batch_size, n_query, graph_size) or viewable as that (i.e. can be 2 dim if n_query == 1)
        Mask should contain 1 if attention is not possible (i.e. mask is negative adjacency)
        :return:
        """
        if k is None:
            k = q  # compute self-attention
        if v is None:
            v = k
        # k should be (batch_size, graph_size, input_dim)
        batch_size, graph_size, input_dim = k.size()
        n_query = q.size(1)
        assert q.size(0) == batch_size
        assert q.size(2) == input_dim
        assert input_dim == self.input_dim, "Wrong embedding dimension of input"
        kflat = k.contiguous().view(-1, input_dim)
        qflat = q.contiguous().view(-1, input_dim)
        vflat = v.contiguous().view(-1, input_dim)
        # last dimension can be different for keys and values
        shp = (self.n_heads, batch_size, graph_size, -1)
        shp_q = (self.n_heads, batch_size, n_query, -1)
        # Calculate queries, (n_heads, n_query, graph_size, key/val_size)
        Q = torch.matmul(qflat, self.W_query).view(shp_q)
        # Calculate keys and values (n_heads, batch_size, graph_size, key/val_size)
        K = torch.matmul(kflat, self.W_key).view(shp)
        V = torch.matmul(vflat, self.W_val).view(shp)
        # Calculate compatibility (n_heads, batch_size, n_query, graph_size)
        compatibility = self.norm_factor * torch.matmul(Q, K.transpose(2, 3))
        if return_attn_weight:
            # Raw (masked) logits requested instead of attended output.
            assert self.n_heads == 1
            if mask is not None:
                mask = mask.view(1, batch_size, n_query, graph_size).expand_as(compatibility)
                compatibility[mask.bool()] = -math.inf
            return compatibility.squeeze(0)
        # Optionally apply mask to prevent attention
        if mask is not None:
            # expand to n_heads
            mask = mask.view(1, batch_size, n_query, graph_size).expand_as(compatibility)
            compatibility[mask.bool()] = -math.inf
        attn = F.softmax(compatibility, dim=-1)
        # If there are nodes with no neighbours then softmax returns nan so we fix them to 0
        if mask is not None:
            attnc = attn.clone()
            attnc[mask.bool()] = 0
            attn = attnc
        # To avoid 0*nan = nan here, the caller must convert any nan in V to 0.
        heads = torch.matmul(attn, V)
        out = torch.mm(
            heads.permute(1, 2, 0, 3).contiguous().view(-1, self.n_heads * self.val_dim),
            self.W_out.view(-1, self.embed_dim)
        ).view(batch_size, n_query, self.embed_dim)
        if return_attn:
            return out, attn
        return out

class
class SimpleAttention(nn.Module):
    """Single-head scaled dot-product attention with learned linear projections."""

    def __init__(self, h_dim):
        super().__init__()
        self.W_query = nn.Parameter(torch.Tensor(h_dim, h_dim))
        self.W_key = nn.Parameter(torch.Tensor(h_dim, h_dim))
        self.W_val = nn.Parameter(torch.Tensor(h_dim, h_dim))
        self.init_parameters()

    def init_parameters(self):
        # uniform init scaled by 1/sqrt(fan)
        for param in self.parameters():
            stdv = 1. / math.sqrt(param.size(-1))
            param.data.uniform_(-stdv, stdv)

    def forward(self, k, q, v, mask=None):
        Q = torch.matmul(q, self.W_query)
        K = torch.matmul(k, self.W_key)
        V = torch.matmul(v, self.W_val)
        norm_factor = 1 / math.sqrt(Q.shape[-1])
        compat = norm_factor * torch.matmul(Q, K.transpose(-1, -2))
        if mask is not None:
            compat[mask.bool()] = -math.inf
        # fully-masked rows softmax to nan -> clamp them to 0.
        # NOTE: to avoid 0 * nan = nan here, callers must convert nan entries in V to 0
        score = torch.nan_to_num(F.softmax(compat, dim=-1), 0)
        return torch.matmul(score, V)


# ================================================
# FILE: PythonExample/hmp_minimal_modules/ALGORITHM/common/conc.py
# ================================================
import math
import torch, time, random
import torch.nn as nn
import torch.nn.functional as F
from UTIL.tensor_ops import my_view, __hash__, __hashn__, pad_at_dim, gather_righthand


class Concentration(nn.Module):
    """Concentration module: attends from the self embedding (vs) over entity
    embeddings (ve), producing (1) a motivation vector from the score-weighted
    mixture and (2) a flat vector built from the top-``n_focus_on`` entities."""

    def __init__(self, n_focus_on, h_dim, skip_connect=False, skip_connect_dim=0, adopt_selfattn=False):
        super().__init__()
        self.n_focus_on = n_focus_on
        self.skip_connect = skip_connect
        self.skip_dim = h_dim + skip_connect_dim
        self.CT_W_query = nn.Parameter(torch.Tensor(h_dim, h_dim))
        self.CT_W_key = nn.Parameter(torch.Tensor(h_dim, h_dim))
        self.CT_W_val = nn.Parameter(torch.Tensor(h_dim, h_dim))
        self.CT_motivate_mlp = nn.Sequential(nn.Linear(h_dim * 2, h_dim), nn.ReLU(inplace=True))
        self.AT_forward_mlp = nn.Sequential(nn.Linear((n_focus_on + 1) * self.skip_dim, h_dim), nn.ReLU(inplace=True))
        self.adopt_selfattn = adopt_selfattn
        if self.adopt_selfattn:
            assert False, ('no longer support')
        self.init_parameters()

    def init_parameters(self):
        # uniform init scaled by 1/sqrt(fan)
        for param in self.parameters():
            stdv = 1. / math.sqrt(param.size(-1))
            param.data.uniform_(-stdv, stdv)

# (Concentration.forward continues on the next line)
# (fragment continued from previous line: tail of Concentration.init_parameters)

# (method of class Concentration, whose __init__ is on the previous line)
def forward(self, vs, ve, ve_dead, skip_connect_ze=None, skip_connect_zs=None):
    mask = ve_dead
    Q = torch.matmul(vs, self.CT_W_query)
    K = torch.matmul(ve, self.CT_W_key)
    norm_factor = 1 / math.sqrt(Q.shape[-1])
    compat = norm_factor * torch.matmul(Q, K.transpose(2, 3))
    assert compat.shape[-2] == 1
    compat = compat.squeeze(-2)
    compat[mask.bool()] = -math.inf
    score = F.softmax(compat, dim=-1)
    # nodes with no neighbours were softmax into nan, fix them to 0
    score = torch.nan_to_num(score, 0)
    # ----------- motivational branch -------------
    Va = torch.matmul(score.unsqueeze(-2), torch.matmul(ve, self.CT_W_val))
    v_M = torch.cat((vs, Va), -1).squeeze(-2)
    v_M_final = self.CT_motivate_mlp(v_M)
    # ----------- forward branch -------------
    score_sort_index = torch.argsort(score, dim=-1, descending=True)
    score_sort_drop_index = score_sort_index[..., :self.n_focus_on]
    if self.skip_connect:
        ve = torch.cat((ve, skip_connect_ze), -1)
        vs = torch.cat((vs, skip_connect_zs), -1)
    ve_C = gather_righthand(src=ve, index=score_sort_drop_index, check=False)
    need_padding = (score_sort_drop_index.shape[-1] != self.n_focus_on)
    if need_padding:
        print('the n_focus param is large than input, advise: pad observation instead of pad here')
        ve_C = pad_at_dim(ve_C, dim=-2, n=self.n_focus_on)
    v_C_stack = torch.cat((vs, ve_C), dim=-2)
    if self.adopt_selfattn:
        v_C_stack = self.AT_Attention(v_C_stack, mask=None)
    v_C_flat = my_view(v_C_stack, [0, 0, -1]); assert v_C_stack.dim() == 4
    v_C_final = self.AT_forward_mlp(v_C_flat)
    return v_C_final, v_M_final


class ConcentrationHete(nn.Module):
    """Heterogeneous-agent variant of Concentration (AT_-prefixed parameters);
    same attention + top-k focusing scheme as Concentration."""

    def __init__(self, n_focus_on, h_dim, skip_connect=False, skip_connect_dim=0, adopt_selfattn=False):
        super().__init__()
        self.n_focus_on = n_focus_on
        self.skip_connect = skip_connect
        self.skip_dim = h_dim + skip_connect_dim
        self.AT_W_query = nn.Parameter(torch.Tensor(h_dim, h_dim))
        self.AT_W_key = nn.Parameter(torch.Tensor(h_dim, h_dim))
        self.AT_W_val = nn.Parameter(torch.Tensor(h_dim, h_dim))
        self.AT_motivate_mlp = nn.Sequential(nn.Linear(h_dim * 2, h_dim), nn.ReLU(inplace=True))
        self.AT_forward_mlp = nn.Sequential(nn.Linear((n_focus_on + 1) * self.skip_dim, h_dim), nn.ReLU(inplace=True))
        self.adopt_selfattn = adopt_selfattn
        if self.adopt_selfattn:
            assert False, ('no longer support')
        self.init_parameters()

    def init_parameters(self):
        # uniform init scaled by 1/sqrt(fan)
        for param in self.parameters():
            stdv = 1. / math.sqrt(param.size(-1))
            param.data.uniform_(-stdv, stdv)

    def forward(self, vs, ve, ve_dead, skip_connect_ze=None, skip_connect_zs=None):
        mask = ve_dead
        Q = torch.matmul(vs, self.AT_W_query)
        K = torch.matmul(ve, self.AT_W_key)
        norm_factor = 1 / math.sqrt(Q.shape[-1])
        compat = norm_factor * torch.matmul(Q, K.transpose(2, 3))
        assert compat.shape[-2] == 1
        compat = compat.squeeze(-2)
        compat[mask.bool()] = -math.inf
        score = F.softmax(compat, dim=-1)
        # nodes with no neighbours were softmax into nan, fix them to 0
        score = torch.nan_to_num(score, 0)
        # ----------- motivational branch -------------
        Va = torch.matmul(score.unsqueeze(-2), torch.matmul(ve, self.AT_W_val))
        v_M = torch.cat((vs, Va), -1).squeeze(-2)
        v_M_final = self.AT_motivate_mlp(v_M)
        # ----------- forward branch -------------
        score_sort_index = torch.argsort(score, dim=-1, descending=True)
        score_sort_drop_index = score_sort_index[..., :self.n_focus_on]
        if self.skip_connect:
            ve = torch.cat((ve, skip_connect_ze), -1)
            vs = torch.cat((vs, skip_connect_zs), -1)
        ve_C = gather_righthand(src=ve, index=score_sort_drop_index, check=False)
        need_padding = (score_sort_drop_index.shape[-1] != self.n_focus_on)
        if need_padding:
            print('the n_focus param is large than input, advise: pad observation instead of pad here')
            ve_C = pad_at_dim(ve_C, dim=-2, n=self.n_focus_on)
        v_C_stack = torch.cat((vs, ve_C), dim=-2)
        if self.adopt_selfattn:
            v_C_stack = self.AT_Attention(v_C_stack, mask=None)
        v_C_flat = my_view(v_C_stack, [0, 0, -1]); assert v_C_stack.dim() == 4
        v_C_final = self.AT_forward_mlp(v_C_flat)
        return v_C_final, v_M_final
# (fragment continued from previous line: "return v_C_final, v_M_final"
#  closes ConcentrationHete.forward)

# ================================================
# FILE: PythonExample/hmp_minimal_modules/ALGORITHM/common/dl_pool.py
# ================================================
"""
Author: Fu Qingxu,CASIA
Description: deep learning sample manager
"""
import torch
import numpy as np


class DeepLearningPool(object):
    """Fixed-size ring-buffer sample pool: each call to ``add_and_sample``
    stores new (x, y) pairs (overwriting the oldest slots once full) and
    immediately returns a random mini-batch."""

    def __init__(self, pool_size, batch_size) -> None:
        super().__init__()
        self.x_batch = None      # allocated lazily on the first add
        self.y_batch = None
        self.size = pool_size
        self.batch_size = batch_size

    def add_and_sample(self, x, y):
        """Insert ``x.shape[0]`` samples, then draw a random batch."""
        n_sample = x.shape[0]
        assert n_sample > 0
        if self.x_batch is None:
            # allocate storage matching the incoming sample layout/dtype
            self.x_batch = np.zeros(shape=(self.size, *x.shape[1:]), dtype=x.dtype)
            self.y_batch = np.zeros(shape=(self.size, *y.shape[1:]), dtype=y.dtype)
            self.current_idx = 0
            self.current_size = 0
        idx = self._get_storage_idx(n_sample)
        self.x_batch[idx] = x
        self.y_batch[idx] = y
        return self._sample()

    def _get_storage_idx(self, inc=None):
        """Return the slot indices for ``inc`` new samples, wrapping around."""
        inc = inc or 1
        if self.current_idx + inc <= self.size:
            idx = np.arange(self.current_idx, self.current_idx + inc)
            self.current_idx += inc
        elif self.current_idx < self.size:
            # wrap: fill the tail, then restart from slot 0
            overflow = inc - (self.size - self.current_idx)
            idx_a = np.arange(self.current_idx, self.size)
            idx_b = np.arange(0, overflow)
            idx = np.concatenate([idx_a, idx_b])
            self.current_idx = overflow
        else:
            idx = np.arange(0, inc)
            self.current_idx = inc
        self.current_size = min(self.size, self.current_size + inc)
        if inc == 1:
            idx = idx[0]
        return idx

    def _sample(self):
        # uniform sampling with replacement over the filled portion
        idx = np.random.randint(0, self.current_size, self.batch_size)
        return self.x_batch[idx], self.y_batch[idx]


if __name__ == '__main__':
    dlp = DeepLearningPool(10, 7)
    res = dlp.add_and_sample(x=np.random.rand(2, 2, 3), y=np.array([1, 2]))
    print(dlp.y_batch, 'res', res[1])
    res = dlp.add_and_sample(x=np.random.rand(4, 2, 3), y=np.array([3, 4, 5, 6]))
    print(dlp.y_batch, 'res', res[1])
    res = dlp.add_and_sample(x=np.random.rand(3, 2, 3), y=np.array([7, 8, 9]))
    print(dlp.y_batch, 'res', res[1])
    res = dlp.add_and_sample(x=np.random.rand(3, 2, 3), y=np.array([10, 11, 12]))
# (continuation of the dl_pool.py "__main__" demo from the previous line)
print(dlp.y_batch, 'res', res[1])
res = dlp.add_and_sample(x=np.random.rand(3, 2, 3), y=np.array([13, 14, 15]))
print(dlp.y_batch, 'res', res[1])
res = dlp.add_and_sample(x=np.random.rand(3, 2, 3), y=np.array([16, 17, 18]))
print(dlp.y_batch, 'res', res[1])
print('end of test')


# ================================================
# FILE: PythonExample/hmp_minimal_modules/ALGORITHM/common/his.py
# ================================================
import matplotlib.pyplot as plt
import numpy as np
import matplotlib
# configure matplotlib so CJK glyphs and the minus sign render correctly
matplotlib.rcParams['font.sans-serif'] = ['SimHei']   # use SimHei to display CJK text
matplotlib.rcParams['axes.unicode_minus'] = False     # render the minus sign properly
# draw 10000 samples from a standard normal distribution
data = np.random.randn(10000)
# histogram demo:
#   data      - the data to plot
#   bins      - number of bars (default 10)
#   facecolor - bar fill colour, edgecolor - bar border colour
#   alpha     - opacity
plt.hist(data, bins=40, facecolor="blue", edgecolor="black", alpha=0.7)
plt.xlabel("区间")
plt.ylabel("频数/频率")
plt.title("频数/频率分布直方图")
plt.show()


# ================================================
# FILE: PythonExample/hmp_minimal_modules/ALGORITHM/common/hyper_net.py
# ================================================
from re import X
import torch
import torch.nn as nn
import torch.nn.functional as F
from UTIL.tensor_ops import my_view


class HyperNet(nn.Module):
    """Two-layer hypernetwork: the weights/biases of the main net are
    generated from ``hyper_x`` by small MLPs (QMIX-style mixing)."""

    def __init__(self, **kwargs):
        super(HyperNet, self).__init__()
        self.x_input_dim = kwargs['x_input_dim']
        self.embed_dim = kwargs['embed_dim']
        self.hyper_input_dim = kwargs['hyper_input_dim']
        # hyper w1 b1
        self.hyper_w1 = nn.Sequential(
            nn.Linear(self.hyper_input_dim, self.embed_dim),
            nn.ReLU(inplace=True),
            nn.Linear(self.embed_dim, self.x_input_dim * self.embed_dim))
        self.hyper_b1 = nn.Sequential(nn.Linear(self.hyper_input_dim, self.embed_dim))
        # hyper w2 b2
        self.hyper_w2 = nn.Sequential(
            nn.Linear(self.hyper_input_dim, self.embed_dim),
            nn.ReLU(inplace=True),
            nn.Linear(self.embed_dim, self.embed_dim * self.embed_dim))
        self.hyper_b2 = nn.Sequential(
            nn.Linear(self.hyper_input_dim, self.embed_dim),
            nn.ReLU(inplace=True),
            nn.Linear(self.embed_dim, 1))

# (HyperNet.forward continues on the next line)
# (fragment continued from previous line: tail of HyperNet.__init__ —
#  self.hyper_b2 = nn.Sequential(Linear, ReLU, Linear(embed_dim, 1)))

# (method of class HyperNet, whose __init__ is on the previous line)
def forward(self, x, hyper_x):
    # x shape (thread/batch, agent, core); hyper_x shape (thread/batch, core)
    assert hyper_x.dim() == 3
    # reshape w1 into (..., x_input_dim, embed_dim)
    w1 = my_view(self.hyper_w1(hyper_x), [0, 0, self.x_input_dim, self.embed_dim])
    b1 = self.hyper_b1(hyper_x).unsqueeze(-2)   # b1 (thread/batch, core=embed_dim)
    # second layer
    w2 = my_view(self.hyper_w2(hyper_x), [0, 0, self.embed_dim, self.embed_dim])
    b2 = self.hyper_b2(hyper_x).unsqueeze(-2)
    # x reshape = (..., 1, x_input_dim) so matmul against generated weights works
    x = x.unsqueeze(-2)
    hidden = F.elu(torch.matmul(x, w1) + b1)    # (..., 1, embed_dim)
    y = torch.matmul(hidden, w2) + b2           # (..., 1, 1)
    return y.squeeze(-2)


class MyHyperNet(nn.Module):
    """Generalized hypernetwork: an arbitrary stack of generated linear
    layers (ReLU between layers, no activation on the last)."""

    def __init__(self, x_in_dim, hyber_in_dim, layer_out_dims, hyber_hid_dim):
        super(MyHyperNet, self).__init__()
        self.x_in_dim = x_in_dim
        self.layer_out_dims = layer_out_dims
        self.hyber_in_dim = hyber_in_dim
        self.hyber_hid_dim = hyber_hid_dim
        self.n_layer = len(self.layer_out_dims)
        # (in, out) pairs of every generated layer
        self.layer_dim_dict = [(x_in_dim, layer_out_dims[0])] + \
            [(d_in, d_out) for d_in, d_out in zip(layer_out_dims[:-1], layer_out_dims[1:])]
        self.weight_each_layer = nn.ModuleList([
            nn.Sequential(nn.Linear(self.hyber_in_dim, self.hyber_hid_dim),
                          nn.ReLU(inplace=True),
                          nn.Linear(self.hyber_hid_dim, d_in * d_out))
            for d_in, d_out in self.layer_dim_dict
        ])
        self.bias_each_layer = nn.ModuleList([
            nn.Sequential(nn.Linear(self.hyber_in_dim, self.hyber_hid_dim),
                          nn.ReLU(inplace=True),
                          nn.Linear(self.hyber_hid_dim, d_out))
            for d_in, d_out in self.layer_dim_dict
        ])

    def forward(self, x, hyper_x):
        # x shape (thread/batch, agent, core); hyper_x shape (thread/batch, core)
        assert hyper_x.dim() == 3
        x = x.unsqueeze(-2)
        for i in range(self.n_layer):
            d_in, d_out = self.layer_dim_dict[i]
            w = my_view(self.weight_each_layer[i](hyper_x), [0, 0, d_in, d_out])
            b = self.bias_each_layer[i](hyper_x).unsqueeze(-2)
            x = torch.matmul(x, w) + b
            is_last_layer = (i == (self.n_layer - 1))
            if not is_last_layer:
                # do NOT use relu at last layer
                x = F.relu(x, inplace=True)
        return x.squeeze(-2)


# ================================================
# FILE: PythonExample/hmp_minimal_modules/ALGORITHM/common/logit2act.py
# ================================================
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions.categorical import Categorical
from UTIL.tensor_ops import my_view, Args2tensor_Return2numpy, Args2tensor
from UTIL.tensor_ops import pt_inf

""" network initialize """


class Logit2Act(nn.Module):
    """Maps per-agent action logits to sampled/greedy actions, the action
    log-probabilities and the distribution entropy."""

    def __init__(self, *args, **kwargs):
        super().__init__()

    def _logit2act_rsn(self, logits_agent_cluster, eval_mode, greedy, eval_actions=None, avail_act=None, eprsn=None):
        if avail_act is not None:
            # forbid unavailable actions by pushing their logits to -inf
            logits_agent_cluster = torch.where(avail_act > 0, logits_agent_cluster, -pt_inf())
        act_dist = self.ccategorical.feed_logits(logits_agent_cluster)
        if not greedy:
            act = self.ccategorical.sample(act_dist, eprsn) if not eval_mode else eval_actions
        else:
            act = torch.argmax(act_dist.probs, axis=2)
        # the policy gradient loss will feedback from here
        actLogProbs = self._get_act_log_probs(act_dist, act)
        # sum up the log prob of all agents
        distEntropy = act_dist.entropy().mean(-1) if eval_mode else None
        return act, actLogProbs, distEntropy, act_dist.probs

    def _logit2act(self, logits_agent_cluster, eval_mode, greedy, eval_actions=None, avail_act=None, **kwargs):
        if avail_act is not None:
            logits_agent_cluster = torch.where(avail_act > 0, logits_agent_cluster, -pt_inf())
        act_dist = Categorical(logits=logits_agent_cluster)
        if not greedy:
            act = act_dist.sample() if not eval_mode else eval_actions
        else:
            act = torch.argmax(act_dist.probs, axis=2)
        # the policy gradient loss will feedback from here
        actLogProbs = self._get_act_log_probs(act_dist, act)
        # sum up the log prob of all agents
        distEntropy = act_dist.entropy().mean(-1) if eval_mode else None
        return act, actLogProbs, distEntropy, act_dist.probs

# (remaining Logit2Act methods continue on the next line)
# (fragment continued from previous line: tail of Logit2Act._logit2act —
#  distEntropy / return statement belong to that method)

# (methods of class Logit2Act, continued)
@staticmethod
def _get_act_log_probs(distribution, action):
    return distribution.log_prob(action.squeeze(-1)).unsqueeze(-1)

@Args2tensor_Return2numpy
def act(self, *args, **kargs):
    return self._act(*args, **kargs)

@Args2tensor
def evaluate_actions(self, *args, **kargs):
    return self._act(*args, **kargs, eval_mode=True)


# ================================================
# FILE: PythonExample/hmp_minimal_modules/ALGORITHM/common/mlp.py
# ================================================
import torch
import torch.nn as nn
import torch.nn.functional as F
from .norm import DynamicNorm


class SimpleMLP(nn.Module):
    """Two-layer ReLU MLP, optionally preceded by a DynamicNorm input layer."""

    def __init__(self, in_dim, out_dim, hidden_dim=128, use_normalization=False):
        super().__init__()
        activation_func = nn.ReLU
        h_dim = hidden_dim
        if use_normalization:
            print('test DynamicNorm')
            self.mlp = nn.Sequential(
                DynamicNorm(in_dim, only_for_last_dim=True, exclude_one_hot=True),
                nn.Linear(in_dim, h_dim),
                activation_func(inplace=True),
                nn.Linear(h_dim, out_dim))
        else:
            self.mlp = nn.Sequential(
                nn.Linear(in_dim, h_dim),
                activation_func(inplace=True),
                nn.Linear(h_dim, out_dim))

    def forward(self, x):
        return self.mlp(x)


class ResLinear(nn.Module):
    """Residual linear block: x -> relu(f2(relu(f1(x))) + x), with an optional
    input projection f0 when the input dim differs from io_dim."""

    def __init__(self, io_dim, h_dim, need_input_tf=False, input_tf_dim=None, inplace_relu=True) -> None:
        super(ResLinear, self).__init__()
        self.need_input_tf = need_input_tf
        if need_input_tf:
            self.f0 = nn.Linear(input_tf_dim, io_dim)
        self.f1 = nn.Linear(io_dim, h_dim)
        self.lkrelu = nn.ReLU(inplace=True) if inplace_relu else nn.ReLU(inplace=False)
        self.f2 = nn.Linear(h_dim, io_dim)

    def forward(self, xo):
        if self.need_input_tf:
            xo = self.f0(xo)
        x = self.lkrelu(self.f1(xo))
        x = self.f2(x) + xo        # residual connection
        x = self.lkrelu(x)
        return x


class LinearFinal(nn.Module):
    """Plain linear layer; left uninitialized here because weights_init
    (net_manifest.py) applies a small-gain orthogonal init to it."""
    __constants__ = ['in_features', 'out_features']
    in_features: int
    out_features: int
    weight: torch.Tensor

    def __init__(self, in_features: int, out_features: int, bias: bool = True) -> None:
        super(LinearFinal, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = nn.Parameter(torch.Tensor(out_features, in_features))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(out_features))
        else:
            self.register_parameter('bias', None)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return F.linear(input, self.weight, self.bias)

    def extra_repr(self) -> str:
        return 'in_features={}, out_features={}, bias={}'.format(
            self.in_features, self.out_features, self.bias is not None
        )


# ================================================
# FILE: PythonExample/hmp_minimal_modules/ALGORITHM/common/net_manifest.py
# ================================================
import torch.nn as nn


def weights_init(m):
    """Per-module initializer dispatched on the module's class name;
    asserts on unknown classes so new modules must be registered here."""
    def init_Linear(m, final_layer=False):
        nn.init.orthogonal_(m.weight.data)
        if final_layer: nn.init.orthogonal_(m.weight.data, gain=0.01)
        if m.bias is not None: nn.init.uniform_(m.bias.data, a=-0.02, b=0.02)

    initial_fn_dict = {
        'Net': None, 'NetCentralCritic': None, 'DataParallel': None,
        'BatchNorm1d': None, 'Concentration': None, 'ConcentrationHete': None,
        'Pnet': None, 'Sequential': None, 'Tanh': None,
        'ModuleList': None, 'ModuleDict': None,
        'MultiHeadAttention': None, 'SimpleMLP': None, 'SimpleAttention': None,
        'SelfAttention_Module': None, 'ReLU': None, 'Softmax': None,
        'DynamicNorm': None, 'DynamicNormFix': None, 'EXTRACT': None,
        'LinearFinal': lambda m: init_Linear(m, final_layer=True),
        'Linear': init_Linear, 'ResLinear': None, 'LeakyReLU': None,
        'HyperNet': None, 'MyHyperNet': None, 'DivTree': None,
    }
    classname = m.__class__.__name__
    assert classname in initial_fn_dict.keys(), ('how to handle the initialization of this class? ', classname)
    init_fn = initial_fn_dict[classname]
    if init_fn is None:
        return
    init_fn(m)
# (fragment continued from previous line: tail of weights_init — the assert
#  message, init_fn lookup and dispatch belong to that function)

# ================================================
# FILE: PythonExample/hmp_minimal_modules/ALGORITHM/common/norm.py
# ================================================
"""
CASIA, fuqingxu
live vector normalization using pytorch,
therefore the parameter of normalization (mean and var)
can be save together with network parameters
light up exclude_one_hot=True to prevent onehot component being normalized
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions.categorical import Categorical
from torch.distributions.multivariate_normal import MultivariateNormal
from UTIL.tensor_ops import my_view
from UTIL.tensor_ops import Args2tensor_Return2numpy


class DynamicNorm(nn.Module):
    # ! warning! this module will mess with multi-gpu setting!!
    """Online (Welford-style) feature normalizer whose mean/var live as
    (non-trainable) nn.Parameters so they are saved with the checkpoint."""

    def __init__(self, input_size, only_for_last_dim, exclude_one_hot=True, exclude_nan=False):
        super().__init__()
        assert only_for_last_dim
        self.exclude_one_hot = exclude_one_hot
        self.mean = nn.Parameter(torch.zeros(input_size, requires_grad=False), requires_grad=False)
        self.var = nn.Parameter(torch.ones(input_size, requires_grad=False), requires_grad=False)
        self.n_sample = nn.Parameter(torch.zeros(1, requires_grad=False, dtype=torch.long), requires_grad=False)
        if self.exclude_one_hot:
            self.one_hot_filter = nn.Parameter(torch.ones(input_size, requires_grad=False, dtype=torch.bool), requires_grad=False)
        self.input_size = input_size
        self.exclude_nan = exclude_nan
        self.patience = 1000

    def forward(self, x, get_mu_var=False):
        assert self.input_size == x.shape[-1], ('self.input_size', self.input_size, 'x.shape[-1]', x.shape[-1])
        _2dx = x.detach().reshape(-1, self.input_size)
        if self.exclude_nan:
            _2dx = _2dx[~torch.isnan(_2dx).any(axis=-1)]
        this_batch_size = _2dx.shape[0]
        if this_batch_size <= 0:
            print('Warning! An empty batch just being normalized')
            x = torch.clip_((x - self.mean) / torch.sqrt_(self.var + 1e-8), -10, 10)
            return x
        if self.training:
            with torch.no_grad():
                this_batch_mean = torch.mean(_2dx, dim=0)
                this_batch_var = torch.var(_2dx, dim=0, unbiased=False)
                if torch.isnan(this_batch_var).any():
                    assert False, ('nan value detected in normalization! but you can turn on exclude_nan')
                assert _2dx.dim() == 2
                # parallel/Welford update of the running mean and variance
                delta = this_batch_mean - self.mean
                tot_count = self.n_sample + this_batch_size
                new_mean = self.mean + delta * this_batch_size / tot_count
                m_a = self.var * (self.n_sample)
                m_b = this_batch_var * (this_batch_size)
                M2 = m_a + m_b + torch.square_(delta) * self.n_sample * this_batch_size / (self.n_sample + this_batch_size)
                new_var = M2 / (self.n_sample + this_batch_size)
                if self.exclude_one_hot:
                    # drop dims from the filter once they take a value outside {-1, 0, 1}
                    self.one_hot_filter.data &= ~(((_2dx != 0) & (_2dx != 1) & (_2dx != -1)).any(dim=0))
                self.mean.data = torch.where(self.one_hot_filter, self.mean, new_mean) if self.exclude_one_hot else new_mean
                new_var_clip = torch.clamp(new_var, min=0.01, max=1000)
                self.var.data = torch.where(self.one_hot_filter, self.var, new_var_clip) if self.exclude_one_hot else new_var_clip
                self.n_sample.data = tot_count
        if get_mu_var:
            return self.mean, self.var
        x = torch.clip_((x - self.mean) / torch.sqrt_(self.var + 1e-8), -10, 10)
        return x

    # @Args2tensor_Return2numpy
    # def get_mean_var(self, x):
    #     return self.forward(x, get_mu_var=True)


class DynamicNormFix(nn.Module):
    # ! warning! this module will mess with multi-gpu setting!!
    """DynamicNorm variant that additionally tracks observed min/max and
    inflates the variance ('var_fix') so that extreme inputs stay within
    about +/- T2 standard deviations after normalization."""

    def __init__(self, input_size, only_for_last_dim, exclude_one_hot=True, exclude_nan=False):
        super().__init__()
        assert only_for_last_dim
        self.exclude_one_hot = exclude_one_hot
        self.mean = nn.Parameter(torch.zeros(input_size, requires_grad=False), requires_grad=False)
        self.var = nn.Parameter(torch.ones(input_size, requires_grad=False), requires_grad=False)
        self.var_fix = nn.Parameter(torch.ones(input_size, requires_grad=False), requires_grad=False)
        self.min = nn.Parameter(torch.ones(input_size, requires_grad=False) + float('inf'), requires_grad=False)
        self.max = nn.Parameter(torch.ones(input_size, requires_grad=False) - float('inf'), requires_grad=False)
        self.n_sample = nn.Parameter(torch.zeros(1, requires_grad=False, dtype=torch.long), requires_grad=False)
        if self.exclude_one_hot:
            self.one_hot_filter = nn.Parameter(torch.ones(input_size, requires_grad=False, dtype=torch.bool), requires_grad=False)
        self.input_size = input_size
        self.exclude_nan = exclude_nan
        self.patience = 1000
        self.var_fix_wait = 1000
        # var fixing, T2 is maximum x abs value after normalization
        self.T1 = 5
        self.T2 = 10
        self.TD = (self.T2**2 - self.T1**2) / self.T2**2
        self.first_run = True
        self.debug = True

    # numpy-compatible wrapper
    @Args2tensor_Return2numpy
    def np_forward(self, x, freeze=False, get_mu_var=False):
        return self.forward(x, freeze, get_mu_var)

    def forward(self, x, freeze=False, get_mu_var=False):
        assert self.input_size == x.shape[-1], ('self.input_size', self.input_size, 'x.shape[-1]', x.shape[-1])
        _2dx = x.detach().reshape(-1, self.input_size)
        if self.exclude_nan:
            _2dx = _2dx[~torch.isnan(_2dx).any(axis=-1)]
        _2dx_view = my_view(_2dx, [-1, 0])
        this_batch_size = _2dx.shape[0]
        if this_batch_size <= 0:
            print('Warning! An empty batch just being normalized')
            x = torch.clip_((x - self.mean) / torch.sqrt_(self.var_fix + 1e-8), -10, 10)
            return x
        if self.training and (not freeze):
            with torch.no_grad():
                this_batch_mean = torch.mean(_2dx, dim=0)
                this_batch_var = torch.var(_2dx, dim=0, unbiased=False)
                if torch.isnan(this_batch_var).any():
                    assert False, ('nan value detected in normalization! but you can turn on exclude_nan')
                assert _2dx.dim() == 2
                # parallel/Welford update of the running mean and variance
                delta = this_batch_mean - self.mean
                tot_count = self.n_sample + this_batch_size
                new_mean = self.mean + delta * this_batch_size / tot_count
                m_a = self.var * (self.n_sample)
                m_b = this_batch_var * (this_batch_size)
                M2 = m_a + m_b + torch.square_(delta) * self.n_sample * this_batch_size / (self.n_sample + this_batch_size)
                new_var = M2 / (self.n_sample + this_batch_size)
                if self.exclude_one_hot:
                    # drop dims from the filter once they take a value outside {-1, 0, 1}
                    self.one_hot_filter.data &= ~(((_2dx != 0) & (_2dx != 1) & (_2dx != -1)).any(dim=0))
                self.mean.data = torch.where(self.one_hot_filter, self.mean, new_mean) if self.exclude_one_hot else new_mean
                # if self.patience > 0: self.check_errors(_2dx, new_var)
                self.var.data = torch.where(self.one_hot_filter, self.var, new_var) if self.exclude_one_hot else new_var
                # begin fix variance
                max_tmp, _ = _2dx_view.max(0)
                min_tmp, _ = _2dx_view.min(0)
                if self.patience > 0:
                    # warm-up phase: track the hard min/max
                    self.patience -= 1
                    self.first_run = False
                    self.max.data = torch.maximum(max_tmp, self.max)
                    self.min.data = torch.minimum(min_tmp, self.min)
                else:
                    # afterwards: let min/max drift with a running-average step
                    # self.max.data = torch.maximum(max_tmp, self.max)
                    # self.min.data = torch.minimum(min_tmp, self.min)
                    self.max.data = self.max + (torch.maximum(max_tmp, self.max) - self.max) * this_batch_size / tot_count
                    self.min.data = self.min + (torch.minimum(min_tmp, self.min) - self.min) * this_batch_size / tot_count
                # (a series of "self.mcv.rec(...)" debug recordings is commented out in the source here)
                dm = torch.maximum((self.max - self.mean), (self.mean - self.min))
                # std_th_1 = dm / self.T1
                std_threshold_2 = dm / self.T2
                # var1 = std_th_1**2
                var2 = std_threshold_2**2
                leak = self.TD * self.var + var2
                # leak = (var1 - var2)/(var1) * self.var + var2
                new_var_fix = torch.maximum(self.var, leak)
                self.var_fix.data = torch.where(self.one_hot_filter, self.var_fix, new_var_fix) if self.exclude_one_hot else new_var_fix
                # (more commented-out mcv debug recordings of var / var_fix / baselines here)
                # !!! qq = self.var_fix.data - self.var.data
                # !!! if self.patience > 0 and self.patience < 800 and (not (qq==0).all()):
                # !!!     print('[norm.py] Input issue: cannot be well expressed by normal distribution', torch.where(qq!=0))
                self.n_sample.data = tot_count
        # t = (_2dx_view - self.mean) / torch.sqrt_(self.var_fix + 1e-8)
        if get_mu_var:
            return self.mean, self.var_fix
        return (x - self.mean) / torch.sqrt_(self.var_fix + 1e-8)
# (fragment continued from previous line: tail of DynamicNormFix.forward —
#  the commented "# !!!" diagnostic print, the "self.n_sample.data = tot_count"
#  update, the optional (mean, var_fix) return and the final
#  "return (x - self.mean) / torch.sqrt_(self.var_fix + 1e-8)" belong there)

# def check_errors(self, _2dx, new_var):
#     self.patience -= 1

''' test script
import torch, time
from ALGORITHM.common.norm import DynamicNormFix
input_size = 1
only_for_last_dim = True
dynamic_norm = DynamicNormFix(input_size, only_for_last_dim, exclude_one_hot=True, exclude_nan=False)
for _ in range(101100):
    # mask = (torch.randn(60, 1, out=None) > 0)
    # x = torch.where(mask,
    #     torch.randn(60, 1, out=None)*10,
    #     torch.randn(60, 1, out=None)*5,
    # )
    # 左边
    std = 0.01; offset = -0.01; num = 5
    x3 = torch.randn(num, 1, out=None) * std + offset
    # 中间
    std = 0.01; offset = 0; num = 500
    x2 = torch.randn(num, 1, out=None) * std + offset
    # 右边
    std = 0.01; offset = 1; num = 5
    x1 = torch.randn(num, 1, out=None) * std + offset
    # # 左边
    # std = 1; offset = -10; num = 5
    # x3 = torch.randn(num, 1, out=None) * std + offset
    # # 中间
    # std = 1; offset = 5; num = 500
    # x2 = torch.randn(num, 1, out=None) * std + offset
    # # 右边
    # std = 1; offset = 5; num = 5
    # x1 = torch.randn(num, 1, out=None) * std + offset
    x = torch.cat((x1,x2,x3), 0)
    y = dynamic_norm(x)
    print(y)
time.sleep(60)
'''


# ================================================
# FILE: PythonExample/hmp_minimal_modules/ALGORITHM/common/pca.py
# ================================================
import numpy as np


def pca(samples, target_dim):
    """Project a 2-D sample matrix (n_samples, n_features) onto its top
    ``target_dim`` principal components.

    Returns the centered data projected onto the leading eigenvectors of the
    sample covariance matrix, shape (n_samples, target_dim).  Note that the
    sign of each component is arbitrary (eigenvector sign ambiguity).
    """
    assert len(samples.shape) == 2
    data = samples - np.mean(samples, axis=0)   # center at batch dim
    covMat = np.cov(data, rowvar=0)
    fValue, fVector = np.linalg.eig(covMat)
    fValueSort = np.argsort(-fValue)            # eigenvalue indices, descending
    fValueTopN = fValueSort[:target_dim]
    fvectormat = fVector[:, fValueTopN]
    down_dim_data = np.dot(data, fvectormat)
    return down_dim_data

# (next file separator continues on the following line:
#  FILE: PythonExample/hmp_minimal_modules/ALGORITHM/common/ppo_sampler.py)
# (fragment continued from previous line: file separator
#  FILE: PythonExample/hmp_minimal_modules/ALGORITHM/common/ppo_sampler.py)
import torch, math
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from random import randint, sample
from torch.utils.data.sampler import BatchSampler, SubsetRandomSampler
from UTIL.colorful import *
from UTIL.tensor_ops import _2tensor, __hash__, repeat_at, _2cpu2numpy
from UTIL.tensor_ops import my_view, scatter_with_nan, sample_balance
from config import GlobalConfig as cfg
from UTIL.gpu_share import GpuShareUnit


class TrajPoolSampler():
    """Flattens a pool of trajectories into one big batch ('container'),
    computes normalized advantages, and yields shuffled mini-batches.
    With prevent_batchsize_oom it instead adaptively caps the sample count
    based on previously observed GPU OOM events."""

    def __init__(self, n_div, traj_pool, flag, req_dict, req_dict_rename, prevent_batchsize_oom=False, mcv=None):
        self.n_pieces_batch_division = n_div
        self.prevent_batchsize_oom = prevent_batchsize_oom
        self.mcv = mcv
        if self.prevent_batchsize_oom:
            assert self.n_pieces_batch_division == 1, 'self.n_pieces_batch_division should be 1'
        self.num_batch = None
        self.container = {}
        self.warned = False
        assert flag == 'train'
        # req_dict = ['obs', 'state', 'action', 'actionLogProb', 'return', 'reward', 'threat', 'value']
        # req_dict_rename = ['obs', 'state', 'action', 'actionLogProb', 'return', 'reward', 'threat', 'state_value']
        if cfg.ScenarioConfig.AvailActProvided:
            req_dict.append('avail_act')
            req_dict_rename.append('avail_act')
        return_rename = "return"
        value_rename = "state_value"
        advantage_rename = "advantage"
        # replace 'obs' to 'obs > xxxx'
        for key_index, key in enumerate(req_dict):
            key_name = req_dict[key_index]
            key_rename = req_dict_rename[key_index]
            if not hasattr(traj_pool[0], key_name):
                real_key_list = [real_key for real_key in traj_pool[0].__dict__ if (key_name + '>' in real_key)]
                assert len(real_key_list) > 0, ('check variable provided!', key, key_index)
                for real_key in real_key_list:
                    mainkey, subkey = real_key.split('>')
                    req_dict.append(real_key)
                    req_dict_rename.append(key_rename + '>' + subkey)
        self.big_batch_size = -1  # vector should have same length, check it!
        # load traj into a 'container'
        for key_index, key in enumerate(req_dict):
            key_name = req_dict[key_index]
            key_rename = req_dict_rename[key_index]
            if not hasattr(traj_pool[0], key_name):
                continue
            set_item = np.concatenate([getattr(traj, key_name) for traj in traj_pool], axis=0)
            if not (self.big_batch_size == set_item.shape[0] or (self.big_batch_size < 0)):
                print('error')
            assert self.big_batch_size == set_item.shape[0] or (self.big_batch_size < 0), (key, key_index)
            self.big_batch_size = set_item.shape[0]
            self.container[key_rename] = set_item   # assign value to key_rename
        # normalize advantage inside the batch
        self.container[advantage_rename] = self.container[return_rename] - self.container[value_rename]
        self.container[advantage_rename] = (
            self.container[advantage_rename] - self.container[advantage_rename].mean()
        ) / (self.container[advantage_rename].std() + 1e-5)
        # size of minibatch for each agent
        self.mini_batch_size = math.ceil(self.big_batch_size / self.n_pieces_batch_division)
        # do once
        self.do_once_fin = False

    def __len__(self):
        return self.n_pieces_batch_division

    def reminder(self, n_sample):
        # report (once) how many samples are dropped by the OOM cap
        if not self.do_once_fin:
            self.do_once_fin = True
            drop_percent = (self.big_batch_size - n_sample) / self.big_batch_size * 100
            if self.mcv is not None:
                self.mcv.rec(drop_percent, 'drop percent')
            if drop_percent > 20:
                print_ = print亮红
                print_('droping %.1f percent samples..' % (drop_percent))
                assert False, "GPU OOM!"
            else:
                print_ = print
                print_('droping %.1f percent samples..' % (drop_percent))

    def get_sampler(self):
        if not self.prevent_batchsize_oom:  #
            sampler = BatchSampler(SubsetRandomSampler(range(self.big_batch_size)), self.mini_batch_size, drop_last=False)
        else:
            max_n_sample = self.determine_max_n_sample()
            n_sample = min(self.big_batch_size, max_n_sample)
            self.reminder(n_sample)
            sampler = BatchSampler(SubsetRandomSampler(range(n_sample)), n_sample, drop_last=False)
        return sampler

    def reset_and_get_iter(self):
        self.sampler = self.get_sampler()
        for indices in self.sampler:
            selected = {}
            for key in self.container:
                selected[key] = self.container[key][indices]
            for key in [key for key in selected if '>' in key]:
                # re-combine child key with its parent
                mainkey, subkey = key.split('>')
                if not mainkey in selected:
                    selected[mainkey] = {}
                selected[mainkey][subkey] = selected[key]
                del selected[key]
            yield selected

    def determine_max_n_sample(self):
        assert self.prevent_batchsize_oom
        if not hasattr(TrajPoolSampler, 'MaxSampleNum'):
            # initialization
            TrajPoolSampler.MaxSampleNum = [int(self.big_batch_size * (i + 1) / 50) for i in range(50)]
            max_n_sample = self.big_batch_size
        elif TrajPoolSampler.MaxSampleNum[-1] > 0:
            # meaning that oom never happen, at least not yet
            # only update when the batch size increases
            if self.big_batch_size > TrajPoolSampler.MaxSampleNum[-1]:
                TrajPoolSampler.MaxSampleNum.append(self.big_batch_size)
            max_n_sample = self.big_batch_size
        else:
            # meaning that oom already happened, choose TrajPoolSampler.MaxSampleNum[-2] to be the limit
            assert TrajPoolSampler.MaxSampleNum[-2] > 0
            max_n_sample = TrajPoolSampler.MaxSampleNum[-2]
        return max_n_sample


# ================================================
# FILE: PythonExample/hmp_minimal_modules/ALGORITHM/common/rl_alg_base.py
# ================================================
import time
from UTIL.tensor_ops import __hash__, repeat_at
from UTIL.colorful import *
from .alg_base import AlgorithmBase

# model IO
# (class RLAlgorithmBase continues on the next line)
RLAlgorithmBase(AlgorithmBase): def __init__(self, n_agent, n_thread, space, mcv=None, team=None): super().__init__(n_agent, n_thread, space, mcv, team) # data integraty check self._unfi_frag_ = None # Skip currupt data integraty check after this patience is exhausted self.patience = 1000 def interact_with_env(self, team_intel): raise NotImplementedError def save_model(self, update_cnt, info=None): raise NotImplementedError def process_framedata(self, traj_framedata): raise NotImplementedError # Rollout Processor 准备提交Rollout,以下划线开头和结尾的键值需要对齐(self.n_thread, ...) # note that keys starting with _ must have shape (self.n_thread, ...), details see fn:mask_paused_env() def process_framedata(self, traj_framedata): ''' hook is called when reward and next moment observation is ready, now feed them into trajectory manager. Rollout Processor | 准备提交Rollout, 以下划线开头和结尾的键值需要对齐(self.n_thread, ...) note that keys starting with _ must have shape (self.n_thread, ...), details see fn:mask_paused_env() ''' # strip info, since it is not array items_to_pop = ['info', 'Latest-Obs'] for k in items_to_pop: if k in traj_framedata: traj_framedata.pop(k) # the agent-wise reward is supposed to be the same, so averge them if self.ScenarioConfig.RewardAsUnity: traj_framedata['reward'] = repeat_at(traj_framedata['reward'], insert_dim=-1, n_times=self.n_agent) # change the name of done to be recognised (by trajectory manager) traj_framedata['_DONE_'] = traj_framedata.pop('done') traj_framedata['_TOBS_'] = traj_framedata.pop( 'Terminal-Obs-Echo') if 'Terminal-Obs-Echo' in traj_framedata else None # mask out pause thread traj_framedata = self.mask_paused_env(traj_framedata) # put the frag into memory self.batch_traj_manager.feed_traj(traj_framedata) def check_reward_type(self, AlgorithmConfig): if self.ScenarioConfig.RewardAsUnity != AlgorithmConfig.TakeRewardAsUnity: assert self.ScenarioConfig.RewardAsUnity assert not AlgorithmConfig.TakeRewardAsUnity print亮紫( 'Warning, the scenario (MISSION) 
provide `RewardAsUnity`, but AlgorithmConfig does not `TakeRewardAsUnity` !') print亮紫( 'If you continue, team reward will be duplicated to serve as individual rewards, wait 3s to proceed...') time.sleep(3) def mask_paused_env(self, frag): running = ~frag['_SKIP_'] if running.all(): return frag for key in frag: if not key.startswith('_') and hasattr(frag[key], '__len__') and len(frag[key]) == self.n_thread: frag[key] = frag[key][running] return frag ''' Get event from hmp task runner, called when each test rotinue is complete. ''' def on_notify(self, message, **kargs): self.save_model( update_cnt=self.traj_manager.update_cnt, info=str(kargs) ) ''' function to be called when reward is received ''' def commit_traj_frag(self, unfi_frag, req_hook=True): assert self._unfi_frag_ is None self._unfi_frag_ = unfi_frag self._check_data_hash() # check data integraty if req_hook: # leave a hook return self.traj_waiting_hook else: return None def traj_waiting_hook(self, new_frag): ''' This function will be called from hook is called when reward and next moment observation is ready ''' # do data curruption check at beginning, this is important!
# --- end of RLAlgorithmBase: traj_waiting_hook merges the deferred fragment with
#     the new reward/obs data; _check_data_hash/_check_data_curruption fingerprint
#     every (sub)item with __hash__ to detect in-place overwrites, with a
#     1000-call "patience" budget before the check is skipped for speed.
# NOTE(review): "curruption"/"integraty" typos are kept as-is (identifier names).
# Then the header of ALGORITHM/common/traj.py and the start of class TRAJ_BASE
# (per-episode trajectory buffer; remember() lazily allocates a track per key).
self._check_data_curruption() # finish the frame data with new data feedin fi_frag = self._unfi_frag_ fi_frag.update(new_frag) # call upper level function to deal with frame data self.process_framedata(traj_framedata=fi_frag) # delete data reference self._unfi_frag_ = None def _no_hook(self, new_frag): return # protect data from overwriting def _check_data_hash(self): if self.patience > 0: self.patience -= 1 self.hash_db = {} # for debugging, to detect write protection error for key in self._unfi_frag_: item = self._unfi_frag_[key] if isinstance(item, dict): self.hash_db[key] = {} for subkey in item: subitem = item[subkey] self.hash_db[key][subkey] = __hash__(subitem) else: self.hash_db[key] = __hash__(item) # protect data from overwriting def _check_data_curruption(self): if self.patience > 0: self.patience -= 1 assert self._unfi_frag_ is not None assert self.hash_db is not None for key in self._unfi_frag_: item = self._unfi_frag_[key] if isinstance(item, dict): for subkey in item: subitem = item[subkey] assert self.hash_db[key][subkey] == __hash__(subitem), ('Currupted data!') else: assert self.hash_db[key] == __hash__(item), ('Currupted data!') ================================================ FILE: PythonExample/hmp_minimal_modules/ALGORITHM/common/traj.py ================================================ # cython: language_level=3 import numpy as np from UTIL.colorful import * from UTIL.tensor_ops import __hash__ class TRAJ_BASE(): key_data_type = {} key_data_shape = {} max_mem_length = -1 def __init__(self, traj_limit, env_id): self.traj_limit = traj_limit self.env_id = env_id self.readonly_lock = False self.key_dict = [] self.time_pointer = 0 self.need_reward_bootstrap = False self.deprecated_flag = False # remember something in a time step, add it to trajectory def remember(self, key, content): assert not self.readonly_lock if not (key in self.key_dict) and (content is not None): self.init_track(key=key, first_content=content) getattr(self,
# --- TRAJ_BASE continued: remember() tail, copy_track (duplicates a track, or
#     recurses over 'main>sub' flattened child keys), check_type_shape, init_track.
# NOTE(review): key_data_type/key_data_shape are CLASS-level caches shared by all
# trajectory instances — check_type_shape asserts a key keeps one dtype globally,
# and init_track_none relies on this cache to allocate when content is None.
key)[self.time_pointer] = content elif not (key in self.key_dict) and (content is None): self.init_track_none(key=key) elif (key in self.key_dict) and (content is not None): getattr(self, key)[self.time_pointer] = content else: pass # duplicate/rename a trajectory def copy_track(self, origin_key, new_key): if hasattr(self, origin_key): origin_handle = getattr(self, origin_key) setattr(self, new_key, origin_handle.copy()) new_handle = getattr(self, new_key) self.key_dict.append(new_key) #return origin_handle, new_handle else: real_key_list = [real_key for real_key in self.__dict__ if (origin_key+'>' in real_key)] assert len(real_key_list)>0, ('this key does not exist (yet), check:', origin_key) for real_key in real_key_list: mainkey, subkey = real_key.split('>') self.copy_track(real_key, (new_key+'>'+subkey)) #return # make sure dtype is ok def check_type_shape(self, key, first_content=None): if first_content is not None: content_type = first_content.dtype content_shape = first_content.shape if key in TRAJ_BASE.key_data_type: assert TRAJ_BASE.key_data_type[key] == content_type else: TRAJ_BASE.key_data_type[key] = content_type TRAJ_BASE.key_data_shape[key] = content_shape return content_type, content_shape assert key in TRAJ_BASE.key_data_type return TRAJ_BASE.key_data_type[key], TRAJ_BASE.key_data_shape[key] # create track, executed used when a key showing up for the first time in 'self.remember' def init_track(self, key, first_content): content = first_content self.check_type_shape(key, first_content) assert isinstance(content, np.ndarray) or isinstance(content, float), (key, content.__class__) tensor_size = ((self.traj_limit,) + tuple(content.shape)) set_item = np.zeros(shape=tensor_size, dtype=content.dtype) set_item[:] = np.nan if np.issubdtype(content.dtype, np.floating) else 0 setattr(self, key, set_item) self.key_dict.append(key) # key pop up yet content is None, # read dtype from history dtype dictionary to fill the hole def init_track_none(self, key):
# --- init_track_none body (NaN-fill for floats, 0 otherwise), time_shift,
#     cut_tail (clips preallocated tracks to the frames actually written); then
#     the header of ALGORITHM/common/traj_gae.py and the start of class
#     trajectory(TRAJ_BASE): GAE-specific trajectory with 'alive'-based pruning.
# NOTE(review): the cut_tail assert message below is split mid-string across the
# next line break — no comment may be inserted at that seam.
content_dtype, content_shape = self.check_type_shape(key) tensor_size = ((self.traj_limit,) + tuple(content_shape)) set_item = np.zeros(shape=tensor_size, dtype=content_dtype) set_item[:] = np.nan if np.issubdtype(content_dtype, np.floating) else 0 setattr(self, key, set_item) self.key_dict.append(key) # push the time pointer forward, before you call 'self.remember' again to fill t+1 data def time_shift(self): assert self.time_pointer < self.traj_limit self.time_pointer += 1 # cut trajectory tail, when the number of episode time step < traj_limit def cut_tail(self): TJ = lambda key: getattr(self, key) self.readonly_lock = True n_frame = self.time_pointer # check is buffer size too big if n_frame > TRAJ_BASE.max_mem_length: TRAJ_BASE.max_mem_length = n_frame print('max_mem_length:%d, traj_limit:%d'%(TRAJ_BASE.max_mem_length, self.traj_limit)) # clip tail for key in self.key_dict: setattr(self, key, TJ(key)[:n_frame]) ================================================ FILE: PythonExample/hmp_minimal_modules/ALGORITHM/common/traj_gae.py ================================================ # cython: language_level=3 import numpy as np from ALGORITHM.common.traj import TRAJ_BASE import copy from UTIL.colorful import * from UTIL.tensor_ops import my_view, repeat_at, gather_righthand class trajectory(TRAJ_BASE): dead_mask_check = True # confirm mask ok def __init__(self, traj_limit, env_id, alg_cfg): super().__init__(traj_limit, env_id) self.agent_alive_reference = 'alive' self.alg_cfg = alg_cfg def early_finalize(self): assert not self.readonly_lock # unfinished traj self.need_reward_bootstrap = True def set_terminal_obs(self, tobs): self.tobs = copy.deepcopy(tobs) def cut_tail(self): # 删去多余的预留空间 super().cut_tail() TJ = lambda key: getattr(self, key) # 进一步地, 根据这个轨迹上的NaN,删除所有无效时间点 agent_alive = getattr(self, self.agent_alive_reference) assert len(agent_alive.shape) == 2, "shoud be 2D (time, agent)/dead_or_alive" if self.need_reward_bootstrap: assert False, ('it should not go
here if everything goes as expected') # deprecated if nothing in it p_valid = agent_alive.any(axis=-1) p_invalid = ~p_valid is_fully_valid_traj = (p_valid[-1] == True) # assert p_valid[-1] == True, 如果有三只队伍,很有可能出现一只队伍全体阵亡,但游戏仍未结束的情况 if p_invalid.all(): #invalid traj self.deprecated_flag = True return if not is_fully_valid_traj: # adjust reward position if not fully valid reward = TJ('reward') for i in reversed(range(self.time_pointer)): if p_invalid[i] and i != 0: # invalid, push reward forward reward[i-1] += reward[i]; reward[i] = np.nan setattr(self, 'reward', reward) # clip NaN for key in self.key_dict: setattr(self, key, TJ(key)[p_valid]) if not is_fully_valid_traj: # reset time pointer self.time_pointer = p_valid.sum() # all done return def reward_push_forward(self, dead_mask): # self.new_reward = self.reward.copy() if self.alg_cfg.gamma_in_reward_forwarding: gamma = self.alg_cfg.gamma_in_reward_forwarding_value for i in reversed(range(self.time_pointer)): if i==0: continue self.reward[i-1] += np.where(dead_mask[i], self.reward[i]*gamma, 0) # if dead_mask[i]==True, this frame is invalid, move reward forward, set self.reward[i] to 0 self.reward[i] = np.where(dead_mask[i], 0, self.reward[i]) # if dead_mask[i]==True, this frame is invalid, move reward forward, set self.reward[i] to 0 else: for i in reversed(range(self.time_pointer)): if i==0: continue self.reward[i-1] += np.where(dead_mask[i], self.reward[i], 0) # if dead_mask[i]==True, this frame is invalid, move reward forward, set self.reward[i] to 0 self.reward[i] = np.where(dead_mask[i], 0, self.reward[i]) # if dead_mask[i]==True, this frame is invalid, move reward forward, set self.reward[i] to 0 return # new finalize def finalize(self): self.readonly_lock = True assert not self.deprecated_flag TJ = lambda key: getattr(self, key) assert not np.isnan(TJ('reward')).any() # deadmask agent_alive = getattr(self, self.agent_alive_reference) dead_mask = ~agent_alive if trajectory.dead_mask_check:
# --- finalize() tail: one-shot dead-mask sanity check, reward_push_forward to
#     move rewards off dead frames, then the 'threat' auxiliary channel built
#     backwards from dead_mask (clipped to [-1, SAFE_LIMIT=8]); finally dispatch
#     to standard GAE or the policy-resonance variant.
# NOTE(review): `.astype(np.int)` — np.int was deprecated in NumPy 1.20 and
# REMOVED in 1.24; this raises AttributeError on modern NumPy. Should be plain
# int (or np.int64) upstream; not fixable here without touching code bytes.
trajectory.dead_mask_check = False if not dead_mask.any(): assert False, "Are you sure agents cannot die? If so, delete this check." self.reward_push_forward(dead_mask) # push terminal reward forward 38 42 54 threat = np.zeros(shape=dead_mask.shape) - 1 assert dead_mask.shape[0] == self.time_pointer for i in reversed(range(self.time_pointer)): # threat[:(i+1)] 不包含threat[(i+1)] if i+1 < self.time_pointer: threat[:(i+1)] += (~(dead_mask[i+1]&dead_mask[i])).astype(np.int) elif i+1 == self.time_pointer: threat[:] += (~dead_mask[i]).astype(np.int) SAFE_LIMIT = 8 threat = np.clip(threat, -1, SAFE_LIMIT) setattr(self, 'threat', np.expand_dims(threat, -1)) # ! Use GAE to calculate return if self.alg_cfg.use_policy_resonance: self.gae_finalize_return_pr(reward_key='reward', value_key='BAL_value_all_level', new_return_name='BAL_return_all_level') else: self.gae_finalize_return(reward_key='reward', value_key='value', new_return_name='return') return def gae_finalize_return(self, reward_key, value_key, new_return_name): # ------- gae parameters ------- gamma = self.alg_cfg.gamma tau = self.alg_cfg.tau # ------- -------------- ------- rewards = getattr(self, reward_key) value = getattr(self, value_key) # ------- -------------- ------- length = rewards.shape[0] assert rewards.shape[0]==value.shape[0] # if dimension not aligned if rewards.ndim == value.ndim-1: rewards = np.expand_dims(rewards, -1) # initalize two more tracks setattr(self, new_return_name, np.zeros_like(value)) self.key_dict.append(new_return_name) # ------- -------------- ------- returns = getattr(self, new_return_name) boot_strap = 0 if not self.need_reward_bootstrap else self.boot_strap_value['bootstrap_'+value_key] for step in reversed(range(length)): if step==(length-1): # 最后一帧 value_preds_delta = rewards[step] + gamma * boot_strap - value[step] gae = value_preds_delta else: value_preds_delta = rewards[step] + gamma * value[step + 1] - value[step] gae = value_preds_delta + gamma * tau * gae returns[step] =
# --- "returns[step] = gae + value[step]" is split across this break (extraction
#     artifact). Below: gae_finalize_return_pr — same backward GAE recursion, but
#     value/reward are flattened to (T, agent*distribution_precision, 1) via
#     my_view/repeat_at before the loop and reshaped back afterwards.
gae + value[step] def gae_finalize_return_pr(self, reward_key, value_key, new_return_name): # ------- gae parameters ------- gamma = self.alg_cfg.gamma tau = self.alg_cfg.tau # ------- -------------- ------- BAL_value_all_level = copy.deepcopy(getattr(self, value_key)) # reshape to (batch, agent*distribution_precision, 1) value = my_view(BAL_value_all_level, [0, -1, 1]) # ------- ------- reshape reward ------- ------- rewards_cp = copy.deepcopy(getattr(self, reward_key)) # if dimension not aligned if rewards_cp.ndim == value.ndim-1: rewards_cp = np.expand_dims(rewards_cp, -1) assert rewards_cp.shape[-1] == 1 n_agent = rewards_cp.shape[-2] assert BAL_value_all_level.shape[-2] == n_agent assert BAL_value_all_level.shape[-1] == self.alg_cfg.distribution_precision rewards_cp = repeat_at(rewards_cp.squeeze(-1), -1, self.alg_cfg.distribution_precision) rewards_cp = my_view(rewards_cp, [0, -1, 1]) # ------- -------------- ------- length = rewards_cp.shape[0] assert rewards_cp.shape[0]==value.shape[0] # ------- -------------- ------- returns = np.zeros_like(value) boot_strap = 0 if not self.need_reward_bootstrap else self.boot_strap_value['bootstrap_'+value_key] for step in reversed(range(length)): if step==(length-1): # 最后一帧 value_preds_delta = rewards_cp[step] + gamma * boot_strap - value[step] gae = value_preds_delta else: value_preds_delta = rewards_cp[step] + gamma * value[step + 1] - value[step] gae = value_preds_delta + gamma * tau * gae returns[step] = gae + value[step] # ------- -------------- ------- returns = my_view(returns, [0, n_agent, self.alg_cfg.distribution_precision]) # BAL_return_all_level setattr(self, new_return_name, returns) self.key_dict.append(new_return_name) def select_value_level(BAL_all_level, randl): n_agent = BAL_all_level.shape[1] tmp_index = np.expand_dims(repeat_at(randl, -1, n_agent), -1) return gather_righthand(src=BAL_all_level, index=tmp_index, check=False) self.value_selected =
select_value_level(BAL_all_level=self.BAL_value_all_level, randl=self.randl) self.return_selected = select_value_level(BAL_all_level=self.BAL_return_all_level, randl=self.randl) ''' 轨迹池管理 ''' class TrajManagerBase(object): def __init__(self, n_env, traj_limit, alg_cfg): self.alg_cfg = alg_cfg self.n_env = n_env self.traj_limit = traj_limit self.update_cnt = 0 self.traj_pool = [] self.registered_keys = [] self.live_trajs = [trajectory(self.traj_limit, env_id=i, alg_cfg=self.alg_cfg) for i in range(self.n_env)] self.live_traj_frame = [0 for _ in range(self.n_env)] self._traj_lock_buf = None self.patience = 1000 pass def __check_integraty(self, traj_frag): if self.patience < 0: return # stop wasting time checking this self.patience -= 1 for key in traj_frag: if key not in self.registered_keys and (not key.startswith('_')): self.registered_keys.append(key) for key in self.registered_keys: assert key in traj_frag, ('this key sometimes disappears from the traj_frag:', key) def batch_update(self, traj_frag): self.__check_integraty(traj_frag) done = traj_frag['_DONE_']; traj_frag.pop('_DONE_') # done flag skip = traj_frag['_SKIP_']; traj_frag.pop('_SKIP_') # skip/frozen flag tobs = traj_frag['_TOBS_']; traj_frag.pop('_TOBS_') # terminal obs # single bool to list bool if isinstance(done, bool): done = [done for _ in range(self.n_env)] if isinstance(skip, bool): skip = [skip for _ in range(self.n_env)] n_active = sum(~skip) # feed cnt = 0 for env_i in range(self.n_env): if skip[env_i]: continue # otherwise frag_index = cnt; cnt += 1 env_index = env_i traj_handle = self.live_trajs[env_index] for key in traj_frag: self.traj_remember(traj_handle, key=key, content=traj_frag[key],frag_index=frag_index, n_active=n_active) self.live_traj_frame[env_index] += 1 traj_handle.time_shift() if done[env_i]: assert tobs[env_i] is not None # get the final obs traj_handle.set_terminal_obs(tobs[env_i]) self.traj_pool.append(traj_handle) self.live_trajs[env_index] = trajectory(self.traj_limit,
# --- batch_update tail (finished env threads get a fresh live trajectory) and
#     traj_remember (recurses into dict-valued frags using 'key>subkey' names);
#     then class BatchTrajManager: feed_traj ingests a frame fragment,
#     train_and_clear_traj_pool cut_tails/finalizes the pool and hands it to the
#     trainer hook, can_exec_training gates on alg_cfg.train_traj_needed.
# NOTE(review): the select_value_level/self.value_selected statements just above
# sit outside any visible method — likely an extraction artifact of the original
# file; verify against upstream before assuming module-level execution.
# NOTE(review): feed_traj(require_hook=True) raises ModuleNotFoundError to mean
# "no longer supported" — RuntimeError/NotImplementedError would be the correct
# exception type.
env_id=env_index, alg_cfg=self.alg_cfg) self.live_traj_frame[env_index] = 0 def traj_remember(self, traj, key, content, frag_index, n_active): if content is None: traj.remember(key, None) elif isinstance(content, dict): for sub_key in content: self.traj_remember(traj, "".join((key , ">" , sub_key)), content=content[sub_key], frag_index=frag_index, n_active=n_active) else: assert n_active == len(content), ('length error') traj.remember(key, content[frag_index]) # * class BatchTrajManager(TrajManagerBase): def __init__(self, n_env, traj_limit, trainer_hook, alg_cfg): super().__init__(n_env, traj_limit, alg_cfg) self.trainer_hook = trainer_hook self.traj_limit = traj_limit # 函数入口 def feed_traj(self, traj_frag, require_hook=False): if require_hook: raise ModuleNotFoundError("not supported anymore") assert self._traj_lock_buf is None assert '_DONE_' in traj_frag assert '_SKIP_' in traj_frag self.batch_update(traj_frag=traj_frag) # call parent's batch_update() return def train_and_clear_traj_pool(self): print('do update %d'%self.update_cnt) for traj_handle in self.traj_pool: traj_handle.cut_tail() self.traj_pool = list(filter(lambda traj: not traj.deprecated_flag, self.traj_pool)) for traj_handle in self.traj_pool: traj_handle.finalize() self.trainer_hook(self.traj_pool, 'train') self.traj_pool = [] self.update_cnt += 1 return self.update_cnt def can_exec_training(self): num_traj_needed = self.alg_cfg.train_traj_needed if len(self.traj_pool) >= num_traj_needed: return True else: return False ================================================ FILE: PythonExample/hmp_minimal_modules/ALGORITHM/common/traj_manager.py ================================================ ================================================ FILE: PythonExample/hmp_minimal_modules/ALGORITHM/example_foundation.py ================================================ import numpy as np import copy import math import random class ExampleFoundation(): def __init__(self, n_agent, n_thread, space, mcv=None,
# --- ExampleFoundation: trivial reference algorithm that always emits an all-zero
#     action array of shape (n_agent, n_thread, 1); then the header of
#     hete_league_onenet_fix/ccategorical.py and random_process(): policy-resonance
#     sampling — when rsn_flag is set the argmax action is returned directly,
#     otherwise the argmax probability is damped (p_hat) and the remaining mass
#     rescaled by k before Categorical sampling. Mutates `probs` in place under
#     torch.no_grad (the "#!!! write" markers flag those in-place writes).
team=None): self.n_thread = n_thread self.n_agent = n_agent self.handler = [None for _ in range(self.n_thread)] def interact_with_env(self, team_intel): info = team_intel['Latest-Team-Info'] done = team_intel['Env-Suffered-Reset'] step_cnt = team_intel['Current-Obs-Step'] action_list = np.zeros(shape=(self.n_agent, self.n_thread, 1)) return action_list, team_intel ================================================ FILE: PythonExample/hmp_minimal_modules/ALGORITHM/hete_league_onenet_fix/ccategorical.py ================================================ from torch.distributions.categorical import Categorical import torch from .foundation import AlgorithmConfig from UTIL.tensor_ops import repeat_at, _2tensor from torch.distributions import kl_divergence EPS = 1e-9 # yita = p_hit = 0.14 def random_process(probs, rsn_flag): yita = AlgorithmConfig.yita with torch.no_grad(): max_place = probs.argmax(-1, keepdims=True) mask_max = torch.zeros_like(probs).scatter_(-1, max_place, 1).bool() pmax = probs[mask_max] if rsn_flag: assert max_place.shape[-1] == 1 return max_place.squeeze(-1) else: # forbit max prob being chosen, pmax = probs.max(axis=-1) p_hat = pmax + (pmax-1)/(1/yita-1) k = 1/(1-yita) #!!! write probs *= k #!!!
# --- random_process tail; random_process_allow_big_yita clips yita per element
#     (yita_arr_clip = min(pmax, yita)) so p_hat stays non-negative even for large
#     yita; random_process_with_clamp3 additionally floors p_hat at yita_min_prob
#     and blends argmax vs sampled actions elementwise via the rsn_flag mask;
#     then class CCategorical begins (sample() clones dist.probs before the
#     in-place rewrites).
write probs[mask_max] = p_hat # print(probs) dist = Categorical(probs=probs) samp = dist.sample() assert samp.shape[-1] != 1 return samp def random_process_allow_big_yita(probs, rsn_flag): yita = AlgorithmConfig.yita with torch.no_grad(): max_place = probs.argmax(-1, keepdims=True) mask_max = torch.zeros_like(probs).scatter_(-1, max_place, 1).bool() pmax = probs[mask_max].reshape(max_place.shape) #probs[max_place].clone() if rsn_flag: assert max_place.shape[-1] == 1 return max_place.squeeze(-1) else: # forbit max prob being chosen # pmax = probs.max(axis=-1) #probs[max_place].clone() yita_arr = torch.ones_like(pmax)*yita yita_arr_clip = torch.minimum(pmax, yita_arr) # p_hat = pmax + (pmax-1) / (1/yita_arr_clip-1) + 1e-10 p_hat = (pmax-yita_arr_clip)/(1-yita_arr_clip) k = 1/(1-yita_arr_clip) probs *= k probs[mask_max] = p_hat.reshape(-1) # print(probs) dist = Categorical(probs=probs) samp = dist.sample() assert samp.shape[-1] != 1 return samp #.squeeze(-1) def random_process_with_clamp3(probs, yita, yita_min_prob, rsn_flag): with torch.no_grad(): max_place = probs.argmax(-1, keepdims=True) mask_max = torch.zeros_like(probs).scatter_(dim=-1, index=max_place, value=1).bool() pmax = probs[mask_max].reshape(max_place.shape) # act max assert max_place.shape[-1] == 1 act_max = max_place.squeeze(-1) # act samp yita_arr = torch.ones_like(pmax)*yita # p_hat = pmax + (pmax-1) / (1/yita_arr_clip-1) + 1e-10 p_hat = (pmax-yita_arr)/((1-yita_arr)+EPS) p_hat = p_hat.clamp(min=yita_min_prob) k = (1-p_hat)/((1-pmax)+EPS) probs *= k probs[mask_max] = p_hat.reshape(-1) dist = Categorical(probs=probs) act_samp = dist.sample() # assert act_samp.shape[-1] != 1 hit_e = _2tensor(rsn_flag) return torch.where(hit_e, act_max, act_samp) class CCategorical(): def __init__(self, planner): self.planner = planner pass def sample(self, dist, eprsn): probs = dist.probs.clone() return random_process_with_clamp3(probs, self.planner.yita, self.planner.yita_min_prob, eprsn) def register_rsn(self,
# --- CCategorical.register_rsn tail and feed_logits.
# NOTE(review): feed_logits wraps Categorical(logits=...) in a bare try/except
# that prints 'error' and implicitly returns None — callers then fail later with
# an opaque AttributeError; narrowing the except and re-raising would be safer.
# Then cython_func.pyx (Cython, not Python): roll_hisory() merges the freshly
# valid entity observations with the previous history buffer per thread/agent,
# parallelized with prange/nogil; and the import header of div_tree.py (the
# import list is split mid-statement across the next line break).
rsn_flag): self.rsn_flag = rsn_flag def feed_logits(self, logits): try: return Categorical(logits=logits) except: print('error') ================================================ FILE: PythonExample/hmp_minimal_modules/ALGORITHM/hete_league_onenet_fix/cython_func.pyx ================================================ import numpy as np cimport numpy as np cimport cython from cython.parallel import prange np.import_array() ctypedef fused DTYPE_t: np.float32_t np.float64_t ctypedef fused DTYPE_intlong_t: np.int64_t np.int32_t # to compat Windows ctypedef np.uint8_t DTYPE_bool_t @cython.boundscheck(False) @cython.wraparound(False) @cython.nonecheck(False) def roll_hisory( DTYPE_t[:,:,:,:] obs_feed_new, DTYPE_t[:,:,:,:] prev_obs_feed, DTYPE_bool_t[:,:,:] valid_mask, DTYPE_intlong_t[:,:] N_valid, DTYPE_t[:,:,:,:] next_his_pool): # how many threads cdef Py_ssize_t vmax = N_valid.shape[0] # how many agents cdef Py_ssize_t wmax = N_valid.shape[1] # how many entity subjects (including self @0) cdef Py_ssize_t max_obs_entity = obs_feed_new.shape[2] cdef int n_v, th, a, t, k, pointer for th in prange(vmax, nogil=True): # for each thread range -> prange for a in prange(wmax): # for each agent pointer = 0 # step 1 fill next_his_pool[0 ~ (nv-1)] with obs_feed_new[0 ~ max_obs_entity-1] for k in range(max_obs_entity): if valid_mask[th,a,k]: next_his_pool[th, a, pointer] = obs_feed_new[th,a,k] pointer = pointer + 1 # step 2 fill next_his_pool[nv ~ (max_obs_entity-1)] with prev_obs_feed[0 ~ (max_obs_entity-1-nv)] n_v = N_valid[th,a] for k in range(n_v, max_obs_entity): next_his_pool[th,a,k] = prev_obs_feed[th,a,k-n_v] return np.asarray(next_his_pool) ================================================ FILE: PythonExample/hmp_minimal_modules/ALGORITHM/hete_league_onenet_fix/div_tree.py ================================================ import torch import torch.nn as nn import numpy as np from ALGORITHM.common.mlp import LinearFinal from UTIL.tensor_ops import add_onehot_id_at_last_dim,
add_onehot_id_at_last_dim_fixlen, repeat_at, _2tensor, gather_righthand, scatter_righthand class DivTree(nn.Module): # merge by MLP version def __init__(self, input_dim, h_dim, n_action): super().__init__() # to design a division tree, I need to get the total number of agents from .foundation import AlgorithmConfig self.n_agent = AlgorithmConfig.n_agent self.div_tree = get_division_tree(self.n_agent) self.n_level = len(self.div_tree) self.max_level = len(self.div_tree) - 1 self.current_level = 0 self.init_level = AlgorithmConfig.div_tree_init_level if self.init_level < 0: self.init_level = self.max_level self.current_level_floating = 0.0 get_net = lambda: nn.Sequential( nn.Linear(h_dim+self.n_agent, h_dim), nn.ReLU(inplace=True), LinearFinal(h_dim, n_action) ) # Note: this is NOT net defining for each agent # Instead, all agents starts from self.nets[0] self.nets = torch.nn.ModuleList(modules=[ get_net() for i in range(self.n_agent) ]) def set_to_init_level(self, auto_transfer=True): if self.init_level!=self.current_level: for i in range(self.current_level, self.init_level): self.change_div_tree_level(i+1, auto_transfer) def change_div_tree_level(self, level, auto_transfer=True): print('performing div tree level change (%d -> %d/%d) \n'%(self.current_level, level, self.max_level)) self.current_level = level self.current_level_floating = level assert len(self.div_tree) > self.current_level, ('Reach max level already!') if not auto_transfer: return transfer_list = [] for i in range(self.n_agent): previous_net_index = self.div_tree[self.current_level-1, i] post_net_index = self.div_tree[self.current_level, i] if post_net_index!=previous_net_index: transfer = (previous_net_index, post_net_index) if transfer not in transfer_list: transfer_list.append(transfer) for transfer in transfer_list: from_which_net = transfer[0] to_which_net = transfer[1] self.nets[to_which_net].load_state_dict(self.nets[from_which_net].state_dict()) print('transfering model parameters from %d-th
net to %d-th net'%(from_which_net, to_which_net)) return def forward(self, x_in, agent_ids): # x0: shape = (?,...,?, n_agent, core_dim) if self.current_level == 0: x0 = add_onehot_id_at_last_dim_fixlen(x_in, fixlen=self.n_agent, agent_ids=agent_ids) x2 = self.nets[0](x0) return x2, None else: x0 = add_onehot_id_at_last_dim_fixlen(x_in, fixlen=self.n_agent, agent_ids=agent_ids) res = [] for i in range(self.n_agent): use_which_net = self.div_tree[self.current_level, i] res.append(self.nets[use_which_net](x0[..., i, :])) x2 = torch.stack(res, -2) # x22 = self.nets[0](x1) return x2, None # def forward_try_parallel(self, x0): # x0: shape = (?,...,?, n_agent, core_dim) # x1 = self.shared_net(x0) # stream = [] # res = [] # for i in range(self.n_agent): # stream.append(torch.cuda.Stream()) # torch.cuda.synchronize() # for i in range(self.n_agent): # use_which_net = self.div_tree[self.current_level, i] # with torch.cuda.stream(stream[i]): # res.append(self.nets[use_which_net](x1[..., i, :])) # print(res[i]) # # s1 = torch.cuda.Stream() # # s2 = torch.cuda.Stream() # # # Wait for the above tensors to initialise. # # torch.cuda.synchronize() # # with torch.cuda.stream(s1): # # C = torch.mm(A, A) # # with torch.cuda.stream(s2): # # D = torch.mm(B, B) # # Wait for C and D to be computed. # torch.cuda.synchronize() # # Do stuff with C and D.
# --- remainder of the commented-out forward_try_parallel sketch; then _2div():
#     derives the next division-tree level by splitting each constant run of the
#     previous level in half and assigning fresh net indices to the upper halves;
#     get_division_tree() builds all levels (level 0 = one shared net) and
#     permutes agent columns with a random shuffle.
# NOTE(review): _2div guards `arr_res[j] = offset[p]` with try/except that merely
# prints 'wtf' — an index-math bug is being swallowed here; worth reproducing and
# fixing upstream rather than silencing.
# NOTE(review): in get_division_tree, `tree_of_agent = []*(max_div+1)` is dead
# code — `[]*n` is always `[]` and the variable is never used. get_division_tree
# also depends on np.random state (shuffle); callers needing reproducibility must
# seed first. Then foundation.py begins; the AlgorithmConfig docstring opens on
# this physical line and closes on the next, so no comment can sit between them.
# x2 = torch.stack(res, -2) # return x2 def _2div(arr): arr_res = arr.copy() arr_pieces = [] pa = 0 st = 0 needdivcnt = 0 for i, a in enumerate(arr): if a!=pa: arr_pieces.append([st, i]) if (i-st)!=1: needdivcnt+=1 pa = a st = i arr_pieces.append([st, len(arr)]) if (len(arr)-st)!=1: needdivcnt+=1 offset = range(len(arr_pieces), len(arr_pieces)+needdivcnt) p=0 for arr_p in arr_pieces: length = arr_p[1] - arr_p[0] if length == 1: continue half_len = int(np.ceil(length / 2)) for j in range(arr_p[0]+half_len, arr_p[1]): try: arr_res[j] = offset[p] except: print('wtf') p+=1 return arr_res def get_division_tree(n_agents): agent2divitreeindex = np.arange(n_agents) np.random.shuffle(agent2divitreeindex) max_div = np.ceil(np.log2(n_agents)).astype(int) levels = np.zeros(shape=(max_div+1, n_agents), dtype=int) tree_of_agent = []*(max_div+1) for ith, level in enumerate(levels): if ith == 0: continue res = _2div(levels[ith-1,:]) levels[ith,:] = res res_levels = levels.copy() for i, div_tree_index in enumerate(agent2divitreeindex): res_levels[:, i] = levels[:, div_tree_index] return res_levels ================================================ FILE: PythonExample/hmp_minimal_modules/ALGORITHM/hete_league_onenet_fix/foundation.py ================================================ import os, time, torch, traceback, shutil, pickle, io import numpy as np from UTIL.colorful import * from config import GlobalConfig from UTIL.tensor_ops import repeat_at, _2tensor from ALGORITHM.common.rl_alg_base import RLAlgorithmBase class AlgorithmConfig: ''' AlgorithmConfig: This config class will be 'injected' with new settings from json. (E.g., override configs with ```python main.py --cfg example.jsonc```) (please see UTIL.config_args to find out how this advanced trick works out.)
''' # configuration, open to jsonc modification gamma = 0.99 tau = 0.95 train_traj_needed = 512 hete_n_alive_frontend = 1 TakeRewardAsUnity = False use_normalization = True wait_norm_stable = True add_prob_loss = False n_focus_on = 2 n_entity_placeholder = 11 load_checkpoint = False load_specific_checkpoint = '' # PPO part clip_param = 0.2 ppo_epoch = 16 n_pieces_batch_division = 1 value_loss_coef = 0.1 entropy_coef = 0.05 max_grad_norm = 0.5 clip_param = 0.2 lr = 1e-4 # prevent GPU OOM prevent_batchsize_oom = False gamma_in_reward_forwarding = False gamma_in_reward_forwarding_value = 0.99 net_hdim = 24 dual_conc = True n_agent = 'auto load, do not change' ConfigOnTheFly = True hete_n_net_placeholder = 5 hete_thread_align = False hete_same_prob = 0.25 hete_lasted_n = 100 policy_resonance = False use_avail_act = True debug = False ignore_test = False type_agent_diff_lr = False hete_exclude_zero_wr = False policy_matrix_testing = False test_which_cpk = 1 type_sel_override = False type_sel_override_list = [] allow_fast_test = True def str_array_to_num(str_arr): out_arr = [] buffer = {} for str in str_arr: if str not in buffer: buffer[str] = len(buffer) out_arr.append(buffer[str]) return out_arr def itemgetter(*items): # same with operator.itemgetter def g(obj): return tuple(obj[item] if item in obj else None for item in items) return g class CPU_Unpickler(pickle.Unpickler): def find_class(self, module, name): if module == 'torch.storage' and name == '_load_from_bytes': return lambda b: torch.load(io.BytesIO(b), map_location='cpu') else: return super().find_class(module, name) class ReinforceAlgorithmFoundation(RLAlgorithmBase): def __init__(self, n_agent, n_thread, space, mcv=None, team=None): from .shell_env import ShellEnvWrapper, ActionConvertLegacy from .hete_net import HeteNet super().__init__(n_agent, n_thread, space, mcv, team) AlgorithmConfig.n_agent = n_agent self.action_converter = ActionConvertLegacy( SELF_TEAM_ASSUME=team, OPP_TEAM_ASSUME=(1-team),
# --- ReinforceAlgorithmFoundation.__init__ continues: action converter, shell
#     env wrapper, stage planner (policy resonance), heterogeneous type mapping,
#     HeteNet policy + PPO trainer + BatchTrajManager wiring, checkpoint loading,
#     optional config-on-the-fly and policy-matrix-testing buffers.
# NOTE(review): AlgorithmConfig above declares clip_param = 0.2 twice — a
# harmless duplicate class attribute, but one should be removed upstream. Also
# str_array_to_num shadows the builtin `str` as its loop variable.
OPP_NUM_ASSUME=GlobalConfig.ScenarioConfig.N_AGENT_EACH_TEAM[1-team] ) n_actions = len(self.action_converter.dictionary_args) # change obs format, e.g., converting dead agent obs into NaN self.shell_env = ShellEnvWrapper(n_agent, n_thread, space, mcv, self, AlgorithmConfig, GlobalConfig.ScenarioConfig, self.team) if self.ScenarioConfig.EntityOriented: rawob_dim = self.ScenarioConfig.obs_vec_length else: rawob_dim = space['obs_space']['obs_shape'] # self.StagePlanner, for policy resonance from .stage_planner import StagePlanner self.stage_planner = StagePlanner(mcv=mcv) # heterogeneous agent types agent_type_list = [a['type'] for a in GlobalConfig.ScenarioConfig.SubTaskConfig.agent_list] self.HeteAgentType = str_array_to_num(agent_type_list) hete_type = np.array(self.HeteAgentType)[self.ScenarioConfig.AGENT_ID_EACH_TEAM[team]] # initialize policy self.policy = HeteNet(rawob_dim=rawob_dim, n_action=n_actions, hete_type=hete_type, stage_planner=self.stage_planner) self.policy = self.policy.to(self.device) # initialize optimizer and trajectory (batch) manager from .ppo import PPO from .trajectory import BatchTrajManager self.trainer = PPO(self.policy, ppo_config=AlgorithmConfig, mcv=mcv) self.traj_manager = BatchTrajManager( n_env=n_thread, traj_limit=int(GlobalConfig.ScenarioConfig.MaxEpisodeStep), trainer_hook=self.trainer.train_on_traj) self.stage_planner.trainer = self.trainer # confirm that reward method is correct self.check_reward_type(AlgorithmConfig) # load checkpoints if needed self.load_model(AlgorithmConfig) # enable config_on_the_fly ability if AlgorithmConfig.ConfigOnTheFly: self._create_config_fly() if AlgorithmConfig.policy_matrix_testing: self.threads_test_reward_sum = np.zeros(shape=(n_thread,), dtype=float) # self.threads_test_reward = [] self.recent_test_rewards = [] self.recent_test_wins = [] self._unfi_frag_matrix_ = None self.recent_test_hete_gp_summary = [] self.current_hete_gp_summary = None from VISUALIZE.mcom import mcom self.mcv_matrix =
# --- mcv_matrix logger construction tail; then action_making(): unpacks
#     obs/avail_act/hete selections from StateRecall via the local itemgetter,
#     validates shapes, runs self.policy.act under torch.no_grad, and assembles
#     the trajectory frame fragment (keys wrapped in underscores must be aligned
#     to (n_thread, ...), per the in-code comment).
mcom( path='%s/logger/matrix/'%GlobalConfig.logdir, image_path='%s/matrix.jpg'%GlobalConfig.logdir, draw_mode='Img', tag='[ppo.py]' ) self.mcv_matrix.rec_init(color='r') def action_making(self, StateRecall, test_mode): # make sure hook is cleared assert ('_hook_' not in StateRecall) # read obs et.al. obs, threads_active_flag, avail_act, hete_pick, hete_type, gp_sel_summary, eprsn = \ itemgetter('obs', 'threads_active_flag', 'avail_act', '_hete_pick_', '_hete_type_', '_gp_pick_', '_EpRsn_')(StateRecall) # make sure obs shape is correct assert obs is not None, ('Make sure obs is ok') assert len(obs) == sum(threads_active_flag), ('check batch size') # make sure avail_act is correct if AlgorithmConfig.use_avail_act: assert avail_act is not None # policy resonance flag reshape eprsn = repeat_at(eprsn, -1, self.n_agent) thread_index = np.arange(self.n_thread)[threads_active_flag] # make decision with torch.no_grad(): action, value, action_log_prob = self.policy.act(obs=obs, test_mode=test_mode, avail_act=avail_act, hete_pick=hete_pick, hete_type=hete_type, gp_sel_summary=gp_sel_summary, thread_index=thread_index, eprsn=eprsn, ) # commit obs to buffer, vars named like _x_ are aligned, others are not!
# --- action_making tail: in training mode leaves commit_traj_frag's hook in
#     StateRecall['_hook_']; in policy-matrix test mode hooks
#     matrix_callback_special instead. The matrix callbacks accumulate per-thread
#     reward sums (masking skipped threads) and, when all threads are done, push
#     episode rewards / win flags (team_ranking == 0) / hete group summaries into
#     the recent_test_* buffers. interact_with_env delegates to shell_env; the
#     final method (interact_with_env_genuine) is cut off at the end of this
#     extract — its body is not visible here.
traj_framefrag = { "_SKIP_": ~threads_active_flag, "value": value, "hete_pick": hete_pick, "hete_type": hete_type, "gp_sel_summary": gp_sel_summary, "avail_act": avail_act, "actionLogProb": action_log_prob, "obs": obs, "action": action, } if avail_act is not None: traj_framefrag.update({'avail_act': avail_act}) # deal with rollout later when the reward is ready, leave a hook as a callback here if not test_mode: StateRecall['_hook_'] = self.commit_traj_frag(traj_framefrag, req_hook = True) else: if test_mode and AlgorithmConfig.policy_matrix_testing: StateRecall['_hook_'] = self.matrix_callback_special(traj_framefrag) return action.copy(), StateRecall ''' function to be called when reward is received ''' def matrix_callback_special(self, framefrag): assert self._unfi_frag_matrix_ is None self._unfi_frag_matrix_ = framefrag return self.matrix_callback_special_callback def matrix_callback_special_callback(self, new_frag): fi_frag = self._unfi_frag_matrix_ self._unfi_frag_matrix_ = None reward = new_frag['reward'].copy() done = new_frag['done'].copy() # self.threads_test_reward.append(reward) self.threads_test_reward_sum += reward * ~fi_frag['_SKIP_'] if not any(fi_frag['_SKIP_']): self.current_hete_gp_summary = fi_frag["gp_sel_summary"] if done.all(): self.recent_test_rewards.extend(self.threads_test_reward_sum) self.recent_test_wins.extend([q['team_ranking'][self.team]==0 for q in new_frag['info']]) # 0 means rank first self.recent_test_hete_gp_summary.extend(self.current_hete_gp_summary) self.threads_test_reward_sum *= 0 self.current_hete_gp_summary = None return None def interact_with_env(self, StateRecall): ''' Interfacing with marl, standard method that you must implement (redirect to shell_env to help with history rolling) ''' return self.shell_env.interact_with_env(StateRecall) def interact_with_env_genuine(self, StateRecall): ''' When shell_env finish the preparation, interact_with_env_genuine is called (Determine whether or not to do a training routinue) '''
    def save_model(self, update_cnt, info=None):
        '''
            Save the current policy/optimizer state to disk.

            Save is triggered when:
            1. update_cnt hits a scheduled value (50, 100, ...)
            2. `info` is given, indicating a hmp command
            3. a flag file is detected, indicating a save command from a human operator

            Args:
                update_cnt: trainer update counter, used to name the history copy.
                info: optional extra string appended to the history file name.
            Returns:
                str: path of the checkpoint copy written under history_cpt/.
        '''
        if not os.path.exists('%s/history_cpt/' % GlobalConfig.logdir):
            os.makedirs('%s/history_cpt/' % GlobalConfig.logdir)
        # dir 1: the "latest" checkpoint, overwritten in place every save
        pt_path = '%s/model.pt' % GlobalConfig.logdir
        print绿('saving model to %s' % pt_path)
        torch.save({
            'policy': self.policy.state_dict(),
            'optimizer': self.trainer.optimizer.state_dict(),
        }, pt_path)
        # dir 2: an immutable history copy tagged with update_cnt (and info, if any)
        info = str(update_cnt) if info is None else ''.join([str(update_cnt), '_', info])
        pt_path2 = '%s/history_cpt/model_%s.pt' % (GlobalConfig.logdir, info)
        shutil.copyfile(pt_path, pt_path2)
        # persist the checkpoint-league bookkeeping alongside the weights so that
        # load_model can later restore per-net (feature, static, ready_to_go) flags
        with open('%s/history_cpt/ckpg_info.pkl' % GlobalConfig.logdir, 'wb') as f:
            pickle.dump((self.policy.ckpg_info, self.policy.ckpg_input_cnt, [(n.feature, n.static, n.ready_to_go) for n in self.policy._nets_flat_placeholder_]), f)
        print绿('save_model fin')
        return pt_path2
    def load_model(self, AlgorithmConfig):
        '''
            Restore policy + optimizer from `<logdir>/model.pt` (or from
            AlgorithmConfig.load_specific_checkpoint), then restore the
            checkpoint-league state (ckpg_info and per-net flags) if
            `history_cpt/ckpg_info.pkl` exists.

            No-op unless AlgorithmConfig.load_checkpoint is set.
        '''
        if AlgorithmConfig.load_checkpoint:
            manual_dir = AlgorithmConfig.load_specific_checkpoint
            ckpt_dir = '%s/model.pt' % GlobalConfig.logdir if manual_dir == '' else '%s/%s' % (GlobalConfig.logdir, manual_dir)
            # map saved GPU tensors onto CPU when running on a CPU-only device
            cuda_n = 'cpu' if 'cpu' in self.device else self.device
            strict = True
            if not platform.system()=="Linux":
                assert ':' not in ckpt_dir, ('Windows OS does not allow : in file name')
            cpt = torch.load(ckpt_dir, map_location=cuda_n)
            self.policy.load_state_dict(cpt['policy'], strict=strict)
            # https://github.com/pytorch/pytorch/issues/3852
            self.trainer.optimizer.load_state_dict(cpt['optimizer'])
            print黄('loaded checkpoint:', ckpt_dir)
            if os.path.exists('%s/history_cpt/ckpg_info.pkl'%GlobalConfig.logdir):
                # CPU_Unpickler maps pickled torch storages to CPU regardless of origin
                with open('%s/history_cpt/ckpg_info.pkl'%GlobalConfig.logdir, 'rb') as f:
                    self.policy.ckpg_info, self.policy.ckpg_input_cnt, n_flags = CPU_Unpickler(f).load()
                # restore each placeholder net's league flags saved by save_model
                for (n, flags) in zip(self.policy._nets_flat_placeholder_, n_flags):
                    n.feature = flags[0]
                    n.static = flags[1]
                    n.ready_to_go = flags[2]
                    # frontier nets were initialized with feature = np.ones(...);
                    # non-frontier features reload historical weights.
                    # NOTE(review): scalar comparison against an array-valued
                    # feature — confirm intended semantics
                    if n.feature!=1:
                        n.load_state_dict(self.find_ckp(n.feature), strict=True)
                self.policy.ph_to_feature = _2tensor(np.array([n.feature for n in self.policy._nets_flat_placeholder_]))
                print黄('loaded ckpg_info')
            else:
                print('Warning, past policy missing !!')
note that keys starting with _ must have shape (self.n_thread, ...), details see fn:mask_paused_env() ''' # strip info, since it is not array items_to_pop = ['info', 'Latest-Obs'] for k in items_to_pop: if k in traj_framedata: traj_framedata.pop(k) # the agent-wise reward is supposed to be the same, so averge them if self.ScenarioConfig.RewardAsUnity: traj_framedata['reward'] = repeat_at(traj_framedata['reward'], insert_dim=-1, n_times=self.n_agent) # change the name of done to be recognised (by trajectory manager) traj_framedata['_DONE_'] = traj_framedata.pop('done') traj_framedata['_TOBS_'] = traj_framedata.pop( 'Terminal-Obs-Echo') if 'Terminal-Obs-Echo' in traj_framedata else None # mask out pause thread traj_framedata = self.mask_paused_env(traj_framedata) # put the frag into memory self.traj_manager.feed_traj_framedata(traj_framedata) def mask_paused_env(self, frag): running = ~frag['_SKIP_'] if running.all(): return frag for key in frag: if not key.startswith('_') and hasattr(frag[key], '__len__') and len(frag[key]) == self.n_thread: frag[key] = frag[key][running] return frag def _create_config_fly(self): logdir = GlobalConfig.logdir self.input_file_dir = '%s/cmd_io.txt' % logdir if not os.path.exists(self.input_file_dir): with open(self.input_file_dir, 'w+', encoding='utf8') as f: f.writelines(["# Write cmd at next line: ", ""]) def _config_on_fly(self): if not os.path.exists(self.input_file_dir): return with open(self.input_file_dir, 'r', encoding='utf8') as f: cmdlines = f.readlines() cmdlines_writeback = [] any_change = False for cmdline in cmdlines: if cmdline.startswith('#') or cmdline=="\n" or cmdline==" \n": cmdlines_writeback.append(cmdline) else: any_change = True try: print亮绿('[foundation.py] ------- executing: %s ------'%cmdline) exec(cmdline) cmdlines_writeback.append('# [execute successfully]\t'+cmdline) except: print红(traceback.format_exc()) cmdlines_writeback.append('# [execute failed]\t'+cmdline) if any_change: with open(self.input_file_dir, 
def random_group(random_select_fn, n_thread, hete_type, n_hete_types, n_group, selected_tps, testing):
    """Assign a policy group to every agent in every thread.

    For each thread, each agent *type* gets a group id: types listed in
    ``selected_tps[i]`` are pinned to group 0 (the frontier net); other
    types get ``random_select_fn(testing)``. The per-type choice is then
    broadcast onto the per-agent array via ``hete_type``.

    Args:
        random_select_fn: callable(testing) -> int group index.
        n_thread: number of parallel environment threads.
        hete_type: int array, shape (n_agent,) — each agent's type id.
        n_hete_types: number of distinct agent types.
        n_group: unused, kept for interface compatibility.
        selected_tps: per-thread index array of types forced to group 0.
        testing: forwarded to random_select_fn.
    Returns:
        (group_sel_arr, gp_sel_summary):
            group_sel_arr: int array (n_thread, n_agent) — group per agent.
            gp_sel_summary: int64 array (n_thread, n_hete_types) — group per type.
    """
    n_agent = hete_type.shape[-1]
    group_sel_arr = np.zeros(shape=(n_thread, n_agent), dtype=int)
    gp_sel_summary = []
    for i in range(n_thread):
        # per-type group choice; pre-selected (frontier) types get group 0.
        # NOTE: original looped over the name `type`, shadowing the builtin.
        group_assignment = np.array([
            random_select_fn(testing) if tp not in selected_tps[i] else 0
            for tp in range(n_hete_types)
        ])
        assert (group_assignment[selected_tps[i]] == 0).all()
        # freshly built per iteration and never mutated afterwards,
        # so the original deepcopy was redundant
        gp_sel_summary.append(group_assignment)
        # broadcast the per-type choice onto the per-agent array
        for ht, group in enumerate(group_assignment):
            group_sel_arr[i, hete_type == ht] = group
    return group_sel_arr, np.stack(gp_sel_summary).astype(np.int64)
n_thread=n_thread, hete_type=hete_type_list, n_hete_types=n_types, n_group=n_gp, selected_tps=selected_types, testing=testing) # group to net index n_tp = n_types get_placeholder = lambda type, group: group*n_tp + type hete_type_arr = repeat_at(hete_type_list, 0, n_thread) selected_nets = get_placeholder(type=hete_type_arr, group=group_sel_arr) return selected_nets, gp_sel_summary ================================================ FILE: PythonExample/hmp_minimal_modules/ALGORITHM/hete_league_onenet_fix/hete_net.py ================================================ import torch, math, copy, pickle import numpy as np import torch.nn as nn from config import GlobalConfig as cfg from torch.distributions.categorical import Categorical from UTIL.colorful import print亮绿 from UTIL.tensor_ops import Args2tensor_Return2numpy, Args2tensor, __hashn__, cat_last_dim, __hash__, one_hot_with_nan, repeat_at, scatter_righthand, gather_righthand, _2cpu2numpy, my_view from .foundation import AlgorithmConfig from ALGORITHM.common.pca import pca from ALGORITHM.common.net_manifest import weights_init from .net import Net, NetCentralCritic def popgetter(*items): def g(obj): return tuple(obj.pop(item) if item in obj else None for item in items) return g class no_context(): def __enter__(self): return None def __exit__(self, exc_type, exc_value, traceback): return False def _count_list_type(x): type_cnt = {} for xx in x: if xx not in type_cnt: type_cnt[xx] = 0 type_cnt[xx] += 1 return len(type_cnt) def _create_tensor_ph_or_fill_(ref, pt, offset, *args): n_threads, n_agents, mask = args if pt[offset] is None: pt[offset] = torch.zeros(size=(n_threads*n_agents, *ref.shape[2:]), device=ref.device, dtype=ref.dtype) pt[offset][mask] = ref.squeeze(0) def _tensor_expand_thread_dim_v2_(ref, pt, offset, *args): # undo dim collapse n_threads, n_agents = args v = pt[offset] pt[offset] = v.view(n_threads, n_agents, *v.shape[1:]) def dfs_create_and_fn(ref, pt, offset, fn, *args): ''' ref: target to sync pt: 
mutable list offset: mutable list index fn: function to be executed at leaf nodes args: anything needed ''' if ref is None: # there is nothing to sync, instead, do something at leaf node only ref = pt[offset] if ref == 'vph': pt[offset] = 'vph' return elif isinstance(ref, tuple) or isinstance(ref, list): if pt[offset] is None: pt[offset] = [None for item in ref] for i, item in enumerate(ref): dfs_create_and_fn(item, pt[offset], i, fn, *args) elif isinstance(ref, dict): if pt[offset] is None: pt[offset] = {key:None for key in ref} for key in ref: dfs_create_and_fn(ref[key], pt[offset], key, fn, *args) elif isinstance(ref, torch.Tensor): fn(ref, pt, offset, *args) else: assert False def _deal_single_in(x, mask_flatten): if isinstance(x, torch.Tensor): # collapse first two dims return x.view(-1, *x.shape[2:])[mask_flatten].unsqueeze(0) else: return x # todo: https://pytorch.org/tutorials/advanced/torch-script-parallelism.html?highlight=parallel def distribute_compute(fn_arr, mask_arr, **kwargs): """compute on each network Args: fn_arr : a list of forwarding networks mask_arr : mask of kwargs Returns: tuple tensors: the result of networks """ # python don't have pointers, # however, a list is a mutable type in python, that's what we need g_out = [None] n_threads = mask_arr[0].shape[0] n_agents = mask_arr[0].shape[1] # calculated result will be gathered into ret_tuple_gather ret_tuple_gather = [] # one by one we compute the result for fn, mask in zip(fn_arr, mask_arr): assert mask.dim()==2 mask_flatten = mask.flatten() agent_ids = torch.where(mask)[1] agent_ids = agent_ids.unsqueeze(0) # fake an extral dimension _kwargs = {key:_deal_single_in(kwargs[key], mask_flatten) for key in kwargs} with torch.no_grad() if fn.static else no_context() as gs: # no_grad is already declared outside in act mode ret_tuple = fn._act(agent_ids=agent_ids, **_kwargs) ret_tuple_gather.append(ret_tuple) # stack ret_tuple_gather into g_out for ret_tuple, fn, mask in zip(ret_tuple_gather, 
fn_arr, mask_arr): mask_flatten = mask.flatten() dfs_create_and_fn(ret_tuple, g_out, 0, _create_tensor_ph_or_fill_, n_threads, n_agents, mask_flatten) # reshape the tensor dfs_create_and_fn(None, g_out, 0, _tensor_expand_thread_dim_v2_, n_threads, n_agents) return tuple(g_out[0]) class HeteNet(nn.Module): def __init__(self, rawob_dim, n_action, hete_type, **kwargs): super().__init__() self.rawob_dim = rawob_dim self.n_action = n_action self.hete_type = hete_type self.n_hete_types = _count_list_type(self.hete_type) self.hete_n_net_placeholder = AlgorithmConfig.hete_n_net_placeholder self.use_normalization = AlgorithmConfig.use_normalization self.n_tp = self.n_hete_types self.n_gp = self.hete_n_net_placeholder self.n_agent_each_tp = [sum(self.hete_type==i) for i in range(self.n_hete_types)] self.n_agents = len(self.hete_type) # convertion between placeholder index and type-group index self.tpgp_2_ph = lambda type, group: group*self.n_tp + type self.ph_2_tpgp = lambda ph: (ph%self.n_hete_types, ph//self.n_hete_types) self.ph_2_gp = lambda ph: ph//self.n_hete_types # initialize net placeholders self._nets_flat_placeholder_ = torch.nn.ModuleList(modules=[ Net(rawob_dim, n_action, **kwargs) for _ in range( self.n_gp ) ]) # initialize critic self._critic_central = NetCentralCritic(rawob_dim, n_action, **kwargs) # reshape the handle of networks self.nets = [ [ self._nets_flat_placeholder_[gp] ] for gp in range(self.n_gp)] # the frontier nets self.frontend_nets = self.nets[0] # the static nets self.static_nets = self.nets[1:] # heterogeneous feature dimension self.hete_feature_dim = 1 # add flags to each nets for gp, n_arr in enumerate(self.nets): for _, n in enumerate(n_arr): ph_index = gp n.gp = gp # n.lr_div = self.n_agent_each_tp[tp] / self.n_agents if gp!=0: # lock static nets: the static nets are not loaded yet n.feature = np.zeros(self.hete_feature_dim) n.ready_to_go = False self.lock_net(ph_index) else: # unlock frontier nets: the frontier nets are ready n.feature = 
    def register_ckp(self, win_rate, cpk_path, mean_reward):
        '''
            Offer the current frontier policy as a new league checkpoint.

            The checkpoint is accepted only when its win rate strictly beats
            every previously registered one; the (possibly trimmed) checkpoint
            list is then re-distributed onto the static net slots.
        '''
        # deal with new checkpoint
        self.ckpg_input_cnt += 1
        # get previous win rates
        prev_win_rate = [self.ckpg_info[i]['win_rate'] for i in range(len(self.ckpg_info))]
        # if the winrate is not a breakthough, give up
        if len(prev_win_rate)>0 and win_rate <= max(prev_win_rate):
            return
        # optionally refuse zero-win-rate checkpoints
        if AlgorithmConfig.hete_exclude_zero_wr and win_rate==0:
            return
        # list the infomation about this checkpoint
        self.ckpg_info.append({
            'win_rate': win_rate,
            'mean_reward': mean_reward,
            'ckpg_cnt': self.ckpg_input_cnt,    # registration order, consulted by trim_ckp
            'cpk_path': cpk_path,
            'model': copy.deepcopy(self.frontend_nets[0].state_dict()),  # weight snapshot
            'feature': [ win_rate ],            # feature later exposed via ph_to_feature
        })
        # sort according to win rate
        self.ckpg_info.sort(key=lambda x:x['win_rate'])
        # remove a checkpoint that is too close to its neighbor
        self.trim_ckp()
        print('ckp register change!')
        print([self.ckpg_info[i]['win_rate'] for i in range(len(self.ckpg_info))])
        print([self.ckpg_info[i]['ckpg_cnt'] for i in range(len(self.ckpg_info))])
        # reload parameters: slot i of the static nets mirrors ckpg_info[i]
        for i, static_nets in enumerate(self.static_nets):
            # some net cannot be loaded with parameters yet, because ckpg_info has not collect enough samples
            if i >= len(self.ckpg_info):
                continue
            for _, net in enumerate(static_nets):
                # load parameters
                net.load_state_dict(self.ckpg_info[i]['model'], strict=True)
                # the net must be static
                assert net.static
                # now the net is ready
                net.ready_to_go = True
                net.feature = self.ckpg_info[i]['feature']
        # reload the net features
        self.ph_to_feature = torch.tensor(np.array([n.feature for n in self._nets_flat_placeholder_]), dtype=torch.float, device=cfg.device)
        print('parameters reloaded')
hete_pick=None, **kargs): # shape n_thread = hete_pick.shape[0] n_agents = hete_pick.shape[1] # pop items from kargs gp_sel_summary, thread_indices, hete_type = popgetter('gp_sel_summary', 'thread_index', 'hete_type')(kargs) # get ph_feature # _012345 = torch.arange(self.n_tp, device=kargs['obs'].device, dtype=torch.int64) ph_sel = gp_sel_summary # *self.n_tp + repeat_at(_012345, 0, n_thread) # group * self.n_tp + tp ph_feature = self.ph_to_feature[ph_sel] # my_view(, [0, -1]) ph_feature_cp_raw = repeat_at(ph_feature, 1, n_agents) agent2tp_onehot = torch.nn.functional.one_hot(hete_type.long(), num_classes=self.n_tp).unsqueeze(-1) type_gp_mat = repeat_at(gp_sel_summary, -1, self.n_tp) same_gp = (type_gp_mat == type_gp_mat.transpose(-1,-2)).long() agent_self_type_mask2 = gather_righthand(same_gp, index=hete_type, check=False).unsqueeze(-1) assert ph_feature_cp_raw.dim() == 4 ph_feature_cp2 = (ph_feature_cp_raw*(1-agent_self_type_mask2) + agent_self_type_mask2) ph_feature_cp_obs_ = torch.cat((ph_feature_cp2, agent2tp_onehot), 2) ph_feature_cp_critic_ = torch.cat((ph_feature_cp_raw, agent2tp_onehot), 2) ph_feature_cp_obs = my_view(ph_feature_cp_obs_, [0,0,-1]) # ph_feature_cp_obs.shape = torch.Size([n_thread=16, n_agents=10, core_dim=12]) ph_feature_cp_critic = my_view(ph_feature_cp_critic_, [0,0,-1]) # ph_feature_cp_obs.shape = torch.Size([n_thread=16, n_agents=10, core_dim=12]) # add ph_feature to kwargs kargs['obs_hfeature'] = ph_feature_cp_obs # get a manifest of running nets # invo_hete_types = [i for i in range(self.n_tp*self.n_gp) if (i in hete_pick)] invo_gps = [i for i in range(self.n_gp) if (i in gp_sel_summary)] running_nets = [self.nets[gp][0] for gp in invo_gps] # make sure all nets under testing is frontend / frontier if 'test_mode' in kargs and kargs['test_mode']: for net in running_nets: if not AlgorithmConfig.policy_matrix_testing: assert not net.static # run actor policy networks actor_result = distribute_compute( fn_arr = running_nets, mask_arr = 
[(self.ph_2_gp(hete_pick) == gp) for gp in invo_gps], **kargs ) # run critic network kargs.pop('obs_hfeature') # replace h_feature kargs['obs_hfeature_critic'] = ph_feature_cp_critic critic_result = self._critic_central.estimate_state(**kargs) # combine actor_result and critic_result actor_result = list(actor_result) for i, item in enumerate(actor_result): if item=='vph': actor_result[i] = critic_result # done ! return tuple(actor_result) @Args2tensor_Return2numpy def act(self, **kargs): return self.exe(**kargs) @Args2tensor def evaluate_actions(self, **kargs): return self.exe(**kargs, eval_mode=True) def trim_ckp(self): RemoveNew = True max_static_gp = self.n_gp - 1 if len(self.ckpg_info) <= max_static_gp: return else: assert len(self.ckpg_info) == max_static_gp+1 # find two ckp with nearest winrate_list = np.array([self.ckpg_info[i]['win_rate'] for i in range(len(self.ckpg_info))]) winrate_list = np.abs(winrate_list[1:] - winrate_list[:-1]) index = np.argmin(winrate_list) old_index = index new_index = index + 1 if self.ckpg_info[new_index]['ckpg_cnt'] < self.ckpg_info[old_index]['ckpg_cnt']: new_index, old_index = old_index, new_index if RemoveNew: self.ckpg_info.pop(new_index) else: self.ckpg_info.pop(old_index) assert len(self.ckpg_info) == max_static_gp pass ================================================ FILE: PythonExample/hmp_minimal_modules/ALGORITHM/hete_league_onenet_fix/net.py ================================================ import torch, math, copy import numpy as np import torch.nn as nn from torch.distributions.categorical import Categorical from UTIL.colorful import print亮绿 from UTIL.tensor_ops import Args2tensor_Return2numpy, Args2tensor, __hashn__, my_view from UTIL.tensor_ops import pt_inf from UTIL.exp_helper import changed from .ccategorical import CCategorical from .foundation import AlgorithmConfig from ALGORITHM.common.attention import SimpleAttention from ALGORITHM.common.norm import DynamicNormFix from ALGORITHM.common.net_manifest import 
weights_init from ALGORITHM.common.hyper_net import HyperNet """ network initialize """ class Net(nn.Module): def __init__(self, rawob_dim, n_action, **kwargs): super().__init__() self.update_cnt = nn.Parameter( torch.zeros(1, requires_grad=False, dtype=torch.long), requires_grad=False) self.use_normalization = AlgorithmConfig.use_normalization self.use_policy_resonance = AlgorithmConfig.policy_resonance self.n_action = n_action if self.use_policy_resonance: self.ccategorical = CCategorical(kwargs['stage_planner']) self.is_resonance_active = lambda: kwargs['stage_planner'].is_resonance_active() h_dim = AlgorithmConfig.net_hdim # observation normalization if self.use_normalization: self._batch_norm = DynamicNormFix(rawob_dim, only_for_last_dim=True, exclude_one_hot=True, exclude_nan=True) n_entity = AlgorithmConfig.n_entity_placeholder # # # # # # # # # # actor-critic share # # # # # # # # # # # # self.obs_encoder = nn.Sequential(nn.Linear(rawob_dim, h_dim), nn.ReLU(inplace=True), nn.Linear(h_dim, h_dim)) self.attention_layer = SimpleAttention(h_dim=h_dim) # # # # # # # # # # actor # # # # # # # # # # # # _size = n_entity * h_dim self.hyper_net = HyperNet(embed_dim=h_dim, hyper_input_dim=6, x_input_dim=_size) self.policy_head = nn.Sequential( nn.Linear(h_dim, h_dim), nn.ReLU(inplace=True), nn.Linear(h_dim, self.n_action)) self.is_recurrent = False self.apply(weights_init) return def act(self, *args, **kargs): return self._act(*args, **kargs) def evaluate_actions(self, *args, **kargs): return self._act(*args, **kargs, eval_mode=True) def _act(self, obs=None, test_mode=None, eval_mode=False, eval_actions=None, avail_act=None, agent_ids=None, eprsn=None, obs_hfeature=None): assert (self.ready_to_go) mask_dead = torch.isnan(obs).any(-1) # find dead agents # if not (obs[..., -3+self.tp][~mask_dead] == -1).all().item(): # assert False if self.static: assert self.gp >=1 # if not test_mode: assert not self.ready_to_go eval_act = eval_actions if eval_mode else None others = 
    def _logit2act_rsn(self, logits_agent_cluster, eval_mode, greedy, eval_actions=None, avail_act=None, eprsn=None):
        '''
            Convert per-agent logits into actions via the policy-resonance
            (CCategorical) sampler.

            eval_mode: re-evaluate the given `eval_actions` instead of sampling.
            greedy:    take argmax over action probs (test mode / static nets).
            avail_act: optional action mask; unavailable actions get -inf logits.
            eprsn:     resonance flags forwarded to the CCategorical sampler.
        '''
        if avail_act is not None:
            logits_agent_cluster = torch.where(avail_act>0, logits_agent_cluster, -pt_inf())
        act_dist = self.ccategorical.feed_logits(logits_agent_cluster)
        if not greedy:
            # sample fresh actions, unless we are re-evaluating old ones
            act = self.ccategorical.sample(act_dist, eprsn) if not eval_mode else eval_actions
        else:
            act = torch.argmax(act_dist.probs, axis=2)
        # the policy gradient loss will feedback from here
        actLogProbs = self._get_act_log_probs(act_dist, act)
        # entropy is only needed when evaluating actions for the PPO loss
        distEntropy = act_dist.entropy().mean(-1) if eval_mode else None
        return act, actLogProbs, distEntropy, act_dist.probs
Categorical(logits = logits_agent_cluster) if not greedy: act = act_dist.sample() if not eval_mode else eval_actions else: act = torch.argmax(act_dist.probs, axis=2) actLogProbs = self._get_act_log_probs(act_dist, act) # the policy gradient loss will feedback from here # sum up the log prob of all agents distEntropy = act_dist.entropy().mean(-1) if eval_mode else None return act, actLogProbs, distEntropy, act_dist.probs @staticmethod def _get_act_log_probs(distribution, action): return distribution.log_prob(action.squeeze(-1)).unsqueeze(-1) class NetCentralCritic(nn.Module): def __init__(self, rawob_dim, n_action, **kwargs): super().__init__() self.update_cnt = nn.Parameter( torch.zeros(1, requires_grad=False, dtype=torch.long), requires_grad=False) self.use_normalization = AlgorithmConfig.use_normalization self.use_policy_resonance = AlgorithmConfig.policy_resonance self.n_action = n_action if self.use_policy_resonance: self.ccategorical = CCategorical(kwargs['stage_planner']) self.is_resonance_active = lambda: kwargs['stage_planner'].is_resonance_active() h_dim = AlgorithmConfig.net_hdim # observation normalization if self.use_normalization: self._batch_norm = DynamicNormFix(rawob_dim, only_for_last_dim=True, exclude_one_hot=True, exclude_nan=True) n_entity = AlgorithmConfig.n_entity_placeholder # # # # # # # # # # actor-critic share # # # # # # # # # # # # self.obs_encoder = nn.Sequential(nn.Linear(rawob_dim, h_dim), nn.ReLU(inplace=True), nn.Linear(h_dim, h_dim)) self.attention_layer = SimpleAttention(h_dim=h_dim) # # # # # # # # # # critic # # # # # # # # # # # # _size = n_entity * h_dim self.hyper_net = HyperNet(embed_dim=h_dim, hyper_input_dim=6, x_input_dim=_size) self.ct_encoder = nn.Sequential(nn.Linear(h_dim, h_dim), nn.ReLU(inplace=True), nn.Linear(h_dim, h_dim)) self.ct_attention_layer = SimpleAttention(h_dim=h_dim) self.get_value = nn.Sequential(nn.Linear(h_dim, h_dim), nn.ReLU(inplace=True),nn.Linear(h_dim, 1)) self.is_recurrent = False 
self.apply(weights_init) return def estimate_state(self, obs=None, test_mode=None, eval_mode=False, eval_actions=None, avail_act=None, agent_ids=None, eprsn=None, obs_hfeature_critic=None): if self.use_normalization: if torch.isnan(obs).all(): pass else: obs = self._batch_norm(obs, freeze=(eval_mode or test_mode)) obs_hfeature_norm = obs_hfeature_critic mask_dead = torch.isnan(obs).any(-1) obs = torch.nan_to_num_(obs, 0) # replace dead agents' obs, from NaN to 0 # # # # # # # # # # actor-critic share # # # # # # # # # # # # baec = self.obs_encoder(obs) baec = self.attention_layer(k=baec,q=baec,v=baec, mask=mask_dead) # # # # # # # # # # critic # # # # # # # # # # # # ct_bac = my_view(baec,[0,0,-1]) ct_bac_hn = self.hyper_net(ct_bac, hyper_x=obs_hfeature_norm) ct_bac = self.ct_encoder(ct_bac_hn) ct_bac = self.ct_attention_layer(k=ct_bac,q=ct_bac,v=ct_bac) value = self.get_value(ct_bac) return value ================================================ FILE: PythonExample/hmp_minimal_modules/ALGORITHM/hete_league_onenet_fix/ppo.py ================================================ import torch, math, traceback import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import numpy as np from random import randint, sample from torch.utils.data.sampler import BatchSampler, SubsetRandomSampler from UTIL.colorful import * from UTIL.tensor_ops import _2tensor, __hash__, __hashn__ from config import GlobalConfig as cfg from UTIL.gpu_share import GpuShareUnit from .ppo_sampler import TrajPoolSampler from VISUALIZE.mcom import mcom class PPO(): def __init__(self, policy_and_critic, ppo_config, mcv=None): self.policy_and_critic = policy_and_critic self.clip_param = ppo_config.clip_param self.ppo_epoch = ppo_config.ppo_epoch self.use_avail_act = ppo_config.ppo_epoch self.n_pieces_batch_division = ppo_config.n_pieces_batch_division self.value_loss_coef = ppo_config.value_loss_coef self.entropy_coef = ppo_config.entropy_coef self.max_grad_norm = 
ppo_config.max_grad_norm self.add_prob_loss = ppo_config.add_prob_loss self.prevent_batchsize_oom = ppo_config.prevent_batchsize_oom # self.freeze_body = ppo_config.freeze_body self.lr = ppo_config.lr self.all_parameter = list(policy_and_critic.named_parameters()) self.parameter = [p for p_name, p in self.all_parameter] # 535 # set learning rate differently? if ppo_config.type_agent_diff_lr: others_parameters = [v for k,v in self.all_parameter if '_nets_flat' not in k] adam_lr_list = [ {'params': list(n.parameters()), 'lr':self.lr*n.lr_div} for n in policy_and_critic._nets_flat_placeholder_ ] + [{'params': list(policy_and_critic._critic_central.parameters()), 'lr':self.lr}] # 33*3*5 + 40 = 535 assert sum([len(list(d['params'])) for d in adam_lr_list]) == len(self.all_parameter) self.optimizer = optim.Adam(adam_lr_list, lr=self.lr) else: self.optimizer = optim.Adam(self.parameter, lr=self.lr) self.g_update_delayer = 0 self.g_initial_value_loss = 0 # 轮流训练式 self.mcv = mcv self.ppo_update_cnt = 0 self.batch_size_reminder = True self.trivial_dict = {} assert self.n_pieces_batch_division == 1 self.gpu_share_unit = GpuShareUnit(cfg.device, gpu_party=cfg.gpu_party) self.mcv2 = mcom( path='%s/logger/ppo/'%cfg.logdir, image_path='%s/detail_reward.jpg'%cfg.logdir, rapid_flush=True, draw_mode=cfg.draw_mode, tag='[ppo.py]' ) self.mcv2.rec_init(color='g') def freeze_body(self): self.freeze_body = True self.at_parameter = [p for p_name, p in self.all_parameter if 'AT_policy_head' in p_name] self.at_optimizer = optim.Adam(self.at_parameter, lr=self.lr) self.ct_parameter = [p for p_name, p in self.all_parameter if 'CT_' in p_name] self.ct_optimizer = optim.Adam(self.ct_parameter, lr=self.lr*10.0) #(self.lr) print('change train object') def train_on_traj(self, traj_pool, task): while True: try: with self.gpu_share_unit: self.train_on_traj_(traj_pool, task) break # 运行到这说明显存充足 except RuntimeError as err: print(traceback.format_exc()) if self.prevent_batchsize_oom: # in some cases, 
reversing MaxSampleNum a single time is not enough if TrajPoolSampler.MaxSampleNum[-1] < 0: TrajPoolSampler.MaxSampleNum.pop(-1) assert TrajPoolSampler.MaxSampleNum[-1] > 0 TrajPoolSampler.MaxSampleNum[-1] = -1 print亮红('Insufficient gpu memory, using previous sample size !') else: assert False torch.cuda.empty_cache() def log_reward_rich(self, traj_pool, mcv2): tags = {} for traj in traj_pool: traj.reward_sum = sum(traj.reward[:,0]) gp_list = traj.gp_sel_summary[0] if (gp_list==0).all(): tag = 'frontend' if tag not in tags: tags[tag] = [] tags[tag].append(traj.reward_sum) else: gp = max(gp_list) wr = self.policy_and_critic.ckpg_info[gp-1]['win_rate'] tp = np.argmax(gp_list) tag = 'tp:%d wr:%.2f'%(tp, wr) if tag not in tags: tags[tag] = [] tags[tag].append(traj.reward_sum) tags = dict(sorted(tags.items())) for k in tags: mcv2.rec(np.array(tags[k]).mean(), k) mcv2.rec_show() def train_on_traj_(self, traj_pool, task): self.log_reward_rich(traj_pool, self.mcv2) ppo_valid_percent_list = [] sampler = TrajPoolSampler(n_div=1, traj_pool=traj_pool, flag=task, prevent_batchsize_oom=self.prevent_batchsize_oom, mcv=self.mcv) # before_training_hash = [__hashn__(t.parameters()) for t in (self.policy_and_critic._nets_flat_placeholder_)] for e in range(self.ppo_epoch): sample_iter = sampler.reset_and_get_iter() self.optimizer.zero_grad() # ! get traj fragment sample = next(sample_iter) # ! 
build graph, then update network loss_final, others = self.establish_pytorch_graph(task, sample, e) loss_final = loss_final*0.5 if e==0: print('[PPO.py] Memory Allocated %.2f GB'%(torch.cuda.memory_allocated()/1073741824)) loss_final.backward() # log ppo_valid_percent_list.append(others.pop('PPO valid percent').item()) self.log_trivial(dictionary=others); others = None nn.utils.clip_grad_norm_(self.parameter, self.max_grad_norm) self.optimizer.step() if ppo_valid_percent_list[-1] < 0.70: print亮黄('policy change too much, epoch terminate early'); break pass # finish all epoch update print亮黄(np.array(ppo_valid_percent_list)) self.log_trivial_finalize() net_updated = [any([p.grad is not None for p in t.parameters()]) for t in (self.policy_and_critic._nets_flat_placeholder_)] self.optimizer.zero_grad(set_to_none=True) self.ppo_update_cnt += 1 for updated, net in zip(net_updated, self.policy_and_critic._nets_flat_placeholder_): if updated: net.update_cnt.data[0] = self.ppo_update_cnt self.policy_and_critic.on_update(self.ppo_update_cnt) torch.cuda.empty_cache() return self.ppo_update_cnt def freeze_body(self): assert False, "function forbidden" self.freeze_body = True self.parameter_pv = [p_name for p_name, p in self.all_parameter if not any(p_name.startswith(kw) for kw in ('obs_encoder', 'attention_layer'))] self.parameter = [p for p_name, p in self.all_parameter if not any(p_name.startswith(kw) for kw in ('obs_encoder', 'attention_layer'))] self.optimizer = optim.Adam(self.parameter, lr=self.lr) print('change train object') def log_trivial(self, dictionary): for key in dictionary: if key not in self.trivial_dict: self.trivial_dict[key] = [] item = dictionary[key].item() if hasattr(dictionary[key], 'item') else dictionary[key] self.trivial_dict[key].append(item) def log_trivial_finalize(self, print=True): for key in self.trivial_dict: self.trivial_dict[key] = np.array(self.trivial_dict[key]) print_buf = ['[ppo.py] '] for key in self.trivial_dict: self.trivial_dict[key] 
= self.trivial_dict[key].mean() print_buf.append(' %s:%.3f, '%(key, self.trivial_dict[key])) if self.mcv is not None: self.mcv.rec(self.trivial_dict[key], key) if print: print紫(''.join(print_buf)) if self.mcv is not None: self.mcv.rec_show() self.trivial_dict = {} def establish_pytorch_graph(self, flag, sample, n): obs = _2tensor(sample['obs']) advantage = _2tensor(sample['advantage']) action = _2tensor(sample['action']) oldPi_actionLogProb = _2tensor(sample['actionLogProb']) real_value = _2tensor(sample['return']) hete_pick = _2tensor(sample['hete_pick']) hete_type = _2tensor(sample['hete_type']) gp_sel_summary = _2tensor(sample['gp_sel_summary']) avail_act = _2tensor(sample['avail_act']) if 'avail_act' in sample else None # batchsize = advantage.shape[0]#; print亮紫(batchsize) batch_agent_size = advantage.shape[0]*advantage.shape[1] assert flag == 'train' newPi_value, newPi_actionLogProb, entropy, probs, others = \ self.policy_and_critic.evaluate_actions( obs=obs, eval_actions=action, test_mode=False, avail_act=avail_act, hete_pick=hete_pick, hete_type=hete_type, gp_sel_summary=gp_sel_summary) entropy_loss = entropy.mean() n_actions = probs.shape[-1] if self.add_prob_loss: assert n_actions <= 15 # penalty_prob_line = (1/n_actions)*0.12 probs_loss = (penalty_prob_line - torch.clamp(probs, min=0, max=penalty_prob_line)).mean() if not self.add_prob_loss: probs_loss = torch.zeros_like(probs_loss) # dual clip ppo core E = newPi_actionLogProb - oldPi_actionLogProb E_clip = torch.zeros_like(E) E_clip = torch.where(advantage > 0, torch.clamp(E, max=np.log(1.0+self.clip_param)), E_clip) E_clip = torch.where(advantage < 0, torch.clamp(E, min=np.log(1.0-self.clip_param), max=np.log(5) ), E_clip) ratio = torch.exp(E_clip) policy_loss = -(ratio*advantage).mean() # add all loses value_loss = 0.5 * F.mse_loss(real_value, newPi_value) AT_net_loss = policy_loss - entropy_loss*self.entropy_coef # + probs_loss*20 CT_net_loss = value_loss * 1.0 # AE_new_loss = ae_loss * 1.0 loss_final 
= AT_net_loss + CT_net_loss # + AE_new_loss ppo_valid_percent = ((E_clip == E).int().sum()/batch_agent_size) nz_mask = real_value!=0 value_loss_abs = (real_value[nz_mask] - newPi_value[nz_mask]).abs().mean() others = { 'Value loss Abs': value_loss_abs, 'PPO valid percent': ppo_valid_percent, 'CT_net_loss': CT_net_loss, 'AT_net_loss': AT_net_loss, } return loss_final, others ================================================ FILE: PythonExample/hmp_minimal_modules/ALGORITHM/hete_league_onenet_fix/ppo_sampler.py ================================================ import torch, math, traceback import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import numpy as np from random import randint, sample from torch.utils.data.sampler import BatchSampler, SubsetRandomSampler from UTIL.colorful import * from UTIL.tensor_ops import _2tensor, __hash__, repeat_at from config import GlobalConfig as cfg from UTIL.gpu_share import GpuShareUnit class TrajPoolSampler(): def __init__(self, n_div, traj_pool, flag, prevent_batchsize_oom=False, mcv=None): self.n_pieces_batch_division = n_div self.prevent_batchsize_oom = prevent_batchsize_oom self.mcv = mcv if self.prevent_batchsize_oom: assert self.n_pieces_batch_division==1, ('?') self.num_batch = None self.container = {} self.warned = False assert flag=='train' req_dict = ['hete_type', 'gp_sel_summary', 'avail_act', 'obs', 'action', 'actionLogProb', 'return', 'reward', 'hete_pick', 'value'] req_dict_rename = ['hete_type', 'gp_sel_summary', 'avail_act', 'obs', 'action', 'actionLogProb', 'return', 'reward', 'hete_pick', 'state_value'] return_rename = "return" value_rename = "state_value" advantage_rename = "advantage" # replace 'obs' to 'obs > xxxx' for key_index, key in enumerate(req_dict): key_name = req_dict[key_index] key_rename = req_dict_rename[key_index] if not hasattr(traj_pool[0], key_name): real_key_list = [real_key for real_key in traj_pool[0].__dict__ if (key_name+'>' in real_key)] assert 
len(real_key_list) > 0, ('check variable provided!', key,key_index) for real_key in real_key_list: mainkey, subkey = real_key.split('>') req_dict.append(real_key) req_dict_rename.append(key_rename+'>'+subkey) self.big_batch_size = -1 # vector should have same length, check it! # load traj into a 'container' for key_index, key in enumerate(req_dict): key_name = req_dict[key_index] key_rename = req_dict_rename[key_index] if not hasattr(traj_pool[0], key_name): continue set_item = np.concatenate([getattr(traj, key_name) for traj in traj_pool], axis=0) if not (self.big_batch_size==set_item.shape[0] or (self.big_batch_size<0)): print('error') assert self.big_batch_size==set_item.shape[0] or (self.big_batch_size<0), (key,key_index) self.big_batch_size = set_item.shape[0] self.container[key_rename] = set_item # 指针赋值 # normalize advantage inside the batch self.container[advantage_rename] = self.container[return_rename] - self.container[value_rename] self.container[advantage_rename] = ( self.container[advantage_rename] - self.container[advantage_rename].mean() ) / (self.container[advantage_rename].std() + 1e-5) # size of minibatch for each agent self.mini_batch_size = math.ceil(self.big_batch_size / self.n_pieces_batch_division) def __len__(self): return self.n_pieces_batch_division def determine_max_n_sample(self): assert self.prevent_batchsize_oom if not hasattr(TrajPoolSampler,'MaxSampleNum'): # initialization TrajPoolSampler.MaxSampleNum = [int(self.big_batch_size*(i+1)/50) for i in range(50)] max_n_sample = self.big_batch_size elif TrajPoolSampler.MaxSampleNum[-1] > 0: # meaning that oom never happen, at least not yet # only update when the batch size increases if self.big_batch_size > TrajPoolSampler.MaxSampleNum[-1]: TrajPoolSampler.MaxSampleNum.append(self.big_batch_size) max_n_sample = self.big_batch_size else: # meaning that oom already happened, choose TrajPoolSampler.MaxSampleNum[-2] to be the limit assert TrajPoolSampler.MaxSampleNum[-2] > 0 max_n_sample = 
TrajPoolSampler.MaxSampleNum[-2] return max_n_sample def reset_and_get_iter(self): if not self.prevent_batchsize_oom: self.sampler = BatchSampler(SubsetRandomSampler(range(self.big_batch_size)), self.mini_batch_size, drop_last=False) else: max_n_sample = self.determine_max_n_sample() n_sample = min(self.big_batch_size, max_n_sample) if not hasattr(self,'reminded'): self.reminded = True drop_percent = (self.big_batch_size-n_sample)/self.big_batch_size*100 if self.mcv is not None: self.mcv.rec(drop_percent, 'drop percent') if drop_percent > 20: print_ = print亮红 print_('droping %.1f percent samples..'%(drop_percent)) assert False, "GPU OOM!" else: print_ = print print_('droping %.1f percent samples..'%(drop_percent)) self.sampler = BatchSampler(SubsetRandomSampler(range(n_sample)), n_sample, drop_last=False) for indices in self.sampler: selected = {} for key in self.container: selected[key] = self.container[key][indices] for key in [key for key in selected if '>' in key]: # 重新把子母键值组合成二重字典 mainkey, subkey = key.split('>') if not mainkey in selected: selected[mainkey] = {} selected[mainkey][subkey] = selected[key] del selected[key] yield selected ================================================ FILE: PythonExample/hmp_minimal_modules/ALGORITHM/hete_league_onenet_fix/shell_env.py ================================================ import numpy as np from config import GlobalConfig from UTIL.colorful import * from UTIL.tensor_ops import my_view, __hash__, repeat_at, gather_righthand from MISSION.uhmap.actset_lookup import encode_action_as_digits from .foundation import AlgorithmConfig from .cython_func import roll_hisory from .hete_assignment import select_nets_for_shellenv class ShellEnvConfig: add_avail_act = False class ActionConvertLegacy(): def __init__(self, SELF_TEAM_ASSUME, OPP_TEAM_ASSUME, OPP_NUM_ASSUME) -> None: self.SELF_TEAM_ASSUME = SELF_TEAM_ASSUME self.OPP_TEAM_ASSUME = OPP_TEAM_ASSUME self.OPP_NUM_ASSUME = OPP_NUM_ASSUME # (main_cmd, sub_cmd, x=None, y=None, 
z=None, UID=None, T=None, T_index=None) self.dictionary_args = [ ('N/A', 'N/A', None, None, None, None, None, None), # 0 ('Idle', 'DynamicGuard', None, None, None, None, None, None), # 1 ('Idle', 'StaticAlert', None, None, None, None, None, None), # 2 ('Idle', 'AsFarAsPossible', None, None, None, None, None, None), # 4 ('Idle', 'StayWhenTargetInRange', None, None, None, None, None, None), # 5 ('SpecificMoving', 'Dir+X', None, None, None, None, None, None), # 7 ('SpecificMoving', 'Dir+Y', None, None, None, None, None, None), # 8 ('SpecificMoving', 'Dir-X', None, None, None, None, None, None), # 9 ('SpecificMoving', 'Dir-Y', None, None, None, None, None, None), # 10 ] for i in range(self.OPP_NUM_ASSUME): self.dictionary_args.append( ('SpecificAttacking', 'N/A', None, None, None, None, OPP_TEAM_ASSUME, i) ) def convert_act_arr(self, type, a): if type == 'RLA_UAV_Support': args = self.dictionary_args[a] # override wrong actions if args[0] == 'SpecificAttacking': return encode_action_as_digits('N/A', 'N/A', None, None, None, None, None, None) # override incorrect actions if args[0] == 'Idle': return encode_action_as_digits('Idle', 'StaticAlert', None, None, None, None, None, None) return encode_action_as_digits(*args) else: return encode_action_as_digits(*self.dictionary_args[a]) def get_tp_avail_act(self, type): DISABLE = 0 ENABLE = 1 n_act = len(self.dictionary_args) ret = np.zeros(n_act) + ENABLE for i in range(n_act): args = self.dictionary_args[i] # for all kind of agents if args[0] == 'PatrolMoving': ret[i] = DISABLE if type == 'RLA_UAV_Support': if args[0] == 'PatrolMoving': ret[i] = DISABLE if args[0] == 'SpecificAttacking': ret[i] = DISABLE if args[0] == 'Idle': ret[i] = DISABLE if args[1] == 'StaticAlert': ret[i] = ENABLE return ret def confirm_parameters_are_correct(self, team, agent_num, opp_agent_num): assert team == self.SELF_TEAM_ASSUME assert self.SELF_TEAM_ASSUME + self.OPP_TEAM_ASSUME == 1 assert self.SELF_TEAM_ASSUME + self.OPP_TEAM_ASSUME == 1 assert 
opp_agent_num == self.OPP_NUM_ASSUME def count_list_type(x): type_cnt = {} for xx in x: if xx not in type_cnt: type_cnt[xx] = 0 type_cnt[xx] += 1 return len(type_cnt) class ShellEnvWrapper(object): def __init__(self, n_agent, n_thread, space, mcv, rl_functional, alg_config, ScenarioConfig, team): self.n_agent = n_agent self.n_thread = n_thread self.team = team self.space = space self.mcv = mcv self.rl_functional = rl_functional if GlobalConfig.ScenarioConfig.EntityOriented: self.core_dim = GlobalConfig.ScenarioConfig.obs_vec_length else: self.core_dim = space['obs_space']['obs_shape'] self.n_entity_placeholder = alg_config.n_entity_placeholder # whether to use avail_act to block forbiden actions self.AvailActProvided = False if hasattr(ScenarioConfig, 'AvailActProvided'): self.AvailActProvided = ScenarioConfig.AvailActProvided self.action_converter = ActionConvertLegacy( SELF_TEAM_ASSUME=team, OPP_TEAM_ASSUME=(1-team), OPP_NUM_ASSUME=GlobalConfig.ScenarioConfig.N_AGENT_EACH_TEAM[1-team] ) # heterogeneous agent types agent_type_list = [a['type'] for a in GlobalConfig.ScenarioConfig.SubTaskConfig.agent_list] opp_type_list = [a['type'] for a in GlobalConfig.ScenarioConfig.SubTaskConfig.agent_list if a['team']!=self.team] self_type_list = [a['type'] for a in GlobalConfig.ScenarioConfig.SubTaskConfig.agent_list if a['team']==self.team] def str_array_to_num(str_arr): out_arr = [] buffer = {} for str in str_arr: if str not in buffer: buffer[str] = len(buffer) out_arr.append(buffer[str]) return out_arr self.HeteAgentType = str_array_to_num(agent_type_list) self.hete_type = np.array(self.HeteAgentType)[GlobalConfig.ScenarioConfig.AGENT_ID_EACH_TEAM[team]] self.n_hete_types = count_list_type(self.hete_type) # check parameters assert self.n_agent == len(self_type_list) self.action_converter.confirm_parameters_are_correct(team, self.n_agent, len(opp_type_list)) self.patience = 2000 self.epsiode_cnt = 0 def cold_start_warmup(self, StateRecall): self.agent_uid = 
GlobalConfig.ScenarioConfig.AGENT_ID_EACH_TEAM[self.team] self.agent_type = [agent_meta['type'] for agent_meta in StateRecall['Latest-Team-Info'][0]['dataArr'] if agent_meta['uId'] in self.agent_uid] if ShellEnvConfig.add_avail_act: self.avail_act = np.stack(tuple(self.action_converter.get_tp_avail_act(tp) for tp in self.agent_type)) self.avail_act = repeat_at(self.avail_act, insert_dim=0, n_times=self.n_thread) def interact_with_env(self, StateRecall): # warm up at first execution if not hasattr(self, 'agent_type'): self.cold_start_warmup(StateRecall) # action init to: -1 act = np.zeros(shape=(self.n_thread, self.n_agent), dtype=np.int) - 1 # read and reshape observation obs = StateRecall['Latest-Obs'] obs = my_view(obs,[0, 0, -1, self.core_dim]) # mask out invalid observation with NaN obs[(obs==0).all(-1)] = np.nan # stopped env mask P = StateRecall['ENV-PAUSE'] # running env mask R = ~P # reset env mask RST = StateRecall['Env-Suffered-Reset'] # when needed, train! if not StateRecall['Test-Flag']: self.rl_functional.train() # if true: just experienced full reset on all episode, this is the first step of all env threads if RST.all(): if AlgorithmConfig.allow_fast_test and GlobalConfig.test_only and (self.epsiode_cnt > GlobalConfig.report_reward_interval): import sys sys.exit(0) self.epsiode_cnt += self.n_thread # policy resonance eprsn_yita = self.rl_functional.stage_planner.yita if AlgorithmConfig.policy_resonance else 0 EpRsn = np.random.rand(self.n_thread) < eprsn_yita StateRecall['_EpRsn_'] = EpRsn # heterogeneous agent identification StateRecall['_hete_type_'] = repeat_at(self.hete_type, 0, self.n_thread) # select static/frontier actor network StateRecall['_hete_pick_'], StateRecall['_gp_pick_'] = select_nets_for_shellenv( n_types=self.n_hete_types, policy=self.rl_functional.policy, hete_type_list=self.hete_type, n_thread = self.n_thread, n_gp=AlgorithmConfig.hete_n_net_placeholder, testing=StateRecall['Test-Flag'] ) print([(t['win_rate'], t['ckpg_cnt']) for 
t in self.rl_functional.policy.ckpg_info]) # prepare observation for the real RL algorithm I_StateRecall = { 'obs':obs[R], 'avail_act':self.avail_act[R], 'Test-Flag':StateRecall['Test-Flag'], '_EpRsn_':StateRecall['_EpRsn_'][R], '_hete_pick_':StateRecall['_hete_pick_'][R], '_hete_type_':StateRecall['_hete_type_'][R], '_gp_pick_':StateRecall['_gp_pick_'][R], 'threads_active_flag':R, 'Latest-Team-Info':StateRecall['Latest-Team-Info'][R], } # load available act to limit action space if possible if self.AvailActProvided: avail_act = np.array([info['avail-act'] for info in np.array(StateRecall['Latest-Team-Info'][R], dtype=object)]) I_StateRecall.update({'avail_act':avail_act}) # the real RL algorithm ! ! act_active, internal_recall = self.rl_functional.interact_with_env_genuine(I_StateRecall) # get decision results act[R] = act_active # confirm actions are valid (satisfy 'avail-act') if ShellEnvConfig.add_avail_act and self.patience>0: self.patience -= 1 assert (gather_righthand(self.avail_act, repeat_at(act, -1, 1), check=False)[R]==1).all() # translate action into ue4 tuple action act_converted = np.array([[ self.action_converter.convert_act_arr(self.agent_type[agentid], act) for agentid, act in enumerate(th) ] for th in act]) # swap thread(batch) axis and agent axis actions_list = np.swapaxes(act_converted, 0, 1) # register callback hook if not StateRecall['Test-Flag']: StateRecall['_hook_'] = internal_recall['_hook_'] assert StateRecall['_hook_'] is not None else: if AlgorithmConfig.policy_matrix_testing: StateRecall['_hook_'] = internal_recall['_hook_'] assert StateRecall['_hook_'] is not None # all done return actions_list, StateRecall ================================================ FILE: PythonExample/hmp_minimal_modules/ALGORITHM/hete_league_onenet_fix/stage_planner.py ================================================ import math from .foundation import AlgorithmConfig from UTIL.colorful import * class PolicyRsnConfig: resonance_start_at_update = 10 
yita_min_prob = 0.15 # should be >= (1/n_action) yita_max = 0.75 yita_inc_per_update = 0.0075 # (increase to 0.75 in 500 updates) freeze_critic = False yita_shift_method = '-sin' yita_shift_cycle = 1000 class StagePlanner: def __init__(self, mcv) -> None: if AlgorithmConfig.policy_resonance: self.resonance_active = False self.yita = 0 self.yita_min_prob = PolicyRsnConfig.yita_min_prob self.freeze_body = False self.update_cnt = 0 self.mcv = mcv self.trainer = None if AlgorithmConfig.wait_norm_stable: self.wait_norm_stable_cnt = 2 else: self.wait_norm_stable_cnt = 0 return def is_resonance_active(self,): return self.resonance_active def is_body_freeze(self,): return self.freeze_body def get_yita(self): return self.yita def get_yita_min_prob(self): return PolicyRsnConfig.yita_min_prob def can_exec_trainning(self): if self.wait_norm_stable_cnt > 0: print亮绿('waiting initial normalization stable, skip training!') self.wait_norm_stable_cnt -= 1 return False else: return True def update_plan(self): self.update_cnt += 1 if AlgorithmConfig.policy_resonance: if self.resonance_active: self.when_pr_active() elif not self.resonance_active: self.when_pr_inactive() return def activate_pr(self): self.resonance_active = True self.freeze_body = True if PolicyRsnConfig.freeze_critic: self.trainer.freeze_body() def when_pr_inactive(self): assert not self.resonance_active if PolicyRsnConfig.resonance_start_at_update >= 0: # mean need to activate pr later if self.update_cnt > PolicyRsnConfig.resonance_start_at_update: # time is up, activate pr self.activate_pr() # log pr = 1 if self.resonance_active else 0 self.mcv.rec(pr, 'resonance') self.mcv.rec(self.yita, 'self.yita') def when_pr_active(self): assert self.resonance_active self._update_yita() # log pr = 1 if self.resonance_active else 0 self.mcv.rec(pr, 'resonance') self.mcv.rec(self.yita, 'self.yita') def _update_yita(self): ''' increase self.yita by @yita_inc_per_update per function call ''' if PolicyRsnConfig.yita_shift_method == 
'-cos': self.yita = PolicyRsnConfig.yita_max t = -math.cos(2*math.pi/PolicyRsnConfig.yita_shift_cycle * self.update_cnt) * PolicyRsnConfig.yita_max if t<=0: self.yita = 0 else: self.yita = t print亮绿('yita update:', self.yita) elif PolicyRsnConfig.yita_shift_method == '-sin': self.yita = PolicyRsnConfig.yita_max t = -math.sin(2*math.pi/PolicyRsnConfig.yita_shift_cycle * self.update_cnt) * PolicyRsnConfig.yita_max if t<=0: self.yita = 0 else: self.yita = t print亮绿('yita update:', self.yita) elif PolicyRsnConfig.yita_shift_method == 'slow-inc': self.yita += PolicyRsnConfig.yita_inc_per_update if self.yita > PolicyRsnConfig.yita_max: self.yita = PolicyRsnConfig.yita_max print亮绿('yita update:', self.yita) else: assert False ================================================ FILE: PythonExample/hmp_minimal_modules/ALGORITHM/hete_league_onenet_fix/trajectory.py ================================================ # cython: language_level=3 from config import GlobalConfig import numpy as np from numpy.core.numeric import indices from .foundation import AlgorithmConfig from ALGORITHM.common.traj import TRAJ_BASE import copy from UTIL.colorful import * from UTIL.tensor_ops import __hash__, my_view, np_one_hot, np_repeat_at, np_softmax, scatter_with_nan class trajectory(TRAJ_BASE): def __init__(self, traj_limit, env_id): super().__init__(traj_limit, env_id) self.reference_track_name = 'value' def early_finalize(self): assert not self.readonly_lock # unfinished traj self.need_reward_bootstrap = True def set_terminal_obs(self, tobs): self.tobs = copy.deepcopy(tobs) def cut_tail(self): # 删去多余的预留空间 super().cut_tail() TJ = lambda key: getattr(self, key) # 进一步地, 根据这个轨迹上的NaN,删除所有无效时间点 reference_track = getattr(self, self.reference_track_name) if self.need_reward_bootstrap: assert False, ('it should not go here if everything goes as expected') # print('need_reward_bootstrap') 找到最后一个不是nan的位置 T = np.where(~np.isnan(reference_track.squeeze()))[0][-1] self.boot_strap_value = { 
'bootstrap_value':TJ('value').squeeze()[T].copy(), } assert not hasattr(self,'tobs') self.set_terminal_obs(TJ('g_obs')[T].copy()) reference_track[T] = np.nan # deprecated if nothing in it p_invalid = np.isnan(my_view(reference_track, [0, -1])).any(axis=-1) p_valid = ~p_invalid if p_invalid.all(): #invalid traj self.deprecated_flag = True return # adjust reward position reward = TJ('reward') for i in reversed(range(self.time_pointer)): if p_invalid[i] and i != 0: # invalid, push reward forward reward[i-1] += reward[i]; reward[i] = np.nan setattr(self, 'reward', reward) # clip NaN for key in self.key_dict: setattr(self, key, TJ(key)[p_valid]) # all done return def reward_push_forward(self, dead_mask): # self.new_reward = self.reward.copy() if AlgorithmConfig.gamma_in_reward_forwarding: gamma = AlgorithmConfig.gamma_in_reward_forwarding_value for i in reversed(range(self.time_pointer)): if i==0: continue self.reward[i-1] += np.where(dead_mask[i], self.reward[i]*gamma, 0) # if dead_mask[i]==True, this frame is invalid, move reward forward, set self.reward[i] to 0 self.reward[i] = np.where(dead_mask[i], 0, self.reward[i]) # if dead_mask[i]==True, this frame is invalid, move reward forward, set self.reward[i] to 0 else: for i in reversed(range(self.time_pointer)): if i==0: continue self.reward[i-1] += np.where(dead_mask[i], self.reward[i], 0) # if dead_mask[i]==True, this frame is invalid, move reward forward, set self.reward[i] to 0 self.reward[i] = np.where(dead_mask[i], 0, self.reward[i]) # if dead_mask[i]==True, this frame is invalid, move reward forward, set self.reward[i] to 0 return # new finalize def finalize(self): self.readonly_lock = True assert not self.deprecated_flag TJ = lambda key: getattr(self, key) assert not np.isnan(TJ('reward')).any() # deadmask tmp = np.isnan(my_view(self.obs, [0,0,-1])) dead_mask = tmp.all(-1) # if (True): # check if the mask is correct # dead_mask_self = np.isnan(my_view(self.obs, [0,0,-1])[:,:,0]) # assert 
(dead_mask==dead_mask_self).all() # dead_mask2 = tmp.any(-1) # assert (dead_mask==dead_mask2).all() self.reward_push_forward(dead_mask) # push terminal reward forward 38 42 54 threat = np.zeros(shape=dead_mask.shape) - 1 assert dead_mask.shape[0] == self.time_pointer for i in reversed(range(self.time_pointer)): # threat[:(i+1)] 不包含threat[(i+1)] if i+1 < self.time_pointer: threat[:(i+1)] += (~(dead_mask[i+1]&dead_mask[i])).astype(np.int) elif i+1 == self.time_pointer: threat[:] += (~dead_mask[i]).astype(np.int) SAFE_LIMIT = 11 threat = np.clip(threat, -1, SAFE_LIMIT) setattr(self, 'threat', np.expand_dims(threat, -1)) # ! Use GAE to calculate return self.gae_finalize_return(reward_key='reward', value_key='value', new_return_name='return') return def gae_finalize_return(self, reward_key, value_key, new_return_name): # ------- gae parameters ------- gamma = AlgorithmConfig.gamma tau = AlgorithmConfig.tau # ------- -------------- ------- rewards = getattr(self, reward_key) value = getattr(self, value_key) length = rewards.shape[0] assert rewards.shape[0]==value.shape[0] # if dimension not aligned if rewards.ndim == value.ndim-1: rewards = np.expand_dims(rewards, -1) # initalize two more tracks setattr(self, new_return_name, np.zeros_like(value)) self.key_dict.append(new_return_name) returns = getattr(self, new_return_name) boot_strap = 0 if not self.need_reward_bootstrap else self.boot_strap_value['bootstrap_'+value_key] for step in reversed(range(length)): if step==(length-1): # 最后一帧 value_preds_delta = rewards[step] + gamma * boot_strap - value[step] gae = value_preds_delta else: value_preds_delta = rewards[step] + gamma * value[step + 1] - value[step] gae = value_preds_delta + gamma * tau * gae returns[step] = gae + value[step] class TrajPoolManager(object): def __init__(self): self.cnt = 0 def absorb_finalize_pool(self, pool): for traj_handle in pool: traj_handle.cut_tail() pool = list(filter(lambda traj: not traj.deprecated_flag, pool)) for traj_handle in pool: 
traj_handle.finalize() self.cnt += 1 task = ['train'] return task, pool ''' 轨迹池管理 ''' class TrajManagerBase(object): def __init__(self, n_env, traj_limit): self.n_env = n_env self.traj_limit = traj_limit self.update_cnt = 0 self.traj_pool = [] self.registered_keys = [] self.live_trajs = [trajectory(self.traj_limit, env_id=i) for i in range(self.n_env)] self.live_traj_frame = [0 for _ in range(self.n_env)] self._traj_lock_buf = None self.patience = 1000 pass def __check_integraty(self, traj_frag): if self.patience < 0: return # stop wasting time checking this self.patience -= 1 for key in traj_frag: if key not in self.registered_keys and (not key.startswith('_')): self.registered_keys.append(key) for key in self.registered_keys: assert key in traj_frag, ('this key sometimes disappears from the traj_frag:', key) def batch_update(self, traj_frag): self.__check_integraty(traj_frag) done = traj_frag['_DONE_']; traj_frag.pop('_DONE_') # done flag skip = traj_frag['_SKIP_']; traj_frag.pop('_SKIP_') # skip/frozen flag tobs = traj_frag['_TOBS_']; traj_frag.pop('_TOBS_') # terminal obs # single bool to list bool if isinstance(done, bool): done = [done for i in range(self.n_env)] if isinstance(skip, bool): skip = [skip for i in range(self.n_env)] n_active = sum(~skip) # feed cnt = 0 for env_i in range(self.n_env): if skip[env_i]: continue # otherwise frag_index = cnt; cnt += 1 env_index = env_i traj_handle = self.live_trajs[env_index] for key in traj_frag: self.traj_remember(traj_handle, key=key, content=traj_frag[key],frag_index=frag_index, n_active=n_active) self.live_traj_frame[env_index] += 1 traj_handle.time_shift() if done[env_i]: assert tobs[env_i] is not None # get the final obs traj_handle.set_terminal_obs(tobs[env_i]) self.traj_pool.append(traj_handle) self.live_trajs[env_index] = trajectory(self.traj_limit, env_id=env_index) self.live_traj_frame[env_index] = 0 def traj_remember(self, traj, key, content, frag_index, n_active): if content is None: traj.remember(key, 
None) elif isinstance(content, dict): for sub_key in content: self.traj_remember(traj, "".join((key , ">" , sub_key)), content=content[sub_key], frag_index=frag_index, n_active=n_active) else: assert n_active == len(content), ('length error') traj.remember(key, content[frag_index]) # * class BatchTrajManager(TrajManagerBase): def __init__(self, n_env, traj_limit, trainer_hook): super().__init__(n_env, traj_limit) self.trainer_hook = trainer_hook self.traj_limit = traj_limit self.train_traj_needed = AlgorithmConfig.train_traj_needed self.pool_manager = TrajPoolManager() def update(self, traj_frag, index): assert traj_frag is not None for j, env_i in enumerate(index): traj_handle = self.live_trajs[env_i] for key in traj_frag: if traj_frag[key] is None: assert False, key if isinstance(traj_frag[key], dict): # 如果是二重字典,特殊处理 for sub_key in traj_frag[key]: content = traj_frag[key][sub_key][j] traj_handle.remember(key + ">" + sub_key, content) else: content = traj_frag[key][j] traj_handle.remember(key, content) self.live_traj_frame[env_i] += 1 traj_handle.time_shift() return # 函数入口 def feed_traj_framedata(self, traj_frag, require_hook=False): # an unlock hook must be executed before new trajectory feed in assert self._traj_lock_buf is None if require_hook: # the traj_frag is not intact, lock up traj_frag, wait for more assert '_SKIP_' in traj_frag assert '_DONE_' not in traj_frag assert 'reward' not in traj_frag self._traj_lock_buf = traj_frag return self.unlock_fn else: assert '_DONE_' in traj_frag assert '_SKIP_' in traj_frag self.batch_update(traj_frag=traj_frag) return def clear_traj_pool(self): print('do update %d'%self.update_cnt) _, self.traj_pool = self.pool_manager.absorb_finalize_pool(pool=self.traj_pool) self.traj_pool = [] # self.update_cnt += 1 # assert ppo_update_cnt == self.update_cnt return self.update_cnt def train_and_clear_traj_pool(self): print('do update %d'%self.update_cnt) current_task_l, self.traj_pool = 
self.pool_manager.absorb_finalize_pool(pool=self.traj_pool) for current_task in current_task_l: ppo_update_cnt = self.trainer_hook(self.traj_pool, current_task) self.traj_pool = [] self.update_cnt += 1 # assert ppo_update_cnt == self.update_cnt return self.update_cnt def can_exec_training(self): if len(self.traj_pool) >= self.train_traj_needed: return True else: return False def unlock_fn(self, traj_frag): assert self._traj_lock_buf is not None traj_frag.update(self._traj_lock_buf) self._traj_lock_buf = None assert '_DONE_' in traj_frag assert '_SKIP_' in traj_frag self.batch_update(traj_frag=traj_frag) ================================================ FILE: PythonExample/hmp_minimal_modules/ALGORITHM/my_ai/foundation.py ================================================ import numpy as np from UTIL.colorful import * from UTIL.tensor_ops import my_view, __hash__ from config import GlobalConfig from MISSION.uhmap.actionset import strActionToDigits, ActDigitLen class AlgorithmConfig: preserve = '' class ReinforceAlgorithmFoundation(object): def __init__(self, n_agent, n_thread, space, mcv=None, team=None): self.n_agent = n_agent self.n_thread = n_thread self.space = space self.mcv = mcv def interact_with_env(self, StateRecall): obs = StateRecall['Latest-Obs'] P = StateRecall['ENV-PAUSE'] active_thread_obs = obs[~P] actions = np.zeros(shape=(self.n_thread, self.n_agent, ActDigitLen)) for env_index in range(self.n_thread): for agent_index in range(self.n_agent): if np.random.rand() < 0.5: color_index = np.random.randint(low=0, high=4) actions[env_index, agent_index] = strActionToDigits(f'ActionSetDemo::ChangeColor;{color_index}') else: uid = 11 if agent_index % 2 == 0 else 10 actions[env_index, agent_index] = strActionToDigits(f'ActionSetDemo::FireToWaterdrop;{uid}') StateRecall['_hook_'] = None return actions, StateRecall class DiscreteRLFoundation(object): def __init__(self, n_agent, n_thread, space, mcv=None, team=None): self.n_agent = n_agent self.n_thread = n_thread 
self.space = space self.mcv = mcv self.action_list = [ strActionToDigits('ActionSetDemo::ChangeColor;0'), strActionToDigits('ActionSetDemo::ChangeColor;1'), strActionToDigits('ActionSetDemo::ChangeColor;2'), strActionToDigits('ActionSetDemo::ChangeColor;3'), strActionToDigits('ActionSetDemo::FireToWaterdrop;10'), strActionToDigits('ActionSetDemo::FireToWaterdrop;11'), strActionToDigits('ActionSetDemo::MoveToDirection;X=1.0 Y=0.0 Z=0.0'), strActionToDigits('ActionSetDemo::MoveToDirection;X=1.0 Y=1.0 Z=0.0'), strActionToDigits('ActionSetDemo::MoveToDirection;X=0.0 Y=1.0 Z=0.0'), strActionToDigits('ActionSetDemo::MoveToDirection;X=-1.0 Y=1.0 Z=0.0'), strActionToDigits('ActionSetDemo::MoveToDirection;X=-1.0 Y=0.0 Z=0.0'), strActionToDigits('ActionSetDemo::MoveToDirection;X=-1.0 Y=-1.0 Z=0.0'), strActionToDigits('ActionSetDemo::MoveToDirection;X=0.0 Y=-1.0 Z=0.0'), strActionToDigits('ActionSetDemo::MoveToDirection;X=1.0 Y=-1.0 Z=0.0'), strActionToDigits('ActionSetDemo::MoveToDirection;X=0.0 Y=0.0 Z=1.0'), strActionToDigits('ActionSetDemo::MoveToDirection;X=0.0 Y=0.0 Z=-1.0'), ] self.how_many_actions = len(self.action_list) def interact_with_env(self, StateRecall): obs = StateRecall['Latest-Obs'] P = StateRecall['ENV-PAUSE'] active_thread_obs = obs[~P] actions = np.zeros(shape=(self.n_thread, self.n_agent, ActDigitLen)) for env_index in range(self.n_thread): for agent_index in range(self.n_agent): action_x = np.random.randint(low=0,high=self.how_many_actions) actions[env_index, agent_index] = self.action_list[action_x] StateRecall['_hook_'] = None return actions, StateRecall ================================================ FILE: PythonExample/hmp_minimal_modules/ALGORITHM/ppo_ma/ccategorical.py ================================================ from torch.distributions.categorical import Categorical import torch from .foundation import AlgorithmConfig from UTIL.tensor_ops import repeat_at, _2tensor from torch.distributions import kl_divergence EPS = 1e-9 # yita = p_hit = 
0.14 def random_process(probs, rsn_flag): yita = AlgorithmConfig.yita with torch.no_grad(): max_place = probs.argmax(-1, keepdims=True) mask_max = torch.zeros_like(probs).scatter_(-1, max_place, 1).bool() pmax = probs[mask_max] if rsn_flag: assert max_place.shape[-1] == 1 return max_place.squeeze(-1) else: # forbit max prob being chosen, pmax = probs.max(axis=-1) p_hat = pmax + (pmax-1)/(1/yita-1) k = 1/(1-yita) #!!! write probs *= k #!!! write probs[mask_max] = p_hat # print(probs) dist = Categorical(probs=probs) samp = dist.sample() assert samp.shape[-1] != 1 return samp def random_process_allow_big_yita(probs, rsn_flag): yita = AlgorithmConfig.yita with torch.no_grad(): max_place = probs.argmax(-1, keepdims=True) mask_max = torch.zeros_like(probs).scatter_(-1, max_place, 1).bool() pmax = probs[mask_max].reshape(max_place.shape) #probs[max_place].clone() if rsn_flag: assert max_place.shape[-1] == 1 return max_place.squeeze(-1) else: # forbit max prob being chosen # pmax = probs.max(axis=-1) #probs[max_place].clone() yita_arr = torch.ones_like(pmax)*yita yita_arr_clip = torch.minimum(pmax, yita_arr) # p_hat = pmax + (pmax-1) / (1/yita_arr_clip-1) + 1e-10 p_hat = (pmax-yita_arr_clip)/(1-yita_arr_clip) k = 1/(1-yita_arr_clip) probs *= k probs[mask_max] = p_hat.reshape(-1) # print(probs) dist = Categorical(probs=probs) samp = dist.sample() assert samp.shape[-1] != 1 return samp #.squeeze(-1) def random_process_with_clamp3(probs, yita, yita_min_prob, rsn_flag): with torch.no_grad(): max_place = probs.argmax(-1, keepdims=True) mask_max = torch.zeros_like(probs).scatter_(dim=-1, index=max_place, value=1).bool() pmax = probs[mask_max].reshape(max_place.shape) # act max assert max_place.shape[-1] == 1 act_max = max_place.squeeze(-1) # act samp yita_arr = torch.ones_like(pmax)*yita # p_hat = pmax + (pmax-1) / (1/yita_arr_clip-1) + 1e-10 p_hat = (pmax-yita_arr)/((1-yita_arr)+EPS) p_hat = p_hat.clamp(min=yita_min_prob) k = (1-p_hat)/((1-pmax)+EPS) probs *= k probs[mask_max] 
= p_hat.reshape(-1) dist = Categorical(probs=probs) act_samp = dist.sample() # assert act_samp.shape[-1] != 1 hit_e = _2tensor(rsn_flag) return torch.where(hit_e, act_max, act_samp) class CCategorical(): def __init__(self, planner): self.planner = planner pass def sample(self, dist, eprsn): probs = dist.probs.clone() return random_process_with_clamp3(probs, self.planner.yita, self.planner.yita_min_prob, eprsn) def register_rsn(self, rsn_flag): self.rsn_flag = rsn_flag def feed_logits(self, logits): try: return Categorical(logits=logits) except: print('error') ================================================ FILE: PythonExample/hmp_minimal_modules/ALGORITHM/ppo_ma/cython_func.pyx ================================================ import numpy as np cimport numpy as np cimport cython from cython.parallel import prange np.import_array() ctypedef fused DTYPE_float: np.float32_t np.float64_t ctypedef fused DTYPE_int64_t: np.int64_t np.int32_t # to compat Windows ctypedef np.uint8_t DTYPE_bool_t @cython.boundscheck(False) @cython.wraparound(False) @cython.nonecheck(False) def roll_hisory( DTYPE_float[:,:,:,:] obs_feed_new, DTYPE_float[:,:,:,:] prev_obs_feed, DTYPE_bool_t[:,:,:] valid_mask, DTYPE_int64_t[:,:] N_valid, DTYPE_float[:,:,:,:] next_his_pool): # how many threads cdef Py_ssize_t vmax = N_valid.shape[0] # how many agents cdef Py_ssize_t wmax = N_valid.shape[1] # how many entity subjects (including self @0) cdef Py_ssize_t max_obs_entity = obs_feed_new.shape[2] cdef int n_v, th, a, t, k, pointer for th in prange(vmax, nogil=True): # for each thread range -> prange for a in prange(wmax): # for each agent pointer = 0 # step 1 fill next_his_pool[0 ~ (nv-1)] with obs_feed_new[0 ~ max_obs_entity-1] for k in range(max_obs_entity): if valid_mask[th,a,k]: next_his_pool[th,a, pointer] = obs_feed_new[th,a, k] pointer = pointer + 1 # step 2 fill next_his_pool[nv ~ (max_obs_entity-1)] with prev_obs_feed[0 ~ (max_obs_entity-1-nv)] n_v = N_valid[th,a] for k in range(n_v, 
max_obs_entity): next_his_pool[th,a, k] = prev_obs_feed[th,a, k-n_v] return np.asarray(next_his_pool) ================================================ FILE: PythonExample/hmp_minimal_modules/ALGORITHM/ppo_ma/div_tree.py ================================================ import torch import torch.nn as nn import numpy as np from ALGORITHM.common.mlp import LinearFinal from UTIL.tensor_ops import add_onehot_id_at_last_dim, add_onehot_id_at_last_dim_fixlen, repeat_at, _2tensor, gather_righthand, scatter_righthand class DivTree(nn.Module): # merge by MLP version def __init__(self, input_dim, h_dim, n_action): super().__init__() # to design a division tree, I need to get the total number of agents from .foundation import AlgorithmConfig self.n_agent = AlgorithmConfig.n_agent self.div_tree = get_division_tree(self.n_agent) self.n_level = len(self.div_tree) self.max_level = len(self.div_tree) - 1 self.current_level = 0 self.init_level = AlgorithmConfig.div_tree_init_level if self.init_level < 0: self.init_level = self.max_level self.current_level_floating = 0.0 get_net = lambda: nn.Sequential( nn.Linear(h_dim+self.n_agent, h_dim), nn.ReLU(inplace=True), LinearFinal(h_dim, n_action) ) # Note: this is NOT net defining for each agent # Instead, all agents starts from self.nets[0] self.nets = torch.nn.ModuleList(modules=[ get_net() for i in range(self.n_agent) ]) def set_to_init_level(self, auto_transfer=True): if self.init_level!=self.current_level: for i in range(self.current_level, self.init_level): self.change_div_tree_level(i+1, auto_transfer) def change_div_tree_level(self, level, auto_transfer=True): print('performing div tree level change (%d -> %d/%d) \n'%(self.current_level, level, self.max_level)) self.current_level = level self.current_level_floating = level assert len(self.div_tree) > self.current_level, ('Reach max level already!') if not auto_transfer: return transfer_list = [] for i in range(self.n_agent): previous_net_index = self.div_tree[self.current_level-1, 
i] post_net_index = self.div_tree[self.current_level, i] if post_net_index!=previous_net_index: transfer = (previous_net_index, post_net_index) if transfer not in transfer_list: transfer_list.append(transfer) for transfer in transfer_list: from_which_net = transfer[0] to_which_net = transfer[1] self.nets[to_which_net].load_state_dict(self.nets[from_which_net].state_dict()) print('transfering model parameters from %d-th net to %d-th net'%(from_which_net, to_which_net)) return def forward(self, x_in, agent_ids): # x0: shape = (?,...,?, n_agent, core_dim) if self.current_level == 0: x0 = add_onehot_id_at_last_dim_fixlen(x_in, fixlen=self.n_agent, agent_ids=agent_ids) x2 = self.nets[0](x0) return x2, None else: x0 = add_onehot_id_at_last_dim_fixlen(x_in, fixlen=self.n_agent, agent_ids=agent_ids) res = [] for i in range(self.n_agent): use_which_net = self.div_tree[self.current_level, i] res.append(self.nets[use_which_net](x0[..., i, :])) x2 = torch.stack(res, -2) # x22 = self.nets[0](x1) return x2, None # def forward_try_parallel(self, x0): # x0: shape = (?,...,?, n_agent, core_dim) # x1 = self.shared_net(x0) # stream = [] # res = [] # for i in range(self.n_agent): # stream.append(torch.cuda.Stream()) # torch.cuda.synchronize() # for i in range(self.n_agent): # use_which_net = self.div_tree[self.current_level, i] # with torch.cuda.stream(stream[i]): # res.append(self.nets[use_which_net](x1[..., i, :])) # print(res[i]) # # s1 = torch.cuda.Stream() # # s2 = torch.cuda.Stream() # # # Wait for the above tensors to initialise. # # torch.cuda.synchronize() # # with torch.cuda.stream(s1): # # C = torch.mm(A, A) # # with torch.cuda.stream(s2): # # D = torch.mm(B, B) # # Wait for C and D to be computed. # torch.cuda.synchronize() # # Do stuff with C and D. 
# x2 = torch.stack(res, -2) # return x2 def _2div(arr): arr_res = arr.copy() arr_pieces = [] pa = 0 st = 0 needdivcnt = 0 for i, a in enumerate(arr): if a!=pa: arr_pieces.append([st, i]) if (i-st)!=1: needdivcnt+=1 pa = a st = i arr_pieces.append([st, len(arr)]) if (len(arr)-st)!=1: needdivcnt+=1 offset = range(len(arr_pieces), len(arr_pieces)+needdivcnt) p=0 for arr_p in arr_pieces: length = arr_p[1] - arr_p[0] if length == 1: continue half_len = int(np.ceil(length / 2)) for j in range(arr_p[0]+half_len, arr_p[1]): try: arr_res[j] = offset[p] except: print('wtf') p+=1 return arr_res def get_division_tree(n_agents): agent2divitreeindex = np.arange(n_agents) np.random.shuffle(agent2divitreeindex) max_div = np.ceil(np.log2(n_agents)).astype(int) levels = np.zeros(shape=(max_div+1, n_agents), dtype=int) tree_of_agent = []*(max_div+1) for ith, level in enumerate(levels): if ith == 0: continue res = _2div(levels[ith-1,:]) levels[ith,:] = res res_levels = levels.copy() for i, div_tree_index in enumerate(agent2divitreeindex): res_levels[:, i] = levels[:, div_tree_index] return res_levels ================================================ FILE: PythonExample/hmp_minimal_modules/ALGORITHM/ppo_ma/foundation.py ================================================ import os, time, torch, traceback, shutil import numpy as np from UTIL.colorful import * from config import GlobalConfig from UTIL.tensor_ops import repeat_at from ALGORITHM.common.rl_alg_base import RLAlgorithmBase class AlgorithmConfig: ''' AlgorithmConfig: This config class will be 'injected' with new settings from json. (E.g., override configs with ```python main.py --cfg example.jsonc```) (please see UTIL.config_args to find out how this advanced trick works out.) 
''' # configuration, open to jsonc modification gamma = 0.99 tau = 0.95 train_traj_needed = 512 TakeRewardAsUnity = False use_normalization = True add_prob_loss = False n_entity_placeholder = 10 load_checkpoint = False load_specific_checkpoint = '' # PPO part clip_param = 0.2 ppo_epoch = 16 n_pieces_batch_division = 1 value_loss_coef = 0.1 entropy_coef = 0.05 max_grad_norm = 0.5 clip_param = 0.2 lr = 1e-4 # sometimes the episode length gets longer, # resulting in more samples and causing GPU OOM, # prevent this by fixing the number of samples to initial # by randomly sampling and droping prevent_batchsize_oom = False gamma_in_reward_forwarding = False gamma_in_reward_forwarding_value = 0.99 net_hdim = 24 dual_conc = True n_entity_placeholder = 'auto load, do not change' n_agent = 'auto load, do not change' entity_distinct = 'auto load, do not change' ConfigOnTheFly = True policy_resonance = False use_avail_act = True debug = False def str_array_to_num(str_arr): out_arr = [] buffer = {} for str in str_arr: if str not in buffer: buffer[str] = len(buffer) out_arr.append(buffer[str]) return out_arr def itemgetter(*items): # same with operator.itemgetter def g(obj): return tuple(obj[item] if item in obj else None for item in items) return g class ReinforceAlgorithmFoundation(RLAlgorithmBase): def __init__(self, n_agent, n_thread, space, mcv=None, team=None): from .shell_env import ShellEnvWrapper, ActionConvertLegacy from .net import Net super().__init__(n_agent, n_thread, space, mcv, team) AlgorithmConfig.n_agent = n_agent # change obs format, e.g., converting dead agent obs into NaN self.shell_env = ShellEnvWrapper(n_agent, n_thread, space, mcv, self, AlgorithmConfig, GlobalConfig.ScenarioConfig, self.team) n_actions = len(self.shell_env.action_converter.dictionary_args) if self.ScenarioConfig.EntityOriented: AlgorithmConfig.n_entity_placeholder = GlobalConfig.ScenarioConfig.obs_n_entity rawob_dim = self.ScenarioConfig.obs_vec_length else: rawob_dim = 
space['obs_space']['obs_shape'] # self.StagePlanner, for policy resonance from .stage_planner import StagePlanner self.stage_planner = StagePlanner(mcv=mcv) # initialize policy self.policy = Net(rawob_dim=rawob_dim, n_action=n_actions, stage_planner=self.stage_planner) self.policy = self.policy.to(self.device) # initialize optimizer and trajectory (batch) manager from .ppo import PPO from .trajectory import BatchTrajManager self.trainer = PPO(self.policy, ppo_config=AlgorithmConfig, mcv=mcv) self.traj_manager = BatchTrajManager( n_env=n_thread, traj_limit=int(GlobalConfig.ScenarioConfig.MaxEpisodeStep), trainer_hook=self.trainer.train_on_traj) self.stage_planner.trainer = self.trainer # confirm that reward method is correct self.check_reward_type(AlgorithmConfig) # load checkpoints if needed self.load_model(AlgorithmConfig) # enable config_on_the_fly ability if AlgorithmConfig.ConfigOnTheFly: self._create_config_fly() def action_making(self, StateRecall, test_mode): # make sure hook is cleared assert ('_hook_' not in StateRecall) # read obs obs, threads_active_flag, avail_act, eprsn = \ itemgetter('obs', 'threads_active_flag', 'avail_act', '_EpRsn_')(StateRecall) # make sure obs is right assert obs is not None, ('Make sure obs is ok') assert len(obs) == sum(threads_active_flag), ('check batch size') # make sure avail_act is correct if AlgorithmConfig.use_avail_act: assert avail_act is not None # policy resonance flag reshape eprsn = repeat_at(eprsn, -1, self.n_agent) thread_index = np.arange(self.n_thread)[threads_active_flag] # make decision with torch.no_grad(): action, value, action_log_prob = self.policy.act(obs=obs, test_mode=test_mode, avail_act=avail_act, eprsn=eprsn, ) # commit obs to buffer, vars named like _x_ are aligned, others are not! 
traj_framefrag = { "_SKIP_": ~threads_active_flag, "value": value, "avail_act": avail_act, "actionLogProb": action_log_prob, "obs": obs, "action": action, } if avail_act is not None: traj_framefrag.update({'avail_act': avail_act}) # deal with rollout later when the reward is ready, leave a hook as a callback here if not test_mode: StateRecall['_hook_'] = self.commit_traj_frag(traj_framefrag, req_hook = True) return action.copy(), StateRecall def interact_with_env(self, StateRecall): ''' Interfacing with marl, standard method that you must implement (redirect to shell_env to help with history rolling) ''' return self.shell_env.interact_with_env(StateRecall) def interact_with_env_genuine(self, StateRecall): ''' When shell_env finish the preparation, interact_with_env_genuine is called (Determine whether or not to do a training routinue) ''' # if not StateRecall['Test-Flag']: self.train() # when needed, train! return self.action_making(StateRecall, StateRecall['Test-Flag']) def train(self): ''' Get event from hmp task runner, save model now! ''' if self.traj_manager.can_exec_training(): if self.stage_planner.can_exec_trainning(): self.traj_manager.train_and_clear_traj_pool() else: self.traj_manager.clear_traj_pool() # read configuration if AlgorithmConfig.ConfigOnTheFly: self._config_on_fly() # self.stage_planner.update_plan() def save_model(self, update_cnt, info=None): ''' save model now! save if triggered when: 1. Update_cnt = 50, 100, ... 2. Given info, indicating a hmp command 3. 
A flag file is detected, indicating a save command from human ''' if not os.path.exists('%s/history_cpt/' % GlobalConfig.logdir): os.makedirs('%s/history_cpt/' % GlobalConfig.logdir) # dir 1 pt_path = '%s/model.pt' % GlobalConfig.logdir print绿('saving model to %s' % pt_path) torch.save({ 'policy': self.policy.state_dict(), 'optimizer': self.trainer.optimizer.state_dict(), }, pt_path) # dir 2 info = str(update_cnt) if info is None else ''.join([str(update_cnt), '_', info]) pt_path2 = '%s/history_cpt/model_%s.pt' % (GlobalConfig.logdir, info) shutil.copyfile(pt_path, pt_path2) print绿('save_model fin') def load_model(self, AlgorithmConfig): ''' load model now ''' if AlgorithmConfig.load_checkpoint: manual_dir = AlgorithmConfig.load_specific_checkpoint ckpt_dir = '%s/model.pt' % GlobalConfig.logdir if manual_dir == '' else '%s/%s' % (GlobalConfig.logdir, manual_dir) cuda_n = 'cpu' if 'cpu' in self.device else self.device strict = True cpt = torch.load(ckpt_dir, map_location=cuda_n) self.policy.load_state_dict(cpt['policy'], strict=strict) # https://github.com/pytorch/pytorch/issues/3852 self.trainer.optimizer.load_state_dict(cpt['optimizer']) print黄('loaded checkpoint:', ckpt_dir) def process_framedata(self, traj_framedata): ''' hook is called when reward and next moment observation is ready, now feed them into trajectory manager. Rollout Processor | 准备提交Rollout, 以下划线开头和结尾的键值需要对齐(self.n_thread, ...) 
note that keys starting with _ must have shape (self.n_thread, ...), details see fn:mask_paused_env() ''' # strip info, since it is not array items_to_pop = ['info', 'Latest-Obs'] for k in items_to_pop: if k in traj_framedata: traj_framedata.pop(k) # the agent-wise reward is supposed to be the same, so averge them if self.ScenarioConfig.RewardAsUnity: traj_framedata['reward'] = repeat_at(traj_framedata['reward'], insert_dim=-1, n_times=self.n_agent) # change the name of done to be recognised (by trajectory manager) traj_framedata['_DONE_'] = traj_framedata.pop('done') traj_framedata['_TOBS_'] = traj_framedata.pop( 'Terminal-Obs-Echo') if 'Terminal-Obs-Echo' in traj_framedata else None # mask out pause thread traj_framedata = self.mask_paused_env(traj_framedata) # put the frag into memory self.traj_manager.feed_traj_framedata(traj_framedata) def mask_paused_env(self, frag): running = ~frag['_SKIP_'] if running.all(): return frag for key in frag: if not key.startswith('_') and hasattr(frag[key], '__len__') and len(frag[key]) == self.n_thread: frag[key] = frag[key][running] return frag def _create_config_fly(self): logdir = GlobalConfig.logdir self.input_file_dir = '%s/cmd_io.txt' % logdir if not os.path.exists(self.input_file_dir): with open(self.input_file_dir, 'w+', encoding='utf8') as f: f.writelines(["# Write cmd at next line: ", ""]) def _config_on_fly(self): if not os.path.exists(self.input_file_dir): return with open(self.input_file_dir, 'r', encoding='utf8') as f: cmdlines = f.readlines() cmdlines_writeback = [] any_change = False for cmdline in cmdlines: if cmdline.startswith('#') or cmdline=="\n" or cmdline==" \n": cmdlines_writeback.append(cmdline) else: any_change = True try: print亮绿('[foundation.py] ------- executing: %s ------'%cmdline) exec(cmdline) cmdlines_writeback.append('# [execute successfully]\t'+cmdline) except: print红(traceback.format_exc()) cmdlines_writeback.append('# [execute failed]\t'+cmdline) if any_change: with open(self.input_file_dir, 
'w+', encoding='utf8') as f: f.writelines(cmdlines_writeback) ================================================ FILE: PythonExample/hmp_minimal_modules/ALGORITHM/ppo_ma/net.py ================================================ import torch, math, copy import numpy as np import torch.nn as nn from torch.distributions.categorical import Categorical from UTIL.colorful import print亮绿 from UTIL.tensor_ops import Args2tensor_Return2numpy, Args2tensor, __hashn__, my_view from UTIL.tensor_ops import pt_inf from UTIL.exp_helper import changed from .ccategorical import CCategorical from .foundation import AlgorithmConfig from ALGORITHM.common.attention import SimpleAttention from ALGORITHM.common.norm import DynamicNormFix from ALGORITHM.common.net_manifest import weights_init """ network initialize """ class Net(nn.Module): def __init__(self, rawob_dim, n_action, **kwargs): super().__init__() self.update_cnt = nn.Parameter( torch.zeros(1, requires_grad=False, dtype=torch.long), requires_grad=False) self.use_normalization = AlgorithmConfig.use_normalization self.use_policy_resonance = AlgorithmConfig.policy_resonance self.n_action = n_action if self.use_policy_resonance: self.ccategorical = CCategorical(kwargs['stage_planner']) self.is_resonance_active = lambda: kwargs['stage_planner'].is_resonance_active() h_dim = AlgorithmConfig.net_hdim # observation normalization if self.use_normalization: self._batch_norm = DynamicNormFix(rawob_dim, only_for_last_dim=True, exclude_one_hot=True, exclude_nan=True) n_entity = AlgorithmConfig.n_entity_placeholder # # # # # # # # # # actor-critic share # # # # # # # # # # # # self.obs_encoder = nn.Sequential(nn.Linear(rawob_dim, h_dim), nn.ReLU(inplace=True), nn.Linear(h_dim, h_dim)) self.attention_layer = SimpleAttention(h_dim=h_dim) # # # # # # # # # # actor # # # # # # # # # # # # _size = n_entity * h_dim self.policy_head = nn.Sequential( nn.Linear(_size, h_dim), nn.ReLU(inplace=True), nn.Linear(h_dim, h_dim//2), nn.ReLU(inplace=True), 
nn.Linear(h_dim//2, self.n_action)) # # # # # # # # # # critic # # # # # # # # # # # # _size = n_entity * h_dim self.ct_encoder = nn.Sequential(nn.Linear(_size, h_dim), nn.ReLU(inplace=True), nn.Linear(h_dim, h_dim)) self.ct_attention_layer = SimpleAttention(h_dim=h_dim) self.get_value = nn.Sequential(nn.Linear(h_dim, h_dim), nn.ReLU(inplace=True),nn.Linear(h_dim, 1)) self.is_recurrent = False self.apply(weights_init) return @Args2tensor_Return2numpy def act(self, *args, **kargs): return self._act(*args, **kargs) @Args2tensor def evaluate_actions(self, *args, **kargs): return self._act(*args, **kargs, eval_mode=True) def _act(self, obs=None, test_mode=None, eval_mode=False, eval_actions=None, avail_act=None, agent_ids=None, eprsn=None): eval_act = eval_actions if eval_mode else None others = {} if self.use_normalization: if torch.isnan(obs).all(): pass else: obs = self._batch_norm(obs, freeze=(eval_mode or test_mode)) mask_dead = torch.isnan(obs).any(-1) obs = torch.nan_to_num_(obs, 0) # replace dead agents' obs, from NaN to 0 # # # # # # # # # # actor-critic share # # # # # # # # # # # # baec = self.obs_encoder(obs) baec = self.attention_layer(k=baec,q=baec,v=baec, mask=mask_dead) # # # # # # # # # # actor # # # # # # # # # # # # at_bac = my_view(baec,[0,0,-1]) logits = self.policy_head(at_bac) # choose action selector logit2act = self._logit2act_rsn if self.use_policy_resonance and self.is_resonance_active() else self._logit2act # apply action selector act, actLogProbs, distEntropy, probs = logit2act( logits, eval_mode=eval_mode, test_mode=test_mode, eval_actions=eval_act, avail_act=avail_act, eprsn=eprsn) # # # # # # # # # # critic # # # # # # # # # # # # ct_bac = my_view(baec,[0,0,-1]) ct_bac = self.ct_encoder(ct_bac) ct_bac = self.ct_attention_layer(k=ct_bac,q=ct_bac,v=ct_bac) value = self.get_value(ct_bac) if not eval_mode: return act, value, actLogProbs else: return value, actLogProbs, distEntropy, probs, others def _logit2act_rsn(self, logits_agent_cluster, 
eval_mode, test_mode, eval_actions=None, avail_act=None, eprsn=None): if avail_act is not None: logits_agent_cluster = torch.where(avail_act>0, logits_agent_cluster, -pt_inf()) act_dist = self.ccategorical.feed_logits(logits_agent_cluster) if not test_mode: act = self.ccategorical.sample(act_dist, eprsn) if not eval_mode else eval_actions else: act = torch.argmax(act_dist.probs, axis=2) # the policy gradient loss will feedback from here actLogProbs = self._get_act_log_probs(act_dist, act) # sum up the log prob of all agents distEntropy = act_dist.entropy().mean(-1) if eval_mode else None return act, actLogProbs, distEntropy, act_dist.probs def _logit2act(self, logits_agent_cluster, eval_mode, test_mode, eval_actions=None, avail_act=None, **kwargs): if avail_act is not None: logits_agent_cluster = torch.where(avail_act>0, logits_agent_cluster, -pt_inf()) act_dist = Categorical(logits = logits_agent_cluster) if not test_mode: act = act_dist.sample() if not eval_mode else eval_actions else: act = torch.argmax(act_dist.probs, axis=2) actLogProbs = self._get_act_log_probs(act_dist, act) # the policy gradient loss will feedback from here # sum up the log prob of all agents distEntropy = act_dist.entropy().mean(-1) if eval_mode else None return act, actLogProbs, distEntropy, act_dist.probs @staticmethod def _get_act_log_probs(distribution, action): return distribution.log_prob(action.squeeze(-1)).unsqueeze(-1) ================================================ FILE: PythonExample/hmp_minimal_modules/ALGORITHM/ppo_ma/ppo.py ================================================ import torch, math, traceback import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import numpy as np from random import randint, sample from torch.utils.data.sampler import BatchSampler, SubsetRandomSampler from UTIL.colorful import * from UTIL.tensor_ops import _2tensor, __hash__, __hashn__ from config import GlobalConfig as cfg from UTIL.gpu_share import GpuShareUnit from 
.ppo_sampler import TrajPoolSampler from VISUALIZE.mcom import mcom class PPO(): def __init__(self, policy_and_critic, ppo_config, mcv=None): self.policy_and_critic = policy_and_critic self.clip_param = ppo_config.clip_param self.ppo_epoch = ppo_config.ppo_epoch self.use_avail_act = ppo_config.ppo_epoch self.n_pieces_batch_division = ppo_config.n_pieces_batch_division self.value_loss_coef = ppo_config.value_loss_coef self.entropy_coef = ppo_config.entropy_coef self.max_grad_norm = ppo_config.max_grad_norm self.add_prob_loss = ppo_config.add_prob_loss self.prevent_batchsize_oom = ppo_config.prevent_batchsize_oom # self.freeze_body = ppo_config.freeze_body self.lr = ppo_config.lr self.all_parameter = list(policy_and_critic.named_parameters()) # if not self.freeze_body: self.parameter = [p for p_name, p in self.all_parameter] self.optimizer = optim.Adam(self.parameter, lr=self.lr) self.g_update_delayer = 0 self.g_initial_value_loss = 0 # 轮流训练式 self.mcv = mcv self.ppo_update_cnt = 0 self.batch_size_reminder = True self.trivial_dict = {} assert self.n_pieces_batch_division == 1 self.gpu_share_unit = GpuShareUnit(cfg.device, gpu_party=cfg.gpu_party) def train_on_traj(self, traj_pool, task): while True: try: with self.gpu_share_unit: self.train_on_traj_(traj_pool, task) break # 运行到这说明显存充足 except RuntimeError as err: print(traceback.format_exc()) if self.prevent_batchsize_oom: # in some cases, reversing MaxSampleNum a single time is not enough if TrajPoolSampler.MaxSampleNum[-1] < 0: TrajPoolSampler.MaxSampleNum.pop(-1) assert TrajPoolSampler.MaxSampleNum[-1] > 0 TrajPoolSampler.MaxSampleNum[-1] = -1 print亮红('Insufficient gpu memory, using previous sample size !') else: assert False torch.cuda.empty_cache() def train_on_traj_(self, traj_pool, task): ppo_valid_percent_list = [] sampler = TrajPoolSampler(n_div=1, traj_pool=traj_pool, flag=task, prevent_batchsize_oom=self.prevent_batchsize_oom, mcv=self.mcv) # before_training_hash = [__hashn__(t.parameters()) for t in 
(self.policy_and_critic._nets_flat_placeholder_)] for e in range(self.ppo_epoch): sample_iter = sampler.reset_and_get_iter() self.optimizer.zero_grad() # ! get traj fragment sample = next(sample_iter) # ! build graph, then update network loss_final, others = self.establish_pytorch_graph(task, sample, e) loss_final = loss_final*0.5 if e==0: print('[PPO.py] Memory Allocated %.2f GB'%(torch.cuda.memory_allocated()/1073741824)) loss_final.backward() # log ppo_valid_percent_list.append(others.pop('PPO valid percent').item()) self.log_trivial(dictionary=others); others = None nn.utils.clip_grad_norm_(self.parameter, self.max_grad_norm) self.optimizer.step() if ppo_valid_percent_list[-1] < 0.70: print亮黄('policy change too much, epoch terminate early'); break pass # finish all epoch update print亮黄(np.array(ppo_valid_percent_list)) self.log_trivial_finalize() self.ppo_update_cnt += 1 return self.ppo_update_cnt def freeze_body(self): assert False, "function forbidden" self.freeze_body = True self.parameter_pv = [p_name for p_name, p in self.all_parameter if not any(p_name.startswith(kw) for kw in ('obs_encoder', 'attention_layer'))] self.parameter = [p for p_name, p in self.all_parameter if not any(p_name.startswith(kw) for kw in ('obs_encoder', 'attention_layer'))] self.optimizer = optim.Adam(self.parameter, lr=self.lr) print('change train object') def log_trivial(self, dictionary): for key in dictionary: if key not in self.trivial_dict: self.trivial_dict[key] = [] item = dictionary[key].item() if hasattr(dictionary[key], 'item') else dictionary[key] self.trivial_dict[key].append(item) def log_trivial_finalize(self, print=True): for key in self.trivial_dict: self.trivial_dict[key] = np.array(self.trivial_dict[key]) print_buf = ['[ppo.py] '] for key in self.trivial_dict: self.trivial_dict[key] = self.trivial_dict[key].mean() print_buf.append(' %s:%.3f, '%(key, self.trivial_dict[key])) if self.mcv is not None: self.mcv.rec(self.trivial_dict[key], key) if print: 
print紫(''.join(print_buf)) if self.mcv is not None: self.mcv.rec_show() self.trivial_dict = {} def establish_pytorch_graph(self, flag, sample, n): obs = _2tensor(sample['obs']) advantage = _2tensor(sample['advantage']) action = _2tensor(sample['action']) oldPi_actionLogProb = _2tensor(sample['actionLogProb']) real_value = _2tensor(sample['return']) avail_act = _2tensor(sample['avail_act']) if 'avail_act' in sample else None # batchsize = advantage.shape[0]#; print亮紫(batchsize) batch_agent_size = advantage.shape[0]*advantage.shape[1] assert flag == 'train' newPi_value, newPi_actionLogProb, entropy, probs, others = \ self.policy_and_critic.evaluate_actions( obs=obs, eval_actions=action, test_mode=False, avail_act=avail_act) entropy_loss = entropy.mean() n_actions = probs.shape[-1] if self.add_prob_loss: assert n_actions <= 15 # penalty_prob_line = (1/n_actions)*0.12 probs_loss = (penalty_prob_line - torch.clamp(probs, min=0, max=penalty_prob_line)).mean() if not self.add_prob_loss: probs_loss = torch.zeros_like(probs_loss) # dual clip ppo core E = newPi_actionLogProb - oldPi_actionLogProb E_clip = torch.zeros_like(E) E_clip = torch.where(advantage > 0, torch.clamp(E, max=np.log(1.0+self.clip_param)), E_clip) E_clip = torch.where(advantage < 0, torch.clamp(E, min=np.log(1.0-self.clip_param), max=np.log(5) ), E_clip) ratio = torch.exp(E_clip) policy_loss = -(ratio*advantage).mean() # add all loses value_loss = 0.5 * F.mse_loss(real_value, newPi_value) AT_net_loss = policy_loss - entropy_loss*self.entropy_coef # + probs_loss*20 CT_net_loss = value_loss * 1.0 # AE_new_loss = ae_loss * 1.0 loss_final = AT_net_loss + CT_net_loss # + AE_new_loss ppo_valid_percent = ((E_clip == E).int().sum()/batch_agent_size) nz_mask = real_value!=0 value_loss_abs = (real_value[nz_mask] - newPi_value[nz_mask]).abs().mean() others = { 'Value loss Abs': value_loss_abs, 'PPO valid percent': ppo_valid_percent, 'CT_net_loss': CT_net_loss, 'AT_net_loss': AT_net_loss, } return loss_final, others 
================================================
FILE: PythonExample/hmp_minimal_modules/ALGORITHM/ppo_ma/ppo_sampler.py
================================================
import torch, math, traceback
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from random import randint, sample
from torch.utils.data.sampler import BatchSampler, SubsetRandomSampler
from UTIL.colorful import *
from UTIL.tensor_ops import _2tensor, __hash__, repeat_at
from config import GlobalConfig as cfg
from UTIL.gpu_share import GpuShareUnit

class TrajPoolSampler():
    """Flattens a trajectory pool into one big batch and serves (mini)batches.

    When `prevent_batchsize_oom` is on, the class attribute `MaxSampleNum`
    (a list shared across instances) tracks how many samples the GPU could
    handle; the PPO trainer writes a -1 sentinel into it after an OOM.
    """
    def __init__(self, n_div, traj_pool, flag, prevent_batchsize_oom=False, mcv=None):
        self.n_pieces_batch_division = n_div
        self.prevent_batchsize_oom = prevent_batchsize_oom
        self.mcv = mcv
        if self.prevent_batchsize_oom:
            # OOM protection only supports a single undivided batch
            assert self.n_pieces_batch_division==1, ('?')
        self.num_batch = None
        self.container = {}
        self.warned = False
        assert flag=='train'
        req_dict =        ['avail_act', 'obs', 'action', 'actionLogProb', 'return', 'reward', 'value']
        req_dict_rename = ['avail_act', 'obs', 'action', 'actionLogProb', 'return', 'reward', 'state_value']
        return_rename = "return"
        value_rename = "state_value"
        advantage_rename = "advantage"
        # replace 'obs' to 'obs > xxxx': expand keys that were stored as
        # flattened nested keys ('mainkey>subkey') on the trajectory objects
        for key_index, key in enumerate(req_dict):
            key_name = req_dict[key_index]
            key_rename = req_dict_rename[key_index]
            if not hasattr(traj_pool[0], key_name):
                real_key_list = [real_key for real_key in traj_pool[0].__dict__ if (key_name+'>' in real_key)]
                assert len(real_key_list) > 0, ('check variable provided!', key, key_index)
                for real_key in real_key_list:
                    mainkey, subkey = real_key.split('>')
                    req_dict.append(real_key)
                    req_dict_rename.append(key_rename+'>'+subkey)
        self.big_batch_size = -1 # vector should have same length, check it!
        # load traj into a 'container' (concat along time/sample axis)
        for key_index, key in enumerate(req_dict):
            key_name = req_dict[key_index]
            key_rename = req_dict_rename[key_index]
            if not hasattr(traj_pool[0], key_name): continue
            set_item = np.concatenate([getattr(traj, key_name) for traj in traj_pool], axis=0)
            if not (self.big_batch_size==set_item.shape[0] or (self.big_batch_size<0)):
                print('error')
            assert self.big_batch_size==set_item.shape[0] or (self.big_batch_size<0), (key, key_index)
            self.big_batch_size = set_item.shape[0]
            self.container[key_rename] = set_item    # pointer assignment, no copy
        # normalize advantage inside the batch
        self.container[advantage_rename] = self.container[return_rename] - self.container[value_rename]
        self.container[advantage_rename] = ( self.container[advantage_rename] - self.container[advantage_rename].mean() ) / (self.container[advantage_rename].std() + 1e-5)
        # size of minibatch for each agent
        self.mini_batch_size = math.ceil(self.big_batch_size / self.n_pieces_batch_division)

    def __len__(self):
        # number of minibatch divisions, not number of samples
        return self.n_pieces_batch_division

    def determine_max_n_sample(self):
        """Pick how many samples fit in GPU memory, based on shared OOM history."""
        assert self.prevent_batchsize_oom
        if not hasattr(TrajPoolSampler,'MaxSampleNum'):
            # initialization: a ladder of candidate sizes up to the full batch
            TrajPoolSampler.MaxSampleNum = [int(self.big_batch_size*(i+1)/50) for i in range(50)]
            max_n_sample = self.big_batch_size
        elif TrajPoolSampler.MaxSampleNum[-1] > 0:
            # meaning that oom never happen, at least not yet
            # only update when the batch size increases
            if self.big_batch_size > TrajPoolSampler.MaxSampleNum[-1]:
                TrajPoolSampler.MaxSampleNum.append(self.big_batch_size)
            max_n_sample = self.big_batch_size
        else:
            # meaning that oom already happened, choose TrajPoolSampler.MaxSampleNum[-2] to be the limit
            assert TrajPoolSampler.MaxSampleNum[-2] > 0
            max_n_sample = TrajPoolSampler.MaxSampleNum[-2]
        return max_n_sample

    def reset_and_get_iter(self):
        """Return a generator yielding shuffled minibatch dicts (nested keys rebuilt)."""
        if not self.prevent_batchsize_oom:
            self.sampler = BatchSampler(SubsetRandomSampler(range(self.big_batch_size)), self.mini_batch_size, drop_last=False)
        else:
            max_n_sample = self.determine_max_n_sample()
            n_sample = min(self.big_batch_size, max_n_sample)
            if not hasattr(self,'reminded'):
                self.reminded = True
                drop_percent = (self.big_batch_size-n_sample)/self.big_batch_size*100
                if self.mcv is not None:
                    self.mcv.rec(drop_percent, 'drop percent')
                if drop_percent > 20:
                    # dropping over 20% of samples is treated as fatal
                    print_ = print亮红
                    print_('droping %.1f percent samples..'%(drop_percent))
                    assert False, "GPU OOM!"
                else:
                    print_ = print
                    print_('droping %.1f percent samples..'%(drop_percent))
            self.sampler = BatchSampler(SubsetRandomSampler(range(n_sample)), n_sample, drop_last=False)
        for indices in self.sampler:
            selected = {}
            for key in self.container:
                selected[key] = self.container[key][indices]
            for key in [key for key in selected if '>' in key]:
                # recombine 'mainkey>subkey' entries back into a nested dict
                mainkey, subkey = key.split('>')
                if not mainkey in selected: selected[mainkey] = {}
                selected[mainkey][subkey] = selected[key]
                del selected[key]
            yield selected

================================================
FILE: PythonExample/hmp_minimal_modules/ALGORITHM/ppo_ma/shell_env.py
================================================
import numpy as np
from config import GlobalConfig
from UTIL.colorful import *
from UTIL.tensor_ops import my_view, __hash__, repeat_at, gather_righthand
from MISSION.uhmap.actset_lookup import encode_action_as_digits
from MISSION.uhmap.actionset_v3 import strActionToDigits, ActDigitLen
from .foundation import AlgorithmConfig
from .cython_func import roll_hisory

class ShellEnvConfig:
    # when True, the shell env computes per-agent-type action availability masks
    add_avail_act = False

class ActionConvertPredatorPrey():
    """Maps discrete action indices to 8-direction move commands (ActionSet4)."""
    def __init__(self, SELF_TEAM_ASSUME, OPP_TEAM_ASSUME, OPP_NUM_ASSUME) -> None:
        self.dictionary_args = [
            'ActionSet4::MoveToDirection;X=1.0 Y=0.0 Z=0.0',
            'ActionSet4::MoveToDirection;X=1.0 Y=1.0 Z=0.0',
            'ActionSet4::MoveToDirection;X=0.0 Y=1.0 Z=0.0',
            'ActionSet4::MoveToDirection;X=-1.0 Y=1.0 Z=0.0',
            'ActionSet4::MoveToDirection;X=-1.0 Y=0.0 Z=0.0',
            'ActionSet4::MoveToDirection;X=-1.0 Y=-1.0 Z=0.0',
            'ActionSet4::MoveToDirection;X=0.0 Y=-1.0 Z=0.0',
'ActionSet4::MoveToDirection;X=1.0 Y=-1.0 Z=0.0', ] def convert_act_arr(self, type, a): return strActionToDigits(self.dictionary_args[a]) def get_tp_avail_act(self, type): DISABLE = 0 ENABLE = 1 n_act = len(self.dictionary_args) ret = np.zeros(n_act) + ENABLE return ret def confirm_parameters_are_correct(self, team, agent_num, opp_agent_num): pass class ActionConvertLegacy(): def __init__(self, SELF_TEAM_ASSUME, OPP_TEAM_ASSUME, OPP_NUM_ASSUME) -> None: self.SELF_TEAM_ASSUME = SELF_TEAM_ASSUME self.OPP_TEAM_ASSUME = OPP_TEAM_ASSUME self.OPP_NUM_ASSUME = OPP_NUM_ASSUME # (main_cmd, sub_cmd, x=None, y=None, z=None, UID=None, T=None, T_index=None) self.dictionary_args = [ ('N/A', 'N/A', None, None, None, None, None, None), # 0 ('Idle', 'DynamicGuard', None, None, None, None, None, None), # 1 ('Idle', 'StaticAlert', None, None, None, None, None, None), # 2 ('Idle', 'AsFarAsPossible', None, None, None, None, None, None), # 4 ('Idle', 'StayWhenTargetInRange', None, None, None, None, None, None), # 5 ('SpecificMoving', 'Dir+X', None, None, None, None, None, None), # 7 ('SpecificMoving', 'Dir+Y', None, None, None, None, None, None), # 8 ('SpecificMoving', 'Dir-X', None, None, None, None, None, None), # 9 ('SpecificMoving', 'Dir-Y', None, None, None, None, None, None), # 10 ] for i in range(self.OPP_NUM_ASSUME): self.dictionary_args.append( ('SpecificAttacking', 'N/A', None, None, None, None, OPP_TEAM_ASSUME, i) ) def convert_act_arr(self, type, a): if type == 'RLA_UAV_Support': args = self.dictionary_args[a] # override wrong actions if args[0] == 'SpecificAttacking': return encode_action_as_digits('N/A', 'N/A', None, None, None, None, None, None) # override incorrect actions if args[0] == 'Idle': return encode_action_as_digits('Idle', 'StaticAlert', None, None, None, None, None, None) return encode_action_as_digits(*args) else: return encode_action_as_digits(*self.dictionary_args[a]) def get_tp_avail_act(self, type): DISABLE = 0 ENABLE = 1 n_act = len(self.dictionary_args) 
ret = np.zeros(n_act) + ENABLE for i in range(n_act): args = self.dictionary_args[i] # for all kind of agents if args[0] == 'PatrolMoving': ret[i] = DISABLE if type == 'RLA_UAV_Support': if args[0] == 'PatrolMoving': ret[i] = DISABLE if args[0] == 'SpecificAttacking': ret[i] = DISABLE if args[0] == 'Idle': ret[i] = DISABLE if args[1] == 'StaticAlert': ret[i] = ENABLE return ret def confirm_parameters_are_correct(self, team, agent_num, opp_agent_num): assert team == self.SELF_TEAM_ASSUME assert self.SELF_TEAM_ASSUME + self.OPP_TEAM_ASSUME == 1 assert self.SELF_TEAM_ASSUME + self.OPP_TEAM_ASSUME == 1 assert opp_agent_num == self.OPP_NUM_ASSUME def count_list_type(x): type_cnt = {} for xx in x: if xx not in type_cnt: type_cnt[xx] = 0 type_cnt[xx] += 1 return len(type_cnt) class ShellEnvWrapper(object): def __init__(self, n_agent, n_thread, space, mcv, rl_functional, alg_config, ScenarioConfig, team): self.n_agent = n_agent self.n_thread = n_thread self.team = team self.space = space self.mcv = mcv self.rl_functional = rl_functional if GlobalConfig.ScenarioConfig.EntityOriented: self.core_dim = GlobalConfig.ScenarioConfig.obs_vec_length else: self.core_dim = space['obs_space']['obs_shape'] self.n_entity_placeholder = alg_config.n_entity_placeholder # whether to use avail_act to block forbiden actions self.AvailActProvided = False if hasattr(ScenarioConfig, 'AvailActProvided'): self.AvailActProvided = ScenarioConfig.AvailActProvided if GlobalConfig.ScenarioConfig.SubTaskSelection in ['UhmapLargeScale', 'UhmapHuge', 'UhmapBreakingBad']: ActionToDiscreteConverter = ActionConvertLegacy else: ActionToDiscreteConverter = ActionConvertPredatorPrey self.action_converter = ActionToDiscreteConverter( SELF_TEAM_ASSUME=team, OPP_TEAM_ASSUME=(1-team), OPP_NUM_ASSUME=GlobalConfig.ScenarioConfig.N_AGENT_EACH_TEAM[1-team] ) # check parameters self.patience = 2000 def interact_with_env(self, StateRecall): if not hasattr(self, 'agent_type'): self.agent_uid = 
GlobalConfig.ScenarioConfig.AGENT_ID_EACH_TEAM[self.team] self.agent_type = [agent_meta['type'] for agent_meta in StateRecall['Latest-Team-Info'][0]['dataArr'] if agent_meta['uId'] in self.agent_uid] if ShellEnvConfig.add_avail_act: self.avail_act = np.stack(tuple(self.action_converter.get_tp_avail_act(tp) for tp in self.agent_type)) self.avail_act = repeat_at(self.avail_act, insert_dim=0, n_times=self.n_thread) act = np.zeros(shape=(self.n_thread, self.n_agent), dtype=np.int) - 1 # 初始化全部为 -1 # read internal coop graph info obs = StateRecall['Latest-Obs'] obs = my_view(obs,[0, 0, -1, self.core_dim]) obs[(obs==0).all(-1)] = np.nan n_entity_raw = obs.shape[-2] AlgorithmConfig.entity_distinct = [list(range(1)), list(range(1,n_entity_raw)), list(range(n_entity_raw,2*n_entity_raw))] P = StateRecall['ENV-PAUSE'] R = ~P RST = StateRecall['Env-Suffered-Reset'] # when needed, train! if not StateRecall['Test-Flag']: self.rl_functional.train() if RST.all(): # just experienced full reset on all episode, this is the first step of all env threads # randomly pick threads eprsn_yita = self.rl_functional.stage_planner.yita if AlgorithmConfig.policy_resonance else 0 EpRsn = np.random.rand(self.n_thread) < eprsn_yita StateRecall['_EpRsn_'] = EpRsn # prepare observation for the real RL algorithm obs_feed = obs[R] I_StateRecall = { 'obs':obs_feed, 'avail_act':self.avail_act[R], 'Test-Flag':StateRecall['Test-Flag'], '_EpRsn_':StateRecall['_EpRsn_'][R], 'threads_active_flag':R, 'Latest-Team-Info':StateRecall['Latest-Team-Info'][R], } # load available act to limit action space if possible if self.AvailActProvided: avail_act = np.array([info['avail-act'] for info in np.array(StateRecall['Latest-Team-Info'][R], dtype=object)]) I_StateRecall.update({'avail_act':avail_act}) # the real RL algorithm ! ! 
act_active, internal_recall = self.rl_functional.interact_with_env_genuine(I_StateRecall) # get decision results act[R] = act_active # confirm actions are valid (satisfy 'avail-act') if ShellEnvConfig.add_avail_act and self.patience>0: self.patience -= 1 assert (gather_righthand(self.avail_act, repeat_at(act, -1, 1), check=False)[R]==1).all() # translate action into ue4 tuple action act_converted = np.array([[ self.action_converter.convert_act_arr(self.agent_type[agentid], act) for agentid, act in enumerate(th) ] for th in act]) # swap thread(batch) axis and agent axis actions_list = np.swapaxes(act_converted, 0, 1) # register callback hook if not StateRecall['Test-Flag']: StateRecall['_hook_'] = internal_recall['_hook_'] assert StateRecall['_hook_'] is not None return actions_list, StateRecall ================================================ FILE: PythonExample/hmp_minimal_modules/ALGORITHM/ppo_ma/stage_planner.py ================================================ import math from .foundation import AlgorithmConfig from UTIL.colorful import * class PolicyRsnConfig: resonance_start_at_update = 10 yita_min_prob = 0.15 # should be >= (1/n_action) yita_max = 0.75 yita_inc_per_update = 0.0075 # (increase to 0.75 in 500 updates) freeze_critic = False yita_shift_method = '-sin' yita_shift_cycle = 1000 class StagePlanner: def __init__(self, mcv) -> None: if AlgorithmConfig.policy_resonance: self.resonance_active = False self.yita = 0 self.yita_min_prob = PolicyRsnConfig.yita_min_prob self.freeze_body = False self.update_cnt = 0 self.mcv = mcv self.trainer = None def is_resonance_active(self,): return self.resonance_active def is_body_freeze(self,): return self.freeze_body def get_yita(self): return self.yita def get_yita_min_prob(self): return PolicyRsnConfig.yita_min_prob def can_exec_trainning(self): return True def update_plan(self): self.update_cnt += 1 if AlgorithmConfig.policy_resonance: if self.resonance_active: self.when_pr_active() elif not self.resonance_active: 
self.when_pr_inactive() return def activate_pr(self): self.resonance_active = True self.freeze_body = True if PolicyRsnConfig.freeze_critic: self.trainer.freeze_body() def when_pr_inactive(self): assert not self.resonance_active if PolicyRsnConfig.resonance_start_at_update >= 0: # mean need to activate pr later if self.update_cnt > PolicyRsnConfig.resonance_start_at_update: # time is up, activate pr self.activate_pr() # log pr = 1 if self.resonance_active else 0 self.mcv.rec(pr, 'resonance') self.mcv.rec(self.yita, 'self.yita') def when_pr_active(self): assert self.resonance_active self._update_yita() # log pr = 1 if self.resonance_active else 0 self.mcv.rec(pr, 'resonance') self.mcv.rec(self.yita, 'self.yita') def _update_yita(self): ''' increase self.yita by @yita_inc_per_update per function call ''' if PolicyRsnConfig.yita_shift_method == '-cos': self.yita = PolicyRsnConfig.yita_max t = -math.cos(2*math.pi/PolicyRsnConfig.yita_shift_cycle * self.update_cnt) * PolicyRsnConfig.yita_max if t<=0: self.yita = 0 else: self.yita = t print亮绿('yita update:', self.yita) elif PolicyRsnConfig.yita_shift_method == '-sin': self.yita = PolicyRsnConfig.yita_max t = -math.sin(2*math.pi/PolicyRsnConfig.yita_shift_cycle * self.update_cnt) * PolicyRsnConfig.yita_max if t<=0: self.yita = 0 else: self.yita = t print亮绿('yita update:', self.yita) elif PolicyRsnConfig.yita_shift_method == 'slow-inc': self.yita += PolicyRsnConfig.yita_inc_per_update if self.yita > PolicyRsnConfig.yita_max: self.yita = PolicyRsnConfig.yita_max print亮绿('yita update:', self.yita) else: assert False ================================================ FILE: PythonExample/hmp_minimal_modules/ALGORITHM/ppo_ma/trajectory.py ================================================ # cython: language_level=3 from config import GlobalConfig import numpy as np from numpy.core.numeric import indices from .foundation import AlgorithmConfig from ALGORITHM.common.traj import TRAJ_BASE import copy from UTIL.colorful import * from 
UTIL.tensor_ops import __hash__, my_view, np_one_hot, np_repeat_at, np_softmax, scatter_with_nan class trajectory(TRAJ_BASE): def __init__(self, traj_limit, env_id): super().__init__(traj_limit, env_id) self.reference_track_name = 'value' def early_finalize(self): assert not self.readonly_lock # unfinished traj self.need_reward_bootstrap = True def set_terminal_obs(self, tobs): self.tobs = copy.deepcopy(tobs) def cut_tail(self): # 删去多余的预留空间 super().cut_tail() TJ = lambda key: getattr(self, key) # 进一步地, 根据这个轨迹上的NaN,删除所有无效时间点 reference_track = getattr(self, self.reference_track_name) if self.need_reward_bootstrap: assert False, ('it should not go here if everything goes as expected') # print('need_reward_bootstrap') 找到最后一个不是nan的位置 T = np.where(~np.isnan(reference_track.squeeze()))[0][-1] self.boot_strap_value = { 'bootstrap_value':TJ('value').squeeze()[T].copy(), } assert not hasattr(self,'tobs') self.set_terminal_obs(TJ('g_obs')[T].copy()) reference_track[T] = np.nan # deprecated if nothing in it p_invalid = np.isnan(my_view(reference_track, [0, -1])).any(axis=-1) p_valid = ~p_invalid if p_invalid.all(): #invalid traj self.deprecated_flag = True return # adjust reward position reward = TJ('reward') for i in reversed(range(self.time_pointer)): if p_invalid[i] and i != 0: # invalid, push reward forward reward[i-1] += reward[i]; reward[i] = np.nan setattr(self, 'reward', reward) # clip NaN for key in self.key_dict: setattr(self, key, TJ(key)[p_valid]) # all done return def reward_push_forward(self, dead_mask): # self.new_reward = self.reward.copy() if AlgorithmConfig.gamma_in_reward_forwarding: gamma = AlgorithmConfig.gamma_in_reward_forwarding_value for i in reversed(range(self.time_pointer)): if i==0: continue self.reward[i-1] += np.where(dead_mask[i], self.reward[i]*gamma, 0) # if dead_mask[i]==True, this frame is invalid, move reward forward, set self.reward[i] to 0 self.reward[i] = np.where(dead_mask[i], 0, self.reward[i]) # if dead_mask[i]==True, this frame is 
invalid, move reward forward, set self.reward[i] to 0 else: for i in reversed(range(self.time_pointer)): if i==0: continue self.reward[i-1] += np.where(dead_mask[i], self.reward[i], 0) # if dead_mask[i]==True, this frame is invalid, move reward forward, set self.reward[i] to 0 self.reward[i] = np.where(dead_mask[i], 0, self.reward[i]) # if dead_mask[i]==True, this frame is invalid, move reward forward, set self.reward[i] to 0 return # new finalize def finalize(self): self.readonly_lock = True assert not self.deprecated_flag TJ = lambda key: getattr(self, key) assert not np.isnan(TJ('reward')).any() # deadmask tmp = np.isnan(my_view(self.obs, [0,0,-1])) dead_mask = tmp.all(-1) # if (True): # check if the mask is correct # dead_mask_self = np.isnan(my_view(self.obs, [0,0,-1])[:,:,0]) # assert (dead_mask==dead_mask_self).all() # dead_mask2 = tmp.any(-1) # assert (dead_mask==dead_mask2).all() self.reward_push_forward(dead_mask) # push terminal reward forward 38 42 54 threat = np.zeros(shape=dead_mask.shape) - 1 assert dead_mask.shape[0] == self.time_pointer for i in reversed(range(self.time_pointer)): # threat[:(i+1)] 不包含threat[(i+1)] if i+1 < self.time_pointer: threat[:(i+1)] += (~(dead_mask[i+1]&dead_mask[i])).astype(np.int) elif i+1 == self.time_pointer: threat[:] += (~dead_mask[i]).astype(np.int) SAFE_LIMIT = 11 threat = np.clip(threat, -1, SAFE_LIMIT) setattr(self, 'threat', np.expand_dims(threat, -1)) # ! 
Use GAE to calculate return self.gae_finalize_return(reward_key='reward', value_key='value', new_return_name='return') return def gae_finalize_return(self, reward_key, value_key, new_return_name): # ------- gae parameters ------- gamma = AlgorithmConfig.gamma tau = AlgorithmConfig.tau # ------- -------------- ------- rewards = getattr(self, reward_key) value = getattr(self, value_key) length = rewards.shape[0] assert rewards.shape[0]==value.shape[0] # if dimension not aligned if rewards.ndim == value.ndim-1: rewards = np.expand_dims(rewards, -1) # initalize two more tracks setattr(self, new_return_name, np.zeros_like(value)) self.key_dict.append(new_return_name) returns = getattr(self, new_return_name) boot_strap = 0 if not self.need_reward_bootstrap else self.boot_strap_value['bootstrap_'+value_key] for step in reversed(range(length)): if step==(length-1): # 最后一帧 value_preds_delta = rewards[step] + gamma * boot_strap - value[step] gae = value_preds_delta else: value_preds_delta = rewards[step] + gamma * value[step + 1] - value[step] gae = value_preds_delta + gamma * tau * gae returns[step] = gae + value[step] class TrajPoolManager(object): def __init__(self): self.cnt = 0 def absorb_finalize_pool(self, pool): for traj_handle in pool: traj_handle.cut_tail() pool = list(filter(lambda traj: not traj.deprecated_flag, pool)) for traj_handle in pool: traj_handle.finalize() self.cnt += 1 task = ['train'] return task, pool ''' 轨迹池管理 ''' class TrajManagerBase(object): def __init__(self, n_env, traj_limit): self.n_env = n_env self.traj_limit = traj_limit self.update_cnt = 0 self.traj_pool = [] self.registered_keys = [] self.live_trajs = [trajectory(self.traj_limit, env_id=i) for i in range(self.n_env)] self.live_traj_frame = [0 for _ in range(self.n_env)] self._traj_lock_buf = None self.patience = 1000 pass def __check_integraty(self, traj_frag): if self.patience < 0: return # stop wasting time checking this self.patience -= 1 for key in traj_frag: if key not in 
self.registered_keys and (not key.startswith('_')): self.registered_keys.append(key) for key in self.registered_keys: assert key in traj_frag, ('this key sometimes disappears from the traj_frag:', key) def batch_update(self, traj_frag): self.__check_integraty(traj_frag) done = traj_frag['_DONE_']; traj_frag.pop('_DONE_') # done flag skip = traj_frag['_SKIP_']; traj_frag.pop('_SKIP_') # skip/frozen flag tobs = traj_frag['_TOBS_']; traj_frag.pop('_TOBS_') # terminal obs # single bool to list bool if isinstance(done, bool): done = [done for i in range(self.n_env)] if isinstance(skip, bool): skip = [skip for i in range(self.n_env)] n_active = sum(~skip) # feed cnt = 0 for env_i in range(self.n_env): if skip[env_i]: continue # otherwise frag_index = cnt; cnt += 1 env_index = env_i traj_handle = self.live_trajs[env_index] for key in traj_frag: self.traj_remember(traj_handle, key=key, content=traj_frag[key],frag_index=frag_index, n_active=n_active) self.live_traj_frame[env_index] += 1 traj_handle.time_shift() if done[env_i]: assert tobs[env_i] is not None # get the final obs traj_handle.set_terminal_obs(tobs[env_i]) self.traj_pool.append(traj_handle) self.live_trajs[env_index] = trajectory(self.traj_limit, env_id=env_index) self.live_traj_frame[env_index] = 0 def traj_remember(self, traj, key, content, frag_index, n_active): if content is None: traj.remember(key, None) elif isinstance(content, dict): for sub_key in content: self.traj_remember(traj, "".join((key , ">" , sub_key)), content=content[sub_key], frag_index=frag_index, n_active=n_active) else: assert n_active == len(content), ('length error') traj.remember(key, content[frag_index]) # * class BatchTrajManager(TrajManagerBase): def __init__(self, n_env, traj_limit, trainer_hook): super().__init__(n_env, traj_limit) self.trainer_hook = trainer_hook self.traj_limit = traj_limit self.train_traj_needed = AlgorithmConfig.train_traj_needed self.pool_manager = TrajPoolManager() def update(self, traj_frag, index): assert 
traj_frag is not None for j, env_i in enumerate(index): traj_handle = self.live_trajs[env_i] for key in traj_frag: if traj_frag[key] is None: assert False, key if isinstance(traj_frag[key], dict): # 如果是二重字典,特殊处理 for sub_key in traj_frag[key]: content = traj_frag[key][sub_key][j] traj_handle.remember(key + ">" + sub_key, content) else: content = traj_frag[key][j] traj_handle.remember(key, content) self.live_traj_frame[env_i] += 1 traj_handle.time_shift() return # 函数入口 def feed_traj_framedata(self, traj_frag, require_hook=False): # an unlock hook must be executed before new trajectory feed in assert self._traj_lock_buf is None if require_hook: # the traj_frag is not intact, lock up traj_frag, wait for more assert '_SKIP_' in traj_frag assert '_DONE_' not in traj_frag assert 'reward' not in traj_frag self._traj_lock_buf = traj_frag return self.unlock_fn else: assert '_DONE_' in traj_frag assert '_SKIP_' in traj_frag self.batch_update(traj_frag=traj_frag) return def clear_traj_pool(self): print('do update %d'%self.update_cnt) _, self.traj_pool = self.pool_manager.absorb_finalize_pool(pool=self.traj_pool) self.traj_pool = [] # self.update_cnt += 1 # assert ppo_update_cnt == self.update_cnt return self.update_cnt def train_and_clear_traj_pool(self): print('do update %d'%self.update_cnt) current_task_l, self.traj_pool = self.pool_manager.absorb_finalize_pool(pool=self.traj_pool) for current_task in current_task_l: ppo_update_cnt = self.trainer_hook(self.traj_pool, current_task) self.traj_pool = [] self.update_cnt += 1 # assert ppo_update_cnt == self.update_cnt return self.update_cnt def can_exec_training(self): if len(self.traj_pool) >= self.train_traj_needed: return True else: return False def unlock_fn(self, traj_frag): assert self._traj_lock_buf is not None traj_frag.update(self._traj_lock_buf) self._traj_lock_buf = None assert '_DONE_' in traj_frag assert '_SKIP_' in traj_frag self.batch_update(traj_frag=traj_frag) ================================================ 
FILE: PythonExample/hmp_minimal_modules/ALGORITHM/random/actionset.py ================================================ # ==================================================================== # random moving # ==================================================================== import numpy as np from MISSION.uhmap.actionset_v3 import strActionToDigits, ActDigitLen from MISSION.uhmap.actset_lookup import encode_action_as_digits class ActionConvertV1Dummy(): def __init__(self, SELF_TEAM_ASSUME, OPP_TEAM_ASSUME, OPP_NUM_ASSUME) -> None: self.SELF_TEAM_ASSUME = SELF_TEAM_ASSUME self.OPP_TEAM_ASSUME = OPP_TEAM_ASSUME self.OPP_NUM_ASSUME = OPP_NUM_ASSUME # (main_cmd, sub_cmd, x=None, y=None, z=None, UID=None, T=None, T_index=None) self.dictionary_args = [ 'ActionSet1::N/A;N/A' , ] self.ActDigitLen = ActDigitLen self.n_act = len(self.dictionary_args) def convert_act_arr(self, type, a): return strActionToDigits(self.dictionary_args[a]) def get_tp_avail_act(self, type): DISABLE = 0 ENABLE = 1 n_act = len(self.dictionary_args) ret = np.zeros(n_act) + ENABLE for i in range(n_act): args = self.dictionary_args[i] # for all kind of agents if args[0] == 'PatrolMoving': ret[i] = DISABLE if type == 'RLA_UAV_Support': if args[0] == 'PatrolMoving': ret[i] = DISABLE if args[0] == 'SpecificAttacking': ret[i] = DISABLE if args[0] == 'Idle': ret[i] = DISABLE if args[1] == 'StaticAlert': ret[i] = ENABLE return ret def confirm_parameters_are_correct(self, team, agent_num, opp_agent_num): assert team == self.SELF_TEAM_ASSUME assert self.SELF_TEAM_ASSUME + self.OPP_TEAM_ASSUME == 1 assert self.SELF_TEAM_ASSUME + self.OPP_TEAM_ASSUME == 1 assert opp_agent_num == self.OPP_NUM_ASSUME class ActionConvertV1Carrier(): def __init__(self, SELF_TEAM_ASSUME, OPP_TEAM_ASSUME, OPP_NUM_ASSUME) -> None: self.SELF_TEAM_ASSUME = SELF_TEAM_ASSUME self.OPP_TEAM_ASSUME = OPP_TEAM_ASSUME self.OPP_NUM_ASSUME = OPP_NUM_ASSUME # (main_cmd, sub_cmd, x=None, y=None, z=None, UID=None, T=None, T_index=None) 
self.dictionary_args = [ 'ActionSet1::N/A;N/A' , 'ActionSet1::Special;Detach' , 'ActionSet1::Idle;DynamicGuard' , 'ActionSet1::Idle;StaticAlert' , 'ActionSet1::Idle;AggressivePersue' , 'ActionSet1::Idle;AsFarAsPossible' , 'ActionSet1::Idle;StayWhenTargetInRange' , 'ActionSet1::Idle;StayWhenTargetInHalfRange' , 'ActionSet1::SpecificMoving;Dir+X' , 'ActionSet1::SpecificMoving;Dir+Y' , 'ActionSet1::SpecificMoving;Dir-X' , 'ActionSet1::SpecificMoving;Dir-Y' , 'ActionSet1::PatrolMoving;Dir+X' , 'ActionSet1::PatrolMoving;Dir+Y' , 'ActionSet1::PatrolMoving;Dir-X' , 'ActionSet1::PatrolMoving;Dir-Y' , ] for i in range(self.OPP_NUM_ASSUME): self.dictionary_args.append( f'ActionSet1::SpecificAttacking;T{OPP_TEAM_ASSUME}-{i}') self.ActDigitLen = ActDigitLen self.n_act = len(self.dictionary_args) def convert_act_arr(self, type, a): return strActionToDigits(self.dictionary_args[a]) def get_tp_avail_act(self, type): DISABLE = 0 ENABLE = 1 n_act = len(self.dictionary_args) ret = np.zeros(n_act) + ENABLE for i in range(n_act): args = self.dictionary_args[i] # for all kind of agents if args[0] == 'PatrolMoving': ret[i] = DISABLE if type == 'RLA_UAV_Support': if args[0] == 'PatrolMoving': ret[i] = DISABLE if args[0] == 'SpecificAttacking': ret[i] = DISABLE if args[0] == 'Idle': ret[i] = DISABLE if args[1] == 'StaticAlert': ret[i] = ENABLE return ret def confirm_parameters_are_correct(self, team, agent_num, opp_agent_num): assert team == self.SELF_TEAM_ASSUME assert self.SELF_TEAM_ASSUME + self.OPP_TEAM_ASSUME == 1 assert self.SELF_TEAM_ASSUME + self.OPP_TEAM_ASSUME == 1 assert opp_agent_num == self.OPP_NUM_ASSUME class ActionConvertV1Momentum(): def __init__(self, SELF_TEAM_ASSUME, OPP_TEAM_ASSUME, OPP_NUM_ASSUME) -> None: self.SELF_TEAM_ASSUME = SELF_TEAM_ASSUME self.OPP_TEAM_ASSUME = OPP_TEAM_ASSUME self.OPP_NUM_ASSUME = OPP_NUM_ASSUME # (main_cmd, sub_cmd, x=None, y=None, z=None, UID=None, T=None, T_index=None) self.dictionary_args = [ 'ActionSet1::MoveToDirection2D@Z;X=1.0 Y=0.0 
Z=700.0', 'ActionSet1::MoveToDirection2D@Z;X=1.0 Y=1.0 Z=700.0', 'ActionSet1::MoveToDirection2D@Z;X=0.0 Y=1.0 Z=700.0', 'ActionSet1::MoveToDirection2D@Z;X=-1.0 Y=1.0 Z=700.0', 'ActionSet1::MoveToDirection2D@Z;X=-1.0 Y=0.0 Z=700.0', 'ActionSet1::MoveToDirection2D@Z;X=-1.0 Y=-1.0 Z=700.0', 'ActionSet1::MoveToDirection2D@Z;X=0.0 Y=-1.0 Z=700.0', 'ActionSet1::MoveToDirection2D@Z;X=1.0 Y=-1.0 Z=700.0', ] self.ActDigitLen = ActDigitLen self.n_act = len(self.dictionary_args) def convert_act_arr(self, type, a): return strActionToDigits(self.dictionary_args[a]) def get_tp_avail_act(self, type): DISABLE = 0 ENABLE = 1 n_act = len(self.dictionary_args) ret = np.zeros(n_act) + ENABLE return ret def confirm_parameters_are_correct(self, team, agent_num, opp_agent_num): assert team == self.SELF_TEAM_ASSUME assert self.SELF_TEAM_ASSUME + self.OPP_TEAM_ASSUME == 1 assert self.SELF_TEAM_ASSUME + self.OPP_TEAM_ASSUME == 1 assert opp_agent_num == self.OPP_NUM_ASSUME class ActionConvertMovingV4(): def __init__(self, SELF_TEAM_ASSUME, OPP_TEAM_ASSUME, OPP_NUM_ASSUME) -> None: self.dictionary_args = [ 'ActionSet4::MoveToDirection;X=1.0 Y=0.0 Z=0.0', 'ActionSet4::MoveToDirection;X=1.0 Y=1.0 Z=0.0', 'ActionSet4::MoveToDirection;X=0.0 Y=1.0 Z=0.0', 'ActionSet4::MoveToDirection;X=-1.0 Y=1.0 Z=0.0', 'ActionSet4::MoveToDirection;X=-1.0 Y=0.0 Z=0.0', 'ActionSet4::MoveToDirection;X=-1.0 Y=-1.0 Z=0.0', 'ActionSet4::MoveToDirection;X=0.0 Y=-1.0 Z=0.0', 'ActionSet4::MoveToDirection;X=1.0 Y=-1.0 Z=0.0', ] self.n_act = len(self.dictionary_args) self.ActDigitLen = ActDigitLen def convert_act_arr(self, type, a): return strActionToDigits(self.dictionary_args[a]) def get_tp_avail_act(self, type): DISABLE = 0; ENABLE = 1 ret = np.zeros(self.n_act) + ENABLE # enable all return ret class CarrierAction(): def __init__(self, SELF_TEAM_ASSUME, OPP_TEAM_ASSUME, OPP_NUM_ASSUME) -> None: self.dictionary_args = [ 'ActionSet4::MoveToDirection;X=1.0 Y=0.0 Z=0.0', 'ActionSet4::MoveToDirection;X=1.0 Y=1.0 Z=0.0', 
class ActionConvertV2():
    """ActionSet2 action table: idle modes, 4 specific-move / 4 patrol-move
    directions, plus one SpecificAttacking entry per assumed opponent."""

    def __init__(self, SELF_TEAM_ASSUME, OPP_TEAM_ASSUME, OPP_NUM_ASSUME) -> None:
        self.SELF_TEAM_ASSUME = SELF_TEAM_ASSUME
        self.OPP_TEAM_ASSUME = OPP_TEAM_ASSUME
        self.OPP_NUM_ASSUME = OPP_NUM_ASSUME
        # each entry encodes (main_cmd, sub_cmd, x=None, y=None, z=None, UID=None, T=None, T_index=None)
        self.dictionary_args = [
            'ActionSet2::N/A;N/A',
            'ActionSet2::Idle;DynamicGuard',
            'ActionSet2::Idle;StaticAlert',
            'ActionSet2::Idle;AggressivePersue',
            'ActionSet2::Idle;AsFarAsPossible',
            'ActionSet2::Idle;StayWhenTargetInRange',
            'ActionSet2::Idle;StayWhenTargetInHalfRange',
            'ActionSet2::SpecificMoving;Dir+X',
            'ActionSet2::SpecificMoving;Dir+Y',
            'ActionSet2::SpecificMoving;Dir-X',
            'ActionSet2::SpecificMoving;Dir-Y',
            'ActionSet2::PatrolMoving;Dir+X',
            'ActionSet2::PatrolMoving;Dir+Y',
            'ActionSet2::PatrolMoving;Dir-X',
            'ActionSet2::PatrolMoving;Dir-Y',
        ]
        # one attack action per assumed opponent agent
        for i in range(self.OPP_NUM_ASSUME):
            self.dictionary_args.append(f'ActionSet2::SpecificAttacking;T{OPP_TEAM_ASSUME}-{i}')
        self.ActDigitLen = ActDigitLen
        self.n_act = len(self.dictionary_args)

    def convert_act_arr(self, type, a):
        # `type` is unused here but kept for interface parity
        return strActionToDigits(self.dictionary_args[a])

    def get_tp_avail_act(self, type):
        """Return an availability mask (1=enabled, 0=disabled) over the action table for agent `type`."""
        DISABLE = 0
        ENABLE = 1
        n_act = len(self.dictionary_args)
        ret = np.zeros(n_act) + ENABLE
        for i in range(n_act):
            # BUGFIX: the original did `args = self.dictionary_args[i]` (a string) and
            # then compared `args[0] == 'PatrolMoving'` -- a single CHARACTER against a
            # word, which is always False, so the mask was a silent no-op. Parse the
            # entry into its main/sub command instead.
            main_cmd, _, sub_cmd = self.dictionary_args[i].split('::', 1)[-1].partition(';')
            # for all kinds of agents
            if main_cmd == 'PatrolMoving':
                ret[i] = DISABLE
            if type == 'RLA_UAV_Support':
                if main_cmd == 'PatrolMoving':
                    ret[i] = DISABLE
                if main_cmd == 'SpecificAttacking':
                    ret[i] = DISABLE
                if main_cmd == 'Idle':
                    ret[i] = DISABLE
                if sub_cmd == 'StaticAlert':
                    ret[i] = ENABLE
        return ret

    def confirm_parameters_are_correct(self, team, agent_num, opp_agent_num):
        """Sanity-check that the runtime team layout matches construction-time assumptions."""
        assert team == self.SELF_TEAM_ASSUME
        # BUGFIX: this assertion was duplicated verbatim in the original; deduplicated.
        assert self.SELF_TEAM_ASSUME + self.OPP_TEAM_ASSUME == 1
        assert opp_agent_num == self.OPP_NUM_ASSUME
# ================================================
# FILE: PythonExample/hmp_minimal_modules/ALGORITHM/random/foundation.py
# ================================================
import numpy as np
from UTIL.colorful import *
from UTIL.tensor_ops import my_view, __hash__
from config import GlobalConfig


class AlgorithmConfig:
    preserve = ''
    # Atomic actions available in this mission (for reference):
    #   ChangeColor(color_index)        -- change the agent's own color
    #   MoveToDirection(x, y, z)        -- movement
    #   AccHighLevel4                   -- temporarily raise acceleration
    #   FireToWaterDrop(water_drop_uid) -- fire weapon at a water drop


def _sample_random_converted_actions(ctrl):
    """Draw one uniform-random action index per (thread, agent) and convert each
    index to its digit-array form via ctrl.actions_set.
    Returns an array of shape (n_thread, n_agent, ActDigitLen).
    Factored out of the four controllers below, which previously carried four
    verbatim copies of this code."""
    raw = np.random.randint(low=0, high=ctrl.n_action, size=(ctrl.n_thread, ctrl.n_agent, 1))
    converted = list(map(lambda x: ctrl.actions_set.convert_act_arr(None, x), raw.flatten()))
    return np.array(converted).reshape(ctrl.n_thread, ctrl.n_agent, ctrl.actions_set.ActDigitLen)


class RandomController(object):
    """Uniform random policy emitting raw action indices (no digit conversion)."""

    def __init__(self, n_agent, n_thread, space, mcv=None, team=None):
        self.n_agent = n_agent
        self.n_thread = n_thread
        self.space = space
        self.mcv = mcv
        self.n_action = GlobalConfig.ScenarioConfig.n_actions

    def interact_with_env(self, StateRecall):
        # NOTE: paused threads (StateRecall['ENV-PAUSE']) still receive random actions;
        # the original behaved the same (its obs/pause slicing was dead code, removed).
        actions = np.random.randint(low=0, high=self.n_action, size=(self.n_thread, self.n_agent, 1))
        StateRecall['_hook_'] = None
        return actions, StateRecall


class DummyRandomControllerWithActionSetV1(object):
    """Random policy over the ActionConvertV1Dummy action table."""

    def __init__(self, n_agent, n_thread, space, mcv=None, team=None):
        from .actionset import ActionConvertV1Dummy
        self.n_agent = n_agent
        self.n_thread = n_thread
        self.space = space
        self.mcv = mcv
        self.actions_set = ActionConvertV1Dummy(
            SELF_TEAM_ASSUME=team,
            OPP_TEAM_ASSUME=(1 - team),
            OPP_NUM_ASSUME=GlobalConfig.ScenarioConfig.N_AGENT_EACH_TEAM[1 - team])
        self.n_action = self.actions_set.n_act

    def interact_with_env(self, StateRecall):
        act_converted = _sample_random_converted_actions(self)
        StateRecall['_hook_'] = None
        return act_converted, StateRecall


class RandomControllerWithActionSetV2(object):
    """Random policy over the ActionConvertV2 action table."""

    def __init__(self, n_agent, n_thread, space, mcv=None, team=None):
        from .actionset import ActionConvertV2
        self.n_agent = n_agent
        self.n_thread = n_thread
        self.space = space
        self.mcv = mcv
        self.actions_set = ActionConvertV2(
            SELF_TEAM_ASSUME=team,
            OPP_TEAM_ASSUME=(1 - team),
            OPP_NUM_ASSUME=GlobalConfig.ScenarioConfig.N_AGENT_EACH_TEAM[1 - team])
        self.n_action = self.actions_set.n_act

    def interact_with_env(self, StateRecall):
        act_converted = _sample_random_converted_actions(self)
        StateRecall['_hook_'] = None
        return act_converted, StateRecall


class RandomControllerWithActionSetV4(object):
    """Random policy over the ActionConvertMovingV4 action table."""

    def __init__(self, n_agent, n_thread, space, mcv=None, team=None):
        from .actionset import ActionConvertMovingV4
        self.n_agent = n_agent
        self.n_thread = n_thread
        self.space = space
        self.mcv = mcv
        self.actions_set = ActionConvertMovingV4(
            SELF_TEAM_ASSUME=team,
            OPP_TEAM_ASSUME=(1 - team),
            OPP_NUM_ASSUME=GlobalConfig.ScenarioConfig.N_AGENT_EACH_TEAM[1 - team])
        self.n_action = self.actions_set.n_act

    def interact_with_env(self, StateRecall):
        act_converted = _sample_random_converted_actions(self)
        StateRecall['_hook_'] = None
        return act_converted, StateRecall


class RandomControllerWithActionSetV1(object):
    """Random policy over the ActionConvertV1Carrier action table.
    NOTE(review): despite the V1 name this uses the Carrier converter, while the
    'Dummy' variant above uses ActionConvertV1Dummy -- confirm naming is intended."""

    def __init__(self, n_agent, n_thread, space, mcv=None, team=None):
        from .actionset import ActionConvertV1Carrier
        self.n_agent = n_agent
        self.n_thread = n_thread
        self.space = space
        self.mcv = mcv
        self.actions_set = ActionConvertV1Carrier(
            SELF_TEAM_ASSUME=team,
            OPP_TEAM_ASSUME=(1 - team),
            OPP_NUM_ASSUME=GlobalConfig.ScenarioConfig.N_AGENT_EACH_TEAM[1 - team])
        self.n_action = self.actions_set.n_act

    def interact_with_env(self, StateRecall):
        act_converted = _sample_random_converted_actions(self)
        StateRecall['_hook_'] = None
        return act_converted, StateRecall


class RandomControllerWithMomentumAgent(object):
    """Random policy over the ActionConvertV1Momentum action table."""

    def __init__(self, n_agent, n_thread, space, mcv=None, team=None):
        from .actionset import ActionConvertV1Momentum
        self.n_agent = n_agent
        self.n_thread = n_thread
        self.space = space
        self.mcv = mcv
        self.actions_set = ActionConvertV1Momentum(
            SELF_TEAM_ASSUME=team,
            OPP_TEAM_ASSUME=(1 - team),
            OPP_NUM_ASSUME=GlobalConfig.ScenarioConfig.N_AGENT_EACH_TEAM[1 - team])
        self.n_action = self.actions_set.n_act

    def interact_with_env(self, StateRecall):
        act_converted = _sample_random_converted_actions(self)
        StateRecall['_hook_'] = None
        return act_converted, StateRecall
# ================================================
# FILE: PythonExample/hmp_minimal_modules/ALGORITHM/script_ai/a_attackpost.py
# ================================================
import numpy as np
from UTIL.colorful import *
from UTIL.tensor_ops import my_view, __hash__
from config import GlobalConfig
from MISSION.uhmap.actionset_v3 import strActionToDigits, ActDigitLen


class AlgorithmConfig:
    preserve = ''


class DummyAlgorithmBase():
    """Base scripted policy: emits all-zero actions (NaN for paused threads)."""

    def __init__(self, n_agent, n_thread, space, mcv=None, team=None):
        self.n_agent = n_agent
        self.n_thread = n_thread
        self.ScenarioConfig = GlobalConfig.ScenarioConfig
        self.attack_order = {}
        self.team = team

    def forward(self, inp, state, mask=None):
        raise NotImplementedError

    def to(self, device):
        # no parameters to move; kept for a torch-like interface
        return self

    def interact_with_env(self, State_Recall):
        assert State_Recall['Latest-Obs'] is not None, ('make sure obs is ok')
        ENV_PAUSE = State_Recall['ENV-PAUSE']
        ENV_ACTIVE = ~ENV_PAUSE
        assert self.n_thread == len(ENV_ACTIVE), ('the number of thread is wrong?')
        actions = np.zeros(shape=(self.n_thread, self.n_agent, ActDigitLen))
        # set actions of in-active threads to NaN (done again in multi_team.py, not strictly necessary)
        actions[ENV_PAUSE] = np.nan
        # NOTE(review): the base swaps to (n_agent, n_thread, ...) while the
        # subclass below returns (n_thread, n_agent, ...) -- confirm which
        # layout the caller expects.
        actions = np.swapaxes(actions, 0, 1)
        return actions, {}


class AttackPostPreprogramBaseline(DummyAlgorithmBase):
    """Scripted attacker: agents whose x exceeds (team-average x - 1000) attack
    opponent T1-0; the rest switch to dynamic guard."""

    def interact_with_env(self, State_Recall):
        assert State_Recall['Latest-Obs'] is not None, ('make sure obs is ok')
        ENV_PAUSE = State_Recall['ENV-PAUSE']
        ENV_ACTIVE = ~ENV_PAUSE
        assert self.n_thread == len(ENV_ACTIVE), ('the number of thread is wrong?')
        # (removed dead locals `n_active_thread` and `AirCarrierUID` from the original)
        actions = np.zeros(shape=(self.n_thread, self.n_agent, ActDigitLen))
        self_agent_uid_range = GlobalConfig.ScenarioConfig.AGENT_ID_EACH_TEAM[self.team]
        for thread in range(self.n_thread):
            if ENV_PAUSE[thread]:
                # this thread is paused: do nothing for it
                continue
            # x coordinate of every own-team agent in this thread
            x_arr = np.array([d['agentLocationArr'][0] for d in
                              np.array(State_Recall['Latest-Team-Info'][thread]['dataArr'])[self_agent_uid_range]])
            x_arr_valid = np.array([x for x in x_arr if np.isfinite(x)])
            x_avg = x_arr_valid.mean()
            for index, x in enumerate(x_arr):
                if not np.isfinite(x):
                    # NOTE(review): `pass` falls through, so an agent with a
                    # non-finite x still gets the guard order below (NaN
                    # comparisons are False); `continue` was probably intended.
                    # Behavior kept as-is.
                    pass
                if x > x_avg - 1000:
                    actions[thread, index] = strActionToDigits('ActionSet2::SpecificAttacking;T1-0')
                else:
                    actions[thread, index] = strActionToDigits('ActionSet2::Idle;DynamicGuard')
        # set actions of in-active threads to NaN (done again in multi_team.py, not strictly necessary)
        actions[ENV_PAUSE] = np.nan
        return actions, {}


# ================================================
# FILE: PythonExample/hmp_minimal_modules/ALGORITHM/script_ai/a_escape.py
# ================================================
import numpy as np
from UTIL.colorful import *
from UTIL.tensor_ops import my_view, __hash__
from config import GlobalConfig
from MISSION.uhmap.actionset_v3 import strActionToDigits, ActDigitLen


class AlgorithmConfig:
    preserve = ''


class DummyAlgorithmBase():
    """Base scripted policy: emits all-zero actions (NaN for paused threads)."""

    def __init__(self, n_agent, n_thread, space, mcv=None, team=None):
        self.n_agent = n_agent
        self.n_thread = n_thread
        self.ScenarioConfig = GlobalConfig.ScenarioConfig
        self.attack_order = {}
        self.team = team

    def forward(self, inp, state, mask=None):
        raise NotImplementedError

    def to(self, device):
        # no parameters to move; kept for a torch-like interface
        return self

    def interact_with_env(self, State_Recall):
        assert State_Recall['Latest-Obs'] is not None, ('make sure obs is ok')
        ENV_PAUSE = State_Recall['ENV-PAUSE']
        ENV_ACTIVE = ~ENV_PAUSE
        assert self.n_thread == len(ENV_ACTIVE), ('the number of thread is wrong?')
        actions = np.zeros(shape=(self.n_thread, self.n_agent, ActDigitLen))
        # set actions of in-active threads to NaN (done again in multi_team.py, not strictly necessary)
        actions[ENV_PAUSE] = np.nan
        # swap (n_thread, n_agent) -> (n_agent, n_thread)
        actions = np.swapaxes(actions, 0, 1)
        return actions, {}
class EscapeGreenPreprogramBaseline(DummyAlgorithmBase):
    """Scripted green team: every agent attacks opponent T1-0 every step."""

    def interact_with_env(self, State_Recall):
        assert State_Recall['Latest-Obs'] is not None, ('make sure obs is ok')
        ENV_PAUSE = State_Recall['ENV-PAUSE']
        ENV_ACTIVE = ~ENV_PAUSE
        assert self.n_thread == len(ENV_ACTIVE), ('the number of thread is wrong?')
        # (removed dead locals `n_active_thread`, `AirCarrierUID`, `x_arr_valid`, `x_avg`:
        #  they were computed but never used, and `.mean()` warned on empty input)
        actions = np.zeros(shape=(self.n_thread, self.n_agent, ActDigitLen))
        self_agent_uid_range = GlobalConfig.ScenarioConfig.AGENT_ID_EACH_TEAM[self.team]
        for thread in range(self.n_thread):
            if ENV_PAUSE[thread]:
                # this thread is paused: do nothing for it
                continue
            x_arr = np.array([d['agentLocationArr'][0] for d in
                              np.array(State_Recall['Latest-Team-Info'][thread]['dataArr'])[self_agent_uid_range]])
            for index, x in enumerate(x_arr):
                if not np.isfinite(x):
                    # NOTE(review): `pass` falls through -- `continue` was probably
                    # intended for dead agents. Behavior kept as-is.
                    pass
                actions[thread, index] = strActionToDigits('ActionSet2::SpecificAttacking;T1-0')
        # set actions of in-active threads to NaN (done again in multi_team.py, not strictly necessary)
        actions[ENV_PAUSE] = np.nan
        return actions, {}


class EscapeRedPreprogramBaseline(DummyAlgorithmBase):
    """Scripted red team: the first two agents move toward -X, the rest toward +X."""

    def interact_with_env(self, State_Recall):
        assert State_Recall['Latest-Obs'] is not None, ('make sure obs is ok')
        ENV_PAUSE = State_Recall['ENV-PAUSE']
        ENV_ACTIVE = ~ENV_PAUSE
        assert self.n_thread == len(ENV_ACTIVE), ('the number of thread is wrong?')
        # (removed dead locals `x_arr_valid` / `x_avg`, see EscapeGreenPreprogramBaseline)
        actions = np.zeros(shape=(self.n_thread, self.n_agent, ActDigitLen))
        self_agent_uid_range = GlobalConfig.ScenarioConfig.AGENT_ID_EACH_TEAM[self.team]
        for thread in range(self.n_thread):
            if ENV_PAUSE[thread]:
                # this thread is paused: do nothing for it
                continue
            x_arr = np.array([d['agentLocationArr'][0] for d in
                              np.array(State_Recall['Latest-Team-Info'][thread]['dataArr'])[self_agent_uid_range]])
            for index, x in enumerate(x_arr):
                if not np.isfinite(x):
                    # NOTE(review): `pass` falls through -- `continue` was probably
                    # intended for dead agents. Behavior kept as-is.
                    pass
                if index < 2:
                    actions[thread, index] = strActionToDigits('ActionSet4::MoveToDirection;X=-1.0 Y=0.0 Z=0.0')
                else:
                    actions[thread, index] = strActionToDigits('ActionSet4::MoveToDirection;X=+1.0 Y=0.0 Z=0.0')
        # set actions of in-active threads to NaN (done again in multi_team.py, not strictly necessary)
        actions[ENV_PAUSE] = np.nan
        return actions, {}


# ================================================
# FILE: PythonExample/hmp_minimal_modules/ALGORITHM/script_ai/a_test_reproduce.py
# ================================================
import numpy as np
from UTIL.colorful import *
from UTIL.tensor_ops import my_view, __hash__
from config import GlobalConfig
from MISSION.uhmap.actionset_v3 import strActionToDigits, ActDigitLen


class AlgorithmConfig:
    preserve = ''


class DummyAlgorithmBase():
    """Base scripted policy: emits all-zero actions (NaN for paused threads)."""

    def __init__(self, n_agent, n_thread, space, mcv=None, team=None):
        self.n_agent = n_agent
        self.n_thread = n_thread
        self.ScenarioConfig = GlobalConfig.ScenarioConfig
        self.attack_order = {}
        self.team = team

    def forward(self, inp, state, mask=None):
        raise NotImplementedError

    def to(self, device):
        # no parameters to move; kept for a torch-like interface
        return self

    def interact_with_env(self, State_Recall):
        assert State_Recall['Latest-Obs'] is not None, ('make sure obs is ok')
        ENV_PAUSE = State_Recall['ENV-PAUSE']
        ENV_ACTIVE = ~ENV_PAUSE
        assert self.n_thread == len(ENV_ACTIVE), ('the number of thread is wrong?')
        actions = np.zeros(shape=(self.n_thread, self.n_agent, ActDigitLen))
        # set actions of in-active threads to NaN (done again in multi_team.py, not strictly necessary)
        actions[ENV_PAUSE] = np.nan
        # swap (n_thread, n_agent) -> (n_agent, n_thread)
        actions = np.swapaxes(actions, 0, 1)
        return actions, {}
# fixed RGBA color strings cycled through by TestReproduce
pre_def_color = [
    '(R=1,G=0,B=0,A=1)',
    '(R=0,G=1,B=0,A=1)',
    '(R=0,G=0,B=1,A=1)',
]

# per-episode reference selection tables used to cross-check the live
# x-coordinate-derived selections (one inner list per recorded episode)
sel_l = [
    [-8, -8, -4, -3, -5, -5, -4, -2, 0, 0, 1, 2, 3, 0, 4, 4, 3, 5, 6, 8, -7, -5, -6, -6, -3, -3, -2, -1, -1, 0, 1, 1, 0, 4, 5, 6, 5, 6, 6, 4, -7, -6, -5, -6, -3, -4, -3, -2, -1, 0, 0, -1, 0, 1, 4, 3, 5, 6, 5, 6, -7, -8, -5, -4, -4, -2, -1, 0, 0, -1, 0, 1, 2, 0, 3, 5, 2, 4, 4, 8, -7, -6, -5, -6, -3, -4, 0, -2, -1, 0, -1, -1, 1, 2, 1, 2, 6, 4, 3, 5, -7, -6, -5, -4, -4, -3, -3, -2, -3, 1, 1, 1, 3, 2, 2, 6, 5, 3, 5, 7, -5, -5, -2, -3, -4, -2, -4, -1, 0, -1, 0, 0, 2, 1, 5, 1, 2, 3, 6, 6, -7, -6, -7, -5, -4, -1, -2, -5, -2, -1, -1, 1, 1, 4, 3, 4, 4, 4, 5, 7, -8, -5, -4, -2, -3, -1, 0, -1, 1, -1, 2, -1, 3, 1, 0, 2, 5, 4, 4, 5, -6, -5, -3, -4, -3, -4, -2, 0, -1, -3, 0, 2, 2, -1, 2, 5, 5, 3, 5, 4],
    [-5, -6, -3, -4, -3, -4, -2, -2, -1, 1, -1, 2, 1, 2, 5, 4, 3, 2, 5, 8, -8, -8, -6, -2, -3, -2, -3, -1, 1, -2, 2, 1, 1, 3, 3, 3, 3, 4, 7, 6, -6, -5, -5, -7, -2, -2, -2, -4, -2, -1, 0, 1, 2, 2, 5, 3, 7, 5, 4, 7, -8, -8, -3, -4, -4, -4, -3, -3, -2, 1, -2, 1, 2, 1, 2, 4, 4, 5, 6, 7, -7, -6, -4, -3, -4, -3, -1, 1, -1, 0, 0, 0, 4, 2, 2, 3, 4, 5, 5, 5, -5, -5, -5, -3, -2, -2, -3, -2, -1, 0, 0, 2, 3, 3, 3, 2, 5, 5, 4, 6, -8, -6, -6, -3, -3, -2, 0, -2, -1, 2, 0, 2, 2, 2, 3, 2, 1, 4, 4, 7, -8, -6, -6, -3, -2, -3, -2, -1, 1, -1, 2, 3, 1, 2, 2, 3, 2, 3, 3, 8, -6, -6, -5, -2, -2, -2, -2, -1, 0, -1, -1, 2, 3, 2, 0, 3, 3, 5, 6, 8, -7, -5, -3, -5, -5, -4, -1, -2, 0, 0, 1, 1, 0, 1, 1, 3, 4, 3, 3, 5],
    [-8, -8, -5, -6, -1, -2, -2, 0, -2, -2, 0, 2, 2, 2, 5, 2, 3, 6, 7, 6, -8, -8, -4, -5, -4, -5, -2, -1, -1, -1, 1, 0, 3, 1, 3, 5, 5, 7, 5, 7, -7, -6, -5, -5, -7, -2, -1, 0, -1, -2, 1, 1, 0, 1, 3, 3, 6, 4, 5, 7, -8, -7, -6, -4, -3, -3, -2, -1, -1, -1, 0, 1, 0, 0, 3, 3, 4, 5, 5, 8, -6, -5, -6, -3, -4, -3, -3, -2, -1, 1, 0, 0, 1, 2, 2, 4, 5, 5, 4, 5, -8, -4, -7, -6, -3, -2, -3, -3, 1, 0, 0, 1, 1, 2, 2, 4, 4, 5, 6, 6, -5, -5, -3, -5, -4, -4, -1, -1, -1, -1, 0, 1, 4, 4, 6, 3, 4, 4, 5, 7, -6, -7, -5, -4, -3, -4, -1, -2, 0, -1, 1, 1, 1, 3, 2, 3, 4, 3, 3, 5, -7, -8, -5, -5, -3, -3, -3, -3, -2, 0, 0, 2, 1, 2, 3, 2, 3, 4, 7, 6, -8, -5, -4, -4, -4, -4, -1, -4, 0, -1, 1, 0, 0, 1, 4, 1, 3, 4, 6, 6],
    [-7, -6, -4, -6, -4, -4, -4, -2, -2, 1, -1, 1, 3, 3, 3, 4, 3, 6, 6, 8, -7, -5, -6, -7, -4, -3, -4, -2, 0, -1, 0, 2, 2, 0, 3, 4, 5, 5, 6, 7, -7, -6, -7, -3, -4, -3, -1, -5, -1, 0, -1, 1, 1, 2, 2, 3, 5, 5, 8, 6, -6, -6, -6, -4, -3, -2, -4, -2, -2, 2, 1, 1, 2, 0, 3, 4, 5, 5, 5, 7, -7, -5, -4, -3, -7, -2, -2, -2, -1, 0, 1, 1, 1, 4, 4, 4, 5, 4, 4, 6, -5, -5, -5, -4, -2, -3, -4, -1, 0, -1, -2, 1, 0, 2, 3, 3, 5, 6, 7, 6, -7, -5, -5, -2, -3, -3, -3, 1, 0, -2, 0, -1, 2, 2, 3, 4, 4, 4, 6, 7, -8, -6, -6, -4, -4, -2, -2, -2, -2, -1, 0, 0, 1, 2, 2, 3, 4, 2, 5, 5, -6, -4, -5, -4, -3, -3, -1, -1, -2, -2, 0, 1, 2, 2, 4, 5, 6, 5, 6, 5, -7, -5, -4, -2, -3, -4, -2, -2, -2, -1, 2, 1, 1, 2, 3, 3, 4, 4, 4, 6],
]


class TestReproduce(DummyAlgorithmBase):
    """Reproducibility probe: replays the recorded per-episode selection table
    (sel_l) as ChangeColor actions and prints it next to the selection that the
    live agent x-coordinates would produce, so both can be compared by eye."""

    def __init__(self, n_agent, n_thread, space, mcv=None, team=None):
        super().__init__(n_agent, n_thread, space, mcv, team)
        self.episode = -1  # incremented on each full reset, indexes sel_l

    def interact_with_env(self, State_Recall):
        assert State_Recall['Latest-Obs'] is not None, ('make sure obs is ok')
        ENV_PAUSE = State_Recall['ENV-PAUSE']
        RST = State_Recall['Env-Suffered-Reset']
        ENV_ACTIVE = ~ENV_PAUSE
        assert self.n_thread == len(ENV_ACTIVE), ('the number of thread is wrong?')
        if all(RST):
            self.episode += 1
        actions = np.zeros(shape=(self.n_thread, self.n_agent, ActDigitLen))
        self_agent_uid_range = GlobalConfig.ScenarioConfig.AGENT_ID_EACH_TEAM[self.team]
        for thread in range(self.n_thread):
            if ENV_PAUSE[thread]:
                continue
            live_sel = []
            agent_x = np.array([d['agentLocationArr'][0] for d in
                                np.array(State_Recall['Latest-Team-Info'][thread]['dataArr'])[self_agent_uid_range]])
            for a in range(self.n_agent):
                recorded = sel_l[self.episode][a]
                # quantize x into 70-unit bins (offset by half a bin)
                live_sel.append(int((agent_x[a] + 35) // 70))
                actions[thread, a] = strActionToDigits(
                    f'ActionSet1::ChangeColor;{pre_def_color[int(recorded) % 3]}')
            # debug output: recorded table vs live quantized selections
            print(sel_l[self.episode][:10], live_sel[:10])
            print(live_sel)
        # set actions of in-active threads to NaN (done again in multi_team.py, not strictly necessary)
        actions[ENV_PAUSE] = np.nan
        return actions, {}
# ================================================
# FILE: PythonExample/hmp_minimal_modules/ALGORITHM/script_ai/assignment.py
# ================================================
import copy
import random
import numpy as np
import datetime
import time
from ALGORITHM.script_ai.module_evaluation import *
from ALGORITHM.script_ai.global_params import *


class TaskAssign(object):
    """Scripted task assignment for the attack/defend/expel scenario.

    attackers / defenders: dict agent_id -> attribute dict with at least
        'X', 'Y' and 'state' (membership of 'dead' in 'state' marks death).
    drone: attribute dict for the single drone ('X', 'Y', 'state').

    Relies on names star-imported above: key_points, DEFEND_DIST,
    DRIVE_AWAY_DIST, RETREAT_STANCE (global_params) and Evaluation_module
    (module_evaluation).

    NOTE(review): several large commented-out legacy strategy variants
    (assign_drone_initial, negative/active defend, nearest-assign expel,
    distance-threshold retreat) were removed as dead code; see VCS history.
    """

    def __init__(self, attackers, drone, defenders):
        super(TaskAssign, self).__init__()
        self.attackers = attackers
        self.drone = drone
        self.defenders = defenders
        self.evaluator = Evaluation_module()
        # params
        self.ratio_thres = 1

    # UGV attack (get defender target id)
    def assign_attack(self, attack_IDlist):
        """Pick the alive defender with the best offensive stance over all given attackers."""
        # simple strategy -- one opponent
        max_stlist = []
        max_idlist = []
        def_idlist = [ID for ID, attr in self.defenders.items() if 'dead' not in attr['state']]
        for att in attack_IDlist:
            inner_stance_list = [self.evaluator.UAV2UAV_id('offensive', self.attackers[att], defender)
                                 for defender in self.defenders.values() if 'dead' not in defender['state']]
            inner_max_stance = max(inner_stance_list)
            max_stlist.append(inner_max_stance)
            # index is within the alive-defender list, matching def_idlist order
            max_idlist.append(inner_stance_list.index(inner_max_stance))
        max_stance = max(max_stlist)
        target_id = def_idlist[max_idlist[max_stlist.index(max_stance)]]
        return target_id

    # UAV hold (running back and forth)
    def assign_drone_ini(self, ):
        """Send the drone toward the key point farthest from every defender."""
        all_defender_pos = [[attr['X'], attr['Y']] for attr in self.defenders.values()]
        min_dist = []
        if len(all_defender_pos) > 0:
            for key_point in key_points:
                min_dist.append(min([np.linalg.norm(np.array(key_point) - np.array(enemy_pos))
                                     for enemy_pos in all_defender_pos]))
            return ['running', min_dist.index(max(min_dist))]
        else:
            return ['running', 0]

    def judge_expeled(self, ):
        """True when any defender is within DRIVE_AWAY_DIST of the drone's current key point.
        (sic) misspelled name kept -- callers use this spelling."""
        key_point_idx = self.drone['state'][1]
        key_point_pos = key_points[key_point_idx]
        all_defender_pos = [[attr['X'], attr['Y']] for attr in self.defenders.values()]
        min_dist = min([np.linalg.norm(np.array(key_point_pos) - np.array(def_pos))
                        for def_pos in all_defender_pos])
        if min_dist < DRIVE_AWAY_DIST:
            return True
        else:
            return False

    # UGV expel (both)
    def expel(self, ):
        """Expel toward the key point closest to the drone."""
        drone_pos = [self.drone['X'], self.drone['Y']]
        drone_dist = [np.linalg.norm(np.array(key_point) - np.array(drone_pos))
                      for key_point in key_points]
        return ['expel', drone_dist.index(min(drone_dist))]

    # defender assign
    def assign_defend(self, def_ID):
        """Attack the nearest alive attacker if within DEFEND_DIST, otherwise fall back to expel."""
        alive_attackers = [attacker for attacker in self.attackers.values() if 'dead' not in attacker['state']]
        alive_attackers_ids = [ID for ID, attr in self.attackers.items() if 'dead' not in attr['state']]
        dist = [np.linalg.norm(np.array([att['X'], att['Y']])
                               - np.array([self.defenders[def_ID]['X'], self.defenders[def_ID]['Y']]))
                for att in alive_attackers]
        if len(dist) > 0 and min(dist) < DEFEND_DIST:
            idx = dist.index(min(dist))
            return ['attack', alive_attackers_ids[idx]]
        else:
            return self.expel()

    # judge retreat for attackers
    def is_retreat(self, att_ID, def_ID):
        """Attacker should retreat when the defender's stance against it exceeds RETREAT_STANCE."""
        if 'dead' in self.attackers[att_ID]['state'] or 'dead' in self.defenders[def_ID]['state']:
            return False
        else:
            # stance version (a distance-threshold variant existed; stance kept)
            stance = self.evaluator.UAV2UAV_id('offensive', self.attackers[att_ID], self.defenders[def_ID])
            if stance > RETREAT_STANCE:
                print('retreat stance: ', stance)
                return True
            else:
                return False

    def is_attack(self, att_ID):
        """Attacker may attack when its worst-case stance against all alive defenders stays below RETREAT_STANCE."""
        if 'dead' in self.attackers[att_ID]['state']:
            return False
        else:
            stance_list = [self.evaluator.UAV2UAV_id('offensive', self.attackers[att_ID], self.defenders[def_ID])
                           for def_ID in self.defenders.keys() if 'dead' not in self.defenders[def_ID]['state']]
            if max(stance_list) < RETREAT_STANCE:
                print('attack stance: ', max(stance_list))
                return True
            else:
                print('retreat stance: ', max(stance_list))
                return False


# ================================================
# FILE: PythonExample/hmp_minimal_modules/ALGORITHM/script_ai/blue_strategy.py
# ================================================
import numpy as np
import math
# NOTE(review): offense_combat() of blue_strategy.py continues beyond this
# chunk and is intentionally left untouched here.
def offense_combat(self_data, ally_agents_data, enemy_agents_data, key_points, blue_alive, red_alive, agent_type): # 防守方red小车最大速度 red_car_max_vel = 600 # 进攻方blue小车最大速度 blue_car_max_vel = 600 # 进攻方blue无人机最大速度 blue_drone_max_vel = 600 # 进攻方无人机占领夺控点胜利时间 time_to_win = 2.0 # 驱离载荷作用范围 expel_range = 1200 # 无人车打击距离 fire_dist = 2000 all_enemy_agent_pos = [] for agent_id, dict_value in enemy_agents_data.items(): all_enemy_agent_pos.append([dict_value['X'], dict_value['Y'], dict_value['Z']]) """ all_enemy_agent_yaw = [] for agent_id, dict_value in enemy_agents_data.items(): if agent_id != '231': all_enemy_agent_yaw.append([dict_value['Yaw']]) """ blue_drone_current_pos = np.array([[ally_agents_data['231']['X'], ally_agents_data['231']['Y'], ally_agents_data['231']['Z']]]) ally_agents_data.pop('231') if '211' in self_data.keys(): friend_agents_data = dict(self_data, **ally_agents_data) if '211' in ally_agents_data.keys(): friend_agents_data = dict(ally_agents_data, **self_data) all_friend_agent_pos = [] for agent_id, dict_value in friend_agents_data.items(): all_friend_agent_pos.append([dict_value['X'], dict_value['Y'], dict_value['Z']]) """ all_friend_agent_yaw = [] for agent_id, dict_value in friend_agents_data.items(): all_friend_agent_yaw.append([dict_value['Yaw']]) """ target_location = np.array(key_points) red_car_current_pos = np.array(all_enemy_agent_pos) blue_car_current_pos = np.array(all_friend_agent_pos) blue_car_current_pos = blue_car_current_pos if agent_type == 0: drone_keypoint_relative_dist = np.array([[np.linalg.norm(blue_drone_current_pos[0][:2] - target_location[b][:2]) for b in range(2)]]) if red_alive[0] is True and red_alive[1] is True: dist_red_car_blue_drone = np.array( [[np.linalg.norm(red_car_current_pos[a][:2] - blue_drone_current_pos[0][:2])] for a in range(2)]) if np.min(dist_red_car_blue_drone) < expel_range:# and np.argmin(drone_keypoint_relative_dist) < 600: flag = 'defense' else: flag = 'offense' elif red_alive[0] is True and red_alive[1] is 
False: dist_red_car_blue_drone = np.linalg.norm(red_car_current_pos[0][:2] - blue_drone_current_pos[0][:2]) if dist_red_car_blue_drone < expel_range:# and np.argmin(drone_keypoint_relative_dist) < 600: flag = 'defense' else: flag = 'offense' elif red_alive[0] is False and red_alive[1] is True: dist_red_car_blue_drone = np.linalg.norm(red_car_current_pos[1][:2] - blue_drone_current_pos[0][:2]) if dist_red_car_blue_drone < expel_range:# and np.argmin(drone_keypoint_relative_dist) < 600: flag = 'defense' else: flag = 'offense' elif red_alive[0] is False and red_alive[1] is False: flag = 'offense' if flag == 'offense': if blue_alive[0] is True or blue_alive[1] is True: # 无人车 if blue_alive[0] is True and blue_alive[1] is True: center_x = (blue_car_current_pos[0][0] + blue_car_current_pos[1][0]) / 2 center_y = (blue_car_current_pos[0][1] + blue_car_current_pos[1][1]) / 2 center_z = (blue_car_current_pos[0][2] + blue_car_current_pos[1][2]) / 2 elif blue_alive[0] is True and blue_alive[1] is False: center_x = blue_car_current_pos[0][0] center_y = blue_car_current_pos[0][1] center_z = blue_car_current_pos[0][2] elif blue_alive[0] is False and blue_alive[1] is True: center_x = blue_car_current_pos[1][0] center_y = blue_car_current_pos[1][1] center_z = blue_car_current_pos[1][2] blue_center = np.array([[center_x, center_y, center_z]]) if red_alive[0] is True and red_alive[1] is True: dist = np.array([[np.linalg.norm(blue_center[0][:2] - red_car_current_pos[b][:2]) for b in range(2)]]) target_pos = red_car_current_pos[np.argmin(dist)] target_id = np.argmin(dist) elif red_alive[0] is True and red_alive[1] is False: target_pos = red_car_current_pos[0] target_id = 0 elif red_alive[0] is False and red_alive[1] is True: target_pos = red_car_current_pos[1] target_id = 1 elif red_alive[0] is False and red_alive[1] is False: target_pos = target_location[0] target_id = 1 else: target_pos = blue_car_current_pos[0] target_id = 0 else: target_pos = blue_drone_current_pos[0] target_pos[0] 
= target_pos[0] + (-2000 * np.sign(blue_drone_current_pos[0][0])) target_pos[1] = target_pos[1] + (-3000 * np.sign(blue_drone_current_pos[0][1])) target_pos[2] = blue_car_current_pos[0][2] target_id = 0 return target_pos, target_id, flag elif agent_type == 1: # 无人机 drone_keypoint_relative_dist = np.array([[np.linalg.norm(blue_drone_current_pos[0][:2] - target_location[b][:2]) for b in range(2)]]) safe_target_location = [] for index in range(2): if red_alive[0] is True and red_alive[1] is True: dist = np.array([[np.linalg.norm(target_location[index][:2] - red_car_current_pos[b][:2]) for b in range(2)]]) if np.min(dist) > expel_range: safe_target_location.append(True) else: safe_target_location.append(False) elif red_alive[0] is True and red_alive[1] is False: dist = np.linalg.norm(target_location[index][:2] - red_car_current_pos[0][:2]) if dist > expel_range: safe_target_location.append(True) else: safe_target_location.append(False) elif red_alive[0] is False and red_alive[1] is True: dist = np.linalg.norm(target_location[index][:2] - red_car_current_pos[1][:2]) if dist > expel_range: safe_target_location.append(True) else: safe_target_location.append(False) elif red_alive[0] is False and red_alive[1] is False: safe_target_location = [True, True] if safe_target_location[0] is True and safe_target_location[1] is True: red_car_dist = np.array( [[np.linalg.norm(red_car_current_pos[a][:2] - target_location[b][:2]) - expel_range for b in range(2)] for a in range(2)]) if red_alive[0] is False: red_car_dist[0] = 100000 elif red_alive[1] is False: red_car_dist[1] = 100000 target_defense_index = np.argmax(np.max(red_car_dist, axis=0, keepdims=True)) blue_drone_dist_to_go = np.array( [[np.linalg.norm(blue_drone_current_pos[a][:2] - target_location[b][:2]) for b in range(2)] for a in range(1)]) # return target_location[np.argmin(blue_drone_dist_to_go)] return target_location[target_defense_index] elif safe_target_location[0] is True and safe_target_location[1] is False: return 
target_location[0] elif safe_target_location[0] is False and safe_target_location[1] is True: return target_location[1] elif safe_target_location[0] is False and safe_target_location[1] is False: return np.array([(target_location[0][0] + target_location[0][1]) / 2, (target_location[1][0] + target_location[1][1]) / 2]) if __name__ == '__main__': agent_type = 0 # 211和221是进攻方blue小车, 231是进攻方blue无人机 # 311和321是防守方red小车 # 当前要决策的blue小车的信息 self_data = {'211': {'X': 1500, 'Y': -2000, 'Z': 0, 'Yaw': 30, 'Blood': 100}} ally_agents_data = {'221': {'X': -2500, 'Y': -2500, 'Z': 0, 'Yaw': 40, 'Blood': 100}, '231': {'X': 700, 'Y': 3300, 'Z': 1500, 'Yaw': 0}} # 进攻方blue小车和无人机信息 enemy_agents_data = {'311': {'X': 2700.0, 'Y': 3300, 'Z': 0, 'Yaw': 20, 'Blood': 100}, '321': {'X': -1000, 'Y': -700, 'Z': 0, 'Yaw': 10, 'Blood': 100}} # 夺控点信息 key_points = [[700, 3300, 0], [-2500, -700, 0]] # 存活状态 blue_alive = [True, True] red_alive = [True, True] if agent_type == 0: target_position, target_id, flag = offense_combat(self_data, ally_agents_data, enemy_agents_data, key_points, blue_alive, red_alive, agent_type) print(target_position) print('\r\n') print(target_id) print('\r\n') print(flag) elif agent_type == 1: target_position = offense_combat(self_data, ally_agents_data, enemy_agents_data, key_points, blue_alive, red_alive, agent_type) print(target_position) ================================================ FILE: PythonExample/hmp_minimal_modules/ALGORITHM/script_ai/decision.py ================================================ import copy import random import numpy as np import datetime import time from ALGORITHM.script_ai.assignment import * from ALGORITHM.script_ai.global_params import * class decision(): """docstring for decision""" def __init__(self, attackers, drone, defenders): super(decision, self).__init__() self.attackers = attackers self.drone = drone self.defenders = defenders self.assigner = TaskAssign(self.attackers, self.drone, self.defenders) # output actions def act(self, 
type=None): actions_list = {} self.alive_attack = len([att for att in list(self.attackers.values()) if 'dead' not in att['state']]) self.alive_defend = len([ded for ded in list(self.defenders.values()) if 'dead' not in ded['state']]) if not type: type = 'attackers' if type == 'attackers': self.attack_StateTrans() for ID, attr in self.attackers.items(): if 'dead' in attr['state']: des_pos = [attr['X'], attr['Y'], attr['Z']] actions_list[ID] = des_pos continue if 'attack' in attr['state'] and attr['state'][1] is not '0': def_ID = attr['state'][1] opp = self.defenders[def_ID] des_pos = [opp['X']-400, opp['Y']-400, opp['Z']] actions_list[ID] = des_pos elif 'retreat' in attr['state']: if self.drone['state'][0] is not 'idle': select_key_points = key_points[self.drone['state'][1]] agent_pos = [attr['X'], attr['Y'], attr['Z']] if select_key_points[1] - agent_pos[1] > 50 and select_key_points[0] - agent_pos[0] > 50: k = (select_key_points[1] - agent_pos[1]) / (select_key_points[0] - agent_pos[0]) des_pos = [agent_pos[0] - 1400, agent_pos[1] - 1400 * k, agent_pos[2]] else: des_pos = [agent_pos[0] - 1400, agent_pos[1], agent_pos[2]] else: des_pos = ATTA_RETREAT_POS actions_list[ID] = des_pos # des_pos = ATTA_RETREAT_POS # actions_list[ID] = des_pos elif 'idle' in attr['state']: des_pos = [attr['X'], attr['Y'], attr['Z']] actions_list[ID] = des_pos # attack target assign assign_attackers = [ID for ID, attr in self.attackers.items() if (len(attr['state'])>1 and attr['state'][1] == '0')] # attacker assignment if len(assign_attackers)>0: target_ID = self.assigner.assign_attack(assign_attackers) target = self.defenders[target_ID] for attacker_ID in assign_attackers: actions_list[attacker_ID] = [target['X']-300, target['Y']-300, target['Z']] self.attackers[attacker_ID]['state'][1] = target_ID # drone action self.drone_StateTrans() if 'idle' in self.drone['state']: des_pos = [self.drone['X'], self.drone['Y'], self.drone['Z']] elif 'running' in self.drone['state'] or 'hold' in 
self.drone['state']: des_pos = key_points[self.drone['state'][1]] actions_list['drone'] = des_pos # same as attackers elif type == 'defenders': self.defend_StateTrans() for ID, attr in self.defenders.items(): if 'dead' in attr['state']: des_pos = [attr['X'], attr['Y'], attr['Z']] actions_list[ID] = des_pos continue if 'attack' in attr['state']: def_ID = attr['state'][1] opp = self.attackers[def_ID] des_pos = [opp['X'], opp['Y'], opp['Z']] elif 'expel' in attr['state']: des_pos = key_points[attr['state'][1]] elif 'retreat' in attr['state']: des_pos = DEF_RETREAT_POS elif 'idle' in attr['state']: des_pos = key_points[0] # else: # des_pos = [attr['X'], attr['Y'], attr['Z']] actions_list[ID] = des_pos else: raise ValueError('invalid type!') return actions_list # state machine def attack_StateTrans(self, ): # attackers for ID in list(self.attackers.keys()): attr = self.attackers[ID] # check alive/dead(blood thres: 2) if attr['blood'] <= 2 and 'dead' not in attr['state']: attr['blood'] = 0 attr['state'] = ['dead'] continue # idle2attack if 'idle' in attr['state']: if self.alive_defend > 0: attr['state'] = ['attack'] def_ID = '0' attr['state'].append(def_ID) continue # attack2idle/retreat(blood thres: 10) if 'attack' in attr['state']: def_ID = attr['state'][1] is_attack = self.assigner.is_attack(ID) # 2idle if 'dead' in self.defenders[def_ID]['state']: attr['state'] = ['attack'] def_ID = '0' attr['state'].append(def_ID) elif not is_attack: attr['state'] = ['retreat'] continue # retreat2attack if 'retreat' in attr['state']: if self.alive_defend > 0 and attr['blood'] > 10: is_attack = self.assigner.is_attack(ID) if is_attack: attr['state'] = ['attack'] def_ID = '0' attr['state'].append(def_ID) # dist_list = [np.linalg.norm(np.array([attr['X'], attr['Y'], attr['Z']]) - np.array([ded['X'], ded['Y'], ded['Z']])) for ded in self.defenders.values()] # min_dist = min(dist_list) # if min_dist > 1500: # attr['state'] = ['attack'] # def_ID = '0' # attr['state'].append(def_ID) # if 
'idle' in self.drone['state']: # self.drone['state'] = self.assigner.assign_2point() def drone_StateTrans(self, ): drone_pos = [self.drone['X'], self.drone['Y']] # initial assign if 'idle' in self.drone['state']: self.drone['state'] = self.assigner.assign_drone_ini() # # run2hold # elif 'running' in self.drone['state']: # cur_point_idx = self.drone['state'][1] # cur_point_pos = key_points[cur_point_idx] # if np.linalg.norm(np.array(drone_pos) - np.array(cur_point_pos)) < 10: # self.drone['state'] = ['hold', cur_point_idx] # else: # pass elif 'running' in self.drone['state']: cur_point_idx = self.drone['state'][1] cur_point_pos = key_points[cur_point_idx] if self.assigner.judge_expeled(): self.drone['state'] = ['running', int(1 - cur_point_idx)] self.drone['state'] = self.assigner.assign_drone_ini() else: pass # defender def defend_StateTrans(self, ): for ID in list(self.defenders.keys()): attr = self.defenders[ID] # check alive/dead if attr['blood'] <= 2 and 'dead' not in attr['state']: attr['blood'] = 0 attr['state'] = ['dead'] continue # # expel nearest # if self.assigner.assign_expel(ID) is not None: # attr['state'] = self.assigner.assign_expel(ID) # continue # # expel both # if self.assigner.assign_expel() is not None: # attr['state'] = self.assigner.assign_expel() # continue # # # idle2attack # if self.alive_attack>0 and self.assigner.assign_defend() is not None: # attr['state'] = self.assigner.assign_defend() # continue attr['state'] = self.assigner.assign_defend(ID) # attack2idle/retreat(blood thres: 10) if 'attack' in attr['state']: att_ID = attr['state'][1] # 2retreat if attr['blood'] <= 10 and att_ID in self.attackers.keys() and self.attackers[att_ID]['blood'] > 5: attr['state'] = ['retreat'] # 2idle elif 'dead' in self.attackers[att_ID]['state']: attr['state'] = ['idle'] # else: # attr['state'] = self.assigner.assign_defend(ID) def test(): # test initial data # ally_agents_data={"221": {"ammo": 100, "velocity": 0.5, "X":1, "Y":1, "Z":0, "Yaw":0, 
'blood':100, 'state': ['idle']}, "231": {"ammo": 100, "velocity": 0.5, "X":2, "Y":2, "Z":1.5, "Yaw":0, 'blood':100, 'state': ['idle']}} # enemy_agents_data={"311": {"ammo": 100, "velocity": 0.8, "X":5, "Y":5, "Z":0, "Yaw":0, 'blood':100, 'state': ['idle']}, "321": {"ammo": 100, "velocity": 0.8, "X":6, "Y":6, "Z":0, "Yaw":0, 'blood':100, 'state': ['idle']}} # test dead detection √ # ally_agents_data={"221": {"ammo": 100, "velocity": 0.5, "X":1, "Y":1, "Z":0, "Yaw":0, 'blood':0, 'state': ['idle']}, "231": {"ammo": 100, "velocity": 0.5, "X":2, "Y":2, "Z":1.5, "Yaw":0, 'blood':1, 'state': ['retreat']}} # drone_data={"ammo": 100, "velocity": 0.5, "X":1, "Y":1, "Z":0, "Yaw":0, 'blood':0, 'state': ['idle']} # enemy_agents_data={"311": {"ammo": 100, "velocity": 0.8, "X":5, "Y":5, "Z":0, "Yaw":0, 'blood':1, 'state': ['attack']}, "321": {"ammo": 100, "velocity": 0.8, "X":6, "Y":6, "Z":0, "Yaw":0, 'blood':0, 'state': ['dead']}} # test idle2attack √ # ally_agents_data={"221": {"ammo": 100, "velocity": 0.5, "X":1, "Y":1, "Z":0, "Yaw":0, 'blood':100, 'state': ['idle']}, "231": {"ammo": 100, "velocity": 0.5, "X":2, "Y":2, "Z":1.5, "Yaw":0, 'blood':100, 'state': ['idle']}} # drone_data={"ammo": 100, "velocity": 0.5, "X":1, "Y":1, "Z":0, "Yaw":0, 'blood':0, 'state': ['idle']} # enemy_agents_data={"311": {"ammo": 100, "velocity": 0.8, "X":5, "Y":5, "Z":0, "Yaw":0, 'blood':100, 'state': ['idle']}, "321": {"ammo": 100, "velocity": 0.8, "X":6, "Y":6, "Z":0, "Yaw":0, 'blood':100, 'state': ['idle']}} # test attack2idle √ # ally_agents_data={"221": {"ammo": 100, "velocity": 0.5, "X":1, "Y":1, "Z":0, "Yaw":0, 'blood':100, 'state': ['attack', '311']}, "231": {"ammo": 100, "velocity": 0.5, "X":2, "Y":2, "Z":1.5, "Yaw":0, 'blood':100, 'state': ['attack', '311']}} # drone_data={"ammo": 100, "velocity": 0.5, "X":1, "Y":1, "Z":0, "Yaw":0, 'blood':0, 'state': ['idle']} # enemy_agents_data = {} # test attack2retreat √ ally_agents_data={"221": {"ammo": 100, "velocity": 0.5, "X":1, "Y":1, "Z":0, 
"Yaw":0, 'blood':11, 'state': ['attack', '311']}, "231": {"ammo": 100, "velocity": 0.5, "X":2, "Y":2, "Z":1.5, "Yaw":0, 'blood':10, 'state': ['attack', '311']}} drone_data={"ammo": 100, "velocity": 0.5, "X":-3, "Y":0, "Z":0, "Yaw":0, 'blood':0, 'state': ['idle']} enemy_agents_data={"311": {"ammo": 100, "velocity": 0.8, "X":2, "Y":1, "Z":0, "Yaw":0, 'blood':6, 'state': ['idle']}, "321": {"ammo": 100, "velocity": 0.8, "X":6, "Y":6, "Z":0, "Yaw":0, 'blood':100, 'state': ['idle']}} # enemy_agents_data={"311":{"ammo": 100, "velocity": 0.5, "X":2, "Y":2, "Z":0, "Yaw":0, 'blood':11, 'state': ['expel']}} DecisionMake = decision(ally_agents_data, drone_data, enemy_agents_data) attackers = DecisionMake.attackers defenders = DecisionMake.defenders drone = DecisionMake.drone # decision module test attack_actions = DecisionMake.act(type='attackers') defend_actions = DecisionMake.act(type='defenders') print('attack property: ', attackers) print('defend property: ', defenders) att_states = [] def_states = [] for k, v in attackers.items(): att_states.append({k:v['state']}) for k, v in defenders.items(): def_states.append({k:v['state']}) drone_state = drone['state'] print('attack states: ', att_states) print('defend states: ', def_states) print('drone states: ', drone_state) print('attack actions: ', attack_actions) print('defend actions: ', defend_actions) if __name__ == '__main__': test() # DecisionMake = decision(ally_agents_data, enemy_agents_data) # while(1): # attack_actions = DecisionMake.act(type='attackers') # defend_actions = DecisionMake.act(type='defenders') # time.sleep(0.05) ================================================ FILE: PythonExample/hmp_minimal_modules/ALGORITHM/script_ai/dummy.py ================================================ import numpy as np from UTIL.tensor_ops import copy_clone class DummyAlgConfig(): reserve = "" class DummyAlgorithmBase(): def __init__(self, n_agent, n_thread, space, mcv=None, team=None): from config import GlobalConfig 
self.n_agent = n_agent self.n_thread = n_thread self.ScenarioConfig = GlobalConfig.ScenarioConfig def forward(self, inp, state, mask=None): raise NotImplementedError def to(self, device): return self def interact_with_env(self, State_Recall): assert State_Recall['Latest-Obs'] is not None, ('make sure obs is ok') ENV_PAUSE = State_Recall['ENV-PAUSE'] ENV_ACTIVE = ~ENV_PAUSE assert self.n_thread == len(ENV_ACTIVE), ('the number of thread is wrong?') n_active_thread = sum(ENV_ACTIVE) assert len(State_Recall['Latest-Obs']) == n_active_thread, ('make sure we have the right batch of obs') actions = np.zeros(shape=(self.n_thread, self.n_agent)) # set actions of in-active threads to NaN (will be done again in multi_team.py, this line is not necessary) actions[ENV_PAUSE] = np.nan # swap (self.n_thread, self.n_agent) -> (self.n_agent, self.n_thread) actions = np.swapaxes(actions, 0, 1) return actions, {} class DummyAlgorithm(DummyAlgorithmBase): def interact_with_env(self, State_Recall): assert State_Recall['Latest-Obs'] is not None, ('make sure obs is ok') ENV_PAUSE = State_Recall['ENV-PAUSE'] ENV_ACTIVE = ~ENV_PAUSE assert self.n_thread == len(ENV_ACTIVE), ('the number of thread is wrong?') n_active_thread = sum(ENV_ACTIVE) assert len(State_Recall['Latest-Obs']) == n_active_thread, ('make sure we have the right batch of obs') actions = np.zeros(shape=(self.n_thread, self.n_agent, 4)) env0_step = State_Recall['Current-Obs-Step'] if env0_step%2==0: actions[..., 0] = 1 # AT for i in range(5): actions[:, i, 1] = i # TT actions[..., 2] = 0 # HT actions[..., 3] = 0 # SP else: actions[..., 0] = 5 # AT for i in range(5): actions[:, i, 1] = i # TT actions[..., 2] = 0 # HT actions[..., 3] = 0 # SP # set actions of in-active threads to NaN (will be done again in multi_team.py, this line is not necessary) actions[ENV_PAUSE] = np.nan # swap (self.n_thread, self.n_agent) -> (self.n_agent, self.n_thread) actions = np.swapaxes(actions, 0, 1) return actions, {} 
================================================ FILE: PythonExample/hmp_minimal_modules/ALGORITHM/script_ai/dummy_uhmap.py ================================================ import numpy as np from UTIL.tensor_ops import copy_clone from MISSION.uhmap.actset_lookup import encode_action_as_digits from ALGORITHM.script_ai.decision import decision from ALGORITHM.script_ai.assignment import * from ALGORITHM.script_ai.global_params import * attact_states={'211':['idle'],'221':['idle']} drone_state={'231':['idle']} defend_states={'311':['idle'],'321':['idle']} print("===========================") print(attact_states) print(drone_state) print(defend_states) print("===========================") class DummyAlgConfig(): reserve = "" class DummyAlgorithmBase(): def __init__(self, n_agent, n_thread, space, mcv=None, team=None): from config import GlobalConfig self.n_agent = n_agent self.n_thread = n_thread self.ScenarioConfig = GlobalConfig.ScenarioConfig def forward(self, inp, state, mask=None): raise NotImplementedError def to(self, device): return self def interact_with_env(self, State_Recall): assert State_Recall['Latest-Obs'] is not None, ('make sure obs is ok') ENV_PAUSE = State_Recall['ENV-PAUSE'] ENV_ACTIVE = ~ENV_PAUSE assert self.n_thread == len(ENV_ACTIVE), ('the number of thread is wrong?') n_active_thread = sum(ENV_ACTIVE) assert len(State_Recall['Latest-Obs']) == n_active_thread, ('make sure we have the right batch of obs') actions = np.zeros(shape=(self.n_thread, self.n_agent)) # set actions of in-active threads to NaN (will be done again in multi_team.py, this line is not necessary) actions[ENV_PAUSE] = np.nan # swap (self.n_thread, self.n_agent) -> (self.n_agent, self.n_thread) actions = np.swapaxes(actions, 0, 1) return actions, {} # 进攻方决策 class DummyAlgorithmT1(DummyAlgorithmBase): def interact_with_env(self, State_Recall): try: res = self.interact_with_env_(State_Recall) except: actions = np.zeros(shape=(self.n_thread, self.n_agent, 8)) actions[:] = 
encode_action_as_digits("N/A", "N/A", x=None, y=None, z=None, UID=None, T=None, T_index=None) actions = np.swapaxes(actions, 0, 1) res = (actions, None) return res def interact_with_env_(self, State_Recall): assert State_Recall['Latest-Obs'] is not None, ('make sure obs is ok') ENV_PAUSE = State_Recall['ENV-PAUSE'] ENV_ACTIVE = ~ENV_PAUSE assert self.n_thread == len(ENV_ACTIVE), ('the number of thread is wrong?') n_active_thread = sum(ENV_ACTIVE) assert len(State_Recall['Latest-Obs']) == n_active_thread, ('make sure we have the right batch of obs') actions = np.zeros(shape=(self.n_thread, self.n_agent, 8)) env0_step = State_Recall['Current-Obs-Step'] obs = State_Recall['Latest-Team-Info'] thread = 0 global attact_states global drone_state global defend_states if State_Recall['Env-Suffered-Reset']==[True]: attact_states = {'211': ['idle'], '221': ['idle']} drone_state = {'231': ['idle']} defend_states = {'311': ['idle'], '321': ['idle']} # 防守方red小车的信息 red_agents_data = {'311': {'ammo':100, 'velocity':0, 'X': obs[0]['dataArr'][3]['agentLocation']['x'], 'Y': obs[0]['dataArr'][3]['agentLocation']['y'], 'Z': obs[0]['dataArr'][3]['agentLocation']['z'], 'Yaw': 0, 'blood': obs[0]['dataArr'][3]['agentHp'], 'state':defend_states['311']}, '321': {'ammo':100, 'velocity':0, 'X': obs[0]['dataArr'][4]['agentLocation']['x'], 'Y': obs[0]['dataArr'][4]['agentLocation']['y'], 'Z': obs[0]['dataArr'][4]['agentLocation']['z'], 'Yaw': 0, 'blood': obs[0]['dataArr'][4]['agentHp'], 'state':defend_states['321']}} # 进攻方blue小车和无人机信息,其中231是无人机 blue_agents_data = {'211': {'ammo':100, 'velocity':0, 'X': obs[0]['dataArr'][0]['agentLocation']['x'], 'Y': obs[0]['dataArr'][0]['agentLocation']['y'], 'Z': obs[0]['dataArr'][0]['agentLocation']['z'], 'Yaw': 0, 'blood': obs[0]['dataArr'][0]['agentHp'], 'state':attact_states['211']}, '221': {'ammo':100, 'velocity':0, 'X': obs[0]['dataArr'][1]['agentLocation']['x'], 'Y': obs[0]['dataArr'][1]['agentLocation']['y'], 'Z': 
obs[0]['dataArr'][1]['agentLocation']['z'], 'Yaw': 0, 'blood': obs[0]['dataArr'][1]['agentHp'], 'state':attact_states['221']}} drone_data={'ammo':100, 'velocity':0, 'X': obs[0]['dataArr'][2]['agentLocation']['x'], 'Y': obs[0]['dataArr'][2]['agentLocation']['y'], 'Z': obs[0]['dataArr'][2]['agentLocation']['z'], 'Yaw': 0, 'blood': obs[0]['dataArr'][2]['agentHp'], 'state':drone_state['231']} blue_alive = [obs[0]['dataArr'][0]['agentAlive'], obs[0]['dataArr'][1]['agentAlive']] red_alive = [obs[0]['dataArr'][3]['agentAlive'], obs[0]['dataArr'][4]['agentAlive']] # 夺控点信息 在global——params.py修改 # key_points = [[700, -3300, 500], [-3000, 700, 500]] DecisionMake = decision(blue_agents_data,drone_data,red_agents_data) attackers = DecisionMake.attackers defenders = DecisionMake.defenders drone = DecisionMake.drone #decision module test attack_actions = DecisionMake.act(type='attackers') defend_actions = DecisionMake.act(type='defenders') att_states = [] def_states = [] for k, v in attackers.items(): att_states.append({k: v['state']}) for k, v in defenders.items(): def_states.append({k: v['state']}) # print('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++') # print(drone['state']) # print('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++') attact_states['211']=att_states[0]['211'] attact_states['221'] = att_states[1]['221'] defend_states['311'] = def_states[0]['311'] defend_states['321'] = def_states[1]['321'] drone_state['231'] = drone['state'] print('+++++++++++++++++++++++++ info +++++++++++++++++++++++++++++++++') print('211 state: ', attact_states['211']) print('221 state: ', attact_states['221']) print('311 state: ', defend_states['311']) print('321 state: ', defend_states['321']) print('drone state: ', drone['state']) print('attack actions: ', attack_actions) print('defend actions: ', defend_actions) print('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++') # 小车211决策 if attact_states['211'][0] == 'attack': if 
attact_states['211'][1] == '311': # actions[thread, 0] = encode_action_as_digits("SpecificAttacking", "N/A", x=None, y=None, z=None,UID=3,T=None, T_index=None) actions[thread, 0] = encode_action_as_digits("SpecificMoving", "N/A", x=attack_actions['211'][0], y=attack_actions['221'][1], z=500,UID=None,T=None, T_index=None) else: # actions[thread, 0] = encode_action_as_digits("SpecificAttacking", "N/A", x=None, y=None, z=None, UID=4,T=None, T_index=None) actions[thread, 0] = encode_action_as_digits("SpecificMoving", "N/A", x=attack_actions['211'][0], y=attack_actions['211'][1],z=500,UID=None,T=None, T_index=None) else: actions[thread, 0] = encode_action_as_digits("SpecificMoving", "N/A", x=attack_actions['211'][0], y=attack_actions['211'][1],z=500,UID=None,T=None, T_index=None) # 小车221决策 if attact_states['221'][0] == 'attack': if attact_states['221'][1] == '311': # actions[thread, 1] = encode_action_as_digits("SpecificAttacking", "N/A", x=None, y=None, z=None, UID=3,T=None, T_index=None) actions[thread, 1] = encode_action_as_digits("SpecificMoving", "N/A", x=attack_actions['221'][0], y=attack_actions['221'][1],z=500,UID=None,T=None, T_index=None) else: # actions[thread, 1] = encode_action_as_digits("SpecificAttacking", "N/A", x=None, y=None, z=None, UID=4,T=None, T_index=None) actions[thread, 1] = encode_action_as_digits("SpecificMoving", "N/A", x=attack_actions['221'][0], y=attack_actions['221'][1],z=500,UID=None,T=None, T_index=None) else: actions[thread, 1] = encode_action_as_digits("SpecificMoving", "N/A", x=attack_actions['221'][0], y=attack_actions['221'][1],z=500,UID=None,T=None, T_index=None) # 无人机231决策 actions[thread, 2] = encode_action_as_digits("SpecificMoving", "N/A", x=attack_actions['drone'][0], y=attack_actions['drone'][1], z=500, UID=None, T=None, T_index=None) # if env0_step < 2: # actions[thread, :] = self.act2digit_dictionary['ActionSet2::Idle;DynamicGuard'] # actions[thread, 0] = encode_action_as_digits("SpecificAttacking", "N/A", x=None, y=None, 
z=None, UID=4, T=None, T_index=None) # actions[thread, 0] = encode_action_as_digits("PatrolMoving", "N/A", x=0, y=0, z=379, UID=None, T=None, T_index=None) # actions[thread, 1] = encode_action_as_digits("SpecificAttacking", "N/A", x=None, y=None, z=None, UID=3, T=None, T_index=None) # actions[thread, 2] = encode_action_as_digits("SpecificMoving", "N/A", x=-3000, y=700, z=500, UID=None,T=None, T_index=None) # actions[thread, 2] = encode_action_as_digits("Idle", "DynamicGuard", x=700, y=-3300, z=500, UID=None, T=None, T_index=None) """ if env0_step%4 == 0: actions[thread, 2] = encode_action_as_digits("SpecificMoving", "Dir+X+Y", x=700, y=-3300, z=500, UID=None, T=None, T_index=None) if env0_step%4 == 1: actions[thread, 2] = encode_action_as_digits("SpecificMoving", "Dir+X-Y", x=700, y=-3300, z=500, UID=None, T=None, T_index=None) if env0_step%4 == 2: actions[thread, 2] = encode_action_as_digits("SpecificMoving", "Dir-X-Y", x=700, y=-3300, z=500, UID=None, T=None, T_index=None) if env0_step%4 == 3: actions[thread, 2] = encode_action_as_digits("SpecificMoving", "Dir-X+Y", x=700, y=-3300, z=500, UID=None, T=None, T_index=None) """ # set actions of in-active threads to NaN (will be done again in multi_team.py, this line is not necessary) actions[ENV_PAUSE] = np.nan # swap (self.n_thread, self.n_agent) -> (self.n_agent, self.n_thread) actions = np.swapaxes(actions, 0, 1) return actions, {} # 防守方决策 class DummyAlgorithmT2(DummyAlgorithmBase): def interact_with_env(self, State_Recall): try: res = self.interact_with_env_(State_Recall) except: actions = np.zeros(shape=(self.n_thread, self.n_agent, 8)) actions[:] = encode_action_as_digits("N/A", "N/A", x=None, y=None, z=None, UID=None, T=None, T_index=None) actions = np.swapaxes(actions, 0, 1) res = (actions, None) return res def interact_with_env_(self, State_Recall): assert State_Recall['Latest-Obs'] is not None, ('make sure obs is ok') ENV_PAUSE = State_Recall['ENV-PAUSE'] ENV_ACTIVE = ~ENV_PAUSE assert self.n_thread == 
len(ENV_ACTIVE), ('the number of thread is wrong?') n_active_thread = sum(ENV_ACTIVE) assert len(State_Recall['Latest-Obs']) == n_active_thread, ('make sure we have the right batch of obs') actions = np.zeros(shape=(self.n_thread, self.n_agent, 8)) env0_step = State_Recall['Current-Obs-Step'] obs = State_Recall['Latest-Team-Info'] thread = 0 # 防守方red小车的信息 red_agents_data = {'311': {'ammo': 100, 'velocity': 0, 'X': obs[0]['dataArr'][3]['agentLocation']['x'], 'Y': obs[0]['dataArr'][3]['agentLocation']['y'], 'Z': obs[0]['dataArr'][3]['agentLocation']['z'], 'Yaw': 0, 'blood': obs[0]['dataArr'][3]['agentHp'], 'state': defend_states['311']}, '321': {'ammo': 100, 'velocity': 0, 'X': obs[0]['dataArr'][4]['agentLocation']['x'], 'Y': obs[0]['dataArr'][4]['agentLocation']['y'], 'Z': obs[0]['dataArr'][4]['agentLocation']['z'], 'Yaw': 0, 'blood': obs[0]['dataArr'][4]['agentHp'], 'state': defend_states['321']}} # 进攻方blue小车和无人机信息,其中231是无人机 blue_agents_data = {'211': {'ammo': 100, 'velocity': 0, 'X': obs[0]['dataArr'][0]['agentLocation']['x'], 'Y': obs[0]['dataArr'][0]['agentLocation']['y'], 'Z': obs[0]['dataArr'][0]['agentLocation']['z'], 'Yaw': 0, 'blood': obs[0]['dataArr'][0]['agentHp'], 'state': attact_states['211']}, '221': {'ammo': 100, 'velocity': 0, 'X': obs[0]['dataArr'][1]['agentLocation']['x'], 'Y': obs[0]['dataArr'][1]['agentLocation']['y'], 'Z': obs[0]['dataArr'][1]['agentLocation']['z'], 'Yaw': 0, 'blood': obs[0]['dataArr'][1]['agentHp'], 'state': attact_states['221']}} drone_data = {'ammo': 100, 'velocity': 0, 'X': obs[0]['dataArr'][2]['agentLocation']['x'], 'Y': obs[0]['dataArr'][2]['agentLocation']['y'], 'Z': obs[0]['dataArr'][2]['agentLocation']['z'], 'Yaw': 0, 'blood': obs[0]['dataArr'][2]['agentHp'], 'state': drone_state['231']} blue_alive = [obs[0]['dataArr'][0]['agentAlive'], obs[0]['dataArr'][1]['agentAlive']] red_alive = [obs[0]['dataArr'][3]['agentAlive'], obs[0]['dataArr'][4]['agentAlive']] # 夺控点信息 在global——params.py修改 # key_points = [[700, -3300, 500], 
[-3000, 700, 500]] DecisionMake = decision(blue_agents_data, drone_data, red_agents_data) attackers = DecisionMake.attackers defenders = DecisionMake.defenders drone = DecisionMake.drone # decision module test attack_actions = DecisionMake.act(type='attackers') defend_actions = DecisionMake.act(type='defenders') att_states = [] def_states = [] for k, v in attackers.items(): att_states.append({k: v['state']}) for k, v in defenders.items(): def_states.append({k: v['state']}) attact_states['211'] = att_states[0]['211'] attact_states['221'] = att_states[1]['221'] defend_states['311'] = def_states[0]['311'] defend_states['321'] = def_states[1]['321'] drone_state['231'] = drone['state'] # print("==============智能体速度位置信息======================") # print(red_agents_data) # print(blue_agents_data) # print(drone_data) # print("====================================") print("==============智能体state信息======================") print(attact_states) print(defend_states) # print(drone_state) print("====================================") # 小车311决策 if defend_states['311'][0] == 'attack': if defend_states['311'][1] == '211': # actions[thread, 0] = encode_action_as_digits("SpecificAttacking", "N/A", x=None, y=None, z=None, UID=0,T=None, T_index=None) actions[thread, 0] = encode_action_as_digits("SpecificMoving", "N/A", x=defend_actions['311'][0], y=defend_actions['311'][1],z=500,UID=None,T=None, T_index=None) else: # actions[thread, 0] = encode_action_as_digits("SpecificAttacking", "N/A", x=None, y=None, z=None, UID=1,T=None, T_index=None) actions[thread, 1] = encode_action_as_digits("SpecificMoving", "N/A", x=defend_actions['311'][0], y=defend_actions['311'][1],z=500,UID=None,T=None, T_index=None) else: actions[thread, 0] = encode_action_as_digits("SpecificMoving", "N/A", x=defend_actions['311'][0], y=defend_actions['311'][1],z=500,UID=None,T=None, T_index=None) # 小车321决策 if defend_states['321'][0] == 'attack': if defend_states['321'][1] == '211': # actions[thread, 1] = 
encode_action_as_digits("SpecificAttacking", "N/A", x=None, y=None, z=None, UID=0,T=None, T_index=None) actions[thread, 1] = encode_action_as_digits("SpecificMoving", "N/A", x=defend_actions['321'][0], y=defend_actions['321'][1],z=500,UID=None,T=None, T_index=None) else: # actions[thread, 1] = encode_action_as_digits("SpecificAttacking", "N/A", x=None, y=None, z=None, UID=1,T=None, T_index=None) actions[thread, 1] = encode_action_as_digits("SpecificMoving", "N/A", x=defend_actions['321'][0], y=defend_actions['321'][1],z=500,UID=None,T=None, T_index=None) else: actions[thread, 1] = encode_action_as_digits("SpecificMoving", "N/A", x=defend_actions['321'][0], y=defend_actions['321'][1],z=500,UID=None,T=None, T_index=None) # actions[thread, 0] = encode_action_as_digits("SpecificAttacking", "N/A", x=None, y=None, z=None, UID=1, T=None, T_index=None) # actions[thread, 0] = encode_action_as_digits("Idle", "AggressivePersue", x=10000, y=-10000, z=379, UID=None, T=None, T_index=None) # actions[thread, 0] = encode_action_as_digits("SpecificMoving", "N/A", x=10000, y=-10000, z=379, UID=None, T=None, T_index=None) # actions[thread, 1] = encode_action_as_digits("PatrolMoving", "N/A", x=444*5, y=444*5, z=379, UID=None, T=None, T_index=None) # set actions of in-active threads to NaN (will be done again in multi_team.py, this line is not necessary) actions[ENV_PAUSE] = np.nan # swap (self.n_thread, self.n_agent) -> (self.n_agent, self.n_thread) actions = np.swapaxes(actions, 0, 1) return actions, {} ''' if env0_step < 5: actions[thread, :] = self.act2digit_dictionary['ActionSet2::Idle;DynamicGuard'] elif env0_step < 15: actions[thread, :] = self.act2digit_dictionary['ActionSet2::SpecificAttacking;UID-3'] elif env0_step < 25: actions[thread, :] = self.act2digit_dictionary['ActionSet2::SpecificAttacking;UID-4'] elif env0_step < 35: actions[thread, :] = self.act2digit_dictionary['ActionSet2::SpecificAttacking;UID-5'] elif env0_step < 45: actions[thread, :] = 
self.act2digit_dictionary['ActionSet2::SpecificAttacking;UID-6'] elif env0_step < 55: actions[thread, :] = self.act2digit_dictionary['ActionSet2::SpecificAttacking;UID-7'] ''' ''' if env0_step < 5: actions[thread, 0] = self.act2digit_dictionary['ActionSet2::Idle;DynamicGuard'] else: actions[thread, 0] = self.act2digit_dictionary['ActionSet2::SpecificAttacking;UID-1'] ''' ''' if env0_step < 5: if env0_step%4 == 0: actions[thread, 2] = self.act2digit_dictionary['ActionSet2::PatrolMoving;Dir+X+Y'] if env0_step%4 == 1: actions[thread, 2] = self.act2digit_dictionary['ActionSet2::PatrolMoving;Dir+X-Y'] if env0_step%4 == 2: actions[thread, 2] = self.act2digit_dictionary['ActionSet2::PatrolMoving;Dir-X-Y'] if env0_step%4 == 3: actions[thread, 2] = self.act2digit_dictionary['ActionSet2::PatrolMoving;Dir-X+Y'] elif env0_step < 10: actions[thread, 2] = self.act2digit_dictionary['ActionSet2::Idle;DynamicGuard'] elif env0_step < 15: if env0_step%4 == 0: actions[thread, 2] = self.act2digit_dictionary['ActionSet2::SpecificMoving;Dir+X'] if env0_step%4 == 1: actions[thread, 2] = self.act2digit_dictionary['ActionSet2::SpecificMoving;Dir+Y'] if env0_step%4 == 2: actions[thread, 2] = self.act2digit_dictionary['ActionSet2::SpecificMoving;Dir-X'] if env0_step%4 == 3: actions[thread, 2] = self.act2digit_dictionary['ActionSet2::SpecificMoving;Dir-Y'] elif env0_step < 20: actions[thread, 0] = self.act2digit_dictionary['ActionSet2::Idle;StaticAlert'] elif env0_step < 30: if env0_step%4 == 0: actions[thread, 2] = self.act2digit_dictionary['ActionSet2::PatrolMoving;Dir+X+Y'] if env0_step%4 == 1: actions[thread, 2] = self.act2digit_dictionary['ActionSet2::PatrolMoving;Dir+X-Y'] if env0_step%4 == 2: actions[thread, 2] = self.act2digit_dictionary['ActionSet2::PatrolMoving;Dir-X-Y'] if env0_step%4 == 3: actions[thread, 2] = self.act2digit_dictionary['ActionSet2::PatrolMoving;Dir-X+Y'] else: actions[thread, 0] = self.act2digit_dictionary['ActionSet2::Idle;StaticAlert'] ''' """ thread = 0 if 
env0_step%4 == 0: actions[thread, 0] = self.act2digit_dictionary['ActionSet2::PatrolMoving;Dir+X+Y'] if env0_step%4 == 1: actions[thread, 0] = self.act2digit_dictionary['ActionSet2::PatrolMoving;Dir+X-Y'] if env0_step%4 == 2: actions[thread, 0] = self.act2digit_dictionary['ActionSet2::PatrolMoving;Dir-X-Y'] if env0_step%4 == 3: actions[thread, 0] = self.act2digit_dictionary['ActionSet2::PatrolMoving;Dir-X+Y'] """ """ thread = 0 if env0_step%4 == 0: actions[thread, 0] = self.act2digit_dictionary['ActionSet2::SpecificMoving;Dir+X+Y'] if env0_step%4 == 1: actions[thread, 0] = self.act2digit_dictionary['ActionSet2::SpecificMoving;Dir+X-Y'] if env0_step%4 == 2: actions[thread, 0] = self.act2digit_dictionary['ActionSet2::SpecificMoving;Dir-X-Y'] if env0_step%4 == 3: actions[thread, 0] = self.act2digit_dictionary['ActionSet2::SpecificMoving;Dir-X+Y'] """ ================================================ FILE: PythonExample/hmp_minimal_modules/ALGORITHM/script_ai/global_params.py ================================================ # # expel dist # DRIVE_AWAY_DIST = 1.2 # # # defend dist # DEFEND_DIST = 2 # # # retreat to the safe entrance # ATTA_RETREAT_POS = [-0.5, -3] # # # retreat to the dangerous entrance # DEF_RETREAT_POS = [3, 2] # # # key points # key_points = [[0.7, -3.3], [-3, 0.7]] # stance thres RETREAT_STANCE = 0.3 # expel dist DRIVE_AWAY_DIST = 1000 # defend dist DEFEND_DIST = 1000 # retreat to the safe entrance ATTA_RETREAT_POS = [-500, -3000] # retreat to the dangerous entrance DEF_RETREAT_POS = [3000, 2000] # key points key_points = [[700, -3300], [-3000, 700]] ================================================ FILE: PythonExample/hmp_minimal_modules/ALGORITHM/script_ai/manual.py ================================================ import numpy as np from UTIL.tensor_ops import my_view, copy_clone try: from numba import jit except: from UTIL.tensor_ops import dummy_decorator as jit def to_cpu_numpy(x): return x.cpu().numpy() if hasattr(x,'cpu') else x class 
CoopAlgConfig(): reserve = None class DummyAlgorithmFoundationHI3D(): def __init__(self, n_agent, n_thread, space, mcv=None, team=None): from config import GlobalConfig super().__init__() self.n_agent = n_agent ScenarioConfig = GlobalConfig.ScenarioConfig self.num_entity = ScenarioConfig.num_entity self.landmark_uid = ScenarioConfig.uid_dictionary['landmark_uid'] self.agent_uid = ScenarioConfig.uid_dictionary['agent_uid'] self.entity_uid = ScenarioConfig.uid_dictionary['entity_uid'] self.pos_decs = ScenarioConfig.obs_vec_dictionary['pos'] self.vel_decs = ScenarioConfig.obs_vec_dictionary['vel'] self.num_landmarks = len(self.landmark_uid) self.invader_uid = ScenarioConfig.uid_dictionary['invader_uid'] self.n_entity = ScenarioConfig.num_entity self.n_basic_dim = ScenarioConfig.obs_vec_length self.n_thread = n_thread self.attack_target = [None] * self.n_thread def forward(self, inp, state, mask=None): raise NotImplementedError def to(self, device): return self def get_previous(self, team_intel): info = copy_clone(team_intel['Latest-Obs']) Env_Suffered_Reset = copy_clone(team_intel['Env-Suffered-Reset']) return info, Env_Suffered_Reset def interact_with_env(self, State_Recall): main_obs, Env_Suffered_Reset = self.get_previous(State_Recall) action = np.ones(shape=(main_obs.shape[0], main_obs.shape[1], 1)) * -1 n_thread = main_obs.shape[0] about_all_objects = main_obs[:,0,:] objects_emb = my_view(x=about_all_objects, shape=[0,-1,self.n_basic_dim]) # select one agent invader_emb = objects_emb[:, self.invader_uid, :] landmark_emb = objects_emb[:, self.landmark_uid,:] invader_pos = invader_emb[:, :, self.pos_decs] invader_vel = invader_emb[:, :, self.vel_decs] landmark_pos = landmark_emb[:, :, self.pos_decs] # 为每一个invader设置一个随机目标,当且仅当step == 0 时(episode刚刚开始) self.set_nearest_target(Env_Suffered_Reset, invader_pos, landmark_pos) n_thread = self.n_thread n_agent = self.n_agent attack_target = np.array(self.attack_target) action = self.get_action(action, attack_target, 
invader_pos, invader_vel, landmark_pos, n_agent, n_thread) assert not (action == -1).any() actions_list = [] for i in range(self.n_agent): actions_list.append(action[:, i]) return np.array(actions_list), None # @jit(nopython=True) # @staticmethod @jit(forceobj=True) def get_action(self, action, attack_target, invader_pos, invader_vel, landmark_pos, n_agent, n_thread): posit_vec = np.zeros_like(invader_vel) for thread in range(n_thread): for agent in range(n_agent): posit_vec[thread,agent] = landmark_pos[thread, attack_target[thread][agent]] - invader_pos[thread, agent] return self.dir_to_action3d(vec=posit_vec,vel=invader_vel) @staticmethod def dir_to_action3d(vec, vel): def np_mat3d_normalize_each_line(mat): return mat / np.expand_dims(np.linalg.norm(mat, axis=2) + 1e-16, axis=-1) desired_speed = 0.8 vec = np_mat3d_normalize_each_line(vec)*desired_speed return vec def set_nearest_target(self, Env_Suffered_Reset, invader_pos, landmark_pos): for thread, env_suffered_reset_ in enumerate(Env_Suffered_Reset): if env_suffered_reset_: invader_attack_target = [None] * self.n_agent for i in range(self.n_agent): posit_vec = np.array([landmark_pos[thread, j] - invader_pos[thread, i] for j in range(self.num_landmarks)]) dis_arr = np.linalg.norm(posit_vec, axis=-1) assigned_target = np.argmin(dis_arr) # assigned_target = np.random.randint(low=0, high=self.num_landmarks) invader_attack_target[i] = assigned_target self.attack_target[thread] = np.array(invader_attack_target) class DummyAlgorithmFoundationHI3D_old(): def __init__(self, n_agent, n_thread, space, mcv=None, team=None): from config import GlobalConfig super().__init__() self.n_agent = n_agent ScenarioConfig = GlobalConfig.ScenarioConfig self.num_entity = ScenarioConfig.num_entity self.landmark_uid = ScenarioConfig.uid_dictionary['landmark_uid'] self.agent_uid = ScenarioConfig.uid_dictionary['agent_uid'] self.entity_uid = ScenarioConfig.uid_dictionary['entity_uid'] self.pos_decs = 
ScenarioConfig.obs_vec_dictionary['pos'] self.vel_decs = ScenarioConfig.obs_vec_dictionary['vel'] self.num_landmarks = len(self.landmark_uid) self.invader_uid = ScenarioConfig.uid_dictionary['invader_uid'] self.n_entity = ScenarioConfig.num_entity self.n_basic_dim = ScenarioConfig.obs_vec_length self.n_thread = n_thread self.attack_target = [None] * self.n_thread def forward(self, inp, state, mask=None): raise NotImplementedError def to(self, device): return self def get_previous(self, team_intel): info = copy_clone(team_intel['Latest-Obs']) Env_Suffered_Reset = copy_clone(team_intel['Env-Suffered-Reset']) return info, Env_Suffered_Reset def interact_with_env(self, State_Recall): main_obs, Env_Suffered_Reset = self.get_previous(State_Recall) action = np.ones(shape=(main_obs.shape[0], main_obs.shape[1], 1)) * -1 n_thread = main_obs.shape[0] about_all_objects = main_obs[:,0,:] objects_emb = my_view(x=about_all_objects, shape=[0,-1,self.n_basic_dim]) # select one agent invader_emb = objects_emb[:, self.invader_uid, :] landmark_emb = objects_emb[:, self.landmark_uid,:] invader_pos = invader_emb[:, :, self.pos_decs] invader_vel = invader_emb[:, :, self.vel_decs] landmark_pos = landmark_emb[:, :, self.pos_decs] # 为每一个invader设置一个随机目标,当且仅当step == 0 时(episode刚刚开始) self.set_random_target(Env_Suffered_Reset) n_thread = self.n_thread n_agent = self.n_agent attack_target = np.array(self.attack_target) action = self.get_action(action, attack_target, invader_pos, invader_vel, landmark_pos, n_agent, n_thread) assert not (action == -1).any() actions_list = [] for i in range(self.n_agent): actions_list.append(action[:, i]) return np.array(actions_list), None # @jit(nopython=True) # @staticmethod @jit(forceobj=True) def get_action(self, action, attack_target, invader_pos, invader_vel, landmark_pos, n_agent, n_thread): posit_vec = np.zeros_like(invader_vel) for thread in range(n_thread): for agent in range(n_agent): posit_vec[thread,agent] = landmark_pos[thread, 
attack_target[thread][agent]] - invader_pos[thread, agent] return self.dir_to_action3d(vec=posit_vec,vel=invader_vel) @staticmethod @jit(forceobj=True) def dir_to_action3d(vec, vel): def np_mat3d_normalize_each_line(mat): return mat / np.expand_dims(np.linalg.norm(mat, axis=2) + 1e-16, axis=-1) vec = np_mat3d_normalize_each_line(vec) e_u = np.array([0 ,1 , 0 ]) e_d = np.array([0 ,-1 , 0 ]) e_r = np.array([1 ,0 , 0 ]) e_l = np.array([-1 ,0 , 0 ]) e_a = np.array([0 ,0 , 1 ]) e_b = np.array([0 ,0 ,-1 ]) vel_u = np_mat3d_normalize_each_line(vel + e_u * 0.1) vel_d = np_mat3d_normalize_each_line(vel + e_d * 0.1) vel_r = np_mat3d_normalize_each_line(vel + e_r * 0.1) vel_l = np_mat3d_normalize_each_line(vel + e_l * 0.1) vel_a = np_mat3d_normalize_each_line(vel + e_a * 0.1) vel_b = np_mat3d_normalize_each_line(vel + e_b * 0.1) proj_u = (vel_u * vec).sum(-1) proj_d = (vel_d * vec).sum(-1) proj_r = (vel_r * vec).sum(-1) proj_l = (vel_l * vec).sum(-1) proj_a = (vel_a * vec).sum(-1) proj_b = (vel_b * vec).sum(-1) _u = ((vec * e_u).sum(-1)>0).astype(np.int) _d = ((vec * e_d).sum(-1)>0).astype(np.int) _r = ((vec * e_r).sum(-1)>0).astype(np.int) _l = ((vec * e_l).sum(-1)>0).astype(np.int) _a = ((vec * e_a).sum(-1)>0).astype(np.int) _b = ((vec * e_b).sum(-1)>0).astype(np.int) proj_u = proj_u + _u*2 proj_d = proj_d + _d*2 proj_r = proj_r + _r*2 proj_l = proj_l + _l*2 proj_a = proj_a + _a*2 proj_b = proj_b + _b*2 dot_stack = np.stack([proj_u, proj_d, proj_r, proj_l, proj_a, proj_b]) direct = np.argmax(dot_stack, 0) action = np.where(direct == 0, 2, 0) action += np.where(direct == 1, 4, 0) action += np.where(direct == 2, 1, 0) action += np.where(direct == 3, 3, 0) action += np.where(direct == 4, 5, 0) action += np.where(direct == 5, 6, 0) return np.expand_dims(action, axis=-1) def set_random_target(self, Env_Suffered_Reset): for thread, env_suffered_reset_ in enumerate(Env_Suffered_Reset): if env_suffered_reset_: invader_attack_target = [None] * self.n_agent for i in 
range(self.n_agent): assigned_target = np.random.randint(low=0, high=self.num_landmarks) invader_attack_target[i] = assigned_target self.attack_target[thread] = np.array(invader_attack_target) class IHDummyAlgorithmFoundation(): def __init__(self, n_agent, n_thread, space, mcv=None, team=None): from config import GlobalConfig super().__init__() self.n_agent = n_agent ScenarioConfig = GlobalConfig.ScenarioConfig self.num_entity = ScenarioConfig.num_entity self.landmark_uid = ScenarioConfig.uid_dictionary['landmark_uid'] self.agent_uid = ScenarioConfig.uid_dictionary['agent_uid'] self.invader_uid = ScenarioConfig.uid_dictionary['invader_uid'] self.n_entity = ScenarioConfig.num_entity self.n_basic_dim = ScenarioConfig.obs_vec_length self.n_object = ScenarioConfig.num_object self.n_thread = n_thread self.num_landmarks = ScenarioConfig.num_landmarks self.attack_target = [None] * self.n_thread def forward(self, inp, state, mask=None): raise NotImplementedError def to(self, device): return self def get_previous(self, team_intel): info = copy_clone(team_intel['Latest-Obs']) done = copy_clone(team_intel['Env-Suffered-Reset']) return info, done ''' info, done = self.get_previous(team_intel) current_step = info[:,0,-1] object_info = my_view(info[:,0,:-1],[0,-1,5]) worker_emb = object_info[:, self.worker_uid] cargo_emb = object_info[:, self.cargo_uid] worker_pos = worker_emb[:,:,self.dec_pos] worker_vel = worker_emb[:,:,self.dec_vel] worker_drag = worker_emb[:,:,self.dec_other] cargo_dropoff_pos = cargo_emb[:,:,self.dec_pos] cargo_dropoff_weight = cargo_emb[:,:,self.dec_other] cargo_pos = cargo_dropoff_pos[:, :self.n_cargo] dropoff_pos = cargo_dropoff_pos[:, self.n_cargo:] cargo_weight = cargo_dropoff_weight[:, :self.n_cargo] ''' def interact_with_env(self, State_Recall): info, done = self.get_previous(State_Recall) current_step = info[:,0,-1] entity_pure_emb = my_view(info[:,0,:-1],shape=[0,-1,5]) action = np.ones(shape=(info.shape[0], info.shape[1], 1)) * -1 entity_pos = 
entity_pure_emb[:, :, (0,1)] entity_vel = entity_pure_emb[:, :, (2,3)] invader_vel = entity_vel[:, self.invader_uid] invader_pos = entity_pos[:, self.invader_uid] landmark_pos = entity_pos[:, self.landmark_uid] # 为每一个invader设置一个随机目标,当且仅当step == 0 时(episode刚刚开始) self.set_random_target(current_step) n_thread = self.n_thread n_agent = self.n_agent attack_target = np.array(self.attack_target) self.get_action(action, attack_target, invader_pos, invader_vel, landmark_pos, n_agent, n_thread) assert not (action == -1).any() actions_list = [] for i in range(self.n_agent): actions_list.append(action[:, i]) return np.array(actions_list), None @staticmethod @jit(nopython=True) def get_action(action, attack_target, invader_pos, invader_vel, landmark_pos, n_agent, n_thread): def Norm(x): return np.linalg.norm(x) for thread in range(n_thread): for agent in range(n_agent): speed_vec = invader_vel[thread, agent] posit_vec = landmark_pos[thread, attack_target[thread][agent]] - invader_pos[thread, agent] posit_norm = Norm(posit_vec) if posit_norm != 0: posit_vec = posit_vec / posit_norm speed_norm = Norm(speed_vec) if speed_norm != 0: speed_vec = speed_vec / speed_norm up = np.sum(posit_vec * np.array([0, 1])) dn = np.sum(posit_vec * np.array([0, -1])) ri = np.sum(posit_vec * np.array([1, 0])) le = np.sum(posit_vec * np.array([-1, 0])) up_v = np.sum(speed_vec * np.array([0, 1])) dn_v = np.sum(speed_vec * np.array([0, -1])) ri_v = np.sum(speed_vec * np.array([1, 0])) le_v = np.sum(speed_vec * np.array([-1, 0])) dot_product = np.array([up, dn, ri, le]) dot_product_v = np.array([up_v, dn_v, ri_v, le_v]) # situation 1 bool_ = (dot_product > dot_product_v) & (dot_product > 0) direct = bool_.astype(np.int64) if np.sum(direct) != 1: # 向量重合,或者速度为0,不再对比速度方向 direct = np.argmax(dot_product) else: # assert sum(direct) == 1 #检查 direct = np.argmax(direct) # stay_no_acc?[0], left[1], right[2], DOWN[3], Up[4] if direct == 0: # Up action[thread, agent, 0] = 2 elif direct == 1: # DOWN action[thread, 
agent, 0] = 4 elif direct == 2: # right action[thread, agent, 0] = 1 elif direct == 3: # left action[thread, agent, 0] = 3 def set_random_target(self, step_env_cnt_cnt): for thread, step_env_cnt in enumerate(step_env_cnt_cnt): if step_env_cnt == 0: invader_attack_target = [None] * self.n_agent for i in range(self.n_agent): assigned_target = np.random.randint(low=0, high=self.num_landmarks) invader_attack_target[i] = assigned_target self.attack_target[thread] = np.array(invader_attack_target) class DummyAlgorithmFoundation(): def __init__(self, n_agent, n_thread, space, mcv=None, team=None): super().__init__() self.n_agent = n_agent self.n_thread = n_thread self.mcv = mcv self.act_space = space['act_space'] self.obs_space = space['obs_space'] self.n_cargo = GlobalConfig.ScenarioConfig.n_cargo self.worker_uid = GlobalConfig.ScenarioConfig.uid_dictionary['agent_uid'] self.cargo_uid = GlobalConfig.ScenarioConfig.uid_dictionary['entity_uid'] self.dec_pos = GlobalConfig.ScenarioConfig.obs_vec_dictionary['pos'] self.dec_vel = GlobalConfig.ScenarioConfig.obs_vec_dictionary['vel'] self.dec_other = GlobalConfig.ScenarioConfig.obs_vec_dictionary['mass'] self.vec_len = GlobalConfig.ScenarioConfig.obs_vec_length def interact_with_env(self, team_intel): info, done = self.get_previous(team_intel) current_step = info[:,0,-1] object_info = my_view(info[:,0,:-1],[0,-1,self.vec_len]) worker_emb = object_info[:, self.worker_uid] cargo_emb = object_info[:, self.cargo_uid] worker_pos = worker_emb[:,:,self.dec_pos] worker_vel = worker_emb[:,:,self.dec_vel] worker_drag = worker_emb[:,:,self.dec_other] cargo_dropoff_pos = cargo_emb[:,:,self.dec_pos] cargo_dropoff_weight = cargo_emb[:,:,self.dec_other] cargo_pos = cargo_dropoff_pos[:, :self.n_cargo] dropoff_pos = cargo_dropoff_pos[:, self.n_cargo:] cargo_weight = (cargo_dropoff_weight[:, :self.n_cargo]+1)*(self.n_agent/self.n_cargo) worker_target_sel = np.zeros(shape=(self.n_thread,self.n_agent, 1)) for t in range(self.n_thread): p = 0 for 
c, cw in enumerate(cargo_weight[t]): if cw > self.n_agent: continue for j in range(int(p), int(p+cw)): worker_target_sel[t,j] = c if worker_drag[t,j] < 0 else (c+self.n_cargo) p = p+cw target_pos = np.take_along_axis(cargo_dropoff_pos,worker_target_sel.astype(np.long),1) actions_list = [] act = np.random.randint(low=0,high=5,size=(self.n_thread, self.n_agent, 1)) act = self.dir_to_action(vec=target_pos-worker_pos, vel=worker_vel) for i in range(self.n_agent): actions_list.append(act[:, i]) return actions_list, None def get_previous(self, team_intel): info = copy_clone(team_intel['Latest-Obs']) done = copy_clone(team_intel['Env-Suffered-Reset']) return info, done @staticmethod def dir_to_action(vec, vel): def np_mat3d_normalize_each_line(mat): return mat / np.expand_dims(np.linalg.norm(mat, axis=2) + 1e-16, axis=-1) vec = np_mat3d_normalize_each_line(vec) e_u = np.array([0,1]) e_d = np.array([0,-1]) e_r = np.array([1,0]) e_l = np.array([-1,0]) vel_u = np_mat3d_normalize_each_line(vel + e_u * 0.1) vel_d = np_mat3d_normalize_each_line(vel + e_d * 0.1) vel_r = np_mat3d_normalize_each_line(vel + e_r * 0.1) vel_l = np_mat3d_normalize_each_line(vel + e_l * 0.1) proj_u = (vel_u * vec).sum(-1) proj_d = (vel_d * vec).sum(-1) proj_r = (vel_r * vec).sum(-1) proj_l = (vel_l * vec).sum(-1) _u = ((vec * e_u).sum(-1)>0).astype(np.int) _d = ((vec * e_d).sum(-1)>0).astype(np.int) _r = ((vec * e_r).sum(-1)>0).astype(np.int) _l = ((vec * e_l).sum(-1)>0).astype(np.int) proj_u = proj_u + _u*2 proj_d = proj_d + _d*2 proj_r = proj_r + _r*2 proj_l = proj_l + _l*2 dot_stack = np.stack([proj_u, proj_d, proj_r, proj_l]) direct = np.argmax(dot_stack, 0) action = np.where(direct == 0, 2, 0) action += np.where(direct == 1, 4, 0) action += np.where(direct == 2, 1, 0) action += np.where(direct == 3, 3, 0) return np.expand_dims(action, axis=-1) ================================================ FILE: PythonExample/hmp_minimal_modules/ALGORITHM/script_ai/module_evaluation.py 
================================================ import copy import random import numpy as np import datetime import time import math #态势评估模块 #接口输入:全局状态信息,包括进攻方无人车各种状态,防守方无人车各种状态以及地图状态 class Evaluation_module(): def __init__(self, critical_points=[[-3000, 700, 0], [700, -3300, 0]]): self.R0 = 750 # 距离态势缩放因子 self.V0 = 0.1 # 速度态势缩放因子 self.phi0 = 1 # 俯仰角态势系数 self.psi0 = 0 # 偏航角态势系数 self.ammo0 = 5 # 载荷态势缩放因子(增函数用) self.heal0 = 5 # 血量态势缩放因子(增函数用) self.AMMO0 = 5 # 载荷态势缩放因子(减函数用) self.HEAL0 = 5 # 血量态势缩放因子(减函数用) # 已知的环境信息 self.critical_points = critical_points # 夺控点位置 # 计算相对速度态势时使用的增函数,输出区间为(0,1) def SigmoidTen(self, x, c): y = np.exp(-x/c) return 1/(1+10*y) # 计算停留时间态势时使用的增函数,输出区间为[0,0.9) def SigmoidNine(self, x, c): y = np.exp(-x/c) return 1/(1+9*y) - 0.1 # 计算无人车相对坐标点的态势,以备最佳点规划使用,输入为点的坐标和此无人车的信息 # 在进行最佳点规划时,会计算待打击目标附近的几个敌方无人车的态势之和,以此来计算最佳点 def UAV2Point(self, p_position, position, velocity, phi, psi, ammo, health): # 相对距离威胁 p_position = np.array(p_position) position = np.array(position) velocity = np.array(velocity) r = p_position - position dist = np.sqrt(np.sum(np.square(r))) Sr = np.exp(-dist/self.R0) # 相对速度威胁 V = np.dot(r, velocity) / dist # 求速度在连线朝向上的投影 Sv = self.SigmoidTen(V, self.V0) # 俯仰角威胁 Sphi = np.exp(-np.abs(phi - self.phi0)) # 偏航角威胁 Spsi = np.exp(-np.abs(psi - self.psi0)) # 载荷威胁(增函数)载荷为0时威胁为0 # Sammo = self.SigmoidNine(ammo, self.ammo0) Sammo = 1 # 强健度威胁(增函数) 血量为0时威胁为0 Sheal = self.SigmoidNine(health, self.heal0) # 总态势计算 (系数之和不一定为1,每个系数直接在此处修改) # 载荷威胁和强健度威胁此处用乘法,算法需要 [在打击范围内] 寻找总态势最小的点作为坐标点 S_sum = (0.6 * Sr + 0.2 * Sv + 0.2 * Sphi + 0.0 * Spsi) * Sammo * Sheal return S_sum # 计算无人车相对无人车(智能体)的态势(威胁),以作为选取打击对象的依据(选取威胁大的但是优势低的) # 其中a_position等表示智能体无人车的参数,即计算态势时考虑的主体的参数 def UAV2UAV(self, identity, a_position,a_ammo,a_health, position,velocity, phi, psi, ammo, health): a_position = np.array(a_position) position = np.array(position) velocity = np.array(velocity) # 能力比例系数,如我方无人车面对敌方无人车时为1.5, 敌方无人车面对我方无人车时为0.67 # 己方载荷及健康态势计算(增函数) # Mammo = self.SigmoidNine(a_ammo, 
self.ammo0) Mammo = 1 Mhealth = self.SigmoidNine(a_health, self.heal0) # 对方载荷及健康优势计算(减函数) # Sammo = np.exp(-ammo / self.AMMO0) Sammo = 1 Shealth = np.exp(-health / self.HEAL0) # 进攻方优势计算(选择优势最大的进行打击,若相同则选择距离更近的进行打击) if identity == "offensive": # 相对距离威胁 r = a_position - position dist = np.sqrt(np.sum(np.square(r))) Sr = np.exp(-dist / self.R0) S_offensive = 10 * Mammo*Mhealth * Sammo*Shealth * Sr # 乘了系数10以致于S不过分小 return S_offensive # 防守方优势计算(优先打击距离夺控点近的无人车) if identity == "defensive": Sr_temp = 0 for critical_point in self.critical_points: r = critical_point - position dist = np.sqrt(np.sum(np.square(r))) Sr = np.exp(-dist / self.R0) if Sr >= Sr_temp: Sr_temp = Sr S_defensive = 10 * Mammo * Mhealth * Sammo * Shealth * Sr_temp return S_defensive # 计算无人机相对夺控点的态势,以此作为防守方是否进行驱离及选择谁对谁进行驱离的依据 def Drone2Point(self, p_position,p_ts, position, velocity): # 相对距离威胁 p_position = np.array(p_position) position = np.array(position) velocity = np.array(velocity) r = p_position - position dist = np.sqrt(np.sum(np.square(r))) Spr = np.exp(-dist / 1) # 此处的放缩系数采用与无人机参数相关的 # 相对速度威胁 V = np.dot(r, velocity) / dist # 求速度在连线朝向上的投影 Spv = self.SigmoidTen(V, 0.2) # 此处的放缩系数采用与无人机参数相关的 # 停留时间威胁 Spt = self.SigmoidNine(p_ts, 0.5) # 不能接受无人机停留3秒及以上 # 计算综合态势 Sp = Spt + 0.6 * Spr + 0.2 * Spv # 建议驱离阈值: Sp >= 0.5 print(Sp) return Sp def UAV2Point_id(self, attacker_dict, key_point): # 相对距离威胁 #进攻方无人车信息 ally_agent_pos = [attacker_dict['X'], attacker_dict['Y'], attacker_dict['Z']] ally_agent_blood = attacker_dict['blood'] ally_agent_velocityx = attacker_dict['vx'] ally_agent_velocityy = attacker_dict['vy'] ally_agent_ammo = attacker_dict['ammo'] ally_agent_velocity = [ally_agent_velocityx, ally_agent_velocityy, 0] p_position = np.array(key_point) position = np.array(ally_agent_pos) velocity = np.array(ally_agent_velocity) r = p_position - position phi = math.degrees(math.atan2((ally_agent_pos[0] - key_point[0]), (ally_agent_pos[1] - key_point[1]))) ammo = ally_agent_ammo health = ally_agent_blood # 相对速度威胁 
dist = np.sqrt(np.sum(np.square(r))) Sr = np.exp(-dist/self.R0) # V = np.dot(r, velocity) / dist # 求速度在连线朝向上的投影 #Sv = self.SigmoidTen(V, self.V0) # 偏航角威胁 # Sphi = np.exp(-np.abs(phi - self.phi0)) # 俯仰角威胁 # Spsi = np.exp(-np.abs(psi - self.psi0)) # 载荷威胁(增函数)载荷为0时威胁为0 # Sammo = self.SigmoidNine(ammo, self.ammo0) Sammo = 1 # 强健度威胁(增函数) 血量为0时威胁为0 Sheal = self.SigmoidNine(health, self.heal0) # 总态势计算 (系数之和不一定为1,每个系数直接在此处修改) # 载荷威胁和强健度威胁此处用乘法,算法需要 [在打击范围内] 寻找总态势最小的点作为坐标点 # S_sum = (0.6 * Sr + 0.2 * Sv + 0.2 * Sphi + 0.0 * Spsi) * Sammo * Sheal # S_sum = (0.6 * Sr + 0.2 * Sv + 0.2 * Sphi) * Sammo * Sheal S_sum = Sr * Sammo * Sheal return S_sum def UAV2UAV_id(self, identity, attacker_dict, defender_dict): #进攻方无人车信息 ally_agent_pos = [attacker_dict['X'], attacker_dict['Y'], attacker_dict['Z']] ally_agent_blood = attacker_dict['blood'] ally_agent_ammo = attacker_dict['ammo'] enemy_agent_pos = [defender_dict['X'], defender_dict['Y'], defender_dict['Z']] enemy_agent_blood = defender_dict['blood'] enemy_agent_ammo = defender_dict['ammo'] a_position = np.array(ally_agent_pos) position = np.array(enemy_agent_pos) a_ammo = ally_agent_ammo a_health = ally_agent_blood ammo = enemy_agent_ammo health = enemy_agent_blood # 进攻方优势计算(选择优势最大的进行打击,若相同则选择距离更近的进行打击) if identity == "offensive": # 相对距离威胁 # Mammo = self.SigmoidNine(a_ammo, self.ammo0) Mammo = 1 del_health = 100 - health Mhealth = health / 100 # Mhealth = self.SigmoidNine(a_health, self.heal0) # 对方载荷及健康优势计算(减函数) # Sammo = np.exp(ammo / self.AMMO0) Sammo = 1 # Shealth = np.exp(health / self.HEAL0) del_a_health = 100 - a_health Shealth = a_health / 100 r = a_position - position dist = np.sqrt(np.sum(np.square(r))) Sr = np.exp(-dist / self.R0) S_offensive = 1 * Mammo * Mhealth * Sammo * Shealth * Sr # 乘了系数10以致于S不过分小 return S_offensive # 防守方优势计算(优先打击距离夺控点近的无人车) if identity == "defensive": Sr_temp = 0 # Mammo = self.SigmoidNine(ammo, self.ammo0) Mammo = 1 # Mhealth = self.SigmoidNine(health, self.heal0) del_health = 100 - health Mhealth 
= health / 100 # 对方载荷及健康优势计算(减函数) # Sammo = np.exp(-a_ammo / self.AMMO0) Sammo = 1 del_a_health = 100 - a_health Shealth = a_health / 100 for critical_point in self.critical_points: r = critical_point - a_position dist = np.sqrt(np.sum(np.square(r))) Sr = np.exp(-dist / self.R0) if Sr >= Sr_temp: Sr_temp = Sr S_defensive = Mammo * Mhealth * Sammo * Shealth * Sr_temp return S_defensive def Drone2Point_id(self, drone_data, key_point): drone_pos = [drone_data['X'], drone_data['Y'], drone_data['Z']] drone_blood = drone_data['blood'] drone_velocityx = drone_data['vx'] drone_velocityy = drone_data['vy'] drone_velocity = [drone_velocityx, drone_velocityy, 0] # 相对距离威胁 p_position = np.array(key_point) position = np.array(drone_pos) velocity = np.array(drone_velocity) r = p_position - position dist = np.sqrt(np.sum(np.square(r))) Spr = np.exp(-dist / 1000) # 此处的放缩系数采用与无人机参数相关的 # 相对速度威胁 # V = np.dot(r, velocity) / dist # 求速度在连线朝向上的投影 # Spv = self.SigmoidTen(V, 0.2) # 此处的放缩系数采用与无人机参数相关的 # 停留时间威胁 # Spt = self.SigmoidNine(p_ts, 0.5) # 不能接受无人机停留3秒及以上 # 计算综合态势 # Sp = Spt + 0.6 * Spr + 0.2 * Spv # 建议驱离阈值: Sp >= 0.5 Sp = Spr return Sp #计算防守方无人车相对于进攻方无人车的态势矩阵 #矩阵横轴维度为进攻方无人车数量,纵轴维度为防守方无人车数量 def defend_to_attack(self, self_data, ally_agents_data, enemy_agents_data, key_points): #无人车 all_friend_agents_data = dict(self_data, **ally_agents_data) # 进攻方所有智能体数据 for agent_id, dict_value in all_friend_agents_data.items(): if 'blood' not in dict_value: temp = agent_id all_friend_agents_data.pop(temp) # 剔除无人机数据,只考虑地面无人车平台 #进攻方无人车信息 all_friend_agent_pos = [] all_friend_agent_blood = [] all_friend_agent_velocityx = [] all_friend_agent_velocityy = [] all_friend_agent_ammo = [] all_friend_agent_ID = [] all_friend_amount = 0 for agent_id, dict_value in all_friend_agents_data.items(): all_friend_agent_ID.append(agent_id) #编号接口,形式参照丘老师代码,正确性存疑 # all_friend_agent_ammo.append(dict_value['ammo']) #载荷接口,形式参照丘老师代码,正确性存疑 all_friend_agent_velocityx.append(dict_value['velocityx']) #速度接口,形式参照丘老师代码,正确性存疑 
all_friend_agent_velocityy.append(dict_value['velocityy']) #速度接口,形式参照丘老师代码,正确性存疑 all_friend_agent_blood.append(dict_value['blood']) #血量接口,形式参照丘老师代码,正确性存疑 all_friend_agent_pos.append([dict_value['X'], dict_value['Y'], dict_value['Z']]) #位置接口,参照丘老师代码编写 all_friend_amount += 1 #防守方无人车信息 all_enemy_agent_pos = [] all_enemy_agent_blood = [] all_enemy_agent_velocityx = [] all_enemy_agent_velocityy = [] all_enemy_agent_ammo = [] all_enemy_agent_ID = [] all_enemy_amount = 0 for agent_id, dict_value in enemy_agents_data.items(): all_enemy_agent_ID.append(agent_id) # all_enemy_agent_ammo.append(dict_value['ammo']) all_enemy_agent_velocityx.append(dict_value['velocityx']) all_enemy_agent_velocityy.append(dict_value['velocityy']) #速度接口,形式参照丘老师代码,正确性存疑 all_enemy_agent_blood.append(dict_value['blood']) all_enemy_agent_pos.append([dict_value['X'], dict_value['Y'], dict_value['Z']]) all_enemy_amount += 1 evaluation = np.zeros((all_friend_amount, all_enemy_amount)) for i in range(all_friend_amount): for j in range(all_enemy_amount): yaw = math.degrees(math.atan2((all_enemy_agent_pos[j][0] - all_friend_agent_pos[i][0]), (all_enemy_agent_pos[j][1] - all_friend_agent_pos[i][1]))) # UAV2UAV(self, identity, a_position,a_ammo,a_health, position,velocity,phi,psi,ammo,health) all_enemy_agent_velocity = [all_enemy_agent_velocityx[j], all_enemy_agent_velocityy[j], 0] evaluation[i][j] = self.UAV2UAV("offensive", all_friend_agent_pos[i], 0, all_friend_agent_blood[i], all_enemy_agent_pos[j], all_enemy_agent_velocity, yaw, 0, 0, all_enemy_agent_blood[j]) return evaluation #计算进攻方无人车相对于防守方无人车的态势矩阵 #矩阵横轴维度为防守方无人车数量,纵轴维度为进攻方无人车数量 def attack_to_defend(self, self_data, ally_agents_data, enemy_agents_data, key_points): #无人车 all_friend_agents_data = dict(self_data, **ally_agents_data) # 进攻方所有智能体数据 for agent_id, dict_value in all_friend_agents_data.items(): if 'blood' not in dict_value: temp = agent_id all_friend_agents_data.pop(temp) # 剔除无人机数据,只考虑地面无人车平台 #进攻方无人车信息 all_friend_agent_pos = [] 
all_friend_agent_blood = [] all_friend_agent_velocityx = [] all_friend_agent_velocityy = [] all_friend_agent_ammo = [] all_friend_agent_ID = [] all_friend_amount = 0 for agent_id, dict_value in all_friend_agents_data.items(): all_friend_agent_ID.append(agent_id) #编号接口,形式参照丘老师代码,正确性存疑 #all_friend_agent_ammo.append(dict_value['ammo']) #载荷接口,形式参照丘老师代码,正确性存疑 all_friend_agent_velocityx.append(dict_value['velocityx']) #速度接口,形式参照丘老师代码,正确性存疑 all_friend_agent_velocityy.append(dict_value['velocityy']) #速度接口,形式参照丘老师代码,正确性存疑 all_friend_agent_blood.append(dict_value['blood']) #血量接口,形式参照丘老师代码,正确性存疑 all_friend_agent_pos.append([dict_value['X'], dict_value['Y'], dict_value['Z']]) #位置接口,参照丘老师代码编写 all_friend_amount += 1 #防守方无人车信息 all_enemy_agent_pos = [] all_enemy_agent_blood = [] all_enemy_agent_velocityx = [] all_enemy_agent_velocityy = [] all_enemy_agent_ammo = [] all_enemy_agent_ID = [] all_enemy_amount = 0 for agent_id, dict_value in enemy_agents_data.items(): all_enemy_agent_ID.append(agent_id) #all_enemy_agent_ammo.append(dict_value['ammo']) all_enemy_agent_velocityx.append(dict_value['velocityx']) all_enemy_agent_velocityy.append(dict_value['velocityy']) all_enemy_agent_blood.append(dict_value['blood']) all_enemy_agent_pos.append([dict_value['X'], dict_value['Y'], dict_value['Z']]) all_enemy_amount += 1 evaluation = np.zeros((all_enemy_amount, all_friend_amount)) for i in range(all_enemy_amount): for j in range(all_friend_amount): yaw = math.degrees(math.atan2((all_enemy_agent_pos[j][0] - all_friend_agent_pos[i][0]), (all_enemy_agent_pos[j][1] - all_friend_agent_pos[i][1]))) # UAV2UAV(self, identity, a_position,a_ammo,a_health, position,velocity,phi,psi,ammo,health) all_friend_agent_velocity = [all_friend_agent_velocityx[j], all_friend_agent_velocityy[j], 0] evaluation[i][j] = self.UAV2UAV("defensive", all_enemy_agent_pos[i], 0, all_enemy_agent_blood[i], all_friend_agent_pos[j], all_friend_agent_velocity, yaw, 0, 0, all_friend_agent_blood[j]) return evaluation 
#计算无人机对于夺控点位置的态势矩阵 #矩阵横轴代表夺控点,纵轴代表无人机 def uav_to_defend(self, self_data, ally_agents_data, enemy_agents_data, key_points): all_friend_agents_data = dict(self_data, **ally_agents_data) # 进攻方所有智能体数据 for agent_id, dict_value in all_friend_agents_data.items(): if 'blood' not in dict_value: temp1 = agent_id temp2 = dict_value drone_data = {} drone_data[temp1] = temp2 #无人机信息 drone_pos = [] drone_velocityx = [] drone_velocityy = [] drone_ID = [] drone_amount = 0 for agent_id, dict_value in drone_data.items(): drone_ID.append(agent_id) #编号接口,形式参照丘老师代码,正确性存疑 drone_velocityx.append(dict_value['velocityx']) #速度接口,形式参照丘老师代码,正确性存疑 drone_velocityy.append(dict_value['velocityy']) #速度接口,形式参照丘老师代码,正确性存疑 drone_pos.append([dict_value['X'], dict_value['Y'], dict_value['Z']]) #位置接口,参照丘老师代码编写 drone_amount += 1 #夺控点位置 key_point_amount = 0 key_point_pos = [] for key_point in key_points: key_point_pos.append(key_point) key_point_amount += 1 evaluation = np.zeros((key_point_amount, drone_amount)) for i in range(key_point_amount): for j in range(drone_amount): # Drone2Point(self, p_position,p_ts, position, velocity) # print(self.Drone2Point(key_point_pos[i], 0, drone_pos[j], drone_velocity[j])) drone_velocity = [drone_velocityx[j], drone_velocityy[j], 0] evaluation[i][j] = self.Drone2Point(key_point_pos[i], 0, drone_pos[j], drone_velocity) return evaluation ''' #计算无人车对于周围位置点的态势评估矩阵 # def attack_to_point(self_data, ally_agents_data, enemy_agents_data, key_points): #无人车 all_friend_agents_data = dict(self_data, **ally_agents_data) # 进攻方所有智能体数据 all_friend_agents_data.pop("231") # 剔除无人机数据,只考虑地面无人车平台 #进攻方无人车信息 all_friend_agent_pos = [] all_friend_agent_blood = [] all_friend_agent_velocity = [] all_friend_agent_ammo = [] all_friend_agent_ID = [] all_friend_amount = 0 for agent_id, dict_value in all_friend_agents_data.items(): all_friend_agent_ID.append(dict_value['ID']) #编号接口,形式参照丘老师代码,正确性存疑 all_friend_agent_ammo.append(dict_value['ammo']) #载荷接口,形式参照丘老师代码,正确性存疑 
all_friend_agent_velocity.append(dict_value['velocity']) #速度接口,形式参照丘老师代码,正确性存疑 all_friend_agent_blood.append(dict_value['blood']) #血量接口,形式参照丘老师代码,正确性存疑 all_friend_agent_pos.append([dict_value['X'], dict_value['Y'], dict_value['Z']]) #位置接口,参照丘老师代码编写 all_friend_amount += 1 evaluation = np.zeros((all_friend_amount, all_enemy_amount)) for i in range(all_enemy_amount): for j in range(all_friend_amount): yaw = math.degrees(math.atan2((all_enemy_agent_pos[j][0] - all_friend_agent_pos[i][0]), (all_enemy_agent_pos[j][1] - all_friend_agent_pos[i][1]))) #UAV2UAV(self, identity, a_position,a_ammo,a_health, position,velocity,phi,psi,ammo,health) evaluation[i][j] = self.UAV2UAV("defensive", all_enemy_agent_pos[i], all_enemy_agent_ammo[i], all_enemy_agent_blood[i], all_friend_agent_pos[j], all_friend_agent_velocity[j], yaw, 0, all_friend_agent_ammo[j], all_friend_agent_blood[j]) return evaluation ''' #态势评估主函数 def evaluate(self, self_data, ally_agents_data, enemy_agents_data, key_points): d2a = self.defend_to_attack(self_data, ally_agents_data, enemy_agents_data, key_points) a2d = self.attack_to_defend(self_data, ally_agents_data, enemy_agents_data, key_points) u2d = self.uav_to_defend(self_data, ally_agents_data, enemy_agents_data, key_points) return d2a, a2d, u2d def test(): evaluator = Evaluation_module() # test if __name__ == '__main__': test() ================================================ FILE: PythonExample/hmp_minimal_modules/ALGORITHM/script_ai/red_strategy.py ================================================ import numpy as np import math from scipy.optimize import linear_sum_assignment def defense_combat(self_data, ally_agents_data, enemy_agents_data, key_points, blue_alive, red_alive): # 防守方red小车最大速度 red_car_max_vel = 600 # 进攻方blue小车最大速度 blue_car_max_vel = 600 # 进攻方blue无人机最大速度 blue_drone_max_vel = 600 # 进攻方无人机占领夺控点胜利时间 time_to_win = 2.0 # 驱离载荷作用范围 expel_range = 1200 # 无人车打击距离 fire_dist = 2000 all_enemy_agent_pos = [] for agent_id, dict_value in 
enemy_agents_data.items(): all_enemy_agent_pos.append([dict_value['X'], dict_value['Y'], dict_value['Z']]) """ all_enemy_agent_yaw = [] for agent_id, dict_value in enemy_agents_data.items(): if agent_id != '231': all_enemy_agent_yaw.append([dict_value['Yaw']]) """ if '311' in self_data.keys(): friend_agents_data = dict(self_data, **ally_agents_data) if '311' in ally_agents_data.keys(): friend_agents_data = dict(ally_agents_data, **self_data) all_friend_agent_pos = [] for agent_id, dict_value in friend_agents_data.items(): all_friend_agent_pos.append([dict_value['X'], dict_value['Y'], dict_value['Z']]) """ all_friend_agent_yaw = [] for agent_id, dict_value in friend_agents_data.items(): all_friend_agent_yaw.append([dict_value['Yaw']]) """ target_location = np.array(key_points) red_car_current_pos = np.array(all_friend_agent_pos) blue_car_current_pos = np.array(all_enemy_agent_pos[0:2]) blue_drone_current_pos = np.array([all_enemy_agent_pos[-1]]) #red_car_current_yaw = np.array(all_friend_agent_yaw) #blue_car_current_yaw = np.array(all_enemy_agent_yaw) """ blue_alive = [] for agent_id, dict_value in enemy_agents_data.items(): if agent_id is not '231': if dict_value['Blood'] == 0: blue_alive.append(False) else: blue_alive.append(True) red_alive = [] for agent_id, dict_value in friend_agents_data.items(): if dict_value['Blood'] == 0: red_alive.append(False) else: red_alive.append(True) """ blue_drone_dist_to_go = np.array([[np.linalg.norm(blue_drone_current_pos[a][:2] - target_location[b][:2]) for b in range(2)] for a in range(1)]) blue_drone_time_to_go = blue_drone_dist_to_go / blue_drone_max_vel if blue_drone_time_to_go[0][0] < 0.2: blue_drone_time_to_go[0][0] = -1000 if blue_drone_time_to_go[0][1] < 0.2: blue_drone_time_to_go[0][1] = -1000 red_car_dist_to_go = np.array( [[np.linalg.norm(red_car_current_pos[a][:2] - target_location[b][:2]) - expel_range for b in range(2)] for a in range(2)]) red_car_time_to_go = red_car_dist_to_go / red_car_max_vel if red_alive[0] is 
False: red_car_time_to_go[0] = 100000 elif red_alive[1] is False: red_car_time_to_go[1] = 100000 red_car_next_pos = red_car_current_pos if 0: #np.sum(red_alive) == 0: target_id = np.array([1, 1]) #red_car_next_yaw = red_car_current_yaw #red_car_next_fire_yaw = np.array([[0], [0]]) #red_car_fire_flag = [False, False] else: if np.sum(red_alive) == 2 and np.sum(blue_alive) == 2: red_blue_car_relative_dist = np.array( [[np.linalg.norm(red_car_current_pos[a][:2] - blue_car_current_pos[b][:2]) for b in range(2)] for a in range(2)]) _, col_index = linear_sum_assignment(red_blue_car_relative_dist) target_pos = blue_car_current_pos[col_index] target_id = col_index elif np.sum(red_alive) == 1 and np.sum(blue_alive) == 2: if red_alive[0] is True: red_blue_car_relative_dist = np.array( [[np.linalg.norm(red_car_current_pos[0][:2] - blue_car_current_pos[b][:2]) for b in range(2)]]) target_pos = np.array( [blue_car_current_pos[np.argmin(red_blue_car_relative_dist)], red_car_current_pos[1]]) elif red_alive[1] is True: red_blue_car_relative_dist = np.array( [[np.linalg.norm(red_car_current_pos[1][:2] - blue_car_current_pos[b][:2]) for b in range(2)]]) target_pos = np.array( [red_car_current_pos[0], blue_car_current_pos[np.argmin(red_blue_car_relative_dist)]]) target_id = np.array([np.argmin(red_blue_car_relative_dist), np.argmin(red_blue_car_relative_dist)]) elif np.sum(red_alive) == 2 and np.sum(blue_alive) == 1: if blue_alive[0] is True: target_pos = np.array([blue_car_current_pos[0], blue_car_current_pos[0]]) target_id = np.array([0, 0]) elif blue_alive[1] is True: target_pos = np.array([blue_car_current_pos[1], blue_car_current_pos[1]]) target_id = np.array([1, 1]) elif np.sum(red_alive) == 1 and np.sum(blue_alive) == 1: if red_alive[0] is True: if blue_alive[0] is True: target_pos = np.array([blue_car_current_pos[0], red_car_current_pos[1]]) target_id = np.array([0, 0]) elif blue_alive[1] is True: target_pos = np.array([blue_car_current_pos[1], red_car_current_pos[1]]) 
target_id = np.array([1, 1]) elif red_alive[1] is True: if blue_alive[0] is True: target_pos = np.array([red_car_current_pos[0], blue_car_current_pos[0]]) target_id = np.array([0, 0]) elif blue_alive[1] is True: target_pos = np.array([red_car_current_pos[0], blue_car_current_pos[1]]) target_id = np.array([1, 1]) else: red_car_next_pos = red_car_current_pos target_id = np.array([1, 1]) blue_success_time = blue_drone_time_to_go + time_to_win*0.0 if (blue_success_time - np.min(red_car_time_to_go, axis=0, keepdims=True) >= 0).all() and np.sum( blue_alive) > 0: # 'offense' red_car_next_pos = target_pos flag = 'offense' else: # 'defense' target_defense_index = np.argmin( blue_success_time - np.min(red_car_time_to_go, axis=0, keepdims=True)) if red_alive[0] is True and red_alive[1] is True: #red_car_next_pos = np.array( # [target_location[target_defense_index], target_location[target_defense_index]]) red_car_next_pos[0][:2] = blue_drone_current_pos[0][0:2] red_car_next_pos[1][:2] = blue_drone_current_pos[0][0:2] elif red_alive[0] is True and red_alive[1] is False: #red_car_next_pos = np.array( # [target_location[target_defense_index], red_car_current_pos[1]]) red_car_next_pos[0][:2] = blue_drone_current_pos[0][0:2] elif red_alive[0] is False and red_alive[1] is True: #red_car_next_pos = np.array( # [red_car_current_pos[0], target_location[target_defense_index]]) red_car_next_pos[1][:2] = blue_drone_current_pos[0][0:2] flag = 'defense' """ agent_yaw = [0, 0] fire_yaw = [0, 0] fire_flag = [False, False] for index in range(2): relative_dist = np.linalg.norm(red_car_current_pos[index] - target_pos[index]) relative_yaw = math.degrees(math.atan2((target_pos[index][1] - red_car_current_pos[index][1]), (target_pos[index][0] - red_car_current_pos[index][0]))) if red_alive[index] is True: if relative_dist < fire_dist: red_car_next_pos[index] = red_car_current_pos[index] if ((relative_yaw < red_car_current_yaw[index] - 90) or ( relative_yaw > red_car_current_yaw[index] + 90)): # 
在车的基础上,旋转打击载荷 agent_yaw[index] = relative_yaw # 直接旋转车朝向 fire_yaw[index] = 0 else: agent_yaw[index] = red_car_current_yaw[index] fire_yaw[index] = relative_yaw - red_car_current_yaw[index] fire_flag[index] = True else: fire_flag[index] = False agent_yaw[index] = red_car_current_yaw[index] fire_yaw[index] = 0 else: fire_flag[index] = False agent_yaw[index] = red_car_current_yaw[index] fire_yaw[index] = 0 red_car_next_yaw = np.array(agent_yaw) red_car_next_fire_yaw = np.array(fire_yaw) """ if '311' in self_data.keys(): return red_car_next_pos[0], target_id[0], flag #return [red_car_next_pos[0], red_car_next_yaw [0], red_car_next_fire_yaw[0], fire_flag[0]] else: return red_car_next_pos[1], target_id[1], flag #return [red_car_next_pos[1], red_car_next_yaw [1], red_car_next_fire_yaw[1], fire_flag[1]] if __name__ == '__main__': # 211和221是进攻方blue小车, 231是进攻方blue无人机 # 311和321是防守方red小车 # 当前要决策的防守方red小车的信息 self_data = {'321': {'X': -3.0, 'Y': 2.5, 'Z': 0, 'Yaw': 10, 'Blood': 100}} # 其余防守方red小车的信息 ally_agents_data = {'311': {'X': 0.0, 'Y': 1.5, 'Z': 0, 'Yaw': 20, 'Blood': 100}} # 进攻方blue小车和无人机信息 enemy_agents_data = {'211': {'X': 1.5, 'Y': -2.0, 'Z': 0, 'Yaw': 30, 'Blood': 100}, '221': {'X': -2.5, 'Y': -2.5, 'Z': 0, 'Yaw': 40, 'Blood': 100}, '231': {'X': 0.7, 'Y': 3.3, 'Z': 1.5, 'Yaw': 0}} # 夺控点信息 key_points = [[0.7, 3.3, 0], [-3.0, -0.7, 0]] # 存活状态 blue_alive = [True, True] red_alive = [True, True] target_position, target_id, flag = defense_combat(self_data, ally_agents_data, enemy_agents_data, key_points, blue_alive, red_alive) print(target_position) print('\r\n') print(target_id) print('\r\n') print(flag) ================================================ FILE: PythonExample/hmp_minimal_modules/ALGORITHM/script_ai/stance.py ================================================ import copy import random import numpy as np import datetime import time import math #态势评估模块 #接口输入:全局状态信息,包括进攻方无人车各种状态,防守方无人车各种状态以及地图状态 class Evaluation_module(): def __init__(self, critical_points=[[-3, -0.7, 
0], [0.7, 3.3, 0]]): self.R0 = 1.5 * 100 # 距离态势缩放因子 self.V0 = 0.1 # 速度态势缩放因子 self.phi0 = np.pi / 4 # 俯仰角态势系数 self.psi0 = 0 # 偏航角态势系数 self.ammo0 = 0.15 # 载荷态势缩放因子(增函数用) self.heal0 = 0.15 # 血量态势缩放因子(增函数用) self.AMMO0 = 0.5 # 载荷态势缩放因子(减函数用) self.HEAL0 = 0.5 # 血量态势缩放因子(减函数用) # 已知的环境信息 self.critical_points = critical_points # 夺控点位置 # 计算相对速度态势时使用的增函数,输出区间为(0,1) def SigmoidTen(self, x, c): y = np.exp(-x/c) return 1/(1+10*y) # 计算停留时间态势时使用的增函数,输出区间为[0,0.9) def SigmoidNine(self, x, c): y = np.exp(-x/c) return 1/(1+9*y) - 0.1 # 计算无人车相对坐标点的态势,以备最佳点规划使用,输入为点的坐标和此无人车的信息 # 在进行最佳点规划时,会计算待打击目标附近的几个敌方无人车的态势之和,以此来计算最佳点 def UAV2Point(self, p_position, position, velocity, phi, psi, ammo, health): # 相对距离威胁 p_position = np.array(p_position) position = np.array(position) velocity = np.array(velocity) r = p_position - position dist = np.sqrt(np.sum(np.square(r))) Sr = np.exp(-dist/self.R0) # 相对速度威胁 V = np.dot(r, velocity) / dist # 求速度在连线朝向上的投影 Sv = self.SigmoidTen(V, self.V0) # 俯仰角威胁 Sphi = np.exp(-np.abs(phi - self.phi0)) # 偏航角威胁 Spsi = np.exp(-np.abs(psi - self.psi0)) # 载荷威胁(增函数)载荷为0时威胁为0 # Sammo = self.SigmoidNine(ammo, self.ammo0) Sammo = 1 # 强健度威胁(增函数) 血量为0时威胁为0 Sheal = self.SigmoidNine(health, self.heal0) # 总态势计算 (系数之和不一定为1,每个系数直接在此处修改) # 载荷威胁和强健度威胁此处用乘法,算法需要 [在打击范围内] 寻找总态势最小的点作为坐标点 S_sum = (0.6 * Sr + 0.2 * Sv + 0.2 * Sphi + 0.0 * Spsi) * Sammo * Sheal return S_sum # 计算无人车相对无人车(智能体)的态势(威胁),以作为选取打击对象的依据(选取威胁大的但是优势低的) # 其中a_position等表示智能体无人车的参数,即计算态势时考虑的主体的参数 def UAV2UAV(self, identity, a_position,a_ammo,a_health, position,velocity, phi, psi, ammo, health): a_position = np.array(a_position) position = np.array(position) velocity = np.array(velocity) # 能力比例系数,如我方无人车面对敌方无人车时为1.5, 敌方无人车面对我方无人车时为0.67 # 己方载荷及健康态势计算(增函数) # Mammo = self.SigmoidNine(a_ammo, self.ammo0) Mammo = 1 Mhealth = self.SigmoidNine(a_health, self.heal0) # 对方载荷及健康优势计算(减函数) # Sammo = np.exp(-ammo / self.AMMO0) Sammo = 1 Shealth = np.exp(-health / self.HEAL0) # 进攻方优势计算(选择优势最大的进行打击,若相同则选择距离更近的进行打击) if identity == 
"offensive": # 相对距离威胁 r = a_position - position dist = np.sqrt(np.sum(np.square(r))) Sr = np.exp(-dist / self.R0) S_offensive = 10 * Mammo*Mhealth * Sammo*Shealth * Sr # 乘了系数10以致于S不过分小 return S_offensive # 防守方优势计算(优先打击距离夺控点近的无人车) if identity == "defensive": Sr_temp = 0 for critical_point in self.critical_points: r = critical_point - position dist = np.sqrt(np.sum(np.square(r))) Sr = np.exp(-dist / self.R0) if Sr >= Sr_temp: Sr_temp = Sr S_defensive = 10 * Mammo * Mhealth * Sammo * Shealth * Sr_temp return S_defensive # 计算无人机相对夺控点的态势,以此作为防守方是否进行驱离及选择谁对谁进行驱离的依据 def Drone2Point(self, p_position,p_ts, position, velocity): # 相对距离威胁 p_position = np.array(p_position) position = np.array(position) velocity = np.array(velocity) r = p_position - position dist = np.sqrt(np.sum(np.square(r))) Spr = np.exp(-dist / 1) # 此处的放缩系数采用与无人机参数相关的 # 相对速度威胁 V = np.dot(r, velocity) / dist # 求速度在连线朝向上的投影 Spv = self.SigmoidTen(V, 0.2) # 此处的放缩系数采用与无人机参数相关的 # 停留时间威胁 Spt = self.SigmoidNine(p_ts, 0.5) # 不能接受无人机停留3秒及以上 # 计算综合态势 Sp = Spt + 0.6 * Spr + 0.2 * Spv # 建议驱离阈值: Sp >= 0.5 print(Sp) return Sp def UAV2Point_id(self, attacker_dict, key_point): # 相对距离威胁 #进攻方无人车信息 ally_agent_pos = [attacker_dict['X'], attacker_dict['Y'], attacker_dict['Z']] ally_agent_blood = attacker_dict['blood'] ally_agent_velocityx = attacker_dict['vx'] ally_agent_velocityy = attacker_dict['vy'] ally_agent_ammo = attacker_dict['ammo'] ally_agent_velocity = [ally_agent_velocityx, ally_agent_velocityy, 0] p_position = np.array(key_point) position = np.array(ally_agent_pos) velocity = np.array(ally_agent_velocity) r = p_position - position phi = math.degrees(math.atan2((ally_agent_pos[0] - key_point[0]), (ally_agent_pos[1] - key_point[1]))) ammo = ally_agent_ammo health = ally_agent_blood # 相对速度威胁 dist = np.sqrt(np.sum(np.square(r))) Sr = np.exp(-dist/self.R0) V = np.dot(r, velocity) / dist # 求速度在连线朝向上的投影 Sv = self.SigmoidTen(V, self.V0) # 偏航角威胁 Sphi = np.exp(-np.abs(phi - self.phi0)) # 俯仰角威胁 # Spsi = np.exp(-np.abs(psi - 
self.psi0)) # 载荷威胁(增函数)载荷为0时威胁为0 Sammo = self.SigmoidNine(ammo, self.ammo0) # 强健度威胁(增函数) 血量为0时威胁为0 Sheal = self.SigmoidNine(health, self.heal0) # 总态势计算 (系数之和不一定为1,每个系数直接在此处修改) # 载荷威胁和强健度威胁此处用乘法,算法需要 [在打击范围内] 寻找总态势最小的点作为坐标点 # S_sum = (0.6 * Sr + 0.2 * Sv + 0.2 * Sphi + 0.0 * Spsi) * Sammo * Sheal S_sum = (0.6 * Sr + 0.2 * Sv + 0.2 * Sphi) * Sammo * Sheal return S_sum def UAV2UAV_id(self, identity, attacker_dict, defender_dict): #进攻方无人车信息 ally_agent_pos = [attacker_dict['X'], attacker_dict['Y'], attacker_dict['Z']] ally_agent_blood = attacker_dict['blood'] # ally_agent_ammo = attacker_dict['ammo'] enemy_agent_pos = [defender_dict['X'], defender_dict['Y'], defender_dict['Z']] enemy_agent_blood = defender_dict['blood'] # enemy_agent_ammo = defender_dict['ammo'] a_position = np.array(ally_agent_pos) position = np.array(enemy_agent_pos) # a_ammo = ally_agent_ammo a_health = ally_agent_blood # ammo = enemy_agent_ammo health = enemy_agent_blood # 进攻方优势计算(选择优势最大的进行打击,若相同则选择距离更近的进行打击) if identity == "offensive": # 相对距离威胁 # Mammo = self.SigmoidNine(a_ammo, self.ammo0) # Mhealth = np.exp(a_health / self.heal0) Mhealth = np.exp(a_health/100) # 对方载荷及健康优势计算(减函数) # Sammo = np.exp(-ammo / self.AMMO0) # Shealth = np.exp(-health / self.HEAL0) Shealth = np.exp(-health/100) r = a_position - position dist = np.sqrt(np.sum(np.square(r))) # Sr = np.exp(-dist / self.R0) Sr = np.exp(-dist / 1000) # S_offensive = 10 * Mammo*Mhealth * Sammo*Shealth * Sr # 乘了系数10以致于S不过分小 S_offensive = 0.1 * Mhealth * Shealth * Sr # 乘了系数10以致于S不过分小 return S_offensive # 防守方优势计算(优先打击距离夺控点近的无人车) if identity == "defensive": Sr_temp = 0 Mammo = self.SigmoidNine(ammo, self.ammo0) Mhealth = self.SigmoidNine(health, self.heal0) # 对方载荷及健康优势计算(减函数) Sammo = np.exp(-a_ammo / self.AMMO0) Shealth = np.exp(-a_health / self.HEAL0) for critical_point in self.critical_points: r = critical_point - position dist = np.sqrt(np.sum(np.square(r))) Sr = np.exp(-dist / self.R0) if Sr >= Sr_temp: Sr_temp = Sr S_defensive = 10 * Mammo * 
Mhealth * Sammo * Shealth * Sr_temp return S_defensive def Drone2Point_id(self, drone_data, key_point): drone_pos = [drone_data['X'], drone_data['Y']] # drone_blood = drone_data['blood'] # drone_velocityx = drone_data['vx'] # drone_velocityy = drone_data['vy'] # drone_velocity = [drone_velocityx, drone_velocityy, 0] # 相对距离威胁 p_position = np.array(key_point) position = np.array(drone_pos) # velocity = np.array(drone_velocity) r = p_position - position dist = np.sqrt(np.sum(np.square(r))) Spr = np.exp(-dist / 100) # 此处的放缩系数采用与无人机参数相关的 # 相对速度威胁 # V = np.dot(r, velocity) / dist # 求速度在连线朝向上的投影 # Spv = self.SigmoidTen(V, 0.2) # 此处的放缩系数采用与无人机参数相关的 # 停留时间威胁 # Spt = self.SigmoidNine(p_ts, 0.5) # 不能接受无人机停留3秒及以上 # 计算综合态势 # Sp = Spt + 0.6 * Spr + 0.2 * Spv # 建议驱离阈值: Sp >= 0.5 # Sp = 0.6 * Spr + 0.4 * Spv Sp = Spr return Sp #计算防守方无人车相对于进攻方无人车的态势矩阵 #矩阵横轴维度为进攻方无人车数量,纵轴维度为防守方无人车数量 def defend_to_attack(self, self_data, ally_agents_data, enemy_agents_data, key_points): #无人车 all_friend_agents_data = dict(self_data, **ally_agents_data) # 进攻方所有智能体数据 for agent_id, dict_value in all_friend_agents_data.items(): if 'blood' not in dict_value: temp = agent_id all_friend_agents_data.pop(temp) # 剔除无人机数据,只考虑地面无人车平台 #进攻方无人车信息 all_friend_agent_pos = [] all_friend_agent_blood = [] all_friend_agent_velocityx = [] all_friend_agent_velocityy = [] all_friend_agent_ammo = [] all_friend_agent_ID = [] all_friend_amount = 0 for agent_id, dict_value in all_friend_agents_data.items(): all_friend_agent_ID.append(agent_id) #编号接口,形式参照丘老师代码,正确性存疑 # all_friend_agent_ammo.append(dict_value['ammo']) #载荷接口,形式参照丘老师代码,正确性存疑 all_friend_agent_velocityx.append(dict_value['velocityx']) #速度接口,形式参照丘老师代码,正确性存疑 all_friend_agent_velocityy.append(dict_value['velocityy']) #速度接口,形式参照丘老师代码,正确性存疑 all_friend_agent_blood.append(dict_value['blood']) #血量接口,形式参照丘老师代码,正确性存疑 all_friend_agent_pos.append([dict_value['X'], dict_value['Y'], dict_value['Z']]) #位置接口,参照丘老师代码编写 all_friend_amount += 1 #防守方无人车信息 all_enemy_agent_pos = [] 
all_enemy_agent_blood = [] all_enemy_agent_velocityx = [] all_enemy_agent_velocityy = [] all_enemy_agent_ammo = [] all_enemy_agent_ID = [] all_enemy_amount = 0 for agent_id, dict_value in enemy_agents_data.items(): all_enemy_agent_ID.append(agent_id) # all_enemy_agent_ammo.append(dict_value['ammo']) all_enemy_agent_velocityx.append(dict_value['velocityx']) all_enemy_agent_velocityy.append(dict_value['velocityy']) #速度接口,形式参照丘老师代码,正确性存疑 all_enemy_agent_blood.append(dict_value['blood']) all_enemy_agent_pos.append([dict_value['X'], dict_value['Y'], dict_value['Z']]) all_enemy_amount += 1 evaluation = np.zeros((all_friend_amount, all_enemy_amount)) for i in range(all_friend_amount): for j in range(all_enemy_amount): yaw = math.degrees(math.atan2((all_enemy_agent_pos[j][0] - all_friend_agent_pos[i][0]), (all_enemy_agent_pos[j][1] - all_friend_agent_pos[i][1]))) # UAV2UAV(self, identity, a_position,a_ammo,a_health, position,velocity,phi,psi,ammo,health) all_enemy_agent_velocity = [all_enemy_agent_velocityx[j], all_enemy_agent_velocityy[j], 0] evaluation[i][j] = self.UAV2UAV("offensive", all_friend_agent_pos[i], 0, all_friend_agent_blood[i], all_enemy_agent_pos[j], all_enemy_agent_velocity, yaw, 0, 0, all_enemy_agent_blood[j]) return evaluation #计算进攻方无人车相对于防守方无人车的态势矩阵 #矩阵横轴维度为防守方无人车数量,纵轴维度为进攻方无人车数量 def attack_to_defend(self, self_data, ally_agents_data, enemy_agents_data, key_points): #无人车 all_friend_agents_data = dict(self_data, **ally_agents_data) # 进攻方所有智能体数据 for agent_id, dict_value in all_friend_agents_data.items(): if 'blood' not in dict_value: temp = agent_id all_friend_agents_data.pop(temp) # 剔除无人机数据,只考虑地面无人车平台 #进攻方无人车信息 all_friend_agent_pos = [] all_friend_agent_blood = [] all_friend_agent_velocityx = [] all_friend_agent_velocityy = [] all_friend_agent_ammo = [] all_friend_agent_ID = [] all_friend_amount = 0 for agent_id, dict_value in all_friend_agents_data.items(): all_friend_agent_ID.append(agent_id) #编号接口,形式参照丘老师代码,正确性存疑 
#all_friend_agent_ammo.append(dict_value['ammo']) #载荷接口,形式参照丘老师代码,正确性存疑 all_friend_agent_velocityx.append(dict_value['velocityx']) #速度接口,形式参照丘老师代码,正确性存疑 all_friend_agent_velocityy.append(dict_value['velocityy']) #速度接口,形式参照丘老师代码,正确性存疑 all_friend_agent_blood.append(dict_value['blood']) #血量接口,形式参照丘老师代码,正确性存疑 all_friend_agent_pos.append([dict_value['X'], dict_value['Y'], dict_value['Z']]) #位置接口,参照丘老师代码编写 all_friend_amount += 1 #防守方无人车信息 all_enemy_agent_pos = [] all_enemy_agent_blood = [] all_enemy_agent_velocityx = [] all_enemy_agent_velocityy = [] all_enemy_agent_ammo = [] all_enemy_agent_ID = [] all_enemy_amount = 0 for agent_id, dict_value in enemy_agents_data.items(): all_enemy_agent_ID.append(agent_id) #all_enemy_agent_ammo.append(dict_value['ammo']) all_enemy_agent_velocityx.append(dict_value['velocityx']) all_enemy_agent_velocityy.append(dict_value['velocityy']) all_enemy_agent_blood.append(dict_value['blood']) all_enemy_agent_pos.append([dict_value['X'], dict_value['Y'], dict_value['Z']]) all_enemy_amount += 1 evaluation = np.zeros((all_enemy_amount, all_friend_amount)) for i in range(all_enemy_amount): for j in range(all_friend_amount): yaw = math.degrees(math.atan2((all_enemy_agent_pos[j][0] - all_friend_agent_pos[i][0]), (all_enemy_agent_pos[j][1] - all_friend_agent_pos[i][1]))) # UAV2UAV(self, identity, a_position,a_ammo,a_health, position,velocity,phi,psi,ammo,health) all_friend_agent_velocity = [all_friend_agent_velocityx[j], all_friend_agent_velocityy[j], 0] evaluation[i][j] = self.UAV2UAV("defensive", all_enemy_agent_pos[i], 0, all_enemy_agent_blood[i], all_friend_agent_pos[j], all_friend_agent_velocity, yaw, 0, 0, all_friend_agent_blood[j]) return evaluation #计算无人机对于夺控点位置的态势矩阵 #矩阵横轴代表夺控点,纵轴代表无人机 def uav_to_defend(self, self_data, ally_agents_data, enemy_agents_data, key_points): all_friend_agents_data = dict(self_data, **ally_agents_data) # 进攻方所有智能体数据 for agent_id, dict_value in all_friend_agents_data.items(): if 'blood' not in dict_value: temp1 = 
agent_id temp2 = dict_value drone_data = {} drone_data[temp1] = temp2 #无人机信息 drone_pos = [] drone_velocityx = [] drone_velocityy = [] drone_ID = [] drone_amount = 0 for agent_id, dict_value in drone_data.items(): drone_ID.append(agent_id) #编号接口,形式参照丘老师代码,正确性存疑 drone_velocityx.append(dict_value['velocityx']) #速度接口,形式参照丘老师代码,正确性存疑 drone_velocityy.append(dict_value['velocityy']) #速度接口,形式参照丘老师代码,正确性存疑 drone_pos.append([dict_value['X'], dict_value['Y'], dict_value['Z']]) #位置接口,参照丘老师代码编写 drone_amount += 1 #夺控点位置 key_point_amount = 0 key_point_pos = [] for key_point in key_points: key_point_pos.append(key_point) key_point_amount += 1 evaluation = np.zeros((key_point_amount, drone_amount)) for i in range(key_point_amount): for j in range(drone_amount): # Drone2Point(self, p_position,p_ts, position, velocity) # print(self.Drone2Point(key_point_pos[i], 0, drone_pos[j], drone_velocity[j])) drone_velocity = [drone_velocityx[j], drone_velocityy[j], 0] evaluation[i][j] = self.Drone2Point(key_point_pos[i], 0, drone_pos[j], drone_velocity) return evaluation ''' #计算无人车对于周围位置点的态势评估矩阵 # def attack_to_point(self_data, ally_agents_data, enemy_agents_data, key_points): #无人车 all_friend_agents_data = dict(self_data, **ally_agents_data) # 进攻方所有智能体数据 all_friend_agents_data.pop("231") # 剔除无人机数据,只考虑地面无人车平台 #进攻方无人车信息 all_friend_agent_pos = [] all_friend_agent_blood = [] all_friend_agent_velocity = [] all_friend_agent_ammo = [] all_friend_agent_ID = [] all_friend_amount = 0 for agent_id, dict_value in all_friend_agents_data.items(): all_friend_agent_ID.append(dict_value['ID']) #编号接口,形式参照丘老师代码,正确性存疑 all_friend_agent_ammo.append(dict_value['ammo']) #载荷接口,形式参照丘老师代码,正确性存疑 all_friend_agent_velocity.append(dict_value['velocity']) #速度接口,形式参照丘老师代码,正确性存疑 all_friend_agent_blood.append(dict_value['blood']) #血量接口,形式参照丘老师代码,正确性存疑 all_friend_agent_pos.append([dict_value['X'], dict_value['Y'], dict_value['Z']]) #位置接口,参照丘老师代码编写 all_friend_amount += 1 evaluation = np.zeros((all_friend_amount, all_enemy_amount)) 
for i in range(all_enemy_amount): for j in range(all_friend_amount): yaw = math.degrees(math.atan2((all_enemy_agent_pos[j][0] - all_friend_agent_pos[i][0]), (all_enemy_agent_pos[j][1] - all_friend_agent_pos[i][1]))) #UAV2UAV(self, identity, a_position,a_ammo,a_health, position,velocity,phi,psi,ammo,health) evaluation[i][j] = self.UAV2UAV("defensive", all_enemy_agent_pos[i], all_enemy_agent_ammo[i], all_enemy_agent_blood[i], all_friend_agent_pos[j], all_friend_agent_velocity[j], yaw, 0, all_friend_agent_ammo[j], all_friend_agent_blood[j]) return evaluation ''' #态势评估主函数 def evaluate(self, self_data, ally_agents_data, enemy_agents_data, key_points): d2a = self.defend_to_attack(self_data, ally_agents_data, enemy_agents_data, key_points) a2d = self.attack_to_defend(self_data, ally_agents_data, enemy_agents_data, key_points) u2d = self.uav_to_defend(self_data, ally_agents_data, enemy_agents_data, key_points) return d2a, a2d, u2d ================================================ FILE: PythonExample/hmp_minimal_modules/ALGORITHM/script_ai/uhmap_bb.py ================================================ import copy from math import sqrt import numpy as np from MISSION.uhmap.actset_lookup import encode_action_as_digits from config import GlobalConfig class DummyAlgConfig(): reserve = "" class DummyAlgorithmBase(): def __init__(self, n_agent, n_thread, space, mcv=None, team=None): self.n_agent = n_agent self.n_thread = n_thread self.ScenarioConfig = GlobalConfig.ScenarioConfig self.attack_order = {} def forward(self, inp, state, mask=None): raise NotImplementedError def to(self, device): return self def interact_with_env(self, State_Recall): assert State_Recall['Latest-Obs'] is not None, ('make sure obs is ok') ENV_PAUSE = State_Recall['ENV-PAUSE'] ENV_ACTIVE = ~ENV_PAUSE assert self.n_thread == len(ENV_ACTIVE), ('the number of thread is wrong?') n_active_thread = sum(ENV_ACTIVE) # assert len(State_Recall['Latest-Obs']) == n_active_thread, ('make sure we have the right batch of 
obs') actions = np.zeros(shape=(self.n_thread, self.n_agent, 8)) # set actions of in-active threads to NaN (will be done again in multi_team.py, this line is not necessary) actions[ENV_PAUSE] = np.nan # swap (self.n_thread, self.n_agent) -> (self.n_agent, self.n_thread) actions = np.swapaxes(actions, 0, 1) return actions, {} class DummyAlgorithmT2(DummyAlgorithmBase): def interact_with_env(self, State_Recall): assert State_Recall['Latest-Obs'] is not None, ('make sure obs is ok') ENV_PAUSE = State_Recall['ENV-PAUSE'] ENV_ACTIVE = ~ENV_PAUSE assert self.n_thread == len(ENV_ACTIVE), ('the number of thread is wrong?') n_active_thread = sum(ENV_ACTIVE) AirCarrierUID = 2 # assert len(State_Recall['Latest-Obs']) == n_active_thread, ('make sure we have the right batch of obs') actions = np.zeros(shape=(self.n_thread, self.n_agent, 8 )) for thread in range(self.n_thread): if ENV_PAUSE[thread]: # 如果,该线程停止,不做任何处理 continue AirCarrier = State_Recall['Latest-Team-Info'][thread]['dataArr'][AirCarrierUID] if AirCarrier['agentAlive']: assert 'RLA_UAV' in AirCarrier['type'] landmarks = State_Recall['Latest-Team-Info'][thread]['dataGlobal']['keyObjArr'] squredis = lambda a,b: sqrt( (a['agentLocation']['x']-b['location']['x'])**2 + (a['agentLocation']['y']-b['location']['y'])**2 + (a['agentLocation']['z']-b['location']['z'])**2 ) AirCarrirSquareDisToEachLandmark = [squredis(AirCarrier, landmark) for landmark in landmarks] nearLandmark = np.argmin(AirCarrirSquareDisToEachLandmark) pos_lm = np.array([ landmarks[nearLandmark]['location']['x'], landmarks[nearLandmark]['location']['y'], landmarks[nearLandmark]['location']['z'], ]) pos_ac_proj = np.array([ AirCarrier['agentLocation']['x'], AirCarrier['agentLocation']['y'], landmarks[nearLandmark]['location']['z'], ]) unit_2ac_prj = (pos_ac_proj - pos_lm) / np.linalg.norm(pos_ac_proj - pos_lm) p = unit_2ac_prj*400 + pos_lm actions[thread, :] = encode_action_as_digits('PatrolMoving', 'N/A', x=p[0], y=p[1], z=p[2], UID=None, T=None, 
T_index=None) else: actions[thread, :] = encode_action_as_digits('N/A', 'N/A', x=None, y=None, z=None, UID=None, T=None, T_index=None) # set actions of in-active threads to NaN (will be done again in multi_team.py, this line is not necessary) actions[ENV_PAUSE] = np.nan # swap (self.n_thread, self.n_agent) -> (self.n_agent, self.n_thread) actions = np.swapaxes(actions, 0, 1) return actions, {} class DummyAlgorithmT1(DummyAlgorithmBase): def interact_with_env(self, State_Recall): assert State_Recall['Latest-Obs'] is not None, ('make sure obs is ok') ENV_PAUSE = State_Recall['ENV-PAUSE'] ENV_ACTIVE = ~ENV_PAUSE assert self.n_thread == len(ENV_ACTIVE), ('the number of thread is wrong?') n_active_thread = sum(ENV_ACTIVE) AirCarrierUID = 2 # assert len(State_Recall['Latest-Obs']) == n_active_thread, ('make sure we have the right batch of obs') actions = np.zeros(shape=(self.n_thread, self.n_agent, 8 )) for thread in range(self.n_thread): landmarks = State_Recall['Latest-Team-Info'][thread]['dataGlobal']['keyObjArr'] px = landmarks[0]['location']['x'] py = landmarks[0]['location']['y'] for a in range(self.n_agent): if not State_Recall['Latest-Team-Info'][thread]['dataArr'][a]['agentAlive']: continue pz = State_Recall['Latest-Team-Info'][thread]['dataArr'][a]['agentLocation']['z'] actions[thread, a] = encode_action_as_digits('SpecificMoving', 'N/A', x=px, y=py, z=pz, UID=None, T=None, T_index=None) # set actions of in-active threads to NaN (will be done again in multi_team.py, this line is not necessary) actions[ENV_PAUSE] = np.nan # swap (self.n_thread, self.n_agent) -> (self.n_agent, self.n_thread) actions = np.swapaxes(actions, 0, 1) return actions, {} class DummyAlgorithmIdle(DummyAlgorithmBase): def interact_with_env(self, State_Recall): assert State_Recall['Latest-Obs'] is not None, ('make sure obs is ok') ENV_PAUSE = State_Recall['ENV-PAUSE'] ENV_ACTIVE = ~ENV_PAUSE assert self.n_thread == len(ENV_ACTIVE), ('the number of thread is wrong?') n_active_thread = 
sum(ENV_ACTIVE) AirCarrierUID = 2 # assert len(State_Recall['Latest-Obs']) == n_active_thread, ('make sure we have the right batch of obs') actions = np.zeros(shape=(self.n_thread, self.n_agent, 8 )) for thread in range(self.n_thread): if ENV_PAUSE[thread]: # 如果,该线程停止,不做任何处理 continue # AirCarrier = State_Recall['Latest-Team-Info'][thread]['dataArr'][AirCarrierUID] # if AirCarrier['agentAlive']: # assert 'RLA_UAV' in AirCarrier['type'] # landmarks = State_Recall['Latest-Team-Info'][thread]['dataGlobal']['keyObjArr'] # squredis = lambda a,b: sqrt( # (a['agentLocation']['x']-b['location']['x'])**2 + # (a['agentLocation']['y']-b['location']['y'])**2 + # (a['agentLocation']['z']-b['location']['z'])**2 ) # AirCarrirSquareDisToEachLandmark = [squredis(AirCarrier, landmark) for landmark in landmarks] # nearLandmark = np.argmin(AirCarrirSquareDisToEachLandmark) # px = landmarks[nearLandmark]['location']['x'] # py = landmarks[nearLandmark]['location']['y'] # pz = landmarks[nearLandmark]['location']['z'] # actions[thread, :] = encode_action_as_digits('PatrolMoving', 'N/A', x=px, y=py, z=pz, UID=None, T=None, T_index=None) # else: # actions[thread, :] = encode_action_as_digits('N/A', 'N/A', x=None, y=None, z=None, UID=None, T=None, T_index=None) if State_Recall['Env-Suffered-Reset'][thread]: actions[thread, :] = encode_action_as_digits('N/A', 'N/A', x=None, y=None, z=None, UID=None, T=None, T_index=None) else: actions[thread, :] = encode_action_as_digits('Idle', 'StaticAlert', x=None, y=None, z=None, UID=None, T=None, T_index=None) # set actions of in-active threads to NaN (will be done again in multi_team.py, this line is not necessary) actions[ENV_PAUSE] = np.nan # swap (self.n_thread, self.n_agent) -> (self.n_agent, self.n_thread) actions = np.swapaxes(actions, 0, 1) return actions, {} ================================================ FILE: PythonExample/hmp_minimal_modules/ALGORITHM/script_ai/uhmap_island.py ================================================ from cmath import 
from cmath import isinf, pi
# NOTE(review): `from turtle import done` looks like an accidental IDE auto-import;
# it pulls in tkinter and will fail on a headless server. Kept to avoid changing
# the file's import surface — confirm and remove upstream.
from turtle import done
import numpy as np
import math
from MISSION.uhmap.actionset_v3 import strActionToDigits, ActDigitLen
from config import GlobalConfig


class DummyAlgConfig():
    """Placeholder config namespace for the scripted (non-learning) policies."""
    reserve = ""


class DummyAlgorithmBase():
    """Base class for the scripted UHMAP island-demo policies (one instance per team)."""

    def __init__(self, n_agent, n_thread, space, mcv=None, team=None):
        self.n_agent = n_agent
        self.n_thread = n_thread
        self.team = team
        self.ScenarioConfig = GlobalConfig.ScenarioConfig
        self.attack_order = {}
        self.team_agent_uid = GlobalConfig.ScenarioConfig.AGENT_ID_EACH_TEAM[team]
        self.demo_type = GlobalConfig.ScenarioConfig.DemoType
        # BUGFIX: the original condition was
        #     if self.demo_type == 'AirShow' or 'AirAttack':
        # which is ALWAYS true because the bare string 'AirAttack' is truthy.
        # Use a membership test so `phase` is only initialised for the demos
        # that actually use it.
        if self.demo_type in ('AirShow', 'AirAttack'):
            self.phase = 1
        if self.demo_type == 'AirAttack':
            self.TargetPosition = []

    def forward(self, inp, state, mask=None):
        # Scripted policies never run a neural forward pass.
        raise NotImplementedError

    def to(self, device):
        # Nothing to move between devices; keep API parity with NN policies.
        return self

    def interact_with_env(self, State_Recall):
        """Return an all-zero action tensor (n_agent, n_thread, ActDigitLen); paused threads get NaN."""
        assert State_Recall['Latest-Obs'] is not None, ('make sure obs is ok')
        ENV_PAUSE = State_Recall['ENV-PAUSE']
        ENV_ACTIVE = ~ENV_PAUSE
        assert self.n_thread == len(ENV_ACTIVE), ('the number of thread is wrong?')
        n_active_thread = sum(ENV_ACTIVE)
        # assert len(State_Recall['Latest-Obs']) == n_active_thread, ('make sure we have the right batch of obs')
        actions = np.zeros(shape=(self.n_thread, self.n_agent, ActDigitLen))
        # set actions of in-active threads to NaN (will be done again in multi_team.py, this line is not necessary)
        actions[ENV_PAUSE] = np.nan
        # swap (self.n_thread, self.n_agent) -> (self.n_agent, self.n_thread)
        actions = np.swapaxes(actions, 0, 1)
        return actions, {}
class DummyAlgorithmIdle(DummyAlgorithmBase):
    '''
    Scripted demo pilot for the island map.
    Rough map bounds (Fujian): NE corner ~ (-17500, -19500), SW corner ~ (-22500, -5000).
    Note: yaw 0 deg points along +x, 90 deg along +y.
    plane_rotation = [yaw (azimuth), pitch, roll]
    '''
    def interact_with_env(self, State_Recall):
        """Drive every plane through a hand-scripted phase machine ('AirShow' or 'AirAttack')."""
        assert State_Recall['Latest-Obs'] is not None, ('make sure obs is ok')
        ENV_PAUSE = State_Recall['ENV-PAUSE']
        ENV_ACTIVE = ~ENV_PAUSE
        assert self.n_thread == len(ENV_ACTIVE), ('the number of thread is wrong?')
        n_active_thread = sum(ENV_ACTIVE)
        actions = np.zeros(shape=(self.n_thread, self.n_agent, ActDigitLen))
        for thread in range(self.n_thread):
            if ENV_PAUSE[thread]:
                # paused thread: leave its zero action row untouched
                continue
            # demo-only scripted control below
            act_dic = ActionDictionary
            if self.demo_type == "AirShow":
                '''
                Flight-show choreography (initial height 1000 m, zero pitch/roll):
                1. climb to a common 5000 m, all Euler angles to 0
                2. yaw to -90 then back to 0, climbing to 10000 m meanwhile
                3. yaw to 180
                4. yaw to -90 then to 180, descending to 5000 m meanwhile
                5. yaw to 0
                6. repeat steps 2~5
                '''
                for aid in range(self.n_agent):
                    cruise_height = 15000
                    cruise_speed = 600
                    plane_location = State_Recall['Latest-Team-Info'][thread]['dataArr'][aid]['agentLocationArr']
                    plane_rotation = State_Recall['Latest-Team-Info'][thread]['dataArr'][aid]['agentRotationArr']
                    # NOTE(review): self.phase is shared by all agents/threads, so a
                    # single step can advance it several times — confirm intent.
                    if self.phase == 1:
                        if np.abs(0 - plane_rotation[1]) < 1:
                            actions[thread, aid] = act_dic.select_act('PlaneAgent', 2)
                            print("Change Height!")
                        else:
                            actions[thread, aid] = act_dic.select_act('PlaneAgent', 6)
                            print("Change Direction!")
                        if np.abs(5000 - plane_location[2]) < 0.1 and np.abs(0 - plane_rotation[0]) < 1:
                            self.phase += 1
                            print("Stage1 Done")
                    elif self.phase == 2:
                        print("Stage2!")
                        if np.abs(0 - plane_rotation[1]) < 1:
                            actions[thread, aid] = act_dic.select_act('PlaneAgent', 3)
                        else:
                            actions[thread, aid] = act_dic.select_act('PlaneAgent', 12)
                        if np.abs(-90 - plane_rotation[0]) < 1:
                            self.phase += 0.5
                    elif self.phase == 2.5:
                        actions[thread, aid] = act_dic.select_act('PlaneAgent', 6)
                        if np.abs(10000 - plane_location[2]) < 0.1 and np.abs(0 - plane_rotation[0]) < 1:
                            self.phase += 0.5
                            print("Stage2 Done")
                    elif self.phase == 3:
                        actions[thread, aid] = act_dic.select_act('PlaneAgent', 10)
                        if np.abs(180 - plane_rotation[0]) < 1:
                            self.phase += 1
                            print("Stage3 Done")
                    elif self.phase == 4:
                        print("Stage4!")
                        if np.abs(0 - plane_rotation[1]) < 1:
                            actions[thread, aid] = act_dic.select_act('PlaneAgent', 2)
                        else:
                            actions[thread, aid] = act_dic.select_act('PlaneAgent', 12)
                        if np.abs(-90 - plane_rotation[0]) < 1:
                            self.phase += 0.5
                    elif self.phase == 4.5:
                        print(self.phase)
                        actions[thread, aid] = act_dic.select_act('PlaneAgent', 10)
                        if np.abs(5000 - plane_location[2]) < 10 and np.abs(180 - plane_rotation[0]) < 1:
                            self.phase += 0.5
                            print("Stage4 Done")
                    elif self.phase == 5:
                        actions[thread, aid] = act_dic.select_act('PlaneAgent', 6)
                        if np.abs(0 - plane_rotation[0]) < 1:
                            self.phase = 2
                            print("Stage5 Done")
            elif self.demo_type == "AirAttack":
                '''
                Full attack demo:
                1. track the target bearing while climbing to cruise height (15000 m)
                2. accelerate to cruise speed (600 m/s)
                3. cruise towards the target
                4. near the target, descend to attack height (5000 m); ready radius 50000 m
                5. keep the desired track aligned with the target bearing
                6/7. (engagement & regroup steps are still commented out upstream)
                '''
                # Grab target coordinates (agents 100..104).
                # NOTE(review): this append runs on EVERY step while phase <= 1 and for
                # every thread, so TargetPosition can grow past 5 entries; only indices
                # 0..4 (the first reading) are ever used below — confirm intent.
                if self.phase <= 1:
                    for target_id in range(100, 105):
                        self.TargetPosition.append(State_Recall['Latest-Team-Info'][thread]['dataArr'][target_id]['agentLocationArr'])
                cruise_height = 15000
                cruise_speed = 600
                attack_height = 5000
                ready_radius = 50000
                # run the scripted pilot for every attacker (the last 5 uids are targets)
                for aid in range(self.n_agent - 5):
                    if State_Recall['Latest-Team-Info'][thread]['dataArr'][aid]['agentAlive']:
                        plane_location = State_Recall['Latest-Team-Info'][thread]['dataArr'][aid]['agentLocationArr']
                        plane_rotation = State_Recall['Latest-Team-Info'][thread]['dataArr'][aid]['agentRotationArr']
                        # assign this plane to one of the 5 targets
                        index = math.floor(aid / (self.n_agent - 5) * 5)
                        target_location = np.array((self.TargetPosition[index]))
                        delta_location = target_location - plane_location
                        target_pitch = self.DeltaLocation2Angle(delta_location[0], delta_location[1])
                        # 1. climb to cruise altitude while steering towards the target
                        if self.phase == 1:
                            if np.abs(cruise_height - plane_location[2]) < 5:
                                self.phase += 1
                                print("Stage1 Done")
                            elif np.abs(0 - plane_rotation[1]) < 1:
                                action_num = self.TargetHeight2Action(cruise_height)
                                actions[thread, aid] = act_dic.select_act('PlaneAgent', action_num)
                                print("Change Height to 15000m!")
                            else:
                                # keep tracking the target bearing
                                delta_location = target_location - plane_location
                                target_pitch = self.DeltaLocation2Angle(delta_location[0], delta_location[1])
                                action_num = self.TargetAngle2Action(target_pitch)
                                actions[thread, aid] = act_dic.select_act('PlaneAgent', action_num)
                        # 2. accelerate to cruise speed (plain `if`: phases may cascade in one step)
                        if self.phase == 2:
                            speed = float(State_Recall['Latest-Team-Info'][thread]['dataArr'][aid]['rSVD1'])
                            if np.abs(cruise_speed - speed) < 1:
                                self.phase += 1
                                print("Stage2 Done")
                            else:
                                actions[thread, aid] = act_dic.select_act('PlaneAgent', 14)
                        # 3. cruise towards the target until inside ready_radius
                        if self.phase == 3:
                            delta_location = target_location - plane_location
                            if np.sqrt(np.sum(np.square((delta_location[0], delta_location[1])))) <= ready_radius:
                                self.phase += 1
                                print("Stage3 Done")
                            else:
                                # keep tracking the target bearing
                                delta_location = target_location - plane_location
                                target_pitch = self.DeltaLocation2Angle(delta_location[0], delta_location[1])
                                action_num = self.TargetAngle2Action(target_pitch)
                                actions[thread, aid] = act_dic.select_act('PlaneAgent', action_num)
                        # 4. descend to attack altitude
                        if self.phase == 4:
                            if np.abs(attack_height - plane_location[2]) < 5:
                                self.phase += 1
                                print("Stage4 Done")
                            elif np.abs(0 - plane_rotation[1]) < 1:
                                action_num = self.TargetHeight2Action(attack_height)
                                actions[thread, aid] = act_dic.select_act('PlaneAgent', action_num)
                                print("Change Height to 5000m!")
                            else:
                                # keep tracking the target bearing
                                delta_location = target_location - plane_location
                                target_pitch = self.DeltaLocation2Angle(delta_location[0], delta_location[1])
                                action_num = self.TargetAngle2Action(target_pitch)
                                actions[thread, aid] = act_dic.select_act('PlaneAgent', action_num)
                        # 5. keep the nose on the target
                        if self.phase == 5:
                            delta_location = target_location - plane_location
                            target_pitch = self.DeltaLocation2Angle(delta_location[0], delta_location[1])
                            action_num = self.TargetAngle2Action(target_pitch)
                            actions[thread, aid] = act_dic.select_act('PlaneAgent', action_num)
                        # (missile launch & regroup logic remains commented out upstream)
        # set actions of in-active threads to NaN (will be done again in multi_team.py, this line is not necessary)
        actions[ENV_PAUSE] = np.nan
        # swap (self.n_thread, self.n_agent) -> (self.n_agent, self.n_thread)
        actions = np.swapaxes(actions, 0, 1)
        return actions, {}
def DeltaLocation2Angle(self, delta_x, delta_y):
    """Convert a 2-D displacement (delta_x, delta_y) into a bearing in degrees.

    0 deg points along +x, 90 deg along +y; the result lies in (-180, 180].

    BUGFIX: the original quadrant-by-quadrant branches left `theta` unbound
    (UnboundLocalError) when delta_x == 0 and delta_y == 0, after computing
    0/0 inside arctan. np.arctan2 reproduces every original branch exactly
    and defines atan2(0, 0) == 0.
    """
    return float(np.degrees(np.arctan2(delta_y, delta_x)))

def TargetAngle2Action(self, target_yaw):
    """Map a desired yaw (degrees, in [-180, 180]) to the nearest discrete
    ChangeDirection action index of ActionDictionary.

    The 8 candidate headings are 45 deg apart; an entry matches when the raw
    angular difference is within 22.5 deg, or at least 337.5 deg (i.e. within
    22.5 deg after wrapping around +-180).
    """
    action_yaw_set = np.array([0, 45, 90, 135, 180, -135, -90, -45])
    delta_action_yaw_set = np.abs(action_yaw_set - target_yaw)
    output_num = None
    for i, element in enumerate(delta_action_yaw_set):
        # 180+135+22.5 == 337.5: the wrap-around tolerance band
        if element <= 22.5 or element >= (180 + 135 + 22.5):
            output_num = i
            break
    assert output_num is not None, '离散方位角动作设置或者程序逻辑有问题!'
    # +6: ChangeDirection entries start at index 6 of ActionDictionary.dictionary_args
    return output_num + 6
def TargetHeight2Action(self, target_height):
    """Map a desired altitude (metres) to the nearest ChangeHeight action index.

    IMPROVED: pick the globally nearest altitude level via argmin instead of the
    original first-entry-within-2500m scan; because the level spacing is uneven
    (1000/5000/10000/15000/20000), the old scan could return a non-nearest level
    (e.g. 3200 m matched 1000 m although 5000 m is closer). Still asserts the
    nearest level is within half the largest level gap.
    """
    action_height_set = np.array([1000, 5000, 10000, 15000, 20000])
    delta_action_height_set = np.abs(action_height_set - target_height)
    height_threshold = np.abs(action_height_set[-1] - action_height_set[-2]) / 2
    nearest = int(np.argmin(delta_action_height_set))
    assert delta_action_height_set[nearest] <= height_threshold, '离散高度动作设置或者程序逻辑有问题!'
    # +1: ChangeHeight entries start at index 1 of ActionDictionary.dictionary_args
    return nearest + 1


class ActionDictionary():
    '''
    Discrete action table for PlaneAgent.
    Height Space(5):    20000m, 15000m, 10000m, 5000m, 1000m
    Direction Space(8): 45, 90, 135, 180, -135, -90, -45, 0 (degrees)
    Speed Space(2):     Positive, Negative
    '''
    # Possible finer-grained alternatives (not enabled):
    # Direction Space(16): 22.5 deg steps; Speed Space(10): 150..600 m/s
    dictionary_args = [
        'N/A;N/A',                 # 0
        'ChangeHeight;1000',       # 1
        'ChangeHeight;5000',       # 2
        'ChangeHeight;10000',      # 3
        'ChangeHeight;15000',      # 4
        'ChangeHeight;20000',      # 5
        'ChangeDirection;0',       # 6
        'ChangeDirection;45',      # 7
        'ChangeDirection;90',      # 8
        'ChangeDirection;135',     # 9
        'ChangeDirection;180',     # 10
        'ChangeDirection;-135',    # 11
        'ChangeDirection;-90',     # 12
        'ChangeDirection;-45',     # 13
        'ChangeSpeed;Positive',    # 14
        'ChangeSpeed;Negative',    # 15
    ]

    @staticmethod
    def select_act(type, a):
        """Encode table entry `a` for the given agent type into action digits."""
        if type == 'PlaneAgent':
            args = ActionDictionary.dictionary_args[a]
            return strActionToDigits(f'ActionSet3::{args}')

    @staticmethod
    def get_avail_act():
        # availability mask not implemented for the scripted demo
        pass
class DummyAlgorithmBase():
    """Scripted baseline policy for uhmap_ls: always emits all-zero 8-digit actions."""

    def __init__(self, n_agent, n_thread, space, mcv=None, team=None):
        self.n_agent = n_agent
        self.n_thread = n_thread
        self.team = team
        self.ScenarioConfig = GlobalConfig.ScenarioConfig
        self.attack_order = {}
        self.team_agent_uid = GlobalConfig.ScenarioConfig.AGENT_ID_EACH_TEAM[team]

    def forward(self, inp, state, mask=None):
        # Scripted policies never run a neural forward pass.
        raise NotImplementedError

    def to(self, device):
        # Nothing to move between devices; keep the interface of NN policies.
        return self

    def interact_with_env(self, State_Recall):
        """Emit zero actions for every agent; paused threads are marked NaN."""
        assert State_Recall['Latest-Obs'] is not None, ('make sure obs is ok')
        ENV_PAUSE = State_Recall['ENV-PAUSE']
        ENV_ACTIVE = ~ENV_PAUSE
        assert self.n_thread == len(ENV_ACTIVE), ('the number of thread is wrong?')
        n_active_thread = sum(ENV_ACTIVE)  # kept for parity with siblings; unused below
        joint_action = np.zeros(shape=(self.n_thread, self.n_agent, 8))
        # set actions of in-active threads to NaN (will be done again in multi_team.py, this line is not necessary)
        joint_action[ENV_PAUSE] = np.nan
        # swap (self.n_thread, self.n_agent) -> (self.n_agent, self.n_thread)
        joint_action = np.swapaxes(joint_action, 0, 1)
        return joint_action, {}
class DummyAlgorithmSeqFire(DummyAlgorithmBase):
    """Focus-fire script: the whole team attacks enemies following a per-episode random order."""

    def interact_with_env(self, State_Recall):
        assert State_Recall['Latest-Obs'] is not None, ('make sure obs is ok')
        ENV_PAUSE = State_Recall['ENV-PAUSE']
        ENV_ACTIVE = ~ENV_PAUSE
        assert self.n_thread == len(ENV_ACTIVE), ('the number of thread is wrong?')
        n_active_thread = sum(ENV_ACTIVE)
        actions = np.zeros(shape=(self.n_thread, self.n_agent, 8))
        for thread in range(self.n_thread):
            # paused thread: nothing to do
            if ENV_PAUSE[thread]:
                continue
            if State_Recall['Env-Suffered-Reset'][thread]:
                # thread just reset: draw a fresh random kill order over the enemy team
                opp_uid_range = GlobalConfig.ScenarioConfig.AGENT_ID_EACH_TEAM[1 - self.team]
                opp_uid_range = list(copy.deepcopy(opp_uid_range))
                np.random.shuffle(opp_uid_range)
                self.attack_order[thread] = opp_uid_range
            # current episode step
            step_cnt = State_Recall['Current-Obs-Step'][thread]
            # latest team info
            info = State_Recall['Latest-Team-Info']
            raw_info = State_Recall['Latest-Team-Info'][thread]['dataArr']

            # is the agent with this uid still alive?
            def uid_alive(uid):
                return raw_info[uid]['agentAlive']

            for uid in self.attack_order[thread]:
                if uid_alive(uid):
                    # enemy alive -> whole team focuses fire on it.
                    # NOTE(review): there is no `break`, so later iterations keep
                    # overwriting and the team ends up targeting the LAST alive uid
                    # in the shuffled order, not the first — confirm intent.
                    actions[thread, :] = encode_action_as_digits('SpecificAttacking', 'N/A', x=None, y=None, z=None, UID=uid, T=None, T_index=None)
        # set actions of in-active threads to NaN (will be done again in multi_team.py, this line is not necessary)
        actions[ENV_PAUSE] = np.nan
        # swap (self.n_thread, self.n_agent) -> (self.n_agent, self.n_thread)
        actions = np.swapaxes(actions, 0, 1)
        return actions, {}


class DummyAlgorithmIdle(DummyAlgorithmBase):
    """On episode reset issue Idle/AggressivePersue once, then emit 'N/A' every later step."""

    def interact_with_env(self, State_Recall):
        assert State_Recall['Latest-Obs'] is not None, ('make sure obs is ok')
        ENV_PAUSE = State_Recall['ENV-PAUSE']
        ENV_ACTIVE = ~ENV_PAUSE
        assert self.n_thread == len(ENV_ACTIVE), ('the number of thread is wrong?')
        n_active_thread = sum(ENV_ACTIVE)
        AirCarrierUID = 2  # kept from the original; unused below
        actions = np.zeros(shape=(self.n_thread, self.n_agent, 8))
        for thread in range(self.n_thread):
            # paused thread: nothing to do
            if ENV_PAUSE[thread]:
                continue
            if State_Recall['Env-Suffered-Reset'][thread]:
                actions[thread, :] = encode_action_as_digits('Idle', 'AggressivePersue', x=None, y=None, z=None, UID=None, T=None, T_index=None)
            else:
                actions[thread, :] = encode_action_as_digits('N/A', 'N/A', x=None, y=None, z=None, UID=None, T=None, T_index=None)
        # set actions of in-active threads to NaN (will be done again in multi_team.py, this line is not necessary)
        actions[ENV_PAUSE] = np.nan
        # swap (self.n_thread, self.n_agent) -> (self.n_agent, self.n_thread)
        actions = np.swapaxes(actions, 0, 1)
        return actions, {}
class DummyAlgorithmMarch(DummyAlgorithmBase):
    """March script: on reset pick +Y or -Y from the first agent's position, then patrol that way."""

    def interact_with_env(self, State_Recall):
        assert State_Recall['Latest-Obs'] is not None, ('make sure obs is ok')
        ENV_PAUSE = State_Recall['ENV-PAUSE']
        ENV_ACTIVE = ~ENV_PAUSE
        assert self.n_thread == len(ENV_ACTIVE), ('the number of thread is wrong?')
        n_active_thread = sum(ENV_ACTIVE)
        AirCarrierUID = 2  # kept from the original; unused below
        actions = np.zeros(shape=(self.n_thread, self.n_agent, 8))
        if not hasattr(self, 'march_direction'):
            # lazily initialise the (shared) march direction
            self.march_direction = '+Y'
        for thread in range(self.n_thread):
            # paused thread: nothing to do
            if ENV_PAUSE[thread]:
                continue
            if State_Recall['Env-Suffered-Reset'][thread]:
                # fresh episode: march towards the far side of the map along Y
                a_agent_uid = self.team_agent_uid[0]
                y_now = State_Recall['Latest-Team-Info'][thread]['dataArr'][a_agent_uid]['agentLocation']['y']
                self.march_direction = '+Y' if y_now < 0 else '-Y'
                actions[thread, :] = encode_action_as_digits('Idle', 'AggressivePersue', x=None, y=None, z=None, UID=None, T=None, T_index=None)
            elif self.march_direction == '+Y':
                actions[thread, :] = encode_action_as_digits('PatrolMoving', 'Dir+Y', x=None, y=None, z=None, UID=None, T=None, T_index=None)
            else:
                actions[thread, :] = encode_action_as_digits('PatrolMoving', 'Dir-Y', x=None, y=None, z=None, UID=None, T=None, T_index=None)
        # set actions of in-active threads to NaN (will be done again in multi_team.py, this line is not necessary)
        actions[ENV_PAUSE] = np.nan
        # swap (self.n_thread, self.n_agent) -> (self.n_agent, self.n_thread)
        actions = np.swapaxes(actions, 0, 1)
        return actions, {}


def assign_opponent(opp_pos_arr, opp_id_arr, leader_pos_arr, leader_id_arr):
    """One-to-one assign each leader to the closest opponent (Hungarian algorithm).

    Returns {leader_id: opp_id}. Infinite distances are clipped to 1e10 so the
    solver still terminates.
    """
    result = {}
    dis_mat = distance_mat_between(leader_pos_arr, opp_pos_arr)
    dis_mat[dis_mat == np.inf] = 1e10
    indices, assignments = linear_sum_assignment(dis_mat)
    for i, j, a in zip(range(len(indices)), indices, assignments):
        # with n_leader <= n_opp the row indices come back as 0..n-1 in order
        assert i == j
        result[leader_id_arr[i]] = opp_id_arr[a]
    return result
class DummyAlgorithmLinedAttack(DummyAlgorithmBase):
    """Air leaders ('RLA_UAV_Support') are each assigned an enemy; ground units are
    grouped per leader and focus-fire that enemy while the leader patrols above
    its group. Without any leader, everyone attacks the enemy nearest the team
    centroid."""

    def interact_with_env(self, State_Recall):
        assert State_Recall['Latest-Obs'] is not None, ('make sure obs is ok')
        ENV_PAUSE = State_Recall['ENV-PAUSE']
        ENV_ACTIVE = ~ENV_PAUSE
        assert self.n_thread == len(ENV_ACTIVE), ('the number of thread is wrong?')
        n_active_thread = sum(ENV_ACTIVE)
        actions = np.zeros(shape=(self.n_thread, self.n_agent, 8))
        for thread in range(self.n_thread):
            if ENV_PAUSE[thread]:
                # paused thread: nothing to do
                continue
            actions[thread] = self.decide_each_thread(
                thread=thread,
                step_cnt=State_Recall['Current-Obs-Step'][thread],
                raw_info=State_Recall['Latest-Team-Info'][thread]['dataArr'],
                Env_Suffered_Reset=State_Recall['Env-Suffered-Reset'][thread]
            )
        # set actions of in-active threads to NaN (will be done again in multi_team.py, this line is not necessary)
        actions[ENV_PAUSE] = np.nan
        # swap (self.n_thread, self.n_agent) -> (self.n_agent, self.n_thread)
        actions = np.swapaxes(actions, 0, 1)
        return actions, {}

    # is the agent with this uid alive? (kept with its original unusual signature)
    def uid_alive(raw_info, uid):
        return raw_info[uid]['agentAlive']

    def decide_each_thread(self, **kwargs):
        """Compute the (n_agent, 8) action block for one thread.

        kwargs: thread, step_cnt, raw_info (per-agent dicts), Env_Suffered_Reset.
        """
        act_each_agent = np.zeros(shape=(self.n_agent, 8))
        self_uid_range = GlobalConfig.ScenarioConfig.AGENT_ID_EACH_TEAM[self.team]
        Env_Suffered_Reset = kwargs['Env_Suffered_Reset']
        thread = kwargs['thread']
        # current episode step
        step_cnt = kwargs['step_cnt']
        raw_info = kwargs['raw_info']
        opp_uid_range = GlobalConfig.ScenarioConfig.AGENT_ID_EACH_TEAM[1 - self.team]
        # 2-D (x, y) positions of every agent on the map
        pos_arr_2d = np.array([_info['agentLocationArr'][:2] for _info in raw_info])
        opp_pos_arr = pos_arr_2d[opp_uid_range]
        # alive air leaders vs alive ground units of this team
        self_air_uid_range = [info['uId'] for info in raw_info if info['agentAlive'] and info['agentTeam'] == self.team and info['type'] == 'RLA_UAV_Support']
        N_leader = len(self_air_uid_range)
        self_ground_uid_range = [info['uId'] for info in raw_info if info['agentAlive'] and info['agentTeam'] == self.team and info['type'] != 'RLA_UAV_Support']
        if N_leader > 0:
            self_air_pos_arr = pos_arr_2d[self_air_uid_range]
            assignments = assign_opponent(
                opp_pos_arr=opp_pos_arr, opp_id_arr=opp_uid_range,
                leader_pos_arr=self_air_pos_arr, leader_id_arr=self_air_uid_range)
            for group in range(N_leader):
                attack_uid = assignments[self_air_uid_range[group]]
                # ground units are partitioned into groups by uid modulo N_leader
                group_member_uids = [uid for uid in self_ground_uid_range if uid % N_leader == group]
                for group_member_uid in group_member_uids:
                    agent_team_index = raw_info[group_member_uid]['indexInTeam']
                    act_each_agent[agent_team_index] = encode_action_as_digits('SpecificAttacking', 'N/A', x=None, y=None, z=None, UID=attack_uid, T=None, T_index=None)
                leader_uid = self_air_uid_range[group]
                agent_team_index = raw_info[leader_uid]['indexInTeam']
                z_leader = raw_info[leader_uid]['agentLocationArr'][2]
                if len(group_member_uids) > 0:
                    # BUGFIX: the original read pos_arr_2d[group_member_uid] — the
                    # position of the LAST group member visited above, not the group
                    # centre the variable name promises. Average over the group.
                    team_center_pos = pos_arr_2d[group_member_uids].mean(axis=0)
                    act_each_agent[agent_team_index] = encode_action_as_digits('PatrolMoving', 'N/A', x=team_center_pos[0], y=team_center_pos[1], z=z_leader, UID=None, T=None, T_index=None)
                else:
                    # no followers left: the leader joins the attack itself
                    act_each_agent[agent_team_index] = encode_action_as_digits('SpecificAttacking', 'N/A', x=None, y=None, z=None, UID=attack_uid, T=None, T_index=None)
            return act_each_agent
        else:
            # no leaders alive: attack the enemy nearest the ground-unit centroid
            center_pos_kd = pos_arr_2d[self_ground_uid_range].mean(0, keepdims=True)
            dis = distance_mat_between(center_pos_kd, opp_pos_arr)
            target_index = np.argmin(dis.squeeze())
            attack_uid = opp_uid_range[target_index]
            group_member_uids = self_ground_uid_range
            for group_member_uid in group_member_uids:
                agent_team_index = raw_info[group_member_uid]['indexInTeam']
                act_each_agent[agent_team_index] = encode_action_as_digits('SpecificAttacking', 'N/A', x=None, y=None, z=None, UID=attack_uid, T=None, T_index=None)
            return act_each_agent
def vector_shift_towards(pos, toward_pos, offset):
    """Return `pos` moved `offset` units along the direction towards `toward_pos`.

    The displacement is normalised (with a 1e-10 guard against zero length), so
    the step size is exactly `offset` regardless of how far apart the points are;
    `offset` larger than the separation overshoots the target.
    """
    direction = toward_pos - pos
    unit = direction / (np.linalg.norm(direction) + 1e-10)
    return pos + offset * unit
import copy, atexit
import numpy as np
from UTIL.tensor_ops import distance_mat_between
from scipy.optimize import linear_sum_assignment
from MISSION.uhmap.actset_lookup import encode_action_as_digits
from config import GlobalConfig


class DummyAlgConfig():
    """Placeholder config namespace for the scripted policies."""
    reserve = ""


class DummyAlgorithmBase():
    """Scripted baseline (multiprocess variant file): always emits all-zero 8-digit actions."""

    def __init__(self, n_agent, n_thread, space, mcv=None, team=None):
        self.n_agent = n_agent
        self.n_thread = n_thread
        self.team = team
        self.ScenarioConfig = GlobalConfig.ScenarioConfig
        self.attack_order = {}
        self.team_agent_uid = GlobalConfig.ScenarioConfig.AGENT_ID_EACH_TEAM[team]

    def forward(self, inp, state, mask=None):
        # Scripted policies never run a neural forward pass.
        raise NotImplementedError

    def to(self, device):
        # Nothing to move between devices; keep the interface of NN policies.
        return self

    def interact_with_env(self, State_Recall):
        """Emit zero actions for every agent; paused threads are marked NaN."""
        assert State_Recall['Latest-Obs'] is not None, ('make sure obs is ok')
        ENV_PAUSE = State_Recall['ENV-PAUSE']
        ENV_ACTIVE = ~ENV_PAUSE
        assert self.n_thread == len(ENV_ACTIVE), ('the number of thread is wrong?')
        n_active_thread = sum(ENV_ACTIVE)  # kept for parity with siblings; unused below
        joint_action = np.zeros(shape=(self.n_thread, self.n_agent, 8))
        # set actions of in-active threads to NaN (will be done again in multi_team.py, this line is not necessary)
        joint_action[ENV_PAUSE] = np.nan
        # swap (self.n_thread, self.n_agent) -> (self.n_agent, self.n_thread)
        joint_action = np.swapaxes(joint_action, 0, 1)
        return joint_action, {}
class DummyAlgorithmSeqFire(DummyAlgorithmBase):
    """Focus-fire script: the whole team attacks enemies following a per-episode random order."""

    def interact_with_env(self, State_Recall):
        assert State_Recall['Latest-Obs'] is not None, ('make sure obs is ok')
        ENV_PAUSE = State_Recall['ENV-PAUSE']
        ENV_ACTIVE = ~ENV_PAUSE
        assert self.n_thread == len(ENV_ACTIVE), ('the number of thread is wrong?')
        n_active_thread = sum(ENV_ACTIVE)
        actions = np.zeros(shape=(self.n_thread, self.n_agent, 8))
        for thread in range(self.n_thread):
            # paused thread: nothing to do
            if ENV_PAUSE[thread]:
                continue
            if State_Recall['Env-Suffered-Reset'][thread]:
                # thread just reset: draw a fresh random kill order over the enemy team
                opp_uid_range = GlobalConfig.ScenarioConfig.AGENT_ID_EACH_TEAM[1 - self.team]
                opp_uid_range = list(copy.deepcopy(opp_uid_range))
                np.random.shuffle(opp_uid_range)
                self.attack_order[thread] = opp_uid_range
            # current episode step
            step_cnt = State_Recall['Current-Obs-Step'][thread]
            # latest team info
            info = State_Recall['Latest-Team-Info']
            raw_info = State_Recall['Latest-Team-Info'][thread]['dataArr']

            # is the agent with this uid still alive?
            def uid_alive(uid):
                return raw_info[uid]['agentAlive']

            for uid in self.attack_order[thread]:
                if uid_alive(uid):
                    # enemy alive -> whole team focuses fire on it.
                    # NOTE(review): there is no `break`, so the team ends up targeting
                    # the LAST alive uid of the shuffled order — confirm intent.
                    actions[thread, :] = encode_action_as_digits('SpecificAttacking', 'N/A', x=None, y=None, z=None, UID=uid, T=None, T_index=None)
        # set actions of in-active threads to NaN (will be done again in multi_team.py, this line is not necessary)
        actions[ENV_PAUSE] = np.nan
        # swap (self.n_thread, self.n_agent) -> (self.n_agent, self.n_thread)
        actions = np.swapaxes(actions, 0, 1)
        return actions, {}


class DummyAlgorithmIdle(DummyAlgorithmBase):
    """On episode reset issue Idle/AggressivePersue once, then emit 'N/A' every later step."""

    def interact_with_env(self, State_Recall):
        assert State_Recall['Latest-Obs'] is not None, ('make sure obs is ok')
        ENV_PAUSE = State_Recall['ENV-PAUSE']
        ENV_ACTIVE = ~ENV_PAUSE
        assert self.n_thread == len(ENV_ACTIVE), ('the number of thread is wrong?')
        n_active_thread = sum(ENV_ACTIVE)
        AirCarrierUID = 2  # kept from the original; unused below
        actions = np.zeros(shape=(self.n_thread, self.n_agent, 8))
        for thread in range(self.n_thread):
            # paused thread: nothing to do
            if ENV_PAUSE[thread]:
                continue
            if State_Recall['Env-Suffered-Reset'][thread]:
                actions[thread, :] = encode_action_as_digits('Idle', 'AggressivePersue', x=None, y=None, z=None, UID=None, T=None, T_index=None)
            else:
                actions[thread, :] = encode_action_as_digits('N/A', 'N/A', x=None, y=None, z=None, UID=None, T=None, T_index=None)
        # set actions of in-active threads to NaN (will be done again in multi_team.py, this line is not necessary)
        actions[ENV_PAUSE] = np.nan
        # swap (self.n_thread, self.n_agent) -> (self.n_agent, self.n_thread)
        actions = np.swapaxes(actions, 0, 1)
        return actions, {}
class DummyAlgorithmMarch(DummyAlgorithmBase):
    """March script: on reset pick +Y or -Y from the first agent's position, then patrol that way."""

    def interact_with_env(self, State_Recall):
        assert State_Recall['Latest-Obs'] is not None, ('make sure obs is ok')
        ENV_PAUSE = State_Recall['ENV-PAUSE']
        ENV_ACTIVE = ~ENV_PAUSE
        assert self.n_thread == len(ENV_ACTIVE), ('the number of thread is wrong?')
        n_active_thread = sum(ENV_ACTIVE)
        AirCarrierUID = 2  # kept from the original; unused below
        actions = np.zeros(shape=(self.n_thread, self.n_agent, 8))
        if not hasattr(self, 'march_direction'):
            # lazily initialise the (shared) march direction
            self.march_direction = '+Y'
        for thread in range(self.n_thread):
            # paused thread: nothing to do
            if ENV_PAUSE[thread]:
                continue
            if State_Recall['Env-Suffered-Reset'][thread]:
                # fresh episode: march towards the far side of the map along Y
                a_agent_uid = self.team_agent_uid[0]
                y_now = State_Recall['Latest-Team-Info'][thread]['dataArr'][a_agent_uid]['agentLocation']['y']
                self.march_direction = '+Y' if y_now < 0 else '-Y'
                actions[thread, :] = encode_action_as_digits('Idle', 'AggressivePersue', x=None, y=None, z=None, UID=None, T=None, T_index=None)
            elif self.march_direction == '+Y':
                actions[thread, :] = encode_action_as_digits('PatrolMoving', 'Dir+Y', x=None, y=None, z=None, UID=None, T=None, T_index=None)
            else:
                actions[thread, :] = encode_action_as_digits('PatrolMoving', 'Dir-Y', x=None, y=None, z=None, UID=None, T=None, T_index=None)
        # set actions of in-active threads to NaN (will be done again in multi_team.py, this line is not necessary)
        actions[ENV_PAUSE] = np.nan
        # swap (self.n_thread, self.n_agent) -> (self.n_agent, self.n_thread)
        actions = np.swapaxes(actions, 0, 1)
        return actions, {}
def assign_opponent(opp_pos_arr, opp_id_arr, leader_pos_arr, leader_id_arr):
    """One-to-one assign each leader to the closest opponent (Hungarian algorithm).

    Returns {leader_id: opp_id}. Infinite distances are clipped to 1e10 so the
    solver still terminates.
    """
    result = {}
    dis_mat = distance_mat_between(leader_pos_arr, opp_pos_arr)
    # linear_sum_assignment cannot handle inf entries; clip to a huge finite cost
    dis_mat[dis_mat == np.inf] = 1e10
    indices, assignments = linear_sum_assignment(dis_mat)
    for i, j, a in zip(range(len(indices)), indices, assignments):
        # with n_leader <= n_opp the row indices come back as 0..n-1 in order
        assert i == j
        result[leader_id_arr[i]] = opp_id_arr[a]
    return result


class ThreadDecisionMaker():
    """Per-thread worker executed inside the SmartPool by DummyAlgorithmLinedAttack."""

    def apply_context(self, kwargs):
        # Clone the parent algorithm's attributes (n_agent, team, ...) into this worker.
        for k in kwargs:
            setattr(self, k, kwargs[k])

    def decide_each_thread(self, kwargs):
        """Compute the (n_agent, 8) action block for one thread.

        Note: takes a plain dict (not **kwargs) to match SmartPool.exec_target.
        Leaders ('RLA_UAV_Support') are matched 1:1 to the closest enemies; ground
        units are grouped by uid modulo the leader count and focus-fire their
        group's enemy while the leader patrols above them. Without leaders, all
        ground units attack the enemy nearest the team centroid.
        """
        act_each_agent = np.zeros(shape=(self.n_agent, 8))
        if kwargs['env_pause']:
            # paused thread: return the untouched all-zero block
            return act_each_agent
        self_uid_range = GlobalConfig.ScenarioConfig.AGENT_ID_EACH_TEAM[self.team]
        Env_Suffered_Reset = kwargs['Env_Suffered_Reset']
        thread = kwargs['thread']
        # current episode step
        step_cnt = kwargs['step_cnt']
        raw_info = kwargs['raw_info']
        opp_uid_range = GlobalConfig.ScenarioConfig.AGENT_ID_EACH_TEAM[1 - self.team]
        # 2-D (x, y) positions of every agent on the map
        pos_arr_2d = np.array([_info['agentLocationArr'][:2] for _info in raw_info])
        opp_pos_arr = pos_arr_2d[opp_uid_range]
        # alive air leaders vs alive ground units of this team
        self_air_uid_range = [info['uId'] for info in raw_info if info['agentAlive'] and info['agentTeam'] == self.team and info['type'] == 'RLA_UAV_Support']
        N_leader = len(self_air_uid_range)
        self_ground_uid_range = [info['uId'] for info in raw_info if info['agentAlive'] and info['agentTeam'] == self.team and info['type'] != 'RLA_UAV_Support']
        if N_leader > 0:
            self_air_pos_arr = pos_arr_2d[self_air_uid_range]
            assignments = assign_opponent(
                opp_pos_arr=opp_pos_arr, opp_id_arr=opp_uid_range,
                leader_pos_arr=self_air_pos_arr, leader_id_arr=self_air_uid_range)
            for group in range(N_leader):
                attack_uid = assignments[self_air_uid_range[group]]
                # ground units are partitioned into groups by uid modulo N_leader
                group_member_uids = [uid for uid in self_ground_uid_range if uid % N_leader == group]
                for group_member_uid in group_member_uids:
                    agent_team_index = raw_info[group_member_uid]['indexInTeam']
                    act_each_agent[agent_team_index] = encode_action_as_digits('SpecificAttacking', 'N/A', x=None, y=None, z=None, UID=attack_uid, T=None, T_index=None)
                leader_uid = self_air_uid_range[group]
                agent_team_index = raw_info[leader_uid]['indexInTeam']
                z_leader = raw_info[leader_uid]['agentLocation']['z']
                if len(group_member_uids) > 0:
                    # BUGFIX: the original read pos_arr_2d[group_member_uid] — the
                    # position of the LAST group member visited above, not the group
                    # centre the variable name promises. Average over the group.
                    team_center_pos = pos_arr_2d[group_member_uids].mean(axis=0)
                    act_each_agent[agent_team_index] = encode_action_as_digits('PatrolMoving', 'N/A', x=team_center_pos[0], y=team_center_pos[1], z=z_leader, UID=None, T=None, T_index=None)
                else:
                    # no followers left: the leader joins the attack itself
                    act_each_agent[agent_team_index] = encode_action_as_digits('SpecificAttacking', 'N/A', x=None, y=None, z=None, UID=attack_uid, T=None, T_index=None)
            return act_each_agent
        else:
            # no leaders alive: attack the enemy nearest the ground-unit centroid
            center_pos_kd = pos_arr_2d[self_ground_uid_range].mean(0, keepdims=True)
            dis = distance_mat_between(center_pos_kd, opp_pos_arr)
            target_index = np.argmin(dis.squeeze())
            attack_uid = opp_uid_range[target_index]
            group_member_uids = self_ground_uid_range
            for group_member_uid in group_member_uids:
                agent_team_index = raw_info[group_member_uid]['indexInTeam']
                act_each_agent[agent_team_index] = encode_action_as_digits('SpecificAttacking', 'N/A', x=None, y=None, z=None, UID=attack_uid, T=None, T_index=None)
            return act_each_agent
class DummyAlgorithmLinedAttack(DummyAlgorithmBase):
    """Multiprocess LinedAttack: per-thread decisions are computed by
    ThreadDecisionMaker workers running inside a SmartPool."""

    def __init__(self, n_agent, n_thread, space, mcv=None, team=None):
        super().__init__(n_agent, n_thread, space, mcv, team)
        # replicate this object's current attributes into every worker
        sync_state = [self.__dict__.copy()] * self.n_thread
        # multi-thread decision making
        from UTIL.shm_pool import SmartPool
        self.process_pool = SmartPool(fold=1, proc_num=self.n_thread, base_seed=0)
        self.process_pool.add_target(name='DT%d' % self.team, lam=ThreadDecisionMaker)
        atexit.register(self.process_pool.party_over)  # failsafe, handles shm leak
        self.process_pool.exec_target(
            name='DT%d' % self.team,
            dowhat='apply_context',
            args_list=sync_state
        )

    def interact_with_env(self, State_Recall):
        """Fan the per-thread state out to the worker pool and stack the results."""
        assert State_Recall['Latest-Obs'] is not None, ('make sure obs is ok')
        ENV_PAUSE = State_Recall['ENV-PAUSE']
        ENV_ACTIVE = ~ENV_PAUSE
        assert self.n_thread == len(ENV_ACTIVE), ('the number of thread is wrong?')
        n_active_thread = sum(ENV_ACTIVE)
        actions = np.zeros(shape=(self.n_thread, self.n_agent, 8))
        # one argument dict per worker/thread
        per_thread_args = [{
            "env_pause": ENV_PAUSE[thread],
            "thread": thread,
            "step_cnt": State_Recall['Current-Obs-Step'][thread],
            "raw_info": State_Recall['Latest-Team-Info'][thread]['dataArr'],
            "Env_Suffered_Reset": State_Recall['Env-Suffered-Reset'][thread]
        } for thread in range(self.n_thread)]
        actions = self.process_pool.exec_target(
            name='DT%d' % self.team,
            dowhat='decide_each_thread',
            args_list=per_thread_args
        )
        actions = np.stack(actions)
        # set actions of in-active threads to NaN (will be done again in multi_team.py, this line is not necessary)
        actions[ENV_PAUSE] = np.nan
        # swap (self.n_thread, self.n_agent) -> (self.n_agent, self.n_thread)
        actions = np.swapaxes(actions, 0, 1)
        return actions, {}
ENV_ACTIVE = ~ENV_PAUSE assert self.n_thread == len(ENV_ACTIVE), ('the number of thread is wrong?') n_active_thread = sum(ENV_ACTIVE) actions = np.zeros(shape=(self.n_thread, self.n_agent, 8 )) kwargs_L = [{ "env_pause": ENV_PAUSE[thread], "thread" : thread, "step_cnt" : State_Recall['Current-Obs-Step'][thread], "raw_info" : State_Recall['Latest-Team-Info'][thread]['dataArr'], "Env_Suffered_Reset" : State_Recall['Env-Suffered-Reset'][thread] } for thread in range(self.n_thread)] actions = self.process_pool.exec_target( name='DT%d'%self.team, dowhat='decide_each_thread', args_list=kwargs_L ) actions = np.stack(actions) # set actions of in-active threads to NaN (will be done again in multi_team.py, this line is not necessary) actions[ENV_PAUSE] = np.nan # swap (self.n_thread, self.n_agent) -> (self.n_agent, self.n_thread) actions = np.swapaxes(actions, 0, 1) return actions, {} # 判断agent是否存活 def uid_alive(raw_info, uid): return raw_info[uid]['agentAlive'] def vector_shift_towards(pos, toward_pos, offset): delta = toward_pos - pos delta = delta / (np.linalg.norm(delta) + 1e-10) return pos + delta * offset ================================================ FILE: PythonExample/hmp_minimal_modules/LICENSE ================================================ MIT License Copyright (c) 2020 Ankur Deka Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: PythonExample/hmp_minimal_modules/MISSION/common/base_env.py ================================================ import numpy as np class BaseEnv(object): def __init__(self, rank) -> None: self.observation_space = None self.action_space = None self.rank = rank def step(self, act): # obs: a Tensor with shape (n_agent, ...) # reward: a Tensor with shape (n_agent, 1) or (n_team, 1) # done: a Bool # info: a dict raise NotImplementedError # Warning: if you have only one team and RewardAsUnity, # you must make sure that reward has shape=[n_team=1, 1] # e.g. # >> RewardForTheOnlyTeam = +1 # >> RewardForAllTeams = np.array([RewardForTheOnlyTeam, ]) # >> return (ob, RewardForAllTeams, done, info) return (ob, RewardForAllTeams, done, info) # choose this if RewardAsUnity return (ob, RewardForAllAgents, done, info) # choose this if not RewardAsUnity def reset(self): # obs: a Tensor with shape (n_agent, ...) 
# info: a dict raise NotImplementedError return ob, info class RawObsArray(object): raw_obs_size = {} # shared def __init__(self, key='default'): self.key = key if self.key not in self.raw_obs_size: self.guards_group = [] self.nosize = True else: self.guards_group = np.zeros(shape=(self.raw_obs_size[self.key]), dtype=np.float32) self.nosize = False self.p = 0 def append(self, buf): if self.nosize: self.guards_group.append(buf) else: L = len(buf) self.guards_group[self.p:self.p+L] = buf[:] self.p += L def get(self): if self.nosize: self.guards_group = np.concatenate(self.guards_group) self.raw_obs_size[self.key] = len(self.guards_group) return self.guards_group def get_group_size(self): return len(self.guards_group) def get_raw_obs_size(self): assert self.key in self.raw_obs_size return self.raw_obs_size[self.key] ================================================ FILE: PythonExample/hmp_minimal_modules/MISSION/env_router.py ================================================ import_path_ref = { "collective_assult": ("MISSION.collective_assult.collective_assult_parallel_run", 'ScenarioConfig'), "dca_multiteam": ("MISSION.dca_multiteam.collective_assult_parallel_run", 'ScenarioConfig'), "collective_assult_debug": ("MISSION.collective_assult_debug.collective_assult_parallel_run", 'ScenarioConfig'), "air_fight": ("MISSION.air_fight.environment.air_fight_compat", 'ScenarioConfig'), "native_gym": ("MISSION.native_gym.native_gym_config", 'ScenarioConfig'), "starcraft2": ("MISSION.starcraft.sc2_env_wrapper", 'ScenarioConfig'), "sc2": ("MISSION.starcraft.sc2_env_wrapper", 'ScenarioConfig'), "unity_game": ("MISSION.unity_game.unity_game_wrapper", 'ScenarioConfig'), "sr_tasks->cargo": ("MISSION.sr_tasks.multiagent.scenarios.cargo", 'ScenarioConfig'), "sr_tasks->hunter_invader": ("MISSION.sr_tasks.multiagent.scenarios.hunter_invader", 'ScenarioConfig'), "sr_tasks->hunter_invader3d": ("MISSION.sr_tasks.multiagent.scenarios.hunter_invader3d", 'ScenarioConfig'), 
"sr_tasks->hunter_invader3d_v2": ("MISSION.sr_tasks.multiagent.scenarios.hunter_invader3d_v2",'ScenarioConfig'), "bvr": ("MISSION.bvr_sim.init_env", 'ScenarioConfig'), "mathgame": ("MISSION.math_game.env", 'ScenarioConfig'), "uhmap": ("MISSION.uhmap.uhmap_env_wrapper", 'ScenarioConfig'), } env_init_function_ref = { "collective_assult": ("MISSION.collective_assult.collective_assult_parallel_run", 'make_collective_assult_env'), "dca_multiteam": ("MISSION.dca_multiteam.collective_assult_parallel_run", 'make_collective_assult_env'), "collective_assult_debug": ("MISSION.collective_assult_debug.collective_assult_parallel_run", 'make_collective_assult_env'), "air_fight": ("MISSION.air_fight.environment.air_fight_compat", 'make_air_fight_env'), "native_gym": ("MISSION.native_gym.native_gym_config", 'env_init_function'), "starcraft2": ("MISSION.starcraft.sc2_env_wrapper", 'make_sc2_env'), "sc2": ("MISSION.starcraft.sc2_env_wrapper", 'make_sc2_env'), "unity_game": ("MISSION.unity_game.unity_game_wrapper", 'make_env'), "sr_tasks": ("MISSION.sr_tasks.multiagent.scenario", 'sr_tasks_env'), "bvr": ("MISSION.bvr_sim.init_env", 'make_bvr_env'), "mathgame": ("MISSION.math_game.env", 'make_math_env'), "uhmap": ("MISSION.uhmap.uhmap_env_wrapper", 'make_uhmap_env'), } ################################################################################################################################## ################################################################################################################################## from config import GlobalConfig import importlib, os from UTIL.colorful import print亮蓝 def load_ScenarioConfig(): if GlobalConfig.env_name not in import_path_ref: assert False, ('need to find path of ScenarioConfig') import_path, ScenarioConfig = import_path_ref[GlobalConfig.env_name] GlobalConfig.ScenarioConfig = getattr(importlib.import_module(import_path), ScenarioConfig) def make_env_function(env_name, rank): load_ScenarioConfig() ref_env_name = env_name if 
'native_gym' in env_name: assert '->' in env_name ref_env_name, env_name = env_name.split('->') elif 'sr_tasks' in env_name: assert '->' in env_name ref_env_name, env_name = env_name.split('->') import_path, func_name = env_init_function_ref[ref_env_name] env_init_function = getattr(importlib.import_module(import_path), func_name) return lambda: env_init_function(env_name, rank) def make_parallel_envs(process_pool, marker=''): from UTIL.shm_env import SuperpoolEnv from config import GlobalConfig from MISSION.env_router import load_ScenarioConfig load_ScenarioConfig() env_args_dict_list = [({ 'env_name':GlobalConfig.env_name, 'proc_index':i if 'test' not in marker else -(i+1), 'marker':marker },) for i in range(GlobalConfig.num_threads)] if GlobalConfig.env_name == 'air_fight': # This particular env has a dll file # that must be loaded in main process # 艹tmd有个dll必须在主进程加载 from MISSION.air_fight.environment.pytransform import pyarmor_runtime pyarmor_runtime() if GlobalConfig.env_name == 'bvr': # 1、如果没用hmp的docker,请设置好 YOUR_ROOT_PASSWORD,不止这一处,请全局搜索"YOUR_ROOT_PASSWORD"替换所有 # 2、用docker的sock挂载到容器中,方法在SetupDocker.md中 print亮蓝('[env_router]: here goes the docker in docker check.') YOUR_ROOT_PASSWORD = 'clara' # the sudo password os.system("echo %s|sudo -S date"%YOUR_ROOT_PASSWORD) # get sudo power res = os.popen("sudo docker ps").read() if "CONTAINER ID" not in res: print亮蓝('[env_router]: Error checking docker in docker, can not control host docker interface!') raise "Error checking docker in docker, can not control host docker interface!" 
pass if GlobalConfig.env_name == 'collective_assult_debug': # This particular env has a cython file that needs to be compiled in main process # that must be loaded in main process from MISSION.collective_assult_debug.cython_func import laser_hit_improve3 if GlobalConfig.env_name == 'dca_multiteam': # This particular env has a cython file that needs to be compiled in main process # that must be loaded in main process from MISSION.dca_multiteam.cython_func import laser_hit_improve3 if GlobalConfig.env_name == 'uhmap': # This particular env has a cython file that needs to be compiled in main process # that must be loaded in main process from MISSION.uhmap.SubTasks.cython_func import tear_number_apart if GlobalConfig.num_threads > 1: envs = SuperpoolEnv(process_pool, env_args_dict_list) else: envs = SuperpoolEnv(process_pool, env_args_dict_list) return envs ================================================ FILE: PythonExample/hmp_minimal_modules/MISSION/readme.md ================================================ # Task Configuration Core Fields: ## Parameter Internal Relationship * You may notice some configuration field ends with ```_cv```, they are parameters chained with other parameters. For example, when changing the ```map```, the limit of ```episode_length``` and the number of agents ```N_AGENT_EACH_TEAM``` are implicated and also need to be changed. To make it simple, we add ```episode_length_cv``` and ```N_AGENT_EACH_TEAM_cv``` to record this link with lambda function. * When parameters (e.g. ```map```) that are bind to other parameters are changed, the Transparent Parameter Control (TPC) module will scan and parse variables with twin variables that end with ```_cv```, and automatically modify their values. (refer to ./UTIL/config_args.py) Generally, you can safely ignore them and only pay attention to fields below. 
## Fields | Field | Value | Explaination | zh Explaination | | ---- | ---- | ---- | ---- | | N_TEAM | ```int``` | the number of agent teams in the tasks, information cannot be shared between different team | 队伍数量,每个队伍被一个ALGORITHM模块控制,队伍之间不可共享信息。大多数任务中,队伍之间是敌对关系 | | N_AGENT_EACH_TEAM | ```list (of int)``` | the number of agents in each team | 每个队伍的智能体数量 | | AGENT_ID_EACH_TEAM | ```list of list (of int)``` | the ID of agents in each team, double layer list, must agree with N_AGENT_EACH_TEAM! | 每个队伍的智能体的ID,双层列表,必须与N_AGENT_EACH_TEAM对应! | | TEAM_NAMES | ```list (of string)``` | use which ALGORITHM to control each team, fill the path of chosen algorithm and its main class name, e.g.```"ALGORITHM.conc.foundation->ReinforceAlgorithmFoundation"``` | 选择每支队伍的控制算法,填写控制算法主模块的路径和类名| | RewardAsUnity | ```bool``` | Shared reward, or each agent has individual reward signal | 每个队伍的智能体共享集体奖励(True),或者每个队伍的智能体都独享个体奖励(False) | | ObsAsUnity | ```bool``` | Agents do not has individual observation, only shared collective observation | 没有个体观测值,整个群体的观测值获取方式如同单智能体问题一样 | | StateProvided | ```bool``` | Whether the global state is provided in training. If True, the Algorithm can access both ```obs``` and ```state``` during training | 是否在训练过程中提供全局state | # * How to Introduce a New Mission Environment ### Step 1: Declare Mission Info (how many agents and actions, maximum episode steps et.al.) - make a folder under ```./MISSION```, e.g. ```./MISSION/uhmap.``` - make a py file, e.g. ```./MISSION/uhmap/uhmap_env_wrapper.py``` - in ```uhmap_env_wrapper.py```, copy and paste following template: ```python from UTIL.config_args import ChainVar # please register this ScenarioConfig into MISSION/env_router.py class ScenarioConfig(object): ''' ScenarioConfig: This config class will be 'injected' with new settings from JSONC. 
(E.g., override configs with ```python main.py --cfg example.jsonc```) (As the name indicated, ChainVars will change WITH vars it 'chained_with' during config injection) (please see UTIL.config_args to find out how this advanced trick works out.) ''' n_team1agent = 5 # Needed by the hmp core # N_TEAM = 1 N_AGENT_EACH_TEAM = [n_team1agent,] N_AGENT_EACH_TEAM_cv = ChainVar(lambda n_team1agent: [n_team1agent,], chained_with=['n_team1agent']) AGENT_ID_EACH_TEAM = [range(0,n_team1agent),] AGENT_ID_EACH_TEAM_cv = ChainVar(lambda n_team1agent: [range(0,n_team1agent),], chained_with=['n_team1agent']) TEAM_NAMES = ['ALGORITHM.None->None',] ''' ## If the length of action array == the number of teams, set ActAsUnity to True ## If the length of action array == the number of agents, set ActAsUnity to False ''' ActAsUnity = False ''' ## If the length of reward array == the number of agents, set RewardAsUnity to False ## If the length of reward array == 1, set RewardAsUnity to True ''' RewardAsUnity = True ''' ## If the length of obs array == the number of agents, set ObsAsUnity to False ## If the length of obs array == the number of teams, set ObsAsUnity to True ''' ObsAsUnity = False # Needed by env itself # MaxEpisodeStep = 100 render = False # Needed by some ALGORITHM # StateProvided = False AvailActProvided = False EntityOriented = False n_actions = 2 obs_vec_length = 10 ``` ### Step 2: Writing Environment - For convenience, please copy and paste ```class BaseEnv(object)``` into your script: ```python class BaseEnv(object): def __init__(self, rank) -> None: self.observation_space = None self.action_space = None self.rank = rank def step(self, act): # obs: a Tensor with shape (n_agent, ...) # reward: a Tensor with shape (n_agent, 1) or (n_team, 1) # done: a Bool # info: a dict raise NotImplementedError # Warning: if you have only one team and RewardAsUnity, # you must make sure that reward has shape=[n_team=1, 1] # e.g. 
# >> RewardForTheOnlyTeam = +1 # >> RewardForAllTeams = np.array([RewardForTheOnlyTeam, ]) # >> return (ob, RewardForAllTeams, done, info) return (ob, RewardForAllTeams, done, info) # choose this if RewardAsUnity return (ob, RewardForAllAgents, done, info) # choose this if not RewardAsUnity def reset(self): # obs: a Tensor with shape (n_agent, ...) # done: a Bool raise NotImplementedError return ob, info ``` - Then create a class that inherit from it (```class UhmapEnv(BaseEnv)```): ```python class UhmapEnv(BaseEnv): def __init__(self, rank) -> None: super().__init__(rank) self.id = rank self.render = ScenarioConfig.render and (self.id==0) self.n_agents = ScenarioConfig.n_team1agent # self.observation_space = ? # self.action_space = ? if ScenarioConfig.StateProvided: # self.observation_space['state_shape'] = ? pass if self.render: # render init pass ``` - Next, it is time to write your own code of ```step()``` and ```reset()``` function. There is little we can help about that, as it is your custom environment after all. ### Step 3: Write a Function to Initialize the Environment A empty function getting a instance of environment, it will used in step 4. But don'y worry, two lines of code will do: ```python # please register this into MISSION/env_router.py def make_uhmap_env(env_id, rank): return UhmapEnv(rank) ``` ### Step 4: Make Everything Kiss Together This step will make HMP aware of the existence of this new MISSION. 
- Open ```MISSION/env_router.py``` - Add the path of environment's configuration in ```import_path_ref``` ``` python import_path_ref = { "uhmap": ("MISSION.uhmap.uhmap_env_wrapper", 'ScenarioConfig'), } ``` - Add the path of environment's init function in ```env_init_function_ref```, e.g.: ``` python env_init_function_ref = { "uhmap": ("MISSION.uhmap.uhmap_env_wrapper", "make_uhmap_env"), } ``` ### Step 5: Write a Config Override to Start Experiment Create a ```exp.jsonc``` or ```json``` file, copy and paste following content, and please pay attention to lines marked with ```***```, they are the most important ones: ```jsonc { // config HMP core "config.py->GlobalConfig": { "note": "uhmp-dev", "env_name": "uhmap", // *** the selection of MISSION "env_path": "MISSION.uhmap", // *** confirm the path of env (a fail safe) "draw_mode": "Img", "num_threads": "1", "report_reward_interval": "1", "test_interval": "128", "test_epoch": "4", "device": "cuda", "max_n_episode": 500000, "fold": "4", "backup_files": [ ] }, // config MISSION "MISSION.uhmap.uhmap_env_wrapper.py->ScenarioConfig": { // *** must kiss with "env_name" and "env_path" // remember this? declared in ScenarioConfig class in ./MISSION/math_game/uhmap.py. "n_team1agent": 4, "n_actions": 10, "StateProvided": false, "TEAM_NAMES": [ "ALGORITHM.conc_4hist.foundation->ReinforceAlgorithmFoundation" // *** select ALGORITHMs ] }, // config ALGORITHMs "ALGORITHM.conc_4hist.foundation.py->AlgorithmConfig": { // must kiss with "TEAM_NAMES" "train_traj_needed": "16", "prevent_batchsize_oom": "True", "n_focus_on": 3, "lr": 0.0005, "ppo_epoch": 24, "gamma_in_reward_forwarding": "True", "gamma_in_reward_forwarding_value": 0.95, "gamma": 0.99 } } ``` At last, run experiment with ```python main.py --cfg ./path-to-exp-json/exp.jsonc```. 
================================================ FILE: PythonExample/hmp_minimal_modules/MISSION/uhmap/SubTasks/SubtaskCommonFn.py ================================================ import json, copy, re, os, inspect, os import numpy as np from UTIL.tensor_ops import my_view, repeat_at from ...common.base_env import RawObsArray from ..actionset_v3 import digitsToStrAction from ..agent import Agent from ..uhmap_env_wrapper import UhmapEnv, ScenarioConfig from .cython_func import tear_num_arr from ..actset_lookup import digit2act_dictionary, AgentPropertyDefaults from ..actset_lookup import decode_action_as_string, decode_action_as_string class UhmapCommonFn(UhmapEnv): def reset(self): """ Reset function, it delivers reset command to unreal engine to spawn all agents 环境复位,每个episode的开始会执行一次此函数中会初始化所有智能体 """ super().reset() self.t = 0 pos_ro = np.random.rand()*2*np.pi # spawn agents AgentSettingArray = [] # count the number of agent in each team n_team_agent = {} for i, agent_info in enumerate(self.SubTaskConfig.agent_list): team = agent_info['team'] if team not in n_team_agent: n_team_agent[team] = 0 self.SubTaskConfig.agent_list[i]['uid'] = i self.SubTaskConfig.agent_list[i]['tid'] = n_team_agent[team] n_team_agent[team] += 1 self.n_team_agent = n_team_agent # push agent init info one by one for i, agent_info in enumerate(self.SubTaskConfig.agent_list): team = agent_info['team'] agent_info['n_team_agent'] = n_team_agent[team] init_fn = getattr(self, agent_info['init_fn_name']) AgentSettingArray.append(init_fn(agent_info, pos_ro)) self.agents = [Agent(team=a['team'], team_id=a['tid'], uid=a['uid']) for a in self.SubTaskConfig.agent_list] # refer to struct.cpp, FParsedDataInput resp = self.client.send_and_wait_reply(json.dumps({ 'valid': True, 'DataCmd': 'reset', 'NumAgents' : len(self.SubTaskConfig.agent_list), 'AgentSettingArray': AgentSettingArray, # refer to struct.cpp, FAgentProperty 'TimeStepMax': ScenarioConfig.MaxEpisodeStep, 'TimeStep' : 0, 'Actions': None, })) 
resp = json.loads(resp) # make sure the map (level in UE) is correct # assert resp['dataGlobal']['levelName'] == 'UhmapLargeScale' assert len(resp['dataArr']) == len(AgentSettingArray), "Illegal agent initial position. 非法的智能体初始化位置,一部分智能体没有生成." return self.parse_response_ob_info(resp) def step(self, act): """ step 函数,act中包含了所有agent的决策 """ assert len(act) == self.n_agents # translate actions to the format recognized by unreal engine if self.SubTaskConfig.ActionFormat == 'Single-Digit': act_send = [digit2act_dictionary[a] for a in act] elif self.SubTaskConfig.ActionFormat == 'Multi-Digit': act_send = [decode_action_as_string(a) for a in act] elif self.SubTaskConfig.ActionFormat == 'ASCII': act_send = [digitsToStrAction(a) for a in act] else: act_send = [digitsToStrAction(a) for a in act] # simulation engine IO resp = json.loads(self.client.send_and_wait_reply(json.dumps({ 'valid': True, 'DataCmd': 'step', 'TimeStep': self.t, 'Actions': None, 'StringActions': act_send, }))) # get obs for RL, info for script AI ob, info = self.parse_response_ob_info(resp) # generate reward, get the episode ending infomation RewardForAllTeams, WinningResult = self.gen_reward_and_win(resp) if WinningResult is not None: info.update(WinningResult) assert resp['dataGlobal']['episodeDone'] done = True else: done = False if resp['dataGlobal']['timeCnt'] >= ScenarioConfig.MaxEpisodeStep: assert done return (ob, RewardForAllTeams, done, info) # choose this if RewardAsUnity def parse_event(self, event): """ 解析环境返回的一些关键事件, 如智能体阵亡,某队伍胜利等等。 关键事件需要在ue中进行定义. 
该设计极大地简化了python端奖励的设计流程, 减小了python端的运算量。 """ if not hasattr(self, 'pattern'): self.pattern = re.compile(r'<([^<>]*)>([^<>]*)') return {k:v for k,v in re.findall(self.pattern, event)} def extract_key_gameobj(self, resp): """ 获取非智能体的仿真物件,例如重要landmark等 """ keyObjArr = resp['dataGlobal']['keyObjArr'] return keyObjArr def gen_reward_and_win(self, resp): """ 奖励的设计在此定义, (UE端编程死板,虽然预留了相关字段, 但请不要在UE端提供奖励的定义。) 建议:在UE端定义触发奖励的事件,如智能体阵亡、战术目标完成等,见parse_event """ reward = [0]*self.n_teams events = resp['dataGlobal']['events'] WinningResult = None for event in events: event_parsed = self.parse_event(event) # if event_parsed['Event'] == 'Destroyed': # team = self.find_agent_by_uid(event_parsed['UID']).team # reward[team] -= 0.05 # this team # reward[1-team] += 0.10 # opp team if event_parsed['Event'] == 'EndEpisode': # print([a.alive * a.hp for a in self.agents]) WaterdropWin = False WaterdropRank = False WaterdropReward = 0 ShipWin = -1 ShipRank = -1 ShipReward = 0 EndReason = event_parsed['EndReason'] # According to MISSION\uhmap\SubTasks\UhmapWaterdropConf.py, team 0 is Ship team, team 1 is Waterdrop team if EndReason == "ShipNumLessThanTheshold" or EndReason == "Team_0_AllDead": WaterdropWin = True; WaterdropRank = 0; WaterdropReward = 1 ShipWin = False; ShipRank = 1; ShipReward = -1 elif EndReason == "TimeMaxCntReached" or EndReason == "Team_1_AllDead": WaterdropWin = False; WaterdropRank = 1; WaterdropReward = -1 ShipWin = True; ShipRank = 0; ShipReward = 1 else: print('unexpected end reaon:', EndReason) WinningResult = {"team_ranking": [ShipRank, WaterdropRank], "end_reason": EndReason} reward = [ShipReward, WaterdropReward] # print(reward) return reward, WinningResult def step_skip(self): """ 跳过一次决策,无用的函数 """ return self.client.send_and_wait_reply(json.dumps({ 'valid': True, 'DataCmd': 'skip_frame', })) def find_agent_by_uid(self, uid): """ 用uid查找智能体(带缓存加速机制) """ if not hasattr(self, 'uid_to_agent_dict'): self.uid_to_agent_dict = {} 
self.uid_to_agent_dict.update({agent.uid:agent for agent in self.agents}) if isinstance(uid, str): self.uid_to_agent_dict.update({str(agent.uid):agent for agent in self.agents}) return self.uid_to_agent_dict[uid] def parse_response_ob_info(self, resp): """ 粗解析智能体的观测,例如把死智能体的位置替换为inf(无穷远), 将智能体的agentLocation从字典形式转变为更简洁的(x,y,z)tuple形式 """ assert resp['valid'] resp['dataGlobal']['distanceMat'] = np.array(resp['dataGlobal']['distanceMat']['flat_arr']).reshape(self.n_agents,self.n_agents) if len(resp['dataGlobal']['events'])>0: tmp = [kv.split('>') for kv in resp['dataGlobal']['events'][0].split('<') if kv] info_parse = {t[0]:t[1] for t in tmp} info_dict = resp for info in info_dict['dataArr']: alive = info['agentAlive'] if alive: agentLocation = info.pop('agentLocation') agentRotation = info.pop('agentRotation') agentVelocity = info.pop('agentVelocity') agentScale = info.pop('agentScale') info['agentLocationArr'] = (agentLocation['x'], agentLocation['y'], agentLocation['z']) info['agentVelocityArr'] = (agentVelocity['x'], agentVelocity['y'], agentVelocity['z']) info['agentRotationArr'] = (agentRotation['yaw'], agentRotation['pitch'], agentRotation['roll']) info['agentScaleArr'] = (agentScale['x'], agentScale['y'], agentScale['z']) info.pop('previousAction') info.pop('availActions') # info.pop('rSVD1') info.pop('interaction') else: inf = float('inf') info['agentLocationArr'] = (inf, inf, inf) info['agentVelocityArr'] = (inf, inf, inf) info['agentRotationArr'] = (inf, inf, inf) info = resp['dataArr'] for i, agent_info in enumerate(info): self.agents[i].update_agent_attrs(agent_info) self.key_obj = self.extract_key_gameobj(resp) # return ob, info return self.make_obs(resp), info_dict @staticmethod def item_random_mv(src,dst,prob,rand=False): assert len(src.shape)==1; assert len(dst.shape)==1 if rand: np.random.shuffle(src) len_src = len(src) n_mv = (np.random.rand(len_src) < prob).sum() item_mv = src[range(len_src-n_mv,len_src)] src = src[range(0,0+len_src-n_mv)] dst = 
np.concatenate((item_mv, dst)) return src, dst @staticmethod def get_binary_array(n_int, n_bits=8, dtype=np.float32): arr = np.zeros((*n_int.shape, n_bits), dtype=dtype) for i in range(n_bits): arr[:, i] = (n_int%2==1).astype(int) n_int = n_int / 2 n_int = n_int.astype(np.int8) return arr def make_obs(self, resp=None, get_shape=False): # CORE_DIM = 38 CORE_DIM = 23 assert ScenarioConfig.obs_vec_length == CORE_DIM if get_shape: return CORE_DIM # temporary parameters OBS_RANGE_PYTHON_SIDE = 15000 MAX_NUM_OPP_OBS = 5 MAX_NUM_ALL_OBS = 5 # get and calculate distance array pos3d_arr = np.zeros(shape=(self.n_agents, 3), dtype=np.float32) for i, agent in enumerate(self.agents): pos3d_arr[i] = agent.pos3d # use the distance matrix calculated by unreal engine to accelerate # dis_mat = distance_matrix(pos3d_arr) # dis_mat is a matrix, shape = (n_agent, n_agent) dis_mat = resp['dataGlobal']['distanceMat'] alive_all = np.array([agent.alive for agent in self.agents]) try: dis_mat[~alive_all,:] = +np.inf dis_mat[:,~alive_all] = +np.inf except: pass # get team list team_belonging = np.array([agent.team for agent in self.agents]) # gather the obs arr of all known agents obs_arr = RawObsArray(key='Agent') if not hasattr(self, "uid_binary"): self.uid_binary = self.get_binary_array(np.arange(self.n_agents), 10) for i, agent in enumerate(self.agents): assert agent.location is not None assert agent.uid == i obs_arr.append( self.uid_binary[i] # 0~9 ) obs_arr.append([ agent.index, # 10 agent.team, # 11 agent.alive, # 12 agent.uid_remote, # 13 ]) obs_arr.append( #[14,15,16,17,18,19] agent.pos3d # tear_num_arr(agent.pos3d, n_digits=6, base=10, mv_left=0) # tear_num_arr(agent.pos3d, 6, ScenarioConfig.ObsBreakBase, 0) # 3 -- > 3*6 = 18 , 18-3=15, 23+15 = 38 ) obs_arr.append( agent.vel3d ) obs_arr.append([ agent.hp, agent.yaw, agent.max_speed, ]) obs_ = obs_arr.get() new_obs = my_view(obs_, [self.n_agents, -1]) assert CORE_DIM == new_obs.shape[-1] OBS_ALL_AGENTS = 
np.zeros(shape=(self.n_agents, MAX_NUM_OPP_OBS+MAX_NUM_ALL_OBS, CORE_DIM)) # now arranging the individual obs for i, agent in enumerate(self.agents): if not agent.alive: OBS_ALL_AGENTS[i, :] = np.nan continue # if alive # scope dis2all = dis_mat[i, :] is_ally = (team_belonging == agent.team) # scope a2h_dis = dis2all[~is_ally] h_alive = alive_all[~is_ally] h_feature = new_obs[~is_ally] h_iden_sort = np.argsort(a2h_dis)[:MAX_NUM_OPP_OBS] a2h_dis_sorted = a2h_dis[h_iden_sort] h_alive_sorted = h_alive[h_iden_sort] h_vis_mask = (a2h_dis_sorted <= OBS_RANGE_PYTHON_SIDE) & h_alive_sorted # scope h_vis_index = h_iden_sort[h_vis_mask] h_invis_index = h_iden_sort[~h_vis_mask] h_vis_index, h_invis_index = self.item_random_mv(src=h_vis_index, dst=h_invis_index,prob=0, rand=True) h_ind = np.concatenate((h_vis_index, h_invis_index)) h_msk = np.concatenate((h_vis_index<0, h_invis_index>=0)) # "<0" project to False; ">=0" project to True a2h_feature_sort = h_feature[h_ind] a2h_feature_sort[h_msk] = 0 if len(a2h_feature_sort) a2f_dis = dis2all[is_ally] f_alive = alive_all[is_ally] f_feature = new_obs[is_ally] f_iden_sort = np.argsort(a2f_dis)[:MAX_NUM_ALL_OBS] a2f_dis_sorted = a2f_dis[f_iden_sort] f_alive_sorted = f_alive[f_iden_sort] f_vis_mask = (a2f_dis_sorted <= OBS_RANGE_PYTHON_SIDE) & f_alive_sorted # scope f_vis_index = f_iden_sort[f_vis_mask] self_vis_index = f_vis_index[:1] # seperate self and ally f_vis_index = f_vis_index[1:] # seperate self and ally f_invis_index = f_iden_sort[~f_vis_mask] f_vis_index, f_invis_index = self.item_random_mv(src=f_vis_index, dst=f_invis_index,prob=0, rand=True) f_ind = np.concatenate((self_vis_index, f_vis_index, f_invis_index)) f_msk = np.concatenate((self_vis_index<0, f_vis_index<0, f_invis_index>=0)) # "<0" project to False; ">=0" project to True self_ally_feature_sort = f_feature[f_ind] self_ally_feature_sort[f_msk] = 0 if len(self_ally_feature_sort) None: super().__init__(rank) self.observation_space = self.make_obs(get_shape=True) 
self.SubTaskConfig = SubTaskConfig assert os.path.basename(inspect.getfile(SubTaskConfig)) == type(self).__name__+'Conf.py', \ ('make sure you have imported the correct SubTaskConfig class') def extract_key_gameobj(self, resp): keyObjArr = resp['dataGlobal']['keyObjArr'] return keyObjArr def gen_reward_and_win(self, resp): reward = [0]*self.n_teams events = resp['dataGlobal']['events'] WinningResult = None for event in events: event_parsed = self.parse_event(event) if event_parsed['Event'] == 'Destroyed': team = self.find_agent_by_uid(event_parsed['UID']).team reward[team] -= 0.05 # this team reward[1-team] += 0.10 # opp team if event_parsed['Event'] == 'EndEpisode': # print([a.alive * a.hp for a in self.agents]) EndReason = event_parsed['EndReason'] WinTeam = int(event_parsed['WinTeam']) if WinTeam<0: # end due to timeout agents_left_each_team = [0 for _ in range(self.n_teams)] for a in self.agents: if a.alive: agents_left_each_team[a.team] += 1 WinTeam = np.argmax(agents_left_each_team) # <<1>> The alive agent number is EQUAL if agents_left_each_team[WinTeam] == agents_left_each_team[1-WinTeam]: hp_each_team = [0 for _ in range(self.n_teams)] for a in self.agents: if a.alive: hp_each_team[a.team] += a.hp WinTeam = np.argmax(hp_each_team) # <<2>> The alive agent HP sum is EQUAL if hp_each_team[WinTeam] == hp_each_team[1-WinTeam]: WinTeam = -1 if WinTeam >= 0: WinningResult = { "team_ranking": [0,1] if WinTeam==0 else [1,0], "end_reason": EndReason } reward[WinTeam] += 1 reward[1-WinTeam] -= 1 else: WinningResult = { "team_ranking": [-1, -1], "end_reason": EndReason } reward = [-1 for _ in range(self.n_teams)] # print(reward) return reward, WinningResult @staticmethod def item_random_mv(src,dst,prob,rand=False): assert len(src.shape)==1; assert len(dst.shape)==1 if rand: np.random.shuffle(src) len_src = len(src) n_mv = (np.random.rand(len_src) < prob).sum() item_mv = src[range(len_src-n_mv,len_src)] src = src[range(0,0+len_src-n_mv)] dst = np.concatenate((item_mv, 
dst)) return src, dst @staticmethod def get_binary_array(n_int, n_bits=8, dtype=np.float32): arr = np.zeros((*n_int.shape, n_bits), dtype=dtype) for i in range(n_bits): arr[:, i] = (n_int%2==1).astype(int) n_int = n_int / 2 n_int = n_int.astype(np.int8) return arr def make_obs(self, resp=None, get_shape=False): # CORE_DIM = 38 CORE_DIM = 23 assert ScenarioConfig.obs_vec_length == CORE_DIM if get_shape: return CORE_DIM # temporary parameters OBS_RANGE_PYTHON_SIDE = 1500 MAX_NUM_OPP_OBS = 5 MAX_NUM_ALL_OBS = 5 # get and calculate distance array pos3d_arr = np.zeros(shape=(self.n_agents, 3), dtype=np.float32) for i, agent in enumerate(self.agents): pos3d_arr[i] = agent.pos3d # use the distance matrix calculated by unreal engine to accelerate # dis_mat = distance_matrix(pos3d_arr) # dis_mat is a matrix, shape = (n_agent, n_agent) dis_mat = resp['dataGlobal']['distanceMat'] alive_all = np.array([agent.alive for agent in self.agents]) try: dis_mat[~alive_all,:] = +np.inf dis_mat[:,~alive_all] = +np.inf except: pass # get team list team_belonging = np.array([agent.team for agent in self.agents]) # gather the obs arr of all known agents obs_arr = RawObsArray(key='Agent') if not hasattr(self, "uid_binary"): self.uid_binary = self.get_binary_array(np.arange(self.n_agents), 10) for i, agent in enumerate(self.agents): assert agent.location is not None assert agent.uid == i obs_arr.append( self.uid_binary[i] # 0~9 ) obs_arr.append([ agent.index, # 10 agent.team, # 11 agent.alive, # 12 agent.uid_remote, # 13 ]) obs_arr.append( #[14,15,16,17,18,19] agent.pos3d # tear_num_arr(agent.pos3d, n_digits=6, base=10, mv_left=0) # tear_num_arr(agent.pos3d, 6, ScenarioConfig.ObsBreakBase, 0) # 3 -- > 3*6 = 18 , 18-3=15, 23+15 = 38 ) obs_arr.append( agent.vel3d ) obs_arr.append([ agent.hp, agent.yaw, agent.max_speed, ]) obs_ = obs_arr.get() new_obs = my_view(obs_, [self.n_agents, -1]) assert CORE_DIM == new_obs.shape[-1] OBS_ALL_AGENTS = np.zeros(shape=( self.n_agents, 
MAX_NUM_OPP_OBS+MAX_NUM_ALL_OBS, CORE_DIM )) # now arranging the individual obs for i, agent in enumerate(self.agents): if not agent.alive: OBS_ALL_AGENTS[i, :] = np.nan continue # if alive # scope dis2all = dis_mat[i, :] is_ally = (team_belonging == agent.team) # scope a2h_dis = dis2all[~is_ally] h_alive = alive_all[~is_ally] h_feature = new_obs[~is_ally] h_iden_sort = np.argsort(a2h_dis)[:MAX_NUM_OPP_OBS] a2h_dis_sorted = a2h_dis[h_iden_sort] h_alive_sorted = h_alive[h_iden_sort] h_vis_mask = (a2h_dis_sorted <= OBS_RANGE_PYTHON_SIDE) & h_alive_sorted # scope h_vis_index = h_iden_sort[h_vis_mask] h_invis_index = h_iden_sort[~h_vis_mask] h_vis_index, h_invis_index = self.item_random_mv(src=h_vis_index, dst=h_invis_index,prob=0, rand=True) h_ind = np.concatenate((h_vis_index, h_invis_index)) h_msk = np.concatenate((h_vis_index<0, h_invis_index>=0)) # "<0" project to False; ">=0" project to True a2h_feature_sort = h_feature[h_ind] a2h_feature_sort[h_msk] = 0 if len(a2h_feature_sort) a2f_dis = dis2all[is_ally] f_alive = alive_all[is_ally] f_feature = new_obs[is_ally] f_iden_sort = np.argsort(a2f_dis)[:MAX_NUM_ALL_OBS] a2f_dis_sorted = a2f_dis[f_iden_sort] f_alive_sorted = f_alive[f_iden_sort] f_vis_mask = (a2f_dis_sorted <= OBS_RANGE_PYTHON_SIDE) & f_alive_sorted # scope f_vis_index = f_iden_sort[f_vis_mask] self_vis_index = f_vis_index[:1] # seperate self and ally f_vis_index = f_vis_index[1:] # seperate self and ally f_invis_index = f_iden_sort[~f_vis_mask] f_vis_index, f_invis_index = self.item_random_mv(src=f_vis_index, dst=f_invis_index,prob=0, rand=True) f_ind = np.concatenate((self_vis_index, f_vis_index, f_invis_index)) f_msk = np.concatenate((self_vis_index<0, f_vis_index<0, f_invis_index>=0)) # "<0" project to False; ">=0" project to True self_ally_feature_sort = f_feature[f_ind] self_ally_feature_sort[f_msk] = 0 if len(self_ally_feature_sort) 0: OBJ_UID_OFFSET = 32768 obs_arr = RawObsArray(key = 'GameObj') for i, obj in enumerate(self.key_obj): assert 
obj['uId'] - OBJ_UID_OFFSET == i obs_arr.append( -self.uid_binary[i] # reverse uid binary, self.uid_binary[i] ) obs_arr.append([ obj['uId'] - OBJ_UID_OFFSET, #agent.index, -1, #agent.team, True, #agent.alive, obj['uId'] - OBJ_UID_OFFSET, #agent.uid_remote, ]) # tear_num_arr(agent.pos3d, n_digits=6, base=10, mv_left=0) obs_arr.append( [ obj['location']['x'], obj['location']['y'], obj['location']['z'] # agent.pos3d ] # tear_num_arr([ # obj['location']['x'], obj['location']['y'], obj['location']['z'] # agent.pos3d # ], 6, ScenarioConfig.ObsBreakBase, 0) ) obs_arr.append([ obj['velocity']['x'], obj['velocity']['y'], obj['velocity']['z'] # agent.vel3d ]+ [ -1, # hp obj['rotation']['yaw'], # yaw 0, # max_speed ]) OBS_GameObj = my_view(obs_arr.get(), [len(self.key_obj), -1]) OBS_GameObj = OBS_GameObj[:MAX_OBJ_NUM_ACCEPT, :] OBS_GameObj = repeat_at(OBS_GameObj, insert_dim=0, n_times=self.n_agents) OBS_ALL_AGENTS = np.concatenate((OBS_ALL_AGENTS, OBS_GameObj), axis=1) return OBS_ALL_AGENTS def init_ground(self, agent_info, pos_ro): N_COL = 2 agent_class = agent_info['type'] team = agent_info['team'] n_team_agent = 10 tid = agent_info['tid'] uid = agent_info['uid'] x = 0 + 800*(tid - n_team_agent//2) //N_COL y = (400* (tid%N_COL) + 2000) * (-1)**(team+1) x,y = np.matmul(np.array([x,y]), np.array([[np.cos(pos_ro), -np.sin(pos_ro)], [np.sin(pos_ro), np.cos(pos_ro)] ])) z = 500 # 500 is slightly above the ground yaw = 90 if team==0 else -90 assert np.abs(x) < 15000.0 and np.abs(y) < 15000.0 agent_property = copy.deepcopy(AgentPropertyDefaults) agent_property.update({ 'DebugAgent': False, # max drive/fly speed 'MaxMoveSpeed': 600, # also influence object mass, please change it with causion! 
'AgentScale' : { 'x': 0.75, 'y': 0.75, 'z': 0.75, }, # probability of escaping dmg 闪避 "DodgeProb": 0.0, # ms explode dmg "ExplodeDmg": 45, # team belonging 'AgentTeam': team, # choose ue class to init 'ClassName': agent_class, # Weapon CD 'WeaponCD': 1, # open fire range "PerceptionRange": 2500, "GuardRange": 1700, "FireRange": 1400, # debugging 'RSVD1': '', # regular 'RSVD2': '-InitAct=ActionSet2::Idle;AsFarAsPossible', # agent hp 'AgentHp':np.random.randint(low=90,high=110), # the rank of agent inside the team 'IndexInTeam': tid, # the unique identity of this agent in simulation system 'UID': uid, # show color 'Color':'(R=0,G=1,B=0,A=1)' if team==0 else '(R=0,G=0,B=1,A=1)', # initial location 'InitLocation': { 'x': x, 'y': y, 'z': z, }, # initial facing direction et.al. 'InitRotator': { 'pitch': 0, 'roll': 0, 'yaw': yaw, }, }), return agent_property def init_ground_tank(self, agent_info, pos_ro): N_COL = 2 agent_class = agent_info['type'] team = agent_info['team'] n_team_agent = 10 tid = agent_info['tid'] uid = agent_info['uid'] x = 0 + 800*(tid - n_team_agent//2) //N_COL y = (400* (tid%N_COL) + 2000) * (-1)**(team+1) x,y = np.matmul(np.array([x,y]), np.array([[np.cos(pos_ro), -np.sin(pos_ro)], [np.sin(pos_ro), np.cos(pos_ro)] ])) z = 500 # 500 is slightly above the ground yaw = 90 if team==0 else -90 assert np.abs(x) < 15000.0 and np.abs(y) < 15000.0 agent_property = copy.deepcopy(AgentPropertyDefaults) agent_property.update({ 'DebugAgent': False, # max drive/fly speed 'MaxMoveSpeed': 400, # also influence object mass, please change it with causion! 
'AgentScale' : { 'x': 0.75, 'y': 0.75, 'z': 0.75, }, # probability of escaping dmg 闪避 "DodgeProb": 0.0, # ms explode dmg "ExplodeDmg": 75, # team belonging 'AgentTeam': team, # choose ue class to init 'ClassName': agent_class, # Weapon CD 'WeaponCD': 1, # open fire range "PerceptionRange": 2000, "GuardRange": 1400, "FireRange": 750 , # debugging 'RSVD1': '', # regular 'RSVD2': '-InitAct=ActionSet2::Idle;AsFarAsPossible', # agent hp 'AgentHp':np.random.randint(low=180,high=220), # the rank of agent inside the team 'IndexInTeam': tid, # the unique identity of this agent in simulation system 'UID': uid, # show color 'Color':'(R=0,G=1,B=0,A=1)' if team==0 else '(R=0,G=0,B=1,A=1)', # initial location 'InitLocation': { 'x': x, 'y': y, 'z': z, }, # initial facing direction et.al. 'InitRotator': { 'pitch': 0, 'roll': 0, 'yaw': yaw, }, }), return agent_property def init_air(self, agent_info, pos_ro): N_COL = 2 agent_class = agent_info['type'] team = agent_info['team'] n_team_agent = 10 tid = agent_info['tid'] uid = agent_info['uid'] x = 0 + 800*(tid - n_team_agent//2) //N_COL y = 2000 * (-1)**(team+1) x,y = np.matmul(np.array([x,y]), np.array([[np.cos(pos_ro), -np.sin(pos_ro)], [np.sin(pos_ro), np.cos(pos_ro)] ])) z = 1000 yaw = 90 if team==0 else -90 assert np.abs(x) < 15000.0 and np.abs(y) < 15000.0 agent_property = copy.deepcopy(AgentPropertyDefaults) agent_property.update({ 'DebugAgent': False, # max drive/fly speed 'MaxMoveSpeed': 900, # also influence object mass, please change it with causion! 
'AgentScale' : { 'x': 0.75, 'y': 0.75, 'z': 0.75, }, # probability of escaping dmg 闪避 "DodgeProb": 0.0, # ms explode dmg "ExplodeDmg": 10, # team belonging 'AgentTeam': team, # choose ue class to init 'ClassName': agent_class, # Weapon CD 'WeaponCD': 3, # open fire range "PerceptionRange": 2500, "GuardRange": 1800, "FireRange": 1700, # debugging 'RSVD1': '-ring1=2500 -ring2=1800 -ring3=1700', # regular 'RSVD2': '-InitAct=ActionSet2::Idle;StaticAlert', # agent hp 'AgentHp':np.random.randint(low=40,high=60), # the rank of agent inside the team 'IndexInTeam': tid, # the unique identity of this agent in simulation system 'UID': uid, # show color 'Color':'(R=0,G=1,B=0,A=1)' if team==0 else '(R=0,G=0,B=1,A=1)', # initial location 'InitLocation': { 'x': x, 'y': y, 'z': z, }, # initial facing direction et.al. 'InitRotator': { 'pitch': 0, 'roll': 0, 'yaw': yaw, }, }), return agent_property ================================================ FILE: PythonExample/hmp_minimal_modules/MISSION/uhmap/SubTasks/UhmapAdversialConf.py ================================================ class SubTaskConfig(): agent_list = [ { "team": 0, "type": "RLA_UAV_Support", "init_fn_name": "init_air" }, { "team": 0, "type": "RLA_CAR", "init_fn_name": "init_ground"}, { "team": 0, "type": "RLA_CAR_Laser", "init_fn_name": "init_ground_tank"}, { "team": 0, "type": "RLA_CAR", "init_fn_name": "init_ground"}, { "team": 0, "type": "RLA_CAR_Laser", "init_fn_name": "init_ground_tank"}, { "team": 0, "type": "RLA_CAR", "init_fn_name": "init_ground"}, { "team": 0, "type": "RLA_CAR_Laser", "init_fn_name": "init_ground_tank"}, { "team": 0, "type": "RLA_CAR", "init_fn_name": "init_ground"}, { "team": 0, "type": "RLA_CAR_Laser", "init_fn_name": "init_ground_tank"}, { "team": 0, "type": "RLA_UAV_Support", "init_fn_name": "init_air" }, { "team": 1, "type": "RLA_UAV_Support", "init_fn_name": "init_air" }, { "team": 1, "type": "RLA_CAR", "init_fn_name": "init_ground"}, { "team": 1, "type": "RLA_CAR_Laser", "init_fn_name": 
"init_ground_tank"}, { "team": 1, "type": "RLA_CAR", "init_fn_name": "init_ground"}, { "team": 1, "type": "RLA_CAR_Laser", "init_fn_name": "init_ground_tank"}, { "team": 1, "type": "RLA_CAR", "init_fn_name": "init_ground"}, { "team": 1, "type": "RLA_CAR_Laser", "init_fn_name": "init_ground_tank"}, { "team": 1, "type": "RLA_CAR", "init_fn_name": "init_ground"}, { "team": 1, "type": "RLA_CAR_Laser", "init_fn_name": "init_ground_tank"}, { "team": 1, "type": "RLA_UAV_Support", "init_fn_name": "init_air" } ] obs_vec_length = 23 obs_n_entity = 11 ActionFormat = 'Multi-Digit' ================================================ FILE: PythonExample/hmp_minimal_modules/MISSION/uhmap/SubTasks/UhmapAttackPost.py ================================================ import json, copy, re, os, inspect, os import numpy as np from UTIL.tensor_ops import my_view, repeat_at from ...common.base_env import RawObsArray from ..actionset_v3 import digitsToStrAction from ..agent import Agent from ..uhmap_env_wrapper import UhmapEnv, ScenarioConfig from .UhmapAttackPostConf import SubTaskConfig from .cython_func import tear_num_arr def init_position_helper(x_max, x_min, y_max, y_min, total, this): n_col = np.ceil(np.sqrt(np.abs(x_max-x_min) * total / np.abs(y_max-y_min))) n_row = np.ceil(total / n_col) which_row = this // n_col which_col = this % n_col x = x_min + (which_col/n_col)*(x_max-x_min) y = y_min + (which_row/n_row)*(y_max-y_min) return x, y class UhmapAttackPost(UhmapEnv): def __init__(self, rank) -> None: super().__init__(rank) self.observation_space = self.make_obs(get_shape=True) self.SubTaskConfig = SubTaskConfig inspect.getfile(SubTaskConfig) assert os.path.basename(inspect.getfile(SubTaskConfig)) == type(self).__name__+'Conf.py', \ ('make sure you have imported the correct SubTaskConfig class') def reset(self): """ Reset function, it delivers reset command to unreal engine to spawn all agents 环境复位,每个episode的开始会执行一次此函数中会初始化所有智能体 """ super().reset() self.t = 0 pos_ro = 
np.random.rand()*2*np.pi # spawn agents AgentSettingArray = [] # count the number of agent in each team n_team_agent = {} for i, agent_info in enumerate(SubTaskConfig.agent_list): team = agent_info['team'] if team not in n_team_agent: n_team_agent[team] = 0 SubTaskConfig.agent_list[i]['uid'] = i SubTaskConfig.agent_list[i]['tid'] = n_team_agent[team] n_team_agent[team] += 1 # push agent init info one by one for i, agent_info in enumerate(SubTaskConfig.agent_list): team = agent_info['team'] agent_info['n_team_agent'] = n_team_agent[team] init_fn = getattr(self, agent_info['init_fn_name']) AgentSettingArray.append(init_fn(agent_info, pos_ro)) self.agents = [Agent(team=a['team'], team_id=a['tid'], uid=a['uid']) for a in SubTaskConfig.agent_list] # refer to struct.cpp, FParsedDataInput resp = self.client.send_and_wait_reply(json.dumps({ 'valid': True, 'DataCmd': 'reset', 'NumAgents' : len(SubTaskConfig.agent_list), 'AgentSettingArray': AgentSettingArray, # refer to struct.cpp, FAgentProperty 'TimeStepMax': ScenarioConfig.MaxEpisodeStep, 'TimeStep' : 0, 'Actions': None, })) resp = json.loads(resp) # make sure the map (level in UE) is correct # assert resp['dataGlobal']['levelName'] == 'UhmapLargeScale' assert len(resp['dataArr']) == len(AgentSettingArray) return self.parse_response_ob_info(resp) def step(self, act): """ step 函数,act中包含了所有agent的决策 """ assert len(act) == self.n_agents # translate actions to the format recognized by unreal engine if ScenarioConfig.ActionFormat == 'Single-Digit': act_send = [digit2act_dictionary[a] for a in act] elif ScenarioConfig.ActionFormat == 'Multi-Digit': act_send = [decode_action_as_string(a) for a in act] elif ScenarioConfig.ActionFormat == 'ASCII': act_send = [digitsToStrAction(a) for a in act] else: raise "ActionFormat is wrong!" 
# simulation engine IO resp = json.loads(self.client.send_and_wait_reply(json.dumps({ 'valid': True, 'DataCmd': 'step', 'TimeStep': self.t, 'Actions': None, 'StringActions': act_send, }))) # get obs for RL, info for script AI ob, info = self.parse_response_ob_info(resp) # generate reward, get the episode ending infomation RewardForAllTeams, WinningResult = self.gen_reward_and_win(resp) if WinningResult is not None: info.update(WinningResult) assert resp['dataGlobal']['episodeDone'] done = True else: done = False if resp['dataGlobal']['timeCnt'] >= ScenarioConfig.MaxEpisodeStep: assert done return (ob, RewardForAllTeams, done, info) # choose this if RewardAsUnity def parse_event(self, event): """ 解析环境返回的一些关键事件, 如智能体阵亡,某队伍胜利等等。 关键事件需要在ue中进行定义. 该设计极大地简化了python端奖励的设计流程, 减小了python端的运算量。 """ if not hasattr(self, 'pattern'): self.pattern = re.compile(r'<([^<>]*)>([^<>]*)') return {k:v for k,v in re.findall(self.pattern, event)} def extract_key_gameobj(self, resp): """ 获取非智能体的仿真物件,例如重要landmark等 """ keyObjArr = resp['dataGlobal']['keyObjArr'] return keyObjArr def gen_reward_and_win(self, resp): """ 奖励的设计在此定义, (UE端编程死板,虽然预留了相关字段, 但请不要在UE端提供奖励的定义。) 建议:在UE端定义触发奖励的事件,如智能体阵亡、战术目标完成等,见parse_event """ reward = [0]*self.n_teams events = resp['dataGlobal']['events'] WinningResult = None for event in events: event_parsed = self.parse_event(event) # if event_parsed['Event'] == 'Destroyed': # team = self.find_agent_by_uid(event_parsed['UID']).team # reward[team] -= 0.05 # this team # reward[1-team] += 0.10 # opp team if event_parsed['Event'] == 'EndEpisode': # print([a.alive * a.hp for a in self.agents]) PredatorWin = False PredatorRank = False PredatorReward = 0 PreyWin = -1 PreyRank = -1 PreyReward = 0 EndReason = event_parsed['EndReason'] # According to MISSION\uhmap\SubTasks\UhmapAttackPostConf.py, team 0 is prey team, team 1 is predator team if EndReason == "AllPreyCaught" or EndReason == "Team_0_AllDead": PredatorWin = True; PredatorRank = 0; PredatorReward = 1 PreyWin = False; 
PreyRank = 1; PreyReward = -1 elif EndReason == "TimeMaxCntReached" or EndReason == "Team_1_AllDead": PredatorWin = False; PredatorRank = 1; PredatorReward = -1 PreyWin = True; PreyRank = 0; PreyReward = 1 else: print('unexpected end reaon:', EndReason) WinningResult = {"team_ranking": [PreyRank, PredatorRank], "end_reason": EndReason} reward = [PreyReward, PredatorReward] # print(reward) return reward, WinningResult def step_skip(self): """ 跳过一次决策,无用的函数 """ return self.client.send_and_wait_reply(json.dumps({ 'valid': True, 'DataCmd': 'skip_frame', })) def find_agent_by_uid(self, uid): """ 用uid查找智能体(带缓存加速机制) """ if not hasattr(self, 'uid_to_agent_dict'): self.uid_to_agent_dict = {} self.uid_to_agent_dict.update({agent.uid:agent for agent in self.agents}) if isinstance(uid, str): self.uid_to_agent_dict.update({str(agent.uid):agent for agent in self.agents}) return self.uid_to_agent_dict[uid] def parse_response_ob_info(self, resp): """ 粗解析智能体的观测,例如把死智能体的位置替换为inf(无穷远), 将智能体的agentLocation从字典形式转变为更简洁的(x,y,z)tuple形式 """ assert resp['valid'] resp['dataGlobal']['distanceMat'] = np.array(resp['dataGlobal']['distanceMat']['flat_arr']).reshape(self.n_agents,self.n_agents) if len(resp['dataGlobal']['events'])>0: tmp = [kv.split('>') for kv in resp['dataGlobal']['events'][0].split('<') if kv] info_parse = {t[0]:t[1] for t in tmp} info_dict = resp for info in info_dict['dataArr']: alive = info['agentAlive'] if alive: agentLocation = info.pop('agentLocation') agentRotation = info.pop('agentRotation') agentVelocity = info.pop('agentVelocity') agentScale = info.pop('agentScale') info['agentLocationArr'] = (agentLocation['x'], agentLocation['y'], agentLocation['z']) info['agentVelocityArr'] = (agentVelocity['x'], agentVelocity['y'], agentVelocity['z']) info['agentRotationArr'] = (agentRotation['yaw'], agentRotation['pitch'], agentRotation['roll']) info['agentScaleArr'] = (agentScale['x'], agentScale['y'], agentScale['z']) info.pop('previousAction') info.pop('availActions') # 
info.pop('rSVD1') info.pop('interaction') else: inf = float('inf') info['agentLocationArr'] = (inf, inf, inf) info['agentVelocityArr'] = (inf, inf, inf) info['agentRotationArr'] = (inf, inf, inf) info = resp['dataArr'] for i, agent_info in enumerate(info): self.agents[i].update_agent_attrs(agent_info) self.key_obj = self.extract_key_gameobj(resp) # return ob, info return self.make_obs(resp), info_dict @staticmethod def item_random_mv(src,dst,prob,rand=False): assert len(src.shape)==1; assert len(dst.shape)==1 if rand: np.random.shuffle(src) len_src = len(src) n_mv = (np.random.rand(len_src) < prob).sum() item_mv = src[range(len_src-n_mv,len_src)] src = src[range(0,0+len_src-n_mv)] dst = np.concatenate((item_mv, dst)) return src, dst @staticmethod def get_binary_array(n_int, n_bits=8, dtype=np.float32): arr = np.zeros((*n_int.shape, n_bits), dtype=dtype) for i in range(n_bits): arr[:, i] = (n_int%2==1).astype(int) n_int = n_int / 2 n_int = n_int.astype(np.int8) return arr def make_obs(self, resp=None, get_shape=False): # CORE_DIM = 38 CORE_DIM = 23 assert ScenarioConfig.obs_vec_length == CORE_DIM if get_shape: return CORE_DIM # temporary parameters OBS_RANGE_PYTHON_SIDE = 15000 MAX_NUM_OPP_OBS = 5 MAX_NUM_ALL_OBS = 5 # get and calculate distance array pos3d_arr = np.zeros(shape=(self.n_agents, 3), dtype=np.float32) for i, agent in enumerate(self.agents): pos3d_arr[i] = agent.pos3d # use the distance matrix calculated by unreal engine to accelerate # dis_mat = distance_matrix(pos3d_arr) # dis_mat is a matrix, shape = (n_agent, n_agent) dis_mat = resp['dataGlobal']['distanceMat'] alive_all = np.array([agent.alive for agent in self.agents]) try: dis_mat[~alive_all,:] = +np.inf dis_mat[:,~alive_all] = +np.inf except: pass # get team list team_belonging = np.array([agent.team for agent in self.agents]) # gather the obs arr of all known agents obs_arr = RawObsArray(key='Agent') if not hasattr(self, "uid_binary"): self.uid_binary = 
self.get_binary_array(np.arange(self.n_agents), 10) for i, agent in enumerate(self.agents): assert agent.location is not None assert agent.uid == i obs_arr.append( self.uid_binary[i] # 0~9 ) obs_arr.append([ agent.index, # 10 agent.team, # 11 agent.alive, # 12 agent.uid_remote, # 13 ]) obs_arr.append( #[14,15,16,17,18,19] agent.pos3d # tear_num_arr(agent.pos3d, n_digits=6, base=10, mv_left=0) # tear_num_arr(agent.pos3d, 6, ScenarioConfig.ObsBreakBase, 0) # 3 -- > 3*6 = 18 , 18-3=15, 23+15 = 38 ) obs_arr.append( agent.vel3d ) obs_arr.append([ agent.hp, agent.yaw, agent.max_speed, ]) obs_ = obs_arr.get() new_obs = my_view(obs_, [self.n_agents, -1]) assert CORE_DIM == new_obs.shape[-1] OBS_ALL_AGENTS = np.zeros(shape=(self.n_agents, MAX_NUM_OPP_OBS+MAX_NUM_ALL_OBS, CORE_DIM)) # now arranging the individual obs for i, agent in enumerate(self.agents): if not agent.alive: OBS_ALL_AGENTS[i, :] = np.nan continue # if alive # scope dis2all = dis_mat[i, :] is_ally = (team_belonging == agent.team) # scope a2h_dis = dis2all[~is_ally] h_alive = alive_all[~is_ally] h_feature = new_obs[~is_ally] h_iden_sort = np.argsort(a2h_dis)[:MAX_NUM_OPP_OBS] a2h_dis_sorted = a2h_dis[h_iden_sort] h_alive_sorted = h_alive[h_iden_sort] h_vis_mask = (a2h_dis_sorted <= OBS_RANGE_PYTHON_SIDE) & h_alive_sorted # scope h_vis_index = h_iden_sort[h_vis_mask] h_invis_index = h_iden_sort[~h_vis_mask] h_vis_index, h_invis_index = self.item_random_mv(src=h_vis_index, dst=h_invis_index,prob=0, rand=True) h_ind = np.concatenate((h_vis_index, h_invis_index)) h_msk = np.concatenate((h_vis_index<0, h_invis_index>=0)) # "<0" project to False; ">=0" project to True a2h_feature_sort = h_feature[h_ind] a2h_feature_sort[h_msk] = 0 if len(a2h_feature_sort) a2f_dis = dis2all[is_ally] f_alive = alive_all[is_ally] f_feature = new_obs[is_ally] f_iden_sort = np.argsort(a2f_dis)[:MAX_NUM_ALL_OBS] a2f_dis_sorted = a2f_dis[f_iden_sort] f_alive_sorted = f_alive[f_iden_sort] f_vis_mask = (a2f_dis_sorted <= 
OBS_RANGE_PYTHON_SIDE) & f_alive_sorted # scope f_vis_index = f_iden_sort[f_vis_mask] self_vis_index = f_vis_index[:1] # seperate self and ally f_vis_index = f_vis_index[1:] # seperate self and ally f_invis_index = f_iden_sort[~f_vis_mask] f_vis_index, f_invis_index = self.item_random_mv(src=f_vis_index, dst=f_invis_index,prob=0, rand=True) f_ind = np.concatenate((self_vis_index, f_vis_index, f_invis_index)) f_msk = np.concatenate((self_vis_index<0, f_vis_index<0, f_invis_index>=0)) # "<0" project to False; ">=0" project to True self_ally_feature_sort = f_feature[f_ind] self_ally_feature_sort[f_msk] = 0 if len(self_ally_feature_sort) None: super().__init__(rank) self.observation_space = self.make_obs(get_shape=True) self.SubTaskConfig = SubTaskConfig assert os.path.basename(inspect.getfile(SubTaskConfig)) == type(self).__name__+'Conf.py', \ ('make sure you have imported the correct SubTaskConfig class') def reset(self): super().reset() self.t = 0 AgentPropertyDefaults.update({ 'MaxMoveSpeed': 600, 'AgentScale' : { 'x': 1, 'y': 1, 'z': 1, }, # also influence object mass, please change it with causion! "DodgeProb": 0.0, # probability of escaping dmg 闪避概率, test ok "ExplodeDmg": 20, # ms explode dmg. 
test ok }) # 500 is slightly above the ground, # but agent will be spawn to ground automatically ####################### spawn all ########################### AgentSettingArray = [] agent_uid_cnt = 0 # "N_AGENT_EACH_TEAM": [10, 10], // update N_AGENT_EACH_TEAM for i in range(ScenarioConfig.N_AGENT_EACH_TEAM[0]-1): # For attacking, drones on the ground x = 3254.0 y = 3891.0 + i *100 z = 500 agent_property = copy.deepcopy(AgentPropertyDefaults) agent_property.update({ 'ClassName': 'RLA_CAR', # FString ClassName = ""; 'AgentTeam': 0, # int AgentTeam = 0; 'IndexInTeam': i, # int IndexInTeam = 0; 'UID': agent_uid_cnt, # int UID = 0; 'MaxMoveSpeed': 600, "ExplodeDmg": 10, "DodgeProb": 0.1, 'AgentHp': 100, "WeaponCD": 1, 'Color':'(R=0,G=1,B=0,A=1)', 'InitLocation': { 'x': x, 'y': y, 'z': z, }, }) AgentSettingArray.append(agent_property); agent_uid_cnt += 1 x = 4000.0 y = 4000.0 z = 1000 agent_property = copy.deepcopy(AgentPropertyDefaults) agent_property.update({ 'ClassName': 'RLA_UAV_VIP', # FString ClassName = ""; 'AgentTeam': 0, # int AgentTeam = 0; 'IndexInTeam': agent_uid_cnt, # under most situations IndexInTeam=agent_uid_cnt for team 0 'UID': agent_uid_cnt, # int UID = 0; 'MaxMoveSpeed': 1000, "DodgeProb": 0.5, "ExplodeDmg": 10, 'AgentHp': 1, "WeaponCD": 10000000000, 'Color':'(R=0,G=1,B=0,A=1)', 'InitLocation': { 'x': x, 'y': y, 'z': z, }, }) AgentSettingArray.append(agent_property); agent_uid_cnt += 1 # "N_AGENT_EACH_TEAM": [10, 10], // update N_AGENT_EACH_TEAM for i in range(ScenarioConfig.N_AGENT_EACH_TEAM[1]): x = 0 + 500*(i+1) * (-1)**(i+1) y = 0 z = 500 agent_property = copy.deepcopy(AgentPropertyDefaults) agent_property.update({ 'ClassName': 'RLA_CAR_RED', 'AgentTeam': 1, 'IndexInTeam': i, 'UID': agent_uid_cnt, 'MaxMoveSpeed': 700, "DodgeProb": 0.1, 'AgentHp':100, "ExplodeDmg": 10, "WeaponCD": 0.5, 'Color':'(R=1,G=0,B=0,A=1)', 'InitLocation': { 'x': x, 'y': y, 'z': z, }, }) AgentSettingArray.append(agent_property); agent_uid_cnt += 1 # refer to struct.cpp, 
FParsedDataInput resp = self.client.send_and_wait_reply(json.dumps({ 'valid': True, 'DataCmd': 'reset', 'AgentSettingArray': AgentSettingArray, # refer to struct.cpp, FAgentProperty 'TimeStepMax': ScenarioConfig.MaxEpisodeStep, 'TimeStep' : 0, 'Actions': None, })) resp = json.loads(resp) # make sure the map (level in UE) is correct assert resp['dataGlobal']['levelName'] == 'UhmapBreakingBad' assert len(resp['dataArr']) == len(AgentSettingArray) return self.parse_response_ob_info(resp) def step(self, act): assert len(act) == self.n_agents # translate actions to the format recognized by unreal engine if ScenarioConfig.ActionFormat == 'Single-Digit': act_send = [digit2act_dictionary[a] for a in act] elif ScenarioConfig.ActionFormat == 'Multi-Digit': act_send = [decode_action_as_string(a) for a in act] # simulation engine IO resp = json.loads(self.client.send_and_wait_reply(json.dumps({ 'valid': True, 'DataCmd': 'step', 'TimeStep': self.t, 'Actions': None, 'StringActions': act_send, }))) # get obs for RL, info for script AI ob, info = self.parse_response_ob_info(resp) # generate reward, get the episode ending infomation RewardForAllTeams, WinningResult = self.gen_reward_and_win(resp) if WinningResult is not None: info.update(WinningResult) assert resp['dataGlobal']['episodeDone'] done = True else: done = False if resp['dataGlobal']['timeCnt'] >= ScenarioConfig.MaxEpisodeStep: assert done return (ob, RewardForAllTeams, done, info) # choose this if RewardAsUnity def parse_event(self, event): if not hasattr(self, 'pattern'): self.pattern = re.compile(r'<([^<>]*)>([^<>]*)') return {k:v for k,v in re.findall(self.pattern, event)} def extract_key_gameobj(self, resp): keyObjArr = resp['dataGlobal']['keyObjArr'] return keyObjArr def gen_reward_and_win(self, resp): WIN_OR_LOSE_REWARD = 5 DRAW_REWARD = 2.5 KILL_REWARD = 0.1 BE_KILLED_REWARD = 0 reward = np.array([0.0]*self.n_teams,dtype=float) events = resp['dataGlobal']['events'] WinningResult = None # reward according to 
distance to either of the landmarks landmarks_pos3darr = np.array([[ lm['location']['x'], lm['location']['y'], lm['location']['z'] ] for lm in resp['dataGlobal']['keyObjArr']]) agent_pos3darr = np.array([agent.pos3d for agent in self.agents]) res = distance_mat_between(agent_pos3darr, landmarks_pos3darr) penalty = -np.min(res, axis = -1) / 100000 reward += np.array([sum(penalty[ ScenarioConfig.AGENT_ID_EACH_TEAM[i] ]) for i in range(self.n_teams)]) # reward according to event (including win or lose event) for event in events: event_parsed = self.parse_event(event) if event_parsed['Event'] == 'Destroyed': team = self.find_agent_by_uid(event_parsed['UID']).team reward[team] -= BE_KILLED_REWARD # this team reward[1-team] += KILL_REWARD # opp team if event_parsed['Event'] == 'EndEpisode': # print([a.alive * a.hp for a in self.agents]) EndReason = event_parsed['EndReason'] WinTeam = int(event_parsed['WinTeam']) if WinTeam<0: # end due to timeout WinTeam = 1 if WinTeam >= 0: WinningResult = { "team_ranking": [0,1] if WinTeam==0 else [1,0], "end_reason": EndReason } reward[WinTeam] += WIN_OR_LOSE_REWARD reward[1-WinTeam] -= WIN_OR_LOSE_REWARD else: WinningResult = { "team_ranking": [-1, -1], "end_reason": EndReason } reward = [-DRAW_REWARD for _ in range(self.n_teams)] # print(reward) return reward, WinningResult def step_skip(self): return self.client.send_and_wait_reply(json.dumps({ 'valid': True, 'DataCmd': 'skip_frame', })) def find_agent_by_uid(self, uid): if not hasattr(self, 'uid_to_agent_dict'): self.uid_to_agent_dict = {} self.uid_to_agent_dict.update({agent.uid:agent for agent in self.agents}) if isinstance(uid, str): self.uid_to_agent_dict.update({str(agent.uid):agent for agent in self.agents}) return self.uid_to_agent_dict[uid] def parse_response_ob_info(self, resp): assert resp['valid'] if len(resp['dataGlobal']['events'])>0: tmp = [kv.split('>') for kv in resp['dataGlobal']['events'][0].split('<') if kv] info_parse = {t[0]:t[1] for t in tmp} # print('pass') 
info_dict = resp info = resp['dataArr'] for i, agent_info in enumerate(info): self.agents[i].update_agent_attrs(agent_info) self.key_obj = self.extract_key_gameobj(resp) # return ob, info return self.make_obs(resp), info_dict @staticmethod def item_random_mv(src,dst,prob,rand=False): assert len(src.shape)==1; assert len(dst.shape)==1 if rand: np.random.shuffle(src) len_src = len(src) n_mv = (np.random.rand(len_src) < prob).sum() item_mv = src[range(len_src-n_mv,len_src)] src = src[range(0,0+len_src-n_mv)] dst = np.concatenate((item_mv, dst)) return src, dst @staticmethod def get_binary_array(n_int, n_bits=8, dtype=np.float32): arr = np.zeros((*n_int.shape, n_bits), dtype=dtype) pointer = 0 for i in range(n_bits): arr[:, i] = (n_int%2==1).astype(int) n_int = n_int / 2 n_int = n_int.astype(np.int8) return arr def make_obs(self, resp=None, get_shape=False): CORE_DIM = 23 assert ScenarioConfig.obs_vec_length == CORE_DIM if get_shape: return CORE_DIM # temporary parameters OBS_RANGE_PYTHON_SIDE = 1500 MAX_NUM_OPP_OBS = 5 MAX_NUM_ALL_OBS = 4 # get and calculate distance array pos3d_arr = np.zeros(shape=(self.n_agents, 3), dtype=np.float32) for i, agent in enumerate(self.agents): pos3d_arr[i] = agent.pos3d # use the distance matrix calculated by unreal engine to accelerate # dis_mat = distance_matrix(pos3d_arr) # dis_mat is a matrix, shape = (n_agent, n_agent) dis_mat = np.array(resp['dataGlobal']['distanceMat']['flat_arr']).reshape(self.n_agents,self.n_agents) alive_all = np.array([agent.alive for agent in self.agents]) try: dis_mat[~alive_all,:] = +np.inf dis_mat[:,~alive_all] = +np.inf except: pass # get team list team_belonging = np.array([agent.team for agent in self.agents]) # gather the obs arr of all known agents obs_arr = RawObsArray(key='Agent') if not hasattr(self, "uid_binary"): self.uid_binary = self.get_binary_array(np.arange(self.n_agents), 10) for i, agent in enumerate(self.agents): assert agent.location is not None assert agent.uid == i obs_arr.append( 
self.uid_binary[i] ) obs_arr.append([ agent.index, agent.team, agent.alive, agent.uid_remote, ]) obs_arr.append( agent.pos3d ) obs_arr.append( agent.vel3d ) obs_arr.append([ agent.hp, agent.yaw, agent.max_speed, ]) obs_ = obs_arr.get() new_obs = my_view(obs_, [self.n_agents, -1]) assert CORE_DIM == new_obs.shape[-1] OBS_ALL_AGENTS = np.zeros(shape=( self.n_agents, MAX_NUM_OPP_OBS+MAX_NUM_ALL_OBS, CORE_DIM )) # now arranging the individual obs for i, agent in enumerate(self.agents): if not agent.alive: OBS_ALL_AGENTS[i, :] = np.nan continue # if alive # scope dis2all = dis_mat[i, :] is_ally = (team_belonging == agent.team) # scope a2h_dis = dis2all[~is_ally] h_alive = alive_all[~is_ally] h_feature = new_obs[~is_ally] h_iden_sort = np.argsort(a2h_dis)[:MAX_NUM_OPP_OBS] a2h_dis_sorted = a2h_dis[h_iden_sort] h_alive_sorted = h_alive[h_iden_sort] h_vis_mask = (a2h_dis_sorted <= OBS_RANGE_PYTHON_SIDE) & h_alive_sorted # scope h_vis_index = h_iden_sort[h_vis_mask] h_invis_index = h_iden_sort[~h_vis_mask] h_vis_index, h_invis_index = self.item_random_mv(src=h_vis_index, dst=h_invis_index,prob=0, rand=True) h_ind = np.concatenate((h_vis_index, h_invis_index)) h_msk = np.concatenate((h_vis_index<0, h_invis_index>=0)) # "<0" project to False; ">=0" project to True a2h_feature_sort = h_feature[h_ind] a2h_feature_sort[h_msk] = 0 if len(a2h_feature_sort) a2f_dis = dis2all[is_ally] f_alive = alive_all[is_ally] f_feature = new_obs[is_ally] f_iden_sort = np.argsort(a2f_dis)[:MAX_NUM_ALL_OBS] a2f_dis_sorted = a2f_dis[f_iden_sort] f_alive_sorted = f_alive[f_iden_sort] f_vis_mask = (a2f_dis_sorted <= OBS_RANGE_PYTHON_SIDE) & f_alive_sorted # scope f_vis_index = f_iden_sort[f_vis_mask] self_vis_index = f_vis_index[:1] # seperate self and ally f_vis_index = f_vis_index[1:] # seperate self and ally f_invis_index = f_iden_sort[~f_vis_mask] f_vis_index, f_invis_index = self.item_random_mv(src=f_vis_index, dst=f_invis_index,prob=0, rand=True) f_ind = np.concatenate((self_vis_index, 
f_vis_index, f_invis_index)) f_msk = np.concatenate((self_vis_index<0, f_vis_index<0, f_invis_index>=0)) # "<0" project to False; ">=0" project to True self_ally_feature_sort = f_feature[f_ind] self_ally_feature_sort[f_msk] = 0 if len(self_ally_feature_sort) None: super().__init__(rank) self.observation_space = self.make_obs(get_shape=True) self.SubTaskConfig = SubTaskConfig assert os.path.basename(inspect.getfile(SubTaskConfig)) == type(self).__name__+'Conf.py', \ ('make sure you have imported the correct SubTaskConfig class') def extract_key_gameobj(self, resp): keyObjArr = resp['dataGlobal']['keyObjArr'] return keyObjArr def gen_reward_and_win(self, resp): reward = [0]*self.n_teams events = resp['dataGlobal']['events'] WinningResult = None for event in events: event_parsed = self.parse_event(event) if event_parsed['Event'] == 'Destroyed': team = self.find_agent_by_uid(event_parsed['UID']).team reward[team] -= 0.05 # this team reward[1-team] += 0.10 # opp team if event_parsed['Event'] == 'EndEpisode': # print([a.alive * a.hp for a in self.agents]) EndReason = event_parsed['EndReason'] WinTeam = int(event_parsed['WinTeam']) if WinTeam<0: # end due to timeout agents_left_each_team = [0 for _ in range(self.n_teams)] for a in self.agents: if a.alive: agents_left_each_team[a.team] += 1 WinTeam = np.argmax(agents_left_each_team) # <<1>> The alive agent number is EQUAL if agents_left_each_team[WinTeam] == agents_left_each_team[1-WinTeam]: hp_each_team = [0 for _ in range(self.n_teams)] for a in self.agents: if a.alive: hp_each_team[a.team] += a.hp WinTeam = np.argmax(hp_each_team) # <<2>> The alive agent HP sum is EQUAL if hp_each_team[WinTeam] == hp_each_team[1-WinTeam]: WinTeam = -1 if WinTeam >= 0: WinningResult = { "team_ranking": [0,1] if WinTeam==0 else [1,0], "end_reason": EndReason } reward[WinTeam] += 1 reward[1-WinTeam] -= 1 else: WinningResult = { "team_ranking": [-1, -1], "end_reason": EndReason } reward = [-1 for _ in range(self.n_teams)] # print(reward) 
return reward, WinningResult @staticmethod def item_random_mv(src,dst,prob,rand=False): assert len(src.shape)==1; assert len(dst.shape)==1 if rand: np.random.shuffle(src) len_src = len(src) n_mv = (np.random.rand(len_src) < prob).sum() item_mv = src[range(len_src-n_mv,len_src)] src = src[range(0,0+len_src-n_mv)] dst = np.concatenate((item_mv, dst)) return src, dst @staticmethod def get_binary_array(n_int, n_bits=8, dtype=np.float32): arr = np.zeros((*n_int.shape, n_bits), dtype=dtype) for i in range(n_bits): arr[:, i] = (n_int%2==1).astype(int) n_int = n_int / 2 n_int = n_int.astype(np.int8) return arr def make_obs(self, resp=None, get_shape=False): # CORE_DIM = 38 CORE_DIM = 23 assert ScenarioConfig.obs_vec_length == CORE_DIM if get_shape: return CORE_DIM # temporary parameters OBS_RANGE_PYTHON_SIDE = 1500 MAX_NUM_OPP_OBS = 5 MAX_NUM_ALL_OBS = 5 # get and calculate distance array pos3d_arr = np.zeros(shape=(self.n_agents, 3), dtype=np.float32) for i, agent in enumerate(self.agents): pos3d_arr[i] = agent.pos3d # use the distance matrix calculated by unreal engine to accelerate # dis_mat = distance_matrix(pos3d_arr) # dis_mat is a matrix, shape = (n_agent, n_agent) dis_mat = resp['dataGlobal']['distanceMat'] alive_all = np.array([agent.alive for agent in self.agents]) try: dis_mat[~alive_all,:] = +np.inf dis_mat[:,~alive_all] = +np.inf except: pass # get team list team_belonging = np.array([agent.team for agent in self.agents]) # gather the obs arr of all known agents obs_arr = RawObsArray(key='Agent') if not hasattr(self, "uid_binary"): self.uid_binary = self.get_binary_array(np.arange(self.n_agents), 10) for i, agent in enumerate(self.agents): assert agent.location is not None assert agent.uid == i obs_arr.append( self.uid_binary[i] # 0~9 ) obs_arr.append([ agent.index, # 10 agent.team, # 11 agent.alive, # 12 agent.uid_remote, # 13 ]) obs_arr.append( #[14,15,16,17,18,19] agent.pos3d # tear_num_arr(agent.pos3d, n_digits=6, base=10, mv_left=0) # 
tear_num_arr(agent.pos3d, 6, ScenarioConfig.ObsBreakBase, 0) # 3 -- > 3*6 = 18 , 18-3=15, 23+15 = 38 ) obs_arr.append( agent.vel3d ) obs_arr.append([ agent.hp, agent.yaw, agent.max_speed, ]) obs_ = obs_arr.get() new_obs = my_view(obs_, [self.n_agents, -1]) assert CORE_DIM == new_obs.shape[-1] OBS_ALL_AGENTS = np.zeros(shape=( self.n_agents, MAX_NUM_OPP_OBS+MAX_NUM_ALL_OBS, CORE_DIM )) # now arranging the individual obs for i, agent in enumerate(self.agents): if not agent.alive: OBS_ALL_AGENTS[i, :] = np.nan continue # if alive # scope dis2all = dis_mat[i, :] is_ally = (team_belonging == agent.team) # scope a2h_dis = dis2all[~is_ally] h_alive = alive_all[~is_ally] h_feature = new_obs[~is_ally] h_iden_sort = np.argsort(a2h_dis)[:MAX_NUM_OPP_OBS] a2h_dis_sorted = a2h_dis[h_iden_sort] h_alive_sorted = h_alive[h_iden_sort] h_vis_mask = (a2h_dis_sorted <= OBS_RANGE_PYTHON_SIDE) & h_alive_sorted # scope h_vis_index = h_iden_sort[h_vis_mask] h_invis_index = h_iden_sort[~h_vis_mask] h_vis_index, h_invis_index = self.item_random_mv(src=h_vis_index, dst=h_invis_index,prob=0, rand=True) h_ind = np.concatenate((h_vis_index, h_invis_index)) h_msk = np.concatenate((h_vis_index<0, h_invis_index>=0)) # "<0" project to False; ">=0" project to True a2h_feature_sort = h_feature[h_ind] a2h_feature_sort[h_msk] = 0 if len(a2h_feature_sort) a2f_dis = dis2all[is_ally] f_alive = alive_all[is_ally] f_feature = new_obs[is_ally] f_iden_sort = np.argsort(a2f_dis)[:MAX_NUM_ALL_OBS] a2f_dis_sorted = a2f_dis[f_iden_sort] f_alive_sorted = f_alive[f_iden_sort] f_vis_mask = (a2f_dis_sorted <= OBS_RANGE_PYTHON_SIDE) & f_alive_sorted # scope f_vis_index = f_iden_sort[f_vis_mask] self_vis_index = f_vis_index[:1] # seperate self and ally f_vis_index = f_vis_index[1:] # seperate self and ally f_invis_index = f_iden_sort[~f_vis_mask] f_vis_index, f_invis_index = self.item_random_mv(src=f_vis_index, dst=f_invis_index,prob=0, rand=True) f_ind = np.concatenate((self_vis_index, f_vis_index, f_invis_index)) 
f_msk = np.concatenate((self_vis_index<0, f_vis_index<0, f_invis_index>=0)) # "<0" project to False; ">=0" project to True self_ally_feature_sort = f_feature[f_ind] self_ally_feature_sort[f_msk] = 0 if len(self_ally_feature_sort) 0: OBJ_UID_OFFSET = 32768 obs_arr = RawObsArray(key = 'GameObj') for i, obj in enumerate(self.key_obj): assert obj['uId'] - OBJ_UID_OFFSET == i obs_arr.append( -self.uid_binary[i] # reverse uid binary, self.uid_binary[i] ) obs_arr.append([ obj['uId'] - OBJ_UID_OFFSET, #agent.index, -1, #agent.team, True, #agent.alive, obj['uId'] - OBJ_UID_OFFSET, #agent.uid_remote, ]) # tear_num_arr(agent.pos3d, n_digits=6, base=10, mv_left=0) obs_arr.append( [ obj['location']['x'], obj['location']['y'], obj['location']['z'] # agent.pos3d ] # tear_num_arr([ # obj['location']['x'], obj['location']['y'], obj['location']['z'] # agent.pos3d # ], 6, ScenarioConfig.ObsBreakBase, 0) ) obs_arr.append([ obj['velocity']['x'], obj['velocity']['y'], obj['velocity']['z'] # agent.vel3d ]+ [ -1, # hp obj['rotation']['yaw'], # yaw 0, # max_speed ]) OBS_GameObj = my_view(obs_arr.get(), [len(self.key_obj), -1]) OBS_GameObj = OBS_GameObj[:MAX_OBJ_NUM_ACCEPT, :] OBS_GameObj = repeat_at(OBS_GameObj, insert_dim=0, n_times=self.n_agents) OBS_ALL_AGENTS = np.concatenate((OBS_ALL_AGENTS, OBS_GameObj), axis=1) return OBS_ALL_AGENTS def init_drone(self, agent_info, pos_ro): N_COL = 2 agent_class = agent_info['type'] team = agent_info['team'] n_team_agent = 10 tid = agent_info['tid'] uid = agent_info['uid'] x = 0 + 800*(tid - n_team_agent//2) //N_COL y = (400* (tid%N_COL) + 2000) * (-1)**(team+1) x,y = np.matmul(np.array([x,y]), np.array([[np.cos(pos_ro), -np.sin(pos_ro)], [np.sin(pos_ro), np.cos(pos_ro)] ])) z = 500 # 500 is slightly above the ground yaw = 90 if team==0 else -90 assert np.abs(x) < 15000.0 and np.abs(y) < 15000.0 agent_property = copy.deepcopy(AgentPropertyDefaults) agent_property.update({ 'DebugAgent': False, # max drive/fly speed 'MaxMoveSpeed': 400, # also 
influence object mass, please change it with causion! 'AgentScale' : { 'x': 0.7, 'y': 0.7, 'z': 0.7, }, # probability of escaping dmg 闪避 "DodgeProb": 0.0, # ms explode dmg "ExplodeDmg": 75, # team belonging 'AgentTeam': team, # choose ue class to init 'ClassName': agent_class, # Weapon CD 'WeaponCD': 1, # open fire range "PerceptionRange": 2000, "GuardRange": 1400, "FireRange": 1300 , # debugging 'RSVD1': f'-CarrierName=T0-0 -NumDrone={self.n_team_agent[team]-1}' if team==0 else f'-CarrierName=T1-0 -NumDrone={self.n_team_agent[team]-1}', # regular 'RSVD2': '-InitAct=ActionSet2::Idle;StaticAlert', # agent hp 'AgentHp': 110, # the rank of agent inside the team 'IndexInTeam': tid, # the unique identity of this agent in simulation system 'UID': uid, # show color 'Color':'(R=0,G=1,B=0,A=1)' if team==0 else '(R=0,G=0,B=1,A=1)', # initial location 'InitLocation': { 'x': x, 'y': y, 'z': z, }, # initial facing direction et.al. 'InitRotator': { 'pitch': 0, 'roll': 0, 'yaw': yaw, }, }), return agent_property def init_carrier(self, agent_info, pos_ro): N_COL = 2 agent_class = agent_info['type'] team = agent_info['team'] n_team_agent = 10 tid = agent_info['tid'] uid = agent_info['uid'] x = 0 + 800*(tid - n_team_agent//2) //N_COL y = 2000 * (-1)**(team+1) x,y = np.matmul(np.array([x,y]), np.array([[np.cos(pos_ro), -np.sin(pos_ro)], [np.sin(pos_ro), np.cos(pos_ro)] ])) z = 1000 yaw = 90 if team==0 else -90 assert np.abs(x) < 15000.0 and np.abs(y) < 15000.0 agent_property = copy.deepcopy(AgentPropertyDefaults) agent_property.update({ 'DebugAgent': False, # max drive/fly speed 'MaxMoveSpeed': 900, # also influence object mass, please change it with causion! 
'AgentScale' : { 'x': 1.0, 'y': 1.0, 'z': 1.0, }, # probability of escaping dmg 闪避 "DodgeProb": 0.0, # ms explode dmg "ExplodeDmg": 100, # team belonging 'AgentTeam': team, # choose ue class to init 'ClassName': agent_class, # Weapon CD 'WeaponCD': 3, # open fire range "PerceptionRange": 5000, "GuardRange": 4800, "FireRange": 4800, # debugging 'RSVD1': '', # regular 'RSVD2': '-InitAct=ActionSet2::Idle;StaticAlert', # agent hp 'AgentHp': 500, # the rank of agent inside the team 'IndexInTeam': tid, # the unique identity of this agent in simulation system 'UID': uid, # show color 'Color':'(R=0,G=1,B=0,A=1)' if team==0 else '(R=0,G=0,B=1,A=1)', # initial location 'InitLocation': { 'x': x, 'y': y, 'z': z, }, # initial facing direction et.al. 'InitRotator': { 'pitch': 0, 'roll': 0, 'yaw': yaw, }, }), return agent_property ================================================ FILE: PythonExample/hmp_minimal_modules/MISSION/uhmap/SubTasks/UhmapCarrierConf.py ================================================ class SubTaskConfig(): agent_list = [ { "team": 0, "type": "Carrier", "init_fn_name": "init_carrier" }, { "team": 0, "type": "SmallDrone", "init_fn_name": "init_drone" }, { "team": 0, "type": "SmallDrone", "init_fn_name": "init_drone" }, { "team": 0, "type": "SmallDrone", "init_fn_name": "init_drone" }, { "team": 0, "type": "SmallDrone", "init_fn_name": "init_drone" }, { "team": 0, "type": "SmallDrone", "init_fn_name": "init_drone" }, { "team": 0, "type": "SmallDrone", "init_fn_name": "init_drone" }, { "team": 0, "type": "SmallDrone", "init_fn_name": "init_drone" }, { "team": 0, "type": "SmallDrone", "init_fn_name": "init_drone" }, { "team": 0, "type": "SmallDrone", "init_fn_name": "init_drone" }, { "team": 0, "type": "SmallDrone", "init_fn_name": "init_drone" }, { "team": 0, "type": "SmallDrone", "init_fn_name": "init_drone" }, { "team": 0, "type": "SmallDrone", "init_fn_name": "init_drone" }, { "team": 0, "type": "SmallDrone", "init_fn_name": "init_drone" }, { "team": 0, 
"type": "SmallDrone", "init_fn_name": "init_drone" }, { "team": 0, "type": "SmallDrone", "init_fn_name": "init_drone" }, { "team": 0, "type": "SmallDrone", "init_fn_name": "init_drone" }, { "team": 0, "type": "SmallDrone", "init_fn_name": "init_drone" }, { "team": 0, "type": "SmallDrone", "init_fn_name": "init_drone" }, { "team": 0, "type": "SmallDrone", "init_fn_name": "init_drone" }, { "team": 1, "type": "Carrier", "init_fn_name": "init_carrier" }, { "team": 1, "type": "SmallDrone", "init_fn_name": "init_drone" }, { "team": 1, "type": "SmallDrone", "init_fn_name": "init_drone" }, { "team": 1, "type": "SmallDrone", "init_fn_name": "init_drone" }, { "team": 1, "type": "SmallDrone", "init_fn_name": "init_drone" }, { "team": 1, "type": "SmallDrone", "init_fn_name": "init_drone" }, { "team": 1, "type": "SmallDrone", "init_fn_name": "init_drone" }, { "team": 1, "type": "SmallDrone", "init_fn_name": "init_drone" }, { "team": 1, "type": "SmallDrone", "init_fn_name": "init_drone" }, { "team": 1, "type": "SmallDrone", "init_fn_name": "init_drone" }, { "team": 1, "type": "SmallDrone", "init_fn_name": "init_drone" }, { "team": 1, "type": "SmallDrone", "init_fn_name": "init_drone" }, { "team": 1, "type": "SmallDrone", "init_fn_name": "init_drone" }, { "team": 1, "type": "SmallDrone", "init_fn_name": "init_drone" }, { "team": 1, "type": "SmallDrone", "init_fn_name": "init_drone" }, { "team": 1, "type": "SmallDrone", "init_fn_name": "init_drone" }, { "team": 1, "type": "SmallDrone", "init_fn_name": "init_drone" }, { "team": 1, "type": "SmallDrone", "init_fn_name": "init_drone" }, { "team": 1, "type": "SmallDrone", "init_fn_name": "init_drone" }, { "team": 1, "type": "SmallDrone", "init_fn_name": "init_drone" }, ] obs_vec_length = 23 obs_n_entity = 11 ActionFormat = 'ASCII' ================================================ FILE: PythonExample/hmp_minimal_modules/MISSION/uhmap/SubTasks/UhmapEscape.py ================================================ import json, copy, re, os, 
inspect, os
import numpy as np
from UTIL.tensor_ops import my_view, repeat_at
from ...common.base_env import RawObsArray
from ..actionset_v3 import digitsToStrAction
from ..agent import Agent
from ..uhmap_env_wrapper import UhmapEnv, ScenarioConfig
from .UhmapEscapeConf import SubTaskConfig
from .cython_func import tear_num_arr


def init_position_helper(x_max, x_min, y_max, y_min, total, this):
    """Lay `total` items on a grid filling the rectangle
    [x_min, x_max] x [y_min, y_max]; return the (x, y) coordinate of item
    number `this` (0-based)."""
    # pick a column count whose aspect ratio matches the rectangle's
    n_col = np.ceil(np.sqrt(np.abs(x_max-x_min) * total / np.abs(y_max-y_min)))
    n_row = np.ceil(total / n_col)
    which_row = this // n_col
    which_col = this % n_col
    x = x_min + (which_col/n_col)*(x_max-x_min)
    y = y_min + (which_row/n_row)*(y_max-y_min)
    return x, y


class UhmapEscape(UhmapEnv):
    def __init__(self, rank) -> None:
        super().__init__(rank)
        self.observation_space = self.make_obs(get_shape=True)
        self.SubTaskConfig = SubTaskConfig
        inspect.getfile(SubTaskConfig)  # NOTE(review): result unused — looks redundant with the assert below
        # the imported config module's filename must be '<ThisClassName>Conf.py'
        assert os.path.basename(inspect.getfile(SubTaskConfig)) == type(self).__name__+'Conf.py', \
            ('make sure you have imported the correct SubTaskConfig class')

    def reset(self):
        """
        Reset function, it delivers reset command to unreal engine to spawn all agents.
        Runs once at the start of every episode and (re)initializes every agent.
        """
        super().reset()
        self.t = 0
        pos_ro = np.random.rand()*2*np.pi  # random rotation applied to all spawn positions
        # spawn agents
        AgentSettingArray = []
        # count the number of agent in each team
        n_team_agent = {}
        for i, agent_info in enumerate(SubTaskConfig.agent_list):
            team = agent_info['team']
            if team not in n_team_agent: n_team_agent[team] = 0
            # assign a global uid and a within-team tid to each agent entry
            SubTaskConfig.agent_list[i]['uid'] = i
            SubTaskConfig.agent_list[i]['tid'] = n_team_agent[team]
            n_team_agent[team] += 1
        # push agent init info one by one
        for i, agent_info in enumerate(SubTaskConfig.agent_list):
            team = agent_info['team']
            agent_info['n_team_agent'] = n_team_agent[team]
            # dispatch to the per-type init function named in the config
            init_fn = getattr(self, agent_info['init_fn_name'])
            AgentSettingArray.append(init_fn(agent_info, pos_ro))
        self.agents = [Agent(team=a['team'], team_id=a['tid'], uid=a['uid']) for a in SubTaskConfig.agent_list]
        # refer to struct.cpp, FParsedDataInput
        resp = 
self.client.send_and_wait_reply(json.dumps({ 'valid': True, 'DataCmd': 'reset', 'NumAgents' : len(SubTaskConfig.agent_list), 'AgentSettingArray': AgentSettingArray, # refer to struct.cpp, FAgentProperty 'TimeStepMax': ScenarioConfig.MaxEpisodeStep, 'TimeStep' : 0, 'Actions': None, })) resp = json.loads(resp) # make sure the map (level in UE) is correct # assert resp['dataGlobal']['levelName'] == 'UhmapLargeScale' assert len(resp['dataArr']) == len(AgentSettingArray) return self.parse_response_ob_info(resp) def step(self, act): """ step 函数,act中包含了所有agent的决策 """ assert len(act) == self.n_agents # translate actions to the format recognized by unreal engine if ScenarioConfig.ActionFormat == 'Single-Digit': act_send = [digit2act_dictionary[a] for a in act] elif ScenarioConfig.ActionFormat == 'Multi-Digit': act_send = [decode_action_as_string(a) for a in act] elif ScenarioConfig.ActionFormat == 'ASCII': act_send = [digitsToStrAction(a) for a in act] else: raise "ActionFormat is wrong!" # simulation engine IO resp = json.loads(self.client.send_and_wait_reply(json.dumps({ 'valid': True, 'DataCmd': 'step', 'TimeStep': self.t, 'Actions': None, 'StringActions': act_send, }))) # get obs for RL, info for script AI ob, info = self.parse_response_ob_info(resp) # generate reward, get the episode ending infomation RewardForAllTeams, WinningResult = self.gen_reward_and_win(resp) if WinningResult is not None: info.update(WinningResult) assert resp['dataGlobal']['episodeDone'] done = True else: done = False if resp['dataGlobal']['timeCnt'] >= ScenarioConfig.MaxEpisodeStep: assert done return (ob, RewardForAllTeams, done, info) # choose this if RewardAsUnity def parse_event(self, event): """ 解析环境返回的一些关键事件, 如智能体阵亡,某队伍胜利等等。 关键事件需要在ue中进行定义. 
该设计极大地简化了python端奖励的设计流程, 减小了python端的运算量。 """ if not hasattr(self, 'pattern'): self.pattern = re.compile(r'<([^<>]*)>([^<>]*)') return {k:v for k,v in re.findall(self.pattern, event)} def extract_key_gameobj(self, resp): """ 获取非智能体的仿真物件,例如重要landmark等 """ keyObjArr = resp['dataGlobal']['keyObjArr'] return keyObjArr def gen_reward_and_win(self, resp): """ 奖励的设计在此定义, (UE端编程死板,虽然预留了相关字段, 但请不要在UE端提供奖励的定义。) 建议:在UE端定义触发奖励的事件,如智能体阵亡、战术目标完成等,见parse_event """ reward = [0]*self.n_teams events = resp['dataGlobal']['events'] WinningResult = None for event in events: event_parsed = self.parse_event(event) if event_parsed['Event'] == 'Destroyed': team = self.find_agent_by_uid(event_parsed['UID']).team reward[team] -= 0.10 # this team if event_parsed['Event'] == 'EndEpisode': # print([a.alive * a.hp for a in self.agents]) DefenderWin = False DefenderRank = False DefenderReward = 0 AttackerWin = -1 AttackerRank = -1 AttackerReward = 0 EndReason = event_parsed['EndReason'] # print(EndReason) # According to MISSION\uhmap\SubTasks\UhmapAttackPostConf.py, team 0 is Attacker team, team 1 is Defender team if EndReason == "Team_0_AllDead": DefenderWin = True; DefenderRank = 0; DefenderReward = 1 AttackerWin = False; AttackerRank = 1; AttackerReward = -1 elif EndReason == "TimeMaxCntReached": DefenderWin = True; DefenderRank = 0; DefenderReward = 1 AttackerWin = False; AttackerRank = 1; AttackerReward = -1 elif EndReason == "Team_1_AllDead": DefenderWin = False; DefenderRank = 1; DefenderReward = -1 AttackerWin = True; AttackerRank = 0; AttackerReward = 1 else: print('unexpected end reaon:', EndReason) WinningResult = {"team_ranking": [AttackerRank, DefenderRank], "end_reason": EndReason} reward = [AttackerReward, DefenderReward] # print(reward) return reward, WinningResult def step_skip(self): """ 跳过一次决策,无用的函数 """ return self.client.send_and_wait_reply(json.dumps({ 'valid': True, 'DataCmd': 'skip_frame', })) def find_agent_by_uid(self, uid): """ 用uid查找智能体(带缓存加速机制) """ if not hasattr(self, 
'uid_to_agent_dict'):
            # first call: build the uid -> agent cache
            # NOTE(review): indentation reconstructed from a collapsed source — confirm which
            # of these updates belong inside the hasattr guard
            self.uid_to_agent_dict = {}
            self.uid_to_agent_dict.update({agent.uid:agent for agent in self.agents})
        if isinstance(uid, str):
            # also index by the string form of uid so string lookups succeed
            self.uid_to_agent_dict.update({str(agent.uid):agent for agent in self.agents})
        return self.uid_to_agent_dict[uid]

    def parse_response_ob_info(self, resp):
        """
        Coarsely parse the agent observations: e.g. replace the position of
        dead agents with inf (infinitely far), and convert each agent's
        agentLocation from dict form into the more compact (x, y, z) tuple form.
        """
        assert resp['valid']
        # flat distance array from UE -> (n_agents, n_agents) matrix
        resp['dataGlobal']['distanceMat'] = np.array(resp['dataGlobal']['distanceMat']['flat_arr']).reshape(self.n_agents,self.n_agents)
        if len(resp['dataGlobal']['events'])>0:
            tmp = [kv.split('>') for kv in resp['dataGlobal']['events'][0].split('<') if kv]
            info_parse = {t[0]:t[1] for t in tmp}  # NOTE(review): parsed but never used here
        info_dict = resp
        for info in info_dict['dataArr']:
            alive = info['agentAlive']
            if alive:
                # flatten the dict-valued fields into plain (x, y, z)-style tuples
                agentLocation = info.pop('agentLocation')
                agentRotation = info.pop('agentRotation')
                agentVelocity = info.pop('agentVelocity')
                agentScale = info.pop('agentScale')
                info['agentLocationArr'] = (agentLocation['x'], agentLocation['y'], agentLocation['z'])
                info['agentVelocityArr'] = (agentVelocity['x'], agentVelocity['y'], agentVelocity['z'])
                info['agentRotationArr'] = (agentRotation['yaw'], agentRotation['pitch'], agentRotation['roll'])
                info['agentScaleArr'] = (agentScale['x'], agentScale['y'], agentScale['z'])
                # drop fields the Python side does not consume
                info.pop('previousAction')
                info.pop('availActions')
                # info.pop('rSVD1')
                info.pop('interaction')
            else:
                # dead agents are pushed to infinity
                inf = float('inf')
                info['agentLocationArr'] = (inf, inf, inf)
                info['agentVelocityArr'] = (inf, inf, inf)
                info['agentRotationArr'] = (inf, inf, inf)
        info = resp['dataArr']
        for i, agent_info in enumerate(info):
            self.agents[i].update_agent_attrs(agent_info)
        self.key_obj = self.extract_key_gameobj(resp)
        # return ob, info
        return self.make_obs(resp), info_dict

    @staticmethod
    def item_random_mv(src,dst,prob,rand=False):
        # Move each element of 1-D `src` into 1-D `dst` with probability `prob`
        # (a tail slice of src is cut off and prepended to dst); when rand=True,
        # src is shuffled first so the moved subset is random.
        assert len(src.shape)==1; assert len(dst.shape)==1
        if rand: np.random.shuffle(src)
        len_src = len(src)
        n_mv = (np.random.rand(len_src) < prob).sum()
        item_mv = src[range(len_src-n_mv,len_src)]
        src = src[range(0,0+len_src-n_mv)]
        dst = np.concatenate((item_mv, dst))
        return src, dst

    @staticmethod
    def get_binary_array(n_int, n_bits=8, dtype=np.float32):
        # Encode each integer of array `n_int` as an n_bits-long binary vector,
        # least-significant bit first.
        arr = np.zeros((*n_int.shape, n_bits), dtype=dtype)
        for i in range(n_bits):
            arr[:, i] = (n_int%2==1).astype(int)
            n_int = n_int / 2
            n_int = n_int.astype(np.int8)  # NOTE(review): int8 truncates values > 127 — confirm n_agents stays small
        return arr

    def make_obs(self, resp=None, get_shape=False):
        # Build the (n_agents, MAX_NUM_OPP_OBS+MAX_NUM_ALL_OBS, CORE_DIM)
        # observation tensor from the engine response `resp`;
        # with get_shape=True only the per-entity feature length is returned.
        # CORE_DIM = 38
        CORE_DIM = 23
        assert ScenarioConfig.obs_vec_length == CORE_DIM
        if get_shape:
            return CORE_DIM
        # temporary parameters
        OBS_RANGE_PYTHON_SIDE = 15000
        MAX_NUM_OPP_OBS = 5
        MAX_NUM_ALL_OBS = 5
        # get and calculate distance array
        pos3d_arr = np.zeros(shape=(self.n_agents, 3), dtype=np.float32)
        for i, agent in enumerate(self.agents):
            pos3d_arr[i] = agent.pos3d
        # use the distance matrix calculated by unreal engine to accelerate
        # dis_mat = distance_matrix(pos3d_arr)
        # dis_mat is a matrix, shape = (n_agent, n_agent)
        dis_mat = resp['dataGlobal']['distanceMat']
        alive_all = np.array([agent.alive for agent in self.agents])
        # dead agents are infinitely far from everyone
        dis_mat[~alive_all,:] = +np.inf
        dis_mat[:,~alive_all] = +np.inf
        # get team list
        team_belonging = np.array([agent.team for agent in self.agents])
        # gather the obs arr of all known agents
        obs_arr = RawObsArray(key='Agent')
        if not hasattr(self, "uid_binary"):
            # 10-bit binary id per agent, computed once and cached
            self.uid_binary = self.get_binary_array(np.arange(self.n_agents), 10)
        for i, agent in enumerate(self.agents):
            assert agent.location is not None
            assert agent.uid == i
            obs_arr.append(
                self.uid_binary[i]  # 0~9
            )
            obs_arr.append([
                agent.index,        # 10
                agent.team,         # 11
                agent.alive,        # 12
                agent.uid_remote,   # 13
            ])
            obs_arr.append(         #[14,15,16,17,18,19]
                agent.pos3d
                # tear_num_arr(agent.pos3d, n_digits=6, base=10, mv_left=0)
                # tear_num_arr(agent.pos3d, 6, ScenarioConfig.ObsBreakBase, 0)  # 3 -- > 3*6 = 18 , 18-3=15, 23+15 = 38
            )
            obs_arr.append(
                agent.vel3d
            )
            obs_arr.append([
                agent.hp,
                agent.yaw,
                agent.max_speed,
            ])
        obs_ = obs_arr.get()
        new_obs = my_view(obs_, [self.n_agents, -1])
        assert CORE_DIM == 
new_obs.shape[-1] OBS_ALL_AGENTS = np.zeros(shape=(self.n_agents, MAX_NUM_OPP_OBS+MAX_NUM_ALL_OBS, CORE_DIM)) # now arranging the individual obs for i, agent in enumerate(self.agents): if not agent.alive: OBS_ALL_AGENTS[i, :] = np.nan continue # if alive # scope dis2all = dis_mat[i, :] is_ally = (team_belonging == agent.team) # scope a2h_dis = dis2all[~is_ally] h_alive = alive_all[~is_ally] h_feature = new_obs[~is_ally] h_iden_sort = np.argsort(a2h_dis)[:MAX_NUM_OPP_OBS] a2h_dis_sorted = a2h_dis[h_iden_sort] h_alive_sorted = h_alive[h_iden_sort] h_vis_mask = (a2h_dis_sorted <= OBS_RANGE_PYTHON_SIDE) & h_alive_sorted # scope h_vis_index = h_iden_sort[h_vis_mask] h_invis_index = h_iden_sort[~h_vis_mask] h_vis_index, h_invis_index = self.item_random_mv(src=h_vis_index, dst=h_invis_index,prob=0, rand=True) h_ind = np.concatenate((h_vis_index, h_invis_index)) h_msk = np.concatenate((h_vis_index<0, h_invis_index>=0)) # "<0" project to False; ">=0" project to True a2h_feature_sort = h_feature[h_ind] a2h_feature_sort[h_msk] = 0 if len(a2h_feature_sort) a2f_dis = dis2all[is_ally] f_alive = alive_all[is_ally] f_feature = new_obs[is_ally] f_iden_sort = np.argsort(a2f_dis)[:MAX_NUM_ALL_OBS] a2f_dis_sorted = a2f_dis[f_iden_sort] f_alive_sorted = f_alive[f_iden_sort] f_vis_mask = (a2f_dis_sorted <= OBS_RANGE_PYTHON_SIDE) & f_alive_sorted # scope f_vis_index = f_iden_sort[f_vis_mask] self_vis_index = f_vis_index[:1] # seperate self and ally f_vis_index = f_vis_index[1:] # seperate self and ally f_invis_index = f_iden_sort[~f_vis_mask] f_vis_index, f_invis_index = self.item_random_mv(src=f_vis_index, dst=f_invis_index,prob=0, rand=True) f_ind = np.concatenate((self_vis_index, f_vis_index, f_invis_index)) f_msk = np.concatenate((self_vis_index<0, f_vis_index<0, f_invis_index>=0)) # "<0" project to False; ">=0" project to True self_ally_feature_sort = f_feature[f_ind] self_ally_feature_sort[f_msk] = 0 if len(self_ally_feature_sort) None: super().__init__(rank) self.observation_space 
= self.make_obs(get_shape=True) self.SubTaskConfig = SubTaskConfig assert os.path.basename(inspect.getfile(SubTaskConfig)) == type(self).__name__+'Conf.py', \ ('make sure you have imported the correct SubTaskConfig class') def extract_key_gameobj(self, resp): keyObjArr = resp['dataGlobal']['keyObjArr'] return keyObjArr def gen_reward_and_win(self, resp): reward = [0]*self.n_teams events = resp['dataGlobal']['events'] WinningResult = None for event in events: event_parsed = self.parse_event(event) if event_parsed['Event'] == 'Destroyed': team = self.find_agent_by_uid(event_parsed['UID']).team reward[team] -= 0.05 # this team reward[1-team] += 0.10 # opp team if event_parsed['Event'] == 'EndEpisode': # print([a.alive * a.hp for a in self.agents]) EndReason = event_parsed['EndReason'] WinTeam = int(event_parsed['WinTeam']) if WinTeam<0: # end due to timeout agents_left_each_team = [0 for _ in range(self.n_teams)] for a in self.agents: if a.alive: agents_left_each_team[a.team] += 1 WinTeam = np.argmax(agents_left_each_team) # <<1>> The alive agent number is EQUAL if agents_left_each_team[WinTeam] == agents_left_each_team[1-WinTeam]: hp_each_team = [0 for _ in range(self.n_teams)] for a in self.agents: if a.alive: hp_each_team[a.team] += a.hp WinTeam = np.argmax(hp_each_team) # <<2>> The alive agent HP sum is EQUAL if hp_each_team[WinTeam] == hp_each_team[1-WinTeam]: WinTeam = -1 if WinTeam >= 0: WinningResult = { "team_ranking": [0,1] if WinTeam==0 else [1,0], "end_reason": EndReason } reward[WinTeam] += 1 reward[1-WinTeam] -= 1 else: WinningResult = { "team_ranking": [-1, -1], "end_reason": EndReason } reward = [-1 for _ in range(self.n_teams)] # print(reward) return reward, WinningResult @staticmethod def item_random_mv(src,dst,prob,rand=False): assert len(src.shape)==1; assert len(dst.shape)==1 if rand: np.random.shuffle(src) len_src = len(src) n_mv = (np.random.rand(len_src) < prob).sum() item_mv = src[range(len_src-n_mv,len_src)] src = src[range(0,0+len_src-n_mv)] 
dst = np.concatenate((item_mv, dst)) return src, dst @staticmethod def get_binary_array(n_int, n_bits=8, dtype=np.float32): arr = np.zeros((*n_int.shape, n_bits), dtype=dtype) for i in range(n_bits): arr[:, i] = (n_int%2==1).astype(int) n_int = n_int / 2 n_int = n_int.astype(np.int8) return arr def make_obs(self, resp=None, get_shape=False): # CORE_DIM = 38 CORE_DIM = 23 assert ScenarioConfig.obs_vec_length == CORE_DIM if get_shape: return CORE_DIM # temporary parameters OBS_RANGE_PYTHON_SIDE = 1500 MAX_NUM_OPP_OBS = 5 MAX_NUM_ALL_OBS = 5 # get and calculate distance array pos3d_arr = np.zeros(shape=(self.n_agents, 3), dtype=np.float32) for i, agent in enumerate(self.agents): pos3d_arr[i] = agent.pos3d # use the distance matrix calculated by unreal engine to accelerate # dis_mat = distance_matrix(pos3d_arr) # dis_mat is a matrix, shape = (n_agent, n_agent) dis_mat = resp['dataGlobal']['distanceMat'] alive_all = np.array([agent.alive for agent in self.agents]) try: dis_mat[~alive_all,:] = +np.inf dis_mat[:,~alive_all] = +np.inf except: pass # get team list team_belonging = np.array([agent.team for agent in self.agents]) # gather the obs arr of all known agents obs_arr = RawObsArray(key='Agent') if not hasattr(self, "uid_binary"): self.uid_binary = self.get_binary_array(np.arange(self.n_agents), 10) for i, agent in enumerate(self.agents): assert agent.location is not None assert agent.uid == i obs_arr.append( self.uid_binary[i] # 0~9 ) obs_arr.append([ agent.index, # 10 agent.team, # 11 agent.alive, # 12 agent.uid_remote, # 13 ]) obs_arr.append( #[14,15,16,17,18,19] agent.pos3d # tear_num_arr(agent.pos3d, n_digits=6, base=10, mv_left=0) # tear_num_arr(agent.pos3d, 6, ScenarioConfig.ObsBreakBase, 0) # 3 -- > 3*6 = 18 , 18-3=15, 23+15 = 38 ) obs_arr.append( agent.vel3d ) obs_arr.append([ agent.hp, agent.yaw, agent.max_speed, ]) obs_ = obs_arr.get() new_obs = my_view(obs_, [self.n_agents, -1]) assert CORE_DIM == new_obs.shape[-1] OBS_ALL_AGENTS = np.zeros(shape=( 
self.n_agents, MAX_NUM_OPP_OBS+MAX_NUM_ALL_OBS, CORE_DIM )) # now arranging the individual obs for i, agent in enumerate(self.agents): if not agent.alive: OBS_ALL_AGENTS[i, :] = np.nan continue # if alive # scope dis2all = dis_mat[i, :] is_ally = (team_belonging == agent.team) # scope a2h_dis = dis2all[~is_ally] h_alive = alive_all[~is_ally] h_feature = new_obs[~is_ally] h_iden_sort = np.argsort(a2h_dis)[:MAX_NUM_OPP_OBS] a2h_dis_sorted = a2h_dis[h_iden_sort] h_alive_sorted = h_alive[h_iden_sort] h_vis_mask = (a2h_dis_sorted <= OBS_RANGE_PYTHON_SIDE) & h_alive_sorted # scope h_vis_index = h_iden_sort[h_vis_mask] h_invis_index = h_iden_sort[~h_vis_mask] h_vis_index, h_invis_index = self.item_random_mv(src=h_vis_index, dst=h_invis_index,prob=0, rand=True) h_ind = np.concatenate((h_vis_index, h_invis_index)) h_msk = np.concatenate((h_vis_index<0, h_invis_index>=0)) # "<0" project to False; ">=0" project to True a2h_feature_sort = h_feature[h_ind] a2h_feature_sort[h_msk] = 0 if len(a2h_feature_sort) a2f_dis = dis2all[is_ally] f_alive = alive_all[is_ally] f_feature = new_obs[is_ally] f_iden_sort = np.argsort(a2f_dis)[:MAX_NUM_ALL_OBS] a2f_dis_sorted = a2f_dis[f_iden_sort] f_alive_sorted = f_alive[f_iden_sort] f_vis_mask = (a2f_dis_sorted <= OBS_RANGE_PYTHON_SIDE) & f_alive_sorted # scope f_vis_index = f_iden_sort[f_vis_mask] self_vis_index = f_vis_index[:1] # seperate self and ally f_vis_index = f_vis_index[1:] # seperate self and ally f_invis_index = f_iden_sort[~f_vis_mask] f_vis_index, f_invis_index = self.item_random_mv(src=f_vis_index, dst=f_invis_index,prob=0, rand=True) f_ind = np.concatenate((self_vis_index, f_vis_index, f_invis_index)) f_msk = np.concatenate((self_vis_index<0, f_vis_index<0, f_invis_index>=0)) # "<0" project to False; ">=0" project to True self_ally_feature_sort = f_feature[f_ind] self_ally_feature_sort[f_msk] = 0 if len(self_ally_feature_sort) 0: OBJ_UID_OFFSET = 32768 obs_arr = RawObsArray(key = 'GameObj') for i, obj in 
enumerate(self.key_obj): assert obj['uId'] - OBJ_UID_OFFSET == i obs_arr.append( -self.uid_binary[i] # reverse uid binary, self.uid_binary[i] ) obs_arr.append([ obj['uId'] - OBJ_UID_OFFSET, #agent.index, -1, #agent.team, True, #agent.alive, obj['uId'] - OBJ_UID_OFFSET, #agent.uid_remote, ]) # tear_num_arr(agent.pos3d, n_digits=6, base=10, mv_left=0) obs_arr.append( [ obj['location']['x'], obj['location']['y'], obj['location']['z'] # agent.pos3d ] # tear_num_arr([ # obj['location']['x'], obj['location']['y'], obj['location']['z'] # agent.pos3d # ], 6, ScenarioConfig.ObsBreakBase, 0) ) obs_arr.append([ obj['velocity']['x'], obj['velocity']['y'], obj['velocity']['z'] # agent.vel3d ]+ [ -1, # hp obj['rotation']['yaw'], # yaw 0, # max_speed ]) OBS_GameObj = my_view(obs_arr.get(), [len(self.key_obj), -1]) OBS_GameObj = OBS_GameObj[:MAX_OBJ_NUM_ACCEPT, :] OBS_GameObj = repeat_at(OBS_GameObj, insert_dim=0, n_times=self.n_agents) OBS_ALL_AGENTS = np.concatenate((OBS_ALL_AGENTS, OBS_GameObj), axis=1) return OBS_ALL_AGENTS def init_drone(self, agent_info, pos_ro): N_COL = 2 agent_class = agent_info['type'] team = agent_info['team'] n_team_agent = 10 tid = agent_info['tid'] uid = agent_info['uid'] x = 0 + 800*(tid - n_team_agent//2) //N_COL y = (400* (tid%N_COL) + 2000) * (-1)**(team+1) x,y = np.matmul(np.array([x,y]), np.array([[np.cos(pos_ro), -np.sin(pos_ro)], [np.sin(pos_ro), np.cos(pos_ro)] ])) z = 500 # 500 is slightly above the ground yaw = 90 if team==0 else -90 assert np.abs(x) < 15000.0 and np.abs(y) < 15000.0 agent_property = copy.deepcopy(AgentPropertyDefaults) agent_property.update({ 'DebugAgent': False, # max drive/fly speed 'MaxMoveSpeed': 400, # also influence object mass, please change it with causion! 
'AgentScale' : { 'x': 0.7, 'y': 0.7, 'z': 0.7, }, # probability of escaping dmg 闪避 "DodgeProb": 0.0, # ms explode dmg "ExplodeDmg": 75, # team belonging 'AgentTeam': team, # choose ue class to init 'ClassName': agent_class, # Weapon CD 'WeaponCD': 1, # open fire range "PerceptionRange": 2000, "GuardRange": 1400, "FireRange": 1300 , # debugging 'RSVD1': f'-CarrierName=T0-0 -NumDrone={self.n_team_agent[team]-1}' if team==0 else f'-CarrierName=T1-0 -NumDrone={self.n_team_agent[team]-1}', # regular 'RSVD2': '-InitAct=ActionSet2::Idle;StaticAlert', # agent hp 'AgentHp': 110, # the rank of agent inside the team 'IndexInTeam': tid, # the unique identity of this agent in simulation system 'UID': uid, # show color 'Color':'(R=0,G=1,B=0,A=1)' if team==0 else '(R=0,G=0,B=1,A=1)', # initial location 'InitLocation': { 'x': x, 'y': y, 'z': z, }, # initial facing direction et.al. 'InitRotator': { 'pitch': 0, 'roll': 0, 'yaw': yaw, }, }), return agent_property ================================================ FILE: PythonExample/hmp_minimal_modules/MISSION/uhmap/SubTasks/UhmapFormationConf.py ================================================ class SubTaskConfig(): agent_list = [ { "team": 0, "type": "Lv3_MomentumAgentWithHp", "init_fn_name": "init_drone" }, { "team": 0, "type": "Lv3_MomentumAgentWithHp", "init_fn_name": "init_drone" }, { "team": 0, "type": "Lv3_MomentumAgentWithHp", "init_fn_name": "init_drone" }, { "team": 0, "type": "Lv3_MomentumAgentWithHp", "init_fn_name": "init_drone" }, { "team": 0, "type": "Lv3_MomentumAgentWithHp", "init_fn_name": "init_drone" }, { "team": 0, "type": "Lv3_MomentumAgentWithHp", "init_fn_name": "init_drone" }, { "team": 0, "type": "Lv3_MomentumAgentWithHp", "init_fn_name": "init_drone" }, { "team": 0, "type": "Lv3_MomentumAgentWithHp", "init_fn_name": "init_drone" }, { "team": 0, "type": "Lv3_MomentumAgentWithHp", "init_fn_name": "init_drone" }, { "team": 0, "type": "Lv3_MomentumAgentWithHp", "init_fn_name": "init_drone" }, { "team": 0, 
"type": "Lv3_MomentumAgentWithHp", "init_fn_name": "init_drone" }, { "team": 0, "type": "Lv3_MomentumAgentWithHp", "init_fn_name": "init_drone" }, { "team": 0, "type": "Lv3_MomentumAgentWithHp", "init_fn_name": "init_drone" }, { "team": 0, "type": "Lv3_MomentumAgentWithHp", "init_fn_name": "init_drone" }, { "team": 0, "type": "Lv3_MomentumAgentWithHp", "init_fn_name": "init_drone" }, { "team": 0, "type": "Lv3_MomentumAgentWithHp", "init_fn_name": "init_drone" }, { "team": 0, "type": "Lv3_MomentumAgentWithHp", "init_fn_name": "init_drone" }, { "team": 0, "type": "Lv3_MomentumAgentWithHp", "init_fn_name": "init_drone" }, { "team": 0, "type": "Lv3_MomentumAgentWithHp", "init_fn_name": "init_drone" }, { "team": 0, "type": "Lv3_MomentumAgentWithHp", "init_fn_name": "init_drone" }, { "team": 1, "type": "Lv3_MomentumAgentWithHp", "init_fn_name": "init_drone" }, { "team": 1, "type": "Lv3_MomentumAgentWithHp", "init_fn_name": "init_drone" }, { "team": 1, "type": "Lv3_MomentumAgentWithHp", "init_fn_name": "init_drone" }, { "team": 1, "type": "Lv3_MomentumAgentWithHp", "init_fn_name": "init_drone" }, { "team": 1, "type": "Lv3_MomentumAgentWithHp", "init_fn_name": "init_drone" }, { "team": 1, "type": "Lv3_MomentumAgentWithHp", "init_fn_name": "init_drone" }, { "team": 1, "type": "Lv3_MomentumAgentWithHp", "init_fn_name": "init_drone" }, { "team": 1, "type": "Lv3_MomentumAgentWithHp", "init_fn_name": "init_drone" }, { "team": 1, "type": "Lv3_MomentumAgentWithHp", "init_fn_name": "init_drone" }, { "team": 1, "type": "Lv3_MomentumAgentWithHp", "init_fn_name": "init_drone" }, { "team": 1, "type": "Lv3_MomentumAgentWithHp", "init_fn_name": "init_drone" }, { "team": 1, "type": "Lv3_MomentumAgentWithHp", "init_fn_name": "init_drone" }, { "team": 1, "type": "Lv3_MomentumAgentWithHp", "init_fn_name": "init_drone" }, { "team": 1, "type": "Lv3_MomentumAgentWithHp", "init_fn_name": "init_drone" }, { "team": 1, "type": "Lv3_MomentumAgentWithHp", "init_fn_name": "init_drone" }, { "team": 1, 
"type": "Lv3_MomentumAgentWithHp", "init_fn_name": "init_drone" }, { "team": 1, "type": "Lv3_MomentumAgentWithHp", "init_fn_name": "init_drone" }, { "team": 1, "type": "Lv3_MomentumAgentWithHp", "init_fn_name": "init_drone" }, { "team": 1, "type": "Lv3_MomentumAgentWithHp", "init_fn_name": "init_drone" }, { "team": 1, "type": "Lv3_MomentumAgentWithHp", "init_fn_name": "init_drone" }, ] obs_vec_length = 23 obs_n_entity = 11 ActionFormat = 'ASCII' ================================================ FILE: PythonExample/hmp_minimal_modules/MISSION/uhmap/SubTasks/UhmapHuge.py ================================================ import json, copy, re, os, inspect, os import numpy as np from UTIL.tensor_ops import my_view, repeat_at from ...common.base_env import RawObsArray from ..actset_lookup import digit2act_dictionary, AgentPropertyDefaults from ..actset_lookup import decode_action_as_string, decode_action_as_string from ..agent import Agent from ..uhmap_env_wrapper import UhmapEnv, ScenarioConfig from .UhmapHugeConf import SubTaskConfig from .cython_func import tear_num_arr from .SubtaskCommonFn import UhmapCommonFn class UhmapHuge(UhmapCommonFn, UhmapEnv): def __init__(self, rank) -> None: super().__init__(rank) self.observation_space = self.make_obs(get_shape=True) self.SubTaskConfig = SubTaskConfig assert os.path.basename(inspect.getfile(SubTaskConfig)) == type(self).__name__+'Conf.py', \ ('make sure you have imported the correct SubTaskConfig class') def extract_key_gameobj(self, resp): keyObjArr = resp['dataGlobal']['keyObjArr'] return keyObjArr def gen_reward_and_win(self, resp): reward = [0]*self.n_teams events = resp['dataGlobal']['events'] WinningResult = None for event in events: event_parsed = self.parse_event(event) if event_parsed['Event'] == 'Destroyed': team = self.find_agent_by_uid(event_parsed['UID']).team reward[team] -= 0.05 # this team reward[1-team] += 0.10 # opp team if event_parsed['Event'] == 'EndEpisode': # print([a.alive * a.hp for a in 
self.agents]) EndReason = event_parsed['EndReason'] WinTeam = int(event_parsed['WinTeam']) if WinTeam<0: # end due to timeout agents_left_each_team = [0 for _ in range(self.n_teams)] for a in self.agents: if a.alive: agents_left_each_team[a.team] += 1 WinTeam = np.argmax(agents_left_each_team) # <<1>> The alive agent number is EQUAL if agents_left_each_team[WinTeam] == agents_left_each_team[1-WinTeam]: hp_each_team = [0 for _ in range(self.n_teams)] for a in self.agents: if a.alive: hp_each_team[a.team] += a.hp WinTeam = np.argmax(hp_each_team) # <<2>> The alive agent HP sum is EQUAL if hp_each_team[WinTeam] == hp_each_team[1-WinTeam]: WinTeam = -1 if WinTeam >= 0: WinningResult = { "team_ranking": [0,1] if WinTeam==0 else [1,0], "end_reason": EndReason } reward[WinTeam] += 1 reward[1-WinTeam] -= 1 else: WinningResult = { "team_ranking": [-1, -1], "end_reason": EndReason } reward = [-1 for _ in range(self.n_teams)] # print(reward) return reward, WinningResult @staticmethod def item_random_mv(src,dst,prob,rand=False): assert len(src.shape)==1; assert len(dst.shape)==1 if rand: np.random.shuffle(src) len_src = len(src) n_mv = (np.random.rand(len_src) < prob).sum() item_mv = src[range(len_src-n_mv,len_src)] src = src[range(0,0+len_src-n_mv)] dst = np.concatenate((item_mv, dst)) return src, dst @staticmethod def get_binary_array(n_int, n_bits=8, dtype=np.float32): arr = np.zeros((*n_int.shape, n_bits), dtype=dtype) for i in range(n_bits): arr[:, i] = (n_int%2==1).astype(int) n_int = n_int / 2 n_int = n_int.astype(np.int8) return arr def make_obs(self, resp=None, get_shape=False): # CORE_DIM = 38 CORE_DIM = 23 assert ScenarioConfig.obs_vec_length == CORE_DIM if get_shape: return CORE_DIM # temporary parameters OBS_RANGE_PYTHON_SIDE = 1500 MAX_NUM_OPP_OBS = 5 MAX_NUM_ALL_OBS = 5 # get and calculate distance array pos3d_arr = np.zeros(shape=(self.n_agents, 3), dtype=np.float32) for i, agent in enumerate(self.agents): pos3d_arr[i] = agent.pos3d # use the distance matrix 
calculated by unreal engine to accelerate # dis_mat = distance_matrix(pos3d_arr) # dis_mat is a matrix, shape = (n_agent, n_agent) dis_mat = resp['dataGlobal']['distanceMat'] alive_all = np.array([agent.alive for agent in self.agents]) try: dis_mat[~alive_all,:] = +np.inf dis_mat[:,~alive_all] = +np.inf except: pass # get team list team_belonging = np.array([agent.team for agent in self.agents]) # gather the obs arr of all known agents obs_arr = RawObsArray(key='Agent') if not hasattr(self, "uid_binary"): self.uid_binary = self.get_binary_array(np.arange(self.n_agents), 10) for i, agent in enumerate(self.agents): assert agent.location is not None assert agent.uid == i obs_arr.append( self.uid_binary[i] # 0~9 ) obs_arr.append([ agent.index, # 10 agent.team, # 11 agent.alive, # 12 agent.uid_remote, # 13 ]) obs_arr.append( #[14,15,16,17,18,19] agent.pos3d # tear_num_arr(agent.pos3d, n_digits=6, base=10, mv_left=0) # tear_num_arr(agent.pos3d, 6, ScenarioConfig.ObsBreakBase, 0) # 3 -- > 3*6 = 18 , 18-3=15, 23+15 = 38 ) obs_arr.append( agent.vel3d ) obs_arr.append([ agent.hp, agent.yaw, agent.max_speed, ]) obs_ = obs_arr.get() new_obs = my_view(obs_, [self.n_agents, -1]) assert CORE_DIM == new_obs.shape[-1] OBS_ALL_AGENTS = np.zeros(shape=( self.n_agents, MAX_NUM_OPP_OBS+MAX_NUM_ALL_OBS, CORE_DIM )) # now arranging the individual obs for i, agent in enumerate(self.agents): if not agent.alive: OBS_ALL_AGENTS[i, :] = np.nan continue # if alive # scope dis2all = dis_mat[i, :] is_ally = (team_belonging == agent.team) # scope a2h_dis = dis2all[~is_ally] h_alive = alive_all[~is_ally] h_feature = new_obs[~is_ally] h_iden_sort = np.argsort(a2h_dis)[:MAX_NUM_OPP_OBS] a2h_dis_sorted = a2h_dis[h_iden_sort] h_alive_sorted = h_alive[h_iden_sort] h_vis_mask = (a2h_dis_sorted <= OBS_RANGE_PYTHON_SIDE) & h_alive_sorted # scope h_vis_index = h_iden_sort[h_vis_mask] h_invis_index = h_iden_sort[~h_vis_mask] h_vis_index, h_invis_index = self.item_random_mv(src=h_vis_index, 
dst=h_invis_index,prob=0, rand=True) h_ind = np.concatenate((h_vis_index, h_invis_index)) h_msk = np.concatenate((h_vis_index<0, h_invis_index>=0)) # "<0" project to False; ">=0" project to True a2h_feature_sort = h_feature[h_ind] a2h_feature_sort[h_msk] = 0 if len(a2h_feature_sort) a2f_dis = dis2all[is_ally] f_alive = alive_all[is_ally] f_feature = new_obs[is_ally] f_iden_sort = np.argsort(a2f_dis)[:MAX_NUM_ALL_OBS] a2f_dis_sorted = a2f_dis[f_iden_sort] f_alive_sorted = f_alive[f_iden_sort] f_vis_mask = (a2f_dis_sorted <= OBS_RANGE_PYTHON_SIDE) & f_alive_sorted # scope f_vis_index = f_iden_sort[f_vis_mask] self_vis_index = f_vis_index[:1] # seperate self and ally f_vis_index = f_vis_index[1:] # seperate self and ally f_invis_index = f_iden_sort[~f_vis_mask] f_vis_index, f_invis_index = self.item_random_mv(src=f_vis_index, dst=f_invis_index,prob=0, rand=True) f_ind = np.concatenate((self_vis_index, f_vis_index, f_invis_index)) f_msk = np.concatenate((self_vis_index<0, f_vis_index<0, f_invis_index>=0)) # "<0" project to False; ">=0" project to True self_ally_feature_sort = f_feature[f_ind] self_ally_feature_sort[f_msk] = 0 if len(self_ally_feature_sort) None: super().__init__(rank) self.observation_space = self.make_obs(get_shape=True) self.SubTaskConfig = SubTaskConfig inspect.getfile(SubTaskConfig) assert os.path.basename(inspect.getfile(SubTaskConfig)) == type(self).__name__+'Conf.py', \ ('make sure you have imported the correct SubTaskConfig class') def reset(self): """ Reset function, it delivers reset command to unreal engine to spawn all agents 环境复位,每个episode的开始会执行一次此函数中会初始化所有智能体 """ super().reset() self.t = 0 pos_ro = np.random.rand()*2*np.pi # spawn agents AgentSettingArray = [] # count the number of agent in each team n_team_agent = {} for i, agent_info in enumerate(SubTaskConfig.agent_list): team = agent_info['team'] if team not in n_team_agent: n_team_agent[team] = 0 SubTaskConfig.agent_list[i]['uid'] = i SubTaskConfig.agent_list[i]['tid'] = 
n_team_agent[team] n_team_agent[team] += 1 # push agent init info one by one for i, agent_info in enumerate(SubTaskConfig.agent_list): team = agent_info['team'] agent_info['n_team_agent'] = n_team_agent[team] init_fn = getattr(self, agent_info['init_fn_name']) AgentSettingArray.append(init_fn(agent_info, pos_ro)) self.agents = [Agent(team=a['team'], team_id=a['tid'], uid=a['uid']) for a in SubTaskConfig.agent_list] # refer to struct.cpp, FParsedDataInput resp = self.client.send_and_wait_reply(json.dumps({ 'valid': True, 'DataCmd': 'reset', 'NumAgents' : len(SubTaskConfig.agent_list), 'AgentSettingArray': AgentSettingArray, # refer to struct.cpp, FAgentProperty 'TimeStepMax': ScenarioConfig.MaxEpisodeStep, 'TimeStep' : 0, 'Actions': None, })) resp = json.loads(resp) # make sure the map (level in UE) is correct # assert resp['dataGlobal']['levelName'] == 'UhmapLargeScale' assert len(resp['dataArr']) == len(AgentSettingArray) return self.parse_response_ob_info(resp) def step(self, act): """ step 函数,act中包含了所有agent的决策 """ assert len(act) == self.n_agents # translate actions to the format recognized by unreal engine if ScenarioConfig.ActionFormat == 'Single-Digit': act_send = [digit2act_dictionary[a] for a in act] elif ScenarioConfig.ActionFormat == 'Multi-Digit': act_send = [decode_action_as_string(a) for a in act] elif ScenarioConfig.ActionFormat == 'ASCII': act_send = [digitsToStrAction(a) for a in act] else: raise "ActionFormat is wrong!" 
# simulation engine IO resp = json.loads(self.client.send_and_wait_reply(json.dumps({ 'valid': True, 'DataCmd': 'step', 'TimeStep': self.t, 'Actions': None, 'StringActions': act_send, }))) # get obs for RL, info for script AI ob, info = self.parse_response_ob_info(resp) # generate reward, get the episode ending infomation RewardForAllTeams, WinningResult = self.gen_reward_and_win(resp) if WinningResult is not None: info.update(WinningResult) assert resp['dataGlobal']['episodeDone'] done = True else: done = False if resp['dataGlobal']['timeCnt'] >= ScenarioConfig.MaxEpisodeStep: assert done return (ob, RewardForAllTeams, done, info) # choose this if RewardAsUnity def parse_event(self, event): """ 解析环境返回的一些关键事件, 如智能体阵亡,某队伍胜利等等。 关键事件需要在ue中进行定义. 该设计极大地简化了python端奖励的设计流程, 减小了python端的运算量。 """ if not hasattr(self, 'pattern'): self.pattern = re.compile(r'<([^<>]*)>([^<>]*)') return {k:v for k,v in re.findall(self.pattern, event)} def extract_key_gameobj(self, resp): """ 获取非智能体的仿真物件,例如重要landmark等 """ keyObjArr = resp['dataGlobal']['keyObjArr'] return keyObjArr def gen_reward_and_win(self, resp): """ 奖励的设计在此定义, (UE端编程死板,虽然预留了相关字段, 但请不要在UE端提供奖励的定义。) 建议:在UE端定义触发奖励的事件,如智能体阵亡、战术目标完成等,见parse_event """ reward = [0]*self.n_teams events = resp['dataGlobal']['events'] WinningResult = None for event in events: event_parsed = self.parse_event(event) # if event_parsed['Event'] == 'Destroyed': # team = self.find_agent_by_uid(event_parsed['UID']).team # reward[team] -= 0.05 # this team # reward[1-team] += 0.10 # opp team if event_parsed['Event'] == 'EndEpisode': # print([a.alive * a.hp for a in self.agents]) PredatorWin = False PredatorRank = False PredatorReward = 0 PreyWin = -1 PreyRank = -1 PreyReward = 0 EndReason = event_parsed['EndReason'] # According to MISSION\uhmap\SubTasks\UhmapInterceptConf.py, team 0 is prey team, team 1 is predator team if EndReason == "AllPreyCaught" or EndReason == "Team_0_AllDead": PredatorWin = True; PredatorRank = 0; PredatorReward = 1 PreyWin = False; 
PreyRank = 1; PreyReward = -1 elif EndReason == "TimeMaxCntReached" or EndReason == "Team_1_AllDead": PredatorWin = False; PredatorRank = 1; PredatorReward = -1 PreyWin = True; PreyRank = 0; PreyReward = 1 else: print('unexpected end reaon:', EndReason) WinningResult = {"team_ranking": [PreyRank, PredatorRank], "end_reason": EndReason} reward = [PreyReward, PredatorReward] # print(reward) return reward, WinningResult def step_skip(self): """ 跳过一次决策,无用的函数 """ return self.client.send_and_wait_reply(json.dumps({ 'valid': True, 'DataCmd': 'skip_frame', })) def find_agent_by_uid(self, uid): """ 用uid查找智能体(带缓存加速机制) """ if not hasattr(self, 'uid_to_agent_dict'): self.uid_to_agent_dict = {} self.uid_to_agent_dict.update({agent.uid:agent for agent in self.agents}) if isinstance(uid, str): self.uid_to_agent_dict.update({str(agent.uid):agent for agent in self.agents}) return self.uid_to_agent_dict[uid] def parse_response_ob_info(self, resp): """ 粗解析智能体的观测,例如把死智能体的位置替换为inf(无穷远), 将智能体的agentLocation从字典形式转变为更简洁的(x,y,z)tuple形式 """ assert resp['valid'] resp['dataGlobal']['distanceMat'] = np.array(resp['dataGlobal']['distanceMat']['flat_arr']).reshape(self.n_agents,self.n_agents) if len(resp['dataGlobal']['events'])>0: tmp = [kv.split('>') for kv in resp['dataGlobal']['events'][0].split('<') if kv] info_parse = {t[0]:t[1] for t in tmp} info_dict = resp for info in info_dict['dataArr']: alive = info['agentAlive'] if alive: agentLocation = info.pop('agentLocation') agentRotation = info.pop('agentRotation') agentVelocity = info.pop('agentVelocity') agentScale = info.pop('agentScale') info['agentLocationArr'] = (agentLocation['x'], agentLocation['y'], agentLocation['z']) info['agentVelocityArr'] = (agentVelocity['x'], agentVelocity['y'], agentVelocity['z']) info['agentRotationArr'] = (agentRotation['yaw'], agentRotation['pitch'], agentRotation['roll']) info['agentScaleArr'] = (agentScale['x'], agentScale['y'], agentScale['z']) info.pop('previousAction') info.pop('availActions') # 
info.pop('rSVD1') info.pop('interaction') else: inf = float('inf') info['agentLocationArr'] = (inf, inf, inf) info['agentVelocityArr'] = (inf, inf, inf) info['agentRotationArr'] = (inf, inf, inf) info = resp['dataArr'] for i, agent_info in enumerate(info): self.agents[i].update_agent_attrs(agent_info) self.key_obj = self.extract_key_gameobj(resp) # return ob, info return self.make_obs(resp), info_dict @staticmethod def item_random_mv(src,dst,prob,rand=False): assert len(src.shape)==1; assert len(dst.shape)==1 if rand: np.random.shuffle(src) len_src = len(src) n_mv = (np.random.rand(len_src) < prob).sum() item_mv = src[range(len_src-n_mv,len_src)] src = src[range(0,0+len_src-n_mv)] dst = np.concatenate((item_mv, dst)) return src, dst @staticmethod def get_binary_array(n_int, n_bits=8, dtype=np.float32): arr = np.zeros((*n_int.shape, n_bits), dtype=dtype) for i in range(n_bits): arr[:, i] = (n_int%2==1).astype(int) n_int = n_int / 2 n_int = n_int.astype(np.int8) return arr def make_obs(self, resp=None, get_shape=False): # CORE_DIM = 38 CORE_DIM = 23 assert ScenarioConfig.obs_vec_length == CORE_DIM if get_shape: return CORE_DIM # temporary parameters OBS_RANGE_PYTHON_SIDE = 15000 MAX_NUM_OPP_OBS = 5 MAX_NUM_ALL_OBS = 5 # get and calculate distance array pos3d_arr = np.zeros(shape=(self.n_agents, 3), dtype=np.float32) for i, agent in enumerate(self.agents): pos3d_arr[i] = agent.pos3d # use the distance matrix calculated by unreal engine to accelerate # dis_mat = distance_matrix(pos3d_arr) # dis_mat is a matrix, shape = (n_agent, n_agent) dis_mat = resp['dataGlobal']['distanceMat'] alive_all = np.array([agent.alive for agent in self.agents]) try: dis_mat[~alive_all,:] = +np.inf dis_mat[:,~alive_all] = +np.inf except: pass # get team list team_belonging = np.array([agent.team for agent in self.agents]) # gather the obs arr of all known agents obs_arr = RawObsArray(key='Agent') if not hasattr(self, "uid_binary"): self.uid_binary = 
self.get_binary_array(np.arange(self.n_agents), 10) for i, agent in enumerate(self.agents): assert agent.location is not None assert agent.uid == i obs_arr.append( self.uid_binary[i] # 0~9 ) obs_arr.append([ agent.index, # 10 agent.team, # 11 agent.alive, # 12 agent.uid_remote, # 13 ]) obs_arr.append( #[14,15,16,17,18,19] agent.pos3d # tear_num_arr(agent.pos3d, n_digits=6, base=10, mv_left=0) # tear_num_arr(agent.pos3d, 6, ScenarioConfig.ObsBreakBase, 0) # 3 -- > 3*6 = 18 , 18-3=15, 23+15 = 38 ) obs_arr.append( agent.vel3d ) obs_arr.append([ agent.hp, agent.yaw, agent.max_speed, ]) obs_ = obs_arr.get() new_obs = my_view(obs_, [self.n_agents, -1]) assert CORE_DIM == new_obs.shape[-1] OBS_ALL_AGENTS = np.zeros(shape=(self.n_agents, MAX_NUM_OPP_OBS+MAX_NUM_ALL_OBS, CORE_DIM)) # now arranging the individual obs for i, agent in enumerate(self.agents): if not agent.alive: OBS_ALL_AGENTS[i, :] = np.nan continue # if alive # scope dis2all = dis_mat[i, :] is_ally = (team_belonging == agent.team) # scope a2h_dis = dis2all[~is_ally] h_alive = alive_all[~is_ally] h_feature = new_obs[~is_ally] h_iden_sort = np.argsort(a2h_dis)[:MAX_NUM_OPP_OBS] a2h_dis_sorted = a2h_dis[h_iden_sort] h_alive_sorted = h_alive[h_iden_sort] h_vis_mask = (a2h_dis_sorted <= OBS_RANGE_PYTHON_SIDE) & h_alive_sorted # scope h_vis_index = h_iden_sort[h_vis_mask] h_invis_index = h_iden_sort[~h_vis_mask] h_vis_index, h_invis_index = self.item_random_mv(src=h_vis_index, dst=h_invis_index,prob=0, rand=True) h_ind = np.concatenate((h_vis_index, h_invis_index)) h_msk = np.concatenate((h_vis_index<0, h_invis_index>=0)) # "<0" project to False; ">=0" project to True a2h_feature_sort = h_feature[h_ind] a2h_feature_sort[h_msk] = 0 if len(a2h_feature_sort) a2f_dis = dis2all[is_ally] f_alive = alive_all[is_ally] f_feature = new_obs[is_ally] f_iden_sort = np.argsort(a2f_dis)[:MAX_NUM_ALL_OBS] a2f_dis_sorted = a2f_dis[f_iden_sort] f_alive_sorted = f_alive[f_iden_sort] f_vis_mask = (a2f_dis_sorted <= 
OBS_RANGE_PYTHON_SIDE) & f_alive_sorted # scope f_vis_index = f_iden_sort[f_vis_mask] self_vis_index = f_vis_index[:1] # seperate self and ally f_vis_index = f_vis_index[1:] # seperate self and ally f_invis_index = f_iden_sort[~f_vis_mask] f_vis_index, f_invis_index = self.item_random_mv(src=f_vis_index, dst=f_invis_index,prob=0, rand=True) f_ind = np.concatenate((self_vis_index, f_vis_index, f_invis_index)) f_msk = np.concatenate((self_vis_index<0, f_vis_index<0, f_invis_index>=0)) # "<0" project to False; ">=0" project to True self_ally_feature_sort = f_feature[f_ind] self_ally_feature_sort[f_msk] = 0 if len(self_ally_feature_sort) None: super().__init__(rank) self.observation_space = self.make_obs(get_shape=True) self.SubTaskConfig = SubTaskConfig inspect.getfile(SubTaskConfig) assert os.path.basename(inspect.getfile(SubTaskConfig)) == type(self).__name__+'Conf.py', \ ('make sure you have imported the correct SubTaskConfig class') def extract_key_gameobj(self, resp): """ 获取非智能体的仿真物件,例如重要landmark等 """ keyObjArr = resp['dataGlobal']['keyObjArr'] return keyObjArr def gen_reward_and_win(self, resp): """ 奖励的设计在此定义, (UE端编程死板,虽然预留了相关字段, 但请不要在UE端提供奖励的定义。) 建议:在UE端定义触发奖励的事件,如智能体阵亡、战术目标完成等,见parse_event """ reward = [0]*self.n_teams events = resp['dataGlobal']['events'] WinningResult = None for event in events: event_parsed = self.parse_event(event) if event_parsed['Event'] == 'EndEpisode': # print([a.alive * a.hp for a in self.agents]) EndReason = event_parsed['EndReason'] # WinTeam = int(event_parsed['WinTeam']) WinningResult = { # 每个队伍的排名,可以指定例如[1, 0, 2],代表一队第2名,二队第1名,三队第3名 # 如果没有任何队伍取得胜利,可以指定例如[-1, -1, -1] # 如果有两只队伍成绩并列,可以指定例如[0, 2, 0, 2], 代表一队三队并列第1名,二队四队并列第3名 "team_ranking": [-1, ], "end_reason": EndReason } assert len(WinningResult["team_ranking"]) == ScenarioConfig.N_TEAM # print(reward) return reward, WinningResult @staticmethod def item_random_mv(src,dst,prob,rand=False): assert len(src.shape)==1; assert len(dst.shape)==1 if rand: np.random.shuffle(src) len_src = 
len(src) n_mv = (np.random.rand(len_src) < prob).sum() item_mv = src[range(len_src-n_mv,len_src)] src = src[range(0,0+len_src-n_mv)] dst = np.concatenate((item_mv, dst)) return src, dst @staticmethod def get_binary_array(n_int, n_bits=8, dtype=np.float32): arr = np.zeros((*n_int.shape, n_bits), dtype=dtype) for i in range(n_bits): arr[:, i] = (n_int%2==1).astype(int) n_int = n_int / 2 n_int = n_int.astype(np.int8) return arr def make_obs(self, resp=None, get_shape=False): # CORE_DIM = 38 CORE_DIM = 23 assert ScenarioConfig.obs_vec_length == CORE_DIM if get_shape: return CORE_DIM # temporary parameters OBS_RANGE_PYTHON_SIDE = 1500 MAX_NUM_OPP_OBS = 5 MAX_NUM_ALL_OBS = 5 # get and calculate distance array pos3d_arr = np.zeros(shape=(self.n_agents, 3), dtype=np.float32) for i, agent in enumerate(self.agents): pos3d_arr[i] = agent.pos3d # use the distance matrix calculated by unreal engine to accelerate # dis_mat = distance_matrix(pos3d_arr) # dis_mat is a matrix, shape = (n_agent, n_agent) dis_mat = resp['dataGlobal']['distanceMat'] alive_all = np.array([agent.alive for agent in self.agents]) try: dis_mat[~alive_all,:] = +np.inf dis_mat[:,~alive_all] = +np.inf except: pass # get team list team_belonging = np.array([agent.team for agent in self.agents]) # gather the obs arr of all known agents obs_arr = RawObsArray(key='Agent') if not hasattr(self, "uid_binary"): self.uid_binary = self.get_binary_array(np.arange(self.n_agents), 10) for i, agent in enumerate(self.agents): assert agent.location is not None assert agent.uid == i obs_arr.append( self.uid_binary[i] # 0~9 ) obs_arr.append([ agent.index, # 10 agent.team, # 11 agent.alive, # 12 agent.uid_remote, # 13 ]) obs_arr.append( #[14,15,16,17,18,19] agent.pos3d # tear_num_arr(agent.pos3d, n_digits=6, base=10, mv_left=0) # tear_num_arr(agent.pos3d, 6, ScenarioConfig.ObsBreakBase, 0) # 3 -- > 3*6 = 18 , 18-3=15, 23+15 = 38 ) obs_arr.append( agent.vel3d ) obs_arr.append([ agent.hp, agent.yaw, agent.max_speed, ]) obs_ = 
obs_arr.get() new_obs = my_view(obs_, [self.n_agents, -1]) assert CORE_DIM == new_obs.shape[-1] OBS_ALL_AGENTS = np.zeros(shape=( self.n_agents, MAX_NUM_OPP_OBS+MAX_NUM_ALL_OBS, CORE_DIM )) # now arranging the individual obs for i, agent in enumerate(self.agents): if not agent.alive: OBS_ALL_AGENTS[i, :] = np.nan continue # if alive # scope dis2all = dis_mat[i, :] is_ally = (team_belonging == agent.team) # scope a2h_dis = dis2all[~is_ally] h_alive = alive_all[~is_ally] h_feature = new_obs[~is_ally] h_iden_sort = np.argsort(a2h_dis)[:MAX_NUM_OPP_OBS] a2h_dis_sorted = a2h_dis[h_iden_sort] h_alive_sorted = h_alive[h_iden_sort] h_vis_mask = (a2h_dis_sorted <= OBS_RANGE_PYTHON_SIDE) & h_alive_sorted # scope h_vis_index = h_iden_sort[h_vis_mask] h_invis_index = h_iden_sort[~h_vis_mask] h_vis_index, h_invis_index = self.item_random_mv(src=h_vis_index, dst=h_invis_index,prob=0, rand=True) h_ind = np.concatenate((h_vis_index, h_invis_index)) h_msk = np.concatenate((h_vis_index<0, h_invis_index>=0)) # "<0" project to False; ">=0" project to True a2h_feature_sort = h_feature[h_ind] a2h_feature_sort[h_msk] = 0 if len(a2h_feature_sort) a2f_dis = dis2all[is_ally] f_alive = alive_all[is_ally] f_feature = new_obs[is_ally] f_iden_sort = np.argsort(a2f_dis)[:MAX_NUM_ALL_OBS] a2f_dis_sorted = a2f_dis[f_iden_sort] f_alive_sorted = f_alive[f_iden_sort] f_vis_mask = (a2f_dis_sorted <= OBS_RANGE_PYTHON_SIDE) & f_alive_sorted # scope f_vis_index = f_iden_sort[f_vis_mask] self_vis_index = f_vis_index[:1] # seperate self and ally f_vis_index = f_vis_index[1:] # seperate self and ally f_invis_index = f_iden_sort[~f_vis_mask] f_vis_index, f_invis_index = self.item_random_mv(src=f_vis_index, dst=f_invis_index,prob=0, rand=True) f_ind = np.concatenate((self_vis_index, f_vis_index, f_invis_index)) f_msk = np.concatenate((self_vis_index<0, f_vis_index<0, f_invis_index>=0)) # "<0" project to False; ">=0" project to True self_ally_feature_sort = f_feature[f_ind] self_ally_feature_sort[f_msk] = 0 if 
len(self_ally_feature_sort) None: super().__init__(rank) self.observation_space = self.make_obs(get_shape=True) self.SubTaskConfig = SubTaskConfig assert os.path.basename(inspect.getfile(SubTaskConfig)) == type(self).__name__+'Conf.py', \ ('make sure you have imported the correct SubTaskConfig class') def extract_key_gameobj(self, resp): keyObjArr = resp['dataGlobal']['keyObjArr'] return keyObjArr def gen_reward_and_win(self, resp): reward = [0]*self.n_teams events = resp['dataGlobal']['events'] WinningResult = None for event in events: event_parsed = self.parse_event(event) if event_parsed['Event'] == 'Destroyed': team = self.find_agent_by_uid(event_parsed['UID']).team reward[team] -= 0.05 # this team reward[1-team] += 0.10 # opp team if event_parsed['Event'] == 'EndEpisode': # print([a.alive * a.hp for a in self.agents]) EndReason = event_parsed['EndReason'] WinTeam = int(event_parsed['WinTeam']) if WinTeam<0: # end due to timeout agents_left_each_team = [0 for _ in range(self.n_teams)] for a in self.agents: if a.alive: agents_left_each_team[a.team] += 1 WinTeam = np.argmax(agents_left_each_team) # <<1>> The alive agent number is EQUAL if agents_left_each_team[WinTeam] == agents_left_each_team[1-WinTeam]: hp_each_team = [0 for _ in range(self.n_teams)] for a in self.agents: if a.alive: hp_each_team[a.team] += a.hp WinTeam = np.argmax(hp_each_team) # <<2>> The alive agent HP sum is EQUAL if hp_each_team[WinTeam] == hp_each_team[1-WinTeam]: WinTeam = -1 if WinTeam >= 0: WinningResult = { "team_ranking": [0,1] if WinTeam==0 else [1,0], "end_reason": EndReason } reward[WinTeam] += 1 reward[1-WinTeam] -= 1 else: WinningResult = { "team_ranking": [-1, -1], "end_reason": EndReason } reward = [-1 for _ in range(self.n_teams)] # print(reward) return reward, WinningResult @staticmethod def item_random_mv(src,dst,prob,rand=False): assert len(src.shape)==1; assert len(dst.shape)==1 if rand: np.random.shuffle(src) len_src = len(src) n_mv = (np.random.rand(len_src) < 
prob).sum() item_mv = src[range(len_src-n_mv,len_src)] src = src[range(0,0+len_src-n_mv)] dst = np.concatenate((item_mv, dst)) return src, dst @staticmethod def get_binary_array(n_int, n_bits=8, dtype=np.float32): arr = np.zeros((*n_int.shape, n_bits), dtype=dtype) for i in range(n_bits): arr[:, i] = (n_int%2==1).astype(int) n_int = n_int / 2 n_int = n_int.astype(np.int8) return arr def make_obs(self, resp=None, get_shape=False): # CORE_DIM = 38 CORE_DIM = 23 assert ScenarioConfig.obs_vec_length == CORE_DIM if get_shape: return CORE_DIM # temporary parameters OBS_RANGE_PYTHON_SIDE = 1500 MAX_NUM_OPP_OBS = 5 MAX_NUM_ALL_OBS = 5 # get and calculate distance array pos3d_arr = np.zeros(shape=(self.n_agents, 3), dtype=np.float32) for i, agent in enumerate(self.agents): pos3d_arr[i] = agent.pos3d # use the distance matrix calculated by unreal engine to accelerate # dis_mat = distance_matrix(pos3d_arr) # dis_mat is a matrix, shape = (n_agent, n_agent) dis_mat = resp['dataGlobal']['distanceMat'] alive_all = np.array([agent.alive for agent in self.agents]) try: dis_mat[~alive_all,:] = +np.inf dis_mat[:,~alive_all] = +np.inf except: pass # get team list team_belonging = np.array([agent.team for agent in self.agents]) # gather the obs arr of all known agents obs_arr = RawObsArray(key='Agent') if not hasattr(self, "uid_binary"): self.uid_binary = self.get_binary_array(np.arange(self.n_agents), 10) for i, agent in enumerate(self.agents): assert agent.location is not None assert agent.uid == i obs_arr.append( self.uid_binary[i] # 0~9 ) obs_arr.append([ agent.index, # 10 agent.team, # 11 agent.alive, # 12 agent.uid_remote, # 13 ]) obs_arr.append( #[14,15,16,17,18,19] agent.pos3d # tear_num_arr(agent.pos3d, n_digits=6, base=10, mv_left=0) # tear_num_arr(agent.pos3d, 6, ScenarioConfig.ObsBreakBase, 0) # 3 -- > 3*6 = 18 , 18-3=15, 23+15 = 38 ) obs_arr.append( agent.vel3d ) obs_arr.append([ agent.hp, agent.yaw, agent.max_speed, ]) obs_ = obs_arr.get() new_obs = my_view(obs_, 
[self.n_agents, -1]) assert CORE_DIM == new_obs.shape[-1] OBS_ALL_AGENTS = np.zeros(shape=( self.n_agents, MAX_NUM_OPP_OBS+MAX_NUM_ALL_OBS, CORE_DIM )) # now arranging the individual obs for i, agent in enumerate(self.agents): if not agent.alive: OBS_ALL_AGENTS[i, :] = np.nan continue # if alive # scope dis2all = dis_mat[i, :] is_ally = (team_belonging == agent.team) # scope a2h_dis = dis2all[~is_ally] h_alive = alive_all[~is_ally] h_feature = new_obs[~is_ally] h_iden_sort = np.argsort(a2h_dis)[:MAX_NUM_OPP_OBS] a2h_dis_sorted = a2h_dis[h_iden_sort] h_alive_sorted = h_alive[h_iden_sort] h_vis_mask = (a2h_dis_sorted <= OBS_RANGE_PYTHON_SIDE) & h_alive_sorted # scope h_vis_index = h_iden_sort[h_vis_mask] h_invis_index = h_iden_sort[~h_vis_mask] h_vis_index, h_invis_index = self.item_random_mv(src=h_vis_index, dst=h_invis_index,prob=0, rand=True) h_ind = np.concatenate((h_vis_index, h_invis_index)) h_msk = np.concatenate((h_vis_index<0, h_invis_index>=0)) # "<0" project to False; ">=0" project to True a2h_feature_sort = h_feature[h_ind] a2h_feature_sort[h_msk] = 0 if len(a2h_feature_sort) a2f_dis = dis2all[is_ally] f_alive = alive_all[is_ally] f_feature = new_obs[is_ally] f_iden_sort = np.argsort(a2f_dis)[:MAX_NUM_ALL_OBS] a2f_dis_sorted = a2f_dis[f_iden_sort] f_alive_sorted = f_alive[f_iden_sort] f_vis_mask = (a2f_dis_sorted <= OBS_RANGE_PYTHON_SIDE) & f_alive_sorted # scope f_vis_index = f_iden_sort[f_vis_mask] self_vis_index = f_vis_index[:1] # seperate self and ally f_vis_index = f_vis_index[1:] # seperate self and ally f_invis_index = f_iden_sort[~f_vis_mask] f_vis_index, f_invis_index = self.item_random_mv(src=f_vis_index, dst=f_invis_index,prob=0, rand=True) f_ind = np.concatenate((self_vis_index, f_vis_index, f_invis_index)) f_msk = np.concatenate((self_vis_index<0, f_vis_index<0, f_invis_index>=0)) # "<0" project to False; ">=0" project to True self_ally_feature_sort = f_feature[f_ind] self_ally_feature_sort[f_msk] = 0 if len(self_ally_feature_sort) 0: 
OBJ_UID_OFFSET = 32768 obs_arr = RawObsArray(key = 'GameObj') for i, obj in enumerate(self.key_obj): assert obj['uId'] - OBJ_UID_OFFSET == i obs_arr.append( -self.uid_binary[i] # reverse uid binary, self.uid_binary[i] ) obs_arr.append([ obj['uId'] - OBJ_UID_OFFSET, #agent.index, -1, #agent.team, True, #agent.alive, obj['uId'] - OBJ_UID_OFFSET, #agent.uid_remote, ]) # tear_num_arr(agent.pos3d, n_digits=6, base=10, mv_left=0) obs_arr.append( [ obj['location']['x'], obj['location']['y'], obj['location']['z'] # agent.pos3d ] # tear_num_arr([ # obj['location']['x'], obj['location']['y'], obj['location']['z'] # agent.pos3d # ], 6, ScenarioConfig.ObsBreakBase, 0) ) obs_arr.append([ obj['velocity']['x'], obj['velocity']['y'], obj['velocity']['z'] # agent.vel3d ]+ [ -1, # hp obj['rotation']['yaw'], # yaw 0, # max_speed ]) OBS_GameObj = my_view(obs_arr.get(), [len(self.key_obj), -1]) OBS_GameObj = OBS_GameObj[:MAX_OBJ_NUM_ACCEPT, :] OBS_GameObj = repeat_at(OBS_GameObj, insert_dim=0, n_times=self.n_agents) OBS_ALL_AGENTS = np.concatenate((OBS_ALL_AGENTS, OBS_GameObj), axis=1) return OBS_ALL_AGENTS def init_ground(self, agent_info, pos_ro): N_COL = 2 agent_class = agent_info['type'] team = agent_info['team'] n_team_agent = 10 tid = agent_info['tid'] uid = agent_info['uid'] x = 0 + 800*(tid - n_team_agent//2) //N_COL y = (400* (tid%N_COL) + 2000) * (-1)**(team+1) x,y = np.matmul(np.array([x,y]), np.array([[np.cos(pos_ro), -np.sin(pos_ro)], [np.sin(pos_ro), np.cos(pos_ro)] ])) z = 500 # 500 is slightly above the ground yaw = 90 if team==0 else -90 assert np.abs(x) < 15000.0 and np.abs(y) < 15000.0 agent_property = copy.deepcopy(AgentPropertyDefaults) agent_property.update({ 'DebugAgent': False, # max drive/fly speed 'MaxMoveSpeed': 720 if agent_class == 'RLA_CAR_Laser' else 600, # also influence object mass, please change it with causion! 
'AgentScale' : { 'x': 1, 'y': 1, 'z': 1, }, # probability of escaping dmg 闪避 "DodgeProb": 0.0, # ms explode dmg "ExplodeDmg": 20, # team belonging 'AgentTeam': team, # choose ue class to init 'ClassName': agent_class, # Weapon CD 'WeaponCD': 1, # open fire range "PerceptionRange": 2000 if agent_class == 'RLA_CAR_Laser' else 2500, "GuardRange": 1400 if agent_class == 'RLA_CAR_Laser' else 1700, "FireRange": 750 if agent_class == 'RLA_CAR_Laser' else 1400, # debugging 'RSVD1': '-Ring1=2000 -Ring2=1400 -Ring3=750' if agent_class == 'RLA_CAR_Laser' else '-Ring1=2500 -Ring2=1700 -Ring3=1400', # regular 'RSVD2': '-InitAct=ActionSet2::Idle;AsFarAsPossible', # agent hp 'AgentHp':np.random.randint(low=95,high=105) if agent_class == 'RLA_CAR_Laser' else np.random.randint(low=145,high=155), # the rank of agent inside the team 'IndexInTeam': tid, # the unique identity of this agent in simulation system 'UID': uid, # show color 'Color':'(R=0,G=1,B=0,A=1)' if team==0 else '(R=0,G=0,B=1,A=1)', # initial location 'InitLocation': { 'x': x, 'y': y, 'z': z, }, # initial facing direction et.al. 'InitRotator': { 'pitch': 0, 'roll': 0, 'yaw': yaw, }, }), return agent_property def init_air(self, agent_info, pos_ro): N_COL = 2 agent_class = agent_info['type'] team = agent_info['team'] n_team_agent = 10 tid = agent_info['tid'] uid = agent_info['uid'] x = 0 + 800*(tid - n_team_agent//2) //N_COL y = 2000 * (-1)**(team+1) x,y = np.matmul(np.array([x,y]), np.array([[np.cos(pos_ro), -np.sin(pos_ro)], [np.sin(pos_ro), np.cos(pos_ro)] ])) z = 1000 yaw = 90 if team==0 else -90 assert np.abs(x) < 15000.0 and np.abs(y) < 15000.0 agent_property = copy.deepcopy(AgentPropertyDefaults) agent_property.update({ 'DebugAgent': False, # max drive/fly speed 'MaxMoveSpeed': 900, # also influence object mass, please change it with causion! 
'AgentScale' : { 'x': 1, 'y': 1, 'z': 1, }, # probability of escaping dmg 闪避 "DodgeProb": 0.0, # ms explode dmg "ExplodeDmg": 10, # team belonging 'AgentTeam': team, # choose ue class to init 'ClassName': agent_class, # Weapon CD 'WeaponCD': 3, # open fire range "PerceptionRange": 2500, "GuardRange": 1800, "FireRange": 1700, # debugging 'RSVD1': '-ring1=2500 -ring2=1800 -ring3=1700', # regular 'RSVD2': '-InitAct=ActionSet2::Idle;StaticAlert', # agent hp 'AgentHp':50, # the rank of agent inside the team 'IndexInTeam': tid, # the unique identity of this agent in simulation system 'UID': uid, # show color 'Color':'(R=0,G=1,B=0,A=1)' if team==0 else '(R=0,G=0,B=1,A=1)', # initial location 'InitLocation': { 'x': x, 'y': y, 'z': z, }, # initial facing direction et.al. 'InitRotator': { 'pitch': 0, 'roll': 0, 'yaw': yaw, }, }), return agent_property ================================================ FILE: PythonExample/hmp_minimal_modules/MISSION/uhmap/SubTasks/UhmapLargeScaleConf.py ================================================ class SubTaskConfig(): agent_list = [ { 'team':0, 'tid':0, 'uid':0, 'n_team_agent':10, 'type':'RLA_CAR_Laser', 'init_fn_name':'init_ground', }, { 'team':0, 'tid':1, 'uid':1, 'n_team_agent':10, 'type':'RLA_CAR', 'init_fn_name':'init_ground', }, { 'team':0, 'tid':2, 'uid':2, 'n_team_agent':10, 'type':'RLA_CAR_Laser', 'init_fn_name':'init_ground', }, { 'team':0, 'tid':3, 'uid':3, 'n_team_agent':10, 'type':'RLA_CAR', 'init_fn_name':'init_ground', }, { 'team':0, 'tid':4, 'uid':4, 'n_team_agent':10, 'type':'RLA_CAR_Laser', 'init_fn_name':'init_ground', }, { 'team':0, 'tid':5, 'uid':5, 'n_team_agent':10, 'type':'RLA_CAR', 'init_fn_name':'init_ground', }, { 'team':0, 'tid':6, 'uid':6, 'n_team_agent':10, 'type':'RLA_CAR_Laser', 'init_fn_name':'init_ground', }, { 'team':0, 'tid':7, 'uid':7, 'n_team_agent':10, 'type':'RLA_CAR', 'init_fn_name':'init_ground', }, { 'team':0, 'tid':8, 'uid':8, 'n_team_agent':10, 'type':'RLA_CAR_Laser', 
'init_fn_name':'init_ground', }, { 'team':0, 'tid':9, 'uid':9, 'n_team_agent':10, 'type':'RLA_CAR', 'init_fn_name':'init_ground', }, { 'team':1, 'tid':0, 'uid':10, 'n_team_agent':10, 'type':'RLA_CAR_Laser', 'init_fn_name':'init_ground', }, { 'team':1, 'tid':1, 'uid':11, 'n_team_agent':10, 'type':'RLA_CAR', 'init_fn_name':'init_ground', }, { 'team':1, 'tid':2, 'uid':12, 'n_team_agent':10, 'type':'RLA_CAR_Laser', 'init_fn_name':'init_ground', }, { 'team':1, 'tid':3, 'uid':13, 'n_team_agent':10, 'type':'RLA_CAR', 'init_fn_name':'init_ground', }, { 'team':1, 'tid':4, 'uid':14, 'n_team_agent':10, 'type':'RLA_CAR_Laser', 'init_fn_name':'init_ground', }, { 'team':1, 'tid':5, 'uid':15, 'n_team_agent':10, 'type':'RLA_CAR', 'init_fn_name':'init_ground', }, { 'team':1, 'tid':6, 'uid':16, 'n_team_agent':10, 'type':'RLA_CAR_Laser', 'init_fn_name':'init_ground', }, { 'team':1, 'tid':7, 'uid':17, 'n_team_agent':10, 'type':'RLA_CAR', 'init_fn_name':'init_ground', }, { 'team':1, 'tid':8, 'uid':18, 'n_team_agent':10, 'type':'RLA_CAR_Laser', 'init_fn_name':'init_ground', }, { 'team':1, 'tid':9, 'uid':19, 'n_team_agent':10, 'type':'RLA_CAR', 'init_fn_name':'init_ground', }, ] obs_vec_length = 23 obs_n_entity = 11 ActionFormat = 'Multi-Digit' ================================================ FILE: PythonExample/hmp_minimal_modules/MISSION/uhmap/SubTasks/UhmapPreyPredator.py ================================================ import json, copy, re, os, inspect, os import numpy as np from UTIL.tensor_ops import my_view, repeat_at from ...common.base_env import RawObsArray from ..actionset_v3 import digitsToStrAction from ..agent import Agent from ..uhmap_env_wrapper import UhmapEnv, ScenarioConfig from .UhmapPreyPredatorConf import SubTaskConfig from .SubtaskCommonFn import UhmapCommonFn from .cython_func import tear_num_arr class UhmapPreyPredator(UhmapCommonFn, UhmapEnv): def __init__(self, rank) -> None: super().__init__(rank) self.observation_space = self.make_obs(get_shape=True) 
self.SubTaskConfig = SubTaskConfig inspect.getfile(SubTaskConfig) assert os.path.basename(inspect.getfile(SubTaskConfig)) == type(self).__name__+'Conf.py', \ ('make sure you have imported the correct SubTaskConfig class') def init_ground(self, agent_info, pos_ro): N_COL = 4 agent_class = agent_info['type'] team = agent_info['team'] n_team_agent = 50 tid = agent_info['tid'] uid = agent_info['uid'] x = 0 + 800*(tid - n_team_agent//2) //N_COL y = (400* (tid%N_COL) + 2000) * (-1)**(team+1) x,y = np.matmul(np.array([x,y]), np.array([[np.cos(pos_ro), -np.sin(pos_ro)], [np.sin(pos_ro), np.cos(pos_ro)] ])) z = 500 # 500 is slightly above the ground yaw = 90 if team==0 else -90 assert np.abs(x) < 15000.0 and np.abs(y) < 15000.0 agent_property = copy.deepcopy(SubTaskConfig.AgentPropertyDefaults) agent_property.update({ 'DebugAgent': False, # max drive/fly speed 'MaxMoveSpeed': 720 if agent_class == 'RLA_CAR_Laser' else 600, # also influence object mass, please change it with causion! 'AgentScale' : { 'x': 1, 'y': 1, 'z': 1, }, # team belonging 'AgentTeam': team, # choose ue class to init 'ClassName': agent_class, # debugging 'RSVD1': '-Ring1=2000 -Ring2=1400 -Ring3=750', # the rank of agent inside the team 'IndexInTeam': tid, # the unique identity of this agent in simulation system 'UID': uid, # show color 'Color':'(R=0,G=1,B=0,A=1)' if team==0 else '(R=0,G=0,B=1,A=1)', # initial location 'InitLocation': { 'x': x, 'y': y, 'z': z, }, # initial facing direction et.al. 
'InitRotator': { 'pitch': 0, 'roll': 0, 'yaw': yaw, }, }), return agent_property def extract_key_gameobj(self, resp): """ 获取非智能体的仿真物件,例如重要landmark等 """ keyObjArr = resp['dataGlobal']['keyObjArr'] return keyObjArr def gen_reward_and_win(self, resp): """ 奖励的设计在此定义, (UE端编程死板,虽然预留了相关字段, 但请不要在UE端提供奖励的定义。) 建议:在UE端定义触发奖励的事件,如智能体阵亡、战术目标完成等,见parse_event """ reward = [0]*self.n_teams events = resp['dataGlobal']['events'] WinningResult = None for event in events: event_parsed = self.parse_event(event) # if event_parsed['Event'] == 'Destroyed': # team = self.find_agent_by_uid(event_parsed['UID']).team # reward[team] -= 0.05 # this team # reward[1-team] += 0.10 # opp team if event_parsed['Event'] == 'EndEpisode': # print([a.alive * a.hp for a in self.agents]) PredatorWin = False PredatorRank = False PredatorReward = 0 PreyWin = -1 PreyRank = -1 PreyReward = 0 EndReason = event_parsed['EndReason'] # According to MISSION\uhmap\SubTasks\UhmapPreyPredatorConf.py, team 0 is prey team, team 1 is predator team if EndReason == "AllPreyCaught" or EndReason == "Team_0_AllDead": PredatorWin = True; PredatorRank = 0; PredatorReward = 1 PreyWin = False; PreyRank = 1; PreyReward = -1 elif EndReason == "TimeMaxCntReached" or EndReason == "Team_1_AllDead": PredatorWin = False; PredatorRank = 1; PredatorReward = -1 PreyWin = True; PreyRank = 0; PreyReward = 1 else: print('unexpected end reaon:', EndReason) WinningResult = {"team_ranking": [PreyRank, PredatorRank], "end_reason": EndReason} reward = [PreyReward, PredatorReward] # print(reward) return reward, WinningResult @staticmethod def item_random_mv(src,dst,prob,rand=False): assert len(src.shape)==1; assert len(dst.shape)==1 if rand: np.random.shuffle(src) len_src = len(src) n_mv = (np.random.rand(len_src) < prob).sum() item_mv = src[range(len_src-n_mv,len_src)] src = src[range(0,0+len_src-n_mv)] dst = np.concatenate((item_mv, dst)) return src, dst @staticmethod def get_binary_array(n_int, n_bits=8, dtype=np.float32): arr = 
np.zeros((*n_int.shape, n_bits), dtype=dtype) for i in range(n_bits): arr[:, i] = (n_int%2==1).astype(int) n_int = n_int / 2 n_int = n_int.astype(np.int8) return arr def make_obs(self, resp=None, get_shape=False): # CORE_DIM = 38 CORE_DIM = 23 assert ScenarioConfig.obs_vec_length == CORE_DIM if get_shape: return CORE_DIM # temporary parameters OBS_RANGE_PYTHON_SIDE = 15000 MAX_NUM_OPP_OBS = 5 MAX_NUM_ALL_OBS = 5 # get and calculate distance array pos3d_arr = np.zeros(shape=(self.n_agents, 3), dtype=np.float32) for i, agent in enumerate(self.agents): pos3d_arr[i] = agent.pos3d # use the distance matrix calculated by unreal engine to accelerate # dis_mat = distance_matrix(pos3d_arr) # dis_mat is a matrix, shape = (n_agent, n_agent) dis_mat = resp['dataGlobal']['distanceMat'] alive_all = np.array([agent.alive for agent in self.agents]) try: dis_mat[~alive_all,:] = +np.inf dis_mat[:,~alive_all] = +np.inf except: pass # get team list team_belonging = np.array([agent.team for agent in self.agents]) # gather the obs arr of all known agents obs_arr = RawObsArray(key='Agent') if not hasattr(self, "uid_binary"): self.uid_binary = self.get_binary_array(np.arange(self.n_agents), 10) for i, agent in enumerate(self.agents): assert agent.location is not None assert agent.uid == i obs_arr.append( self.uid_binary[i] # 0~9 ) obs_arr.append([ agent.index, # 10 agent.team, # 11 agent.alive, # 12 agent.uid_remote, # 13 ]) obs_arr.append( #[14,15,16,17,18,19] agent.pos3d # tear_num_arr(agent.pos3d, n_digits=6, base=10, mv_left=0) # tear_num_arr(agent.pos3d, 6, ScenarioConfig.ObsBreakBase, 0) # 3 -- > 3*6 = 18 , 18-3=15, 23+15 = 38 ) obs_arr.append( agent.vel3d ) obs_arr.append([ agent.hp, agent.yaw, agent.max_speed, ]) obs_ = obs_arr.get() new_obs = my_view(obs_, [self.n_agents, -1]) assert CORE_DIM == new_obs.shape[-1] OBS_ALL_AGENTS = np.zeros(shape=(self.n_agents, MAX_NUM_OPP_OBS+MAX_NUM_ALL_OBS, CORE_DIM)) # now arranging the individual obs for i, agent in enumerate(self.agents): if 
not agent.alive: OBS_ALL_AGENTS[i, :] = np.nan continue # if alive # scope dis2all = dis_mat[i, :] is_ally = (team_belonging == agent.team) # scope a2h_dis = dis2all[~is_ally] h_alive = alive_all[~is_ally] h_feature = new_obs[~is_ally] h_iden_sort = np.argsort(a2h_dis)[:MAX_NUM_OPP_OBS] a2h_dis_sorted = a2h_dis[h_iden_sort] h_alive_sorted = h_alive[h_iden_sort] h_vis_mask = (a2h_dis_sorted <= OBS_RANGE_PYTHON_SIDE) & h_alive_sorted # scope h_vis_index = h_iden_sort[h_vis_mask] h_invis_index = h_iden_sort[~h_vis_mask] h_vis_index, h_invis_index = self.item_random_mv(src=h_vis_index, dst=h_invis_index,prob=0, rand=True) h_ind = np.concatenate((h_vis_index, h_invis_index)) h_msk = np.concatenate((h_vis_index<0, h_invis_index>=0)) # "<0" project to False; ">=0" project to True a2h_feature_sort = h_feature[h_ind] a2h_feature_sort[h_msk] = 0 if len(a2h_feature_sort) a2f_dis = dis2all[is_ally] f_alive = alive_all[is_ally] f_feature = new_obs[is_ally] f_iden_sort = np.argsort(a2f_dis)[:MAX_NUM_ALL_OBS] a2f_dis_sorted = a2f_dis[f_iden_sort] f_alive_sorted = f_alive[f_iden_sort] f_vis_mask = (a2f_dis_sorted <= OBS_RANGE_PYTHON_SIDE) & f_alive_sorted # scope f_vis_index = f_iden_sort[f_vis_mask] self_vis_index = f_vis_index[:1] # seperate self and ally f_vis_index = f_vis_index[1:] # seperate self and ally f_invis_index = f_iden_sort[~f_vis_mask] f_vis_index, f_invis_index = self.item_random_mv(src=f_vis_index, dst=f_invis_index,prob=0, rand=True) f_ind = np.concatenate((self_vis_index, f_vis_index, f_invis_index)) f_msk = np.concatenate((self_vis_index<0, f_vis_index<0, f_invis_index>=0)) # "<0" project to False; ">=0" project to True self_ally_feature_sort = f_feature[f_ind] self_ally_feature_sort[f_msk] = 0 if len(self_ally_feature_sort) None: super().__init__(rank) self.observation_space = self.make_obs(get_shape=True) self.SubTaskConfig = SubTaskConfig inspect.getfile(SubTaskConfig) assert os.path.basename(inspect.getfile(SubTaskConfig)) == type(self).__name__+'Conf.py', 
\ ('make sure you have imported the correct SubTaskConfig class') def reset(self): """ Reset function, it delivers reset command to unreal engine to spawn all agents 环境复位,每个episode的开始会执行一次此函数中会初始化所有智能体 """ super().reset() self.t = 0 pos_ro = np.random.rand()*2*np.pi # spawn agents AgentSettingArray = [] # count the number of agent in each team n_team_agent = {} for i, agent_info in enumerate(SubTaskConfig.agent_list): team = agent_info['team'] if team not in n_team_agent: n_team_agent[team] = 0 SubTaskConfig.agent_list[i]['uid'] = i SubTaskConfig.agent_list[i]['tid'] = n_team_agent[team] n_team_agent[team] += 1 # push agent init info one by one for i, agent_info in enumerate(SubTaskConfig.agent_list): team = agent_info['team'] agent_info['n_team_agent'] = n_team_agent[team] init_fn = getattr(self, agent_info['init_fn_name']) AgentSettingArray.append(init_fn(agent_info, pos_ro)) self.agents = [Agent(team=a['team'], team_id=a['tid'], uid=a['uid']) for a in SubTaskConfig.agent_list] # refer to struct.cpp, FParsedDataInput resp = self.client.send_and_wait_reply(json.dumps({ 'valid': True, 'DataCmd': 'reset', 'NumAgents' : len(SubTaskConfig.agent_list), 'AgentSettingArray': AgentSettingArray, # refer to struct.cpp, FAgentProperty 'TimeStepMax': ScenarioConfig.MaxEpisodeStep, 'TimeStep' : 0, 'Actions': None, })) resp = json.loads(resp) # make sure the map (level in UE) is correct # assert resp['dataGlobal']['levelName'] == 'UhmapLargeScale' assert len(resp['dataArr']) == len(AgentSettingArray) return self.parse_response_ob_info(resp) def step(self, act): """ step 函数,act中包含了所有agent的决策 """ assert len(act) == self.n_agents # translate actions to the format recognized by unreal engine if ScenarioConfig.ActionFormat == 'Single-Digit': act_send = [digit2act_dictionary[a] for a in act] elif ScenarioConfig.ActionFormat == 'Multi-Digit': act_send = [decode_action_as_string(a) for a in act] elif ScenarioConfig.ActionFormat == 'ASCII': act_send = [digitsToStrAction(a) for a in act] 
else: raise "ActionFormat is wrong!" # simulation engine IO resp = json.loads(self.client.send_and_wait_reply(json.dumps({ 'valid': True, 'DataCmd': 'step', 'TimeStep': self.t, 'Actions': None, 'StringActions': act_send, }))) # get obs for RL, info for script AI ob, info = self.parse_response_ob_info(resp) # generate reward, get the episode ending infomation RewardForAllTeams, WinningResult = self.gen_reward_and_win(resp) if WinningResult is not None: info.update(WinningResult) assert resp['dataGlobal']['episodeDone'] done = True else: done = False if resp['dataGlobal']['timeCnt'] >= ScenarioConfig.MaxEpisodeStep: assert done return (ob, RewardForAllTeams, done, info) # choose this if RewardAsUnity def parse_event(self, event): """ 解析环境返回的一些关键事件, 如智能体阵亡,某队伍胜利等等。 关键事件需要在ue中进行定义. 该设计极大地简化了python端奖励的设计流程, 减小了python端的运算量。 """ if not hasattr(self, 'pattern'): self.pattern = re.compile(r'<([^<>]*)>([^<>]*)') return {k:v for k,v in re.findall(self.pattern, event)} def extract_key_gameobj(self, resp): """ 获取非智能体的仿真物件,例如重要landmark等 """ keyObjArr = resp['dataGlobal']['keyObjArr'] return keyObjArr def gen_reward_and_win(self, resp): """ 奖励的设计在此定义, (UE端编程死板,虽然预留了相关字段, 但请不要在UE端提供奖励的定义。) 建议:在UE端定义触发奖励的事件,如智能体阵亡、战术目标完成等,见parse_event """ reward = [0]*self.n_teams events = resp['dataGlobal']['events'] WinningResult = None for event in events: event_parsed = self.parse_event(event) if event_parsed['Event'] == 'EndEpisode': PreyRank = -1 PreyReward = 0 EndReason = event_parsed['EndReason'] WinningResult = {"team_ranking": [0], "end_reason": EndReason} reward = [0] return reward, WinningResult def step_skip(self): """ 跳过一次决策,无用的函数 """ return self.client.send_and_wait_reply(json.dumps({ 'valid': True, 'DataCmd': 'skip_frame', })) def find_agent_by_uid(self, uid): """ 用uid查找智能体(带缓存加速机制) """ if not hasattr(self, 'uid_to_agent_dict'): self.uid_to_agent_dict = {} self.uid_to_agent_dict.update({agent.uid:agent for agent in self.agents}) if isinstance(uid, str): 
self.uid_to_agent_dict.update({str(agent.uid):agent for agent in self.agents}) return self.uid_to_agent_dict[uid] def parse_response_ob_info(self, resp): """ 粗解析智能体的观测,例如把死智能体的位置替换为inf(无穷远), 将智能体的agentLocation从字典形式转变为更简洁的(x,y,z)tuple形式 """ assert resp['valid'] resp['dataGlobal']['distanceMat'] = np.array(resp['dataGlobal']['distanceMat']['flat_arr']).reshape(self.n_agents,self.n_agents) if len(resp['dataGlobal']['events'])>0: tmp = [kv.split('>') for kv in resp['dataGlobal']['events'][0].split('<') if kv] info_parse = {t[0]:t[1] for t in tmp} info_dict = resp for info in info_dict['dataArr']: alive = info['agentAlive'] if alive: agentLocation = info.pop('agentLocation') agentRotation = info.pop('agentRotation') agentVelocity = info.pop('agentVelocity') agentScale = info.pop('agentScale') info['agentLocationArr'] = (agentLocation['x'], agentLocation['y'], agentLocation['z']) info['agentVelocityArr'] = (agentVelocity['x'], agentVelocity['y'], agentVelocity['z']) info['agentRotationArr'] = (agentRotation['yaw'], agentRotation['pitch'], agentRotation['roll']) info['agentScaleArr'] = (agentScale['x'], agentScale['y'], agentScale['z']) info.pop('previousAction') info.pop('availActions') # info.pop('rSVD1') info.pop('interaction') else: inf = float('inf') info['agentLocationArr'] = (inf, inf, inf) info['agentVelocityArr'] = (inf, inf, inf) info['agentRotationArr'] = (inf, inf, inf) info = resp['dataArr'] for i, agent_info in enumerate(info): self.agents[i].update_agent_attrs(agent_info) self.key_obj = self.extract_key_gameobj(resp) # return ob, info return self.make_obs(resp), info_dict @staticmethod def item_random_mv(src,dst,prob,rand=False): assert len(src.shape)==1; assert len(dst.shape)==1 if rand: np.random.shuffle(src) len_src = len(src) n_mv = (np.random.rand(len_src) < prob).sum() item_mv = src[range(len_src-n_mv,len_src)] src = src[range(0,0+len_src-n_mv)] dst = np.concatenate((item_mv, dst)) return src, dst @staticmethod def get_binary_array(n_int, n_bits=8, 
dtype=np.float32): arr = np.zeros((*n_int.shape, n_bits), dtype=dtype) for i in range(n_bits): arr[:, i] = (n_int%2==1).astype(int) n_int = n_int / 2 n_int = n_int.astype(np.int8) return arr def make_obs(self, resp=None, get_shape=False): # CORE_DIM = 38 CORE_DIM = 23 assert ScenarioConfig.obs_vec_length == CORE_DIM if get_shape: return CORE_DIM # temporary parameters OBS_RANGE_PYTHON_SIDE = 15000 MAX_NUM_OPP_OBS = 5 MAX_NUM_ALL_OBS = 5 # get and calculate distance array pos3d_arr = np.zeros(shape=(self.n_agents, 3), dtype=np.float32) for i, agent in enumerate(self.agents): pos3d_arr[i] = agent.pos3d # use the distance matrix calculated by unreal engine to accelerate # dis_mat = distance_matrix(pos3d_arr) # dis_mat is a matrix, shape = (n_agent, n_agent) dis_mat = resp['dataGlobal']['distanceMat'] alive_all = np.array([agent.alive for agent in self.agents]) try: dis_mat[~alive_all,:] = +np.inf dis_mat[:,~alive_all] = +np.inf except: pass # get team list team_belonging = np.array([agent.team for agent in self.agents]) # gather the obs arr of all known agents obs_arr = RawObsArray(key='Agent') if not hasattr(self, "uid_binary"): self.uid_binary = self.get_binary_array(np.arange(self.n_agents), 10) for i, agent in enumerate(self.agents): assert agent.location is not None assert agent.uid == i obs_arr.append( self.uid_binary[i] # 0~9 ) obs_arr.append([ agent.index, # 10 agent.team, # 11 agent.alive, # 12 agent.uid_remote, # 13 ]) obs_arr.append( #[14,15,16,17,18,19] agent.pos3d # tear_num_arr(agent.pos3d, n_digits=6, base=10, mv_left=0) # tear_num_arr(agent.pos3d, 6, ScenarioConfig.ObsBreakBase, 0) # 3 -- > 3*6 = 18 , 18-3=15, 23+15 = 38 ) obs_arr.append( agent.vel3d ) obs_arr.append([ agent.hp, agent.yaw, agent.max_speed, ]) obs_ = obs_arr.get() new_obs = my_view(obs_, [self.n_agents, -1]) assert CORE_DIM == new_obs.shape[-1] OBS_ALL_AGENTS = np.zeros(shape=(self.n_agents, MAX_NUM_OPP_OBS+MAX_NUM_ALL_OBS, CORE_DIM)) # now arranging the individual obs for i, agent in 
enumerate(self.agents): if not agent.alive: OBS_ALL_AGENTS[i, :] = np.nan continue # if alive # scope dis2all = dis_mat[i, :] is_ally = (team_belonging == agent.team) # scope a2h_dis = dis2all[~is_ally] h_alive = alive_all[~is_ally] h_feature = new_obs[~is_ally] h_iden_sort = np.argsort(a2h_dis)[:MAX_NUM_OPP_OBS] a2h_dis_sorted = a2h_dis[h_iden_sort] h_alive_sorted = h_alive[h_iden_sort] h_vis_mask = (a2h_dis_sorted <= OBS_RANGE_PYTHON_SIDE) & h_alive_sorted # scope h_vis_index = h_iden_sort[h_vis_mask] h_invis_index = h_iden_sort[~h_vis_mask] h_vis_index, h_invis_index = self.item_random_mv(src=h_vis_index, dst=h_invis_index,prob=0, rand=True) h_ind = np.concatenate((h_vis_index, h_invis_index)) h_msk = np.concatenate((h_vis_index<0, h_invis_index>=0)) # "<0" project to False; ">=0" project to True a2h_feature_sort = h_feature[h_ind] a2h_feature_sort[h_msk] = 0 if len(a2h_feature_sort) a2f_dis = dis2all[is_ally] f_alive = alive_all[is_ally] f_feature = new_obs[is_ally] f_iden_sort = np.argsort(a2f_dis)[:MAX_NUM_ALL_OBS] a2f_dis_sorted = a2f_dis[f_iden_sort] f_alive_sorted = f_alive[f_iden_sort] f_vis_mask = (a2f_dis_sorted <= OBS_RANGE_PYTHON_SIDE) & f_alive_sorted # scope f_vis_index = f_iden_sort[f_vis_mask] self_vis_index = f_vis_index[:1] # seperate self and ally f_vis_index = f_vis_index[1:] # seperate self and ally f_invis_index = f_iden_sort[~f_vis_mask] f_vis_index, f_invis_index = self.item_random_mv(src=f_vis_index, dst=f_invis_index,prob=0, rand=True) f_ind = np.concatenate((self_vis_index, f_vis_index, f_invis_index)) f_msk = np.concatenate((self_vis_index<0, f_vis_index<0, f_invis_index>=0)) # "<0" project to False; ">=0" project to True self_ally_feature_sort = f_feature[f_ind] self_ally_feature_sort[f_msk] = 0 if len(self_ally_feature_sort) None: super().__init__(rank) self.observation_space = self.make_obs(get_shape=True) self.SubTaskConfig = SubTaskConfig inspect.getfile(SubTaskConfig) assert os.path.basename(inspect.getfile(SubTaskConfig)) == 
type(self).__name__+'Conf.py', \ ('make sure you have imported the correct SubTaskConfig class') def init_ship(self, agent_info, pos_ro): agent_class = agent_info['type'] team = agent_info['team'] tid = agent_info['tid'] # tid 是智能体在队伍中的编号 uid = agent_info['uid'] # uid 是智能体在仿真中的唯一编号 x = -2000 y = (tid * 1000) # tid 是智能体在队伍中的编号 z = 500 # agent_property = copy.deepcopy(SubTaskConfig.AgentPropertyDefaults) agent_property.update({ 'DebugAgent': False, # max drive/fly speed 'MaxMoveSpeed': 500, # also influence object mass, please change it with causion! 'AgentScale' : { 'x': 1, 'y': 1, 'z': 1, }, # team belonging 'AgentTeam': team, # choose ue class to init 'ClassName': agent_class, # custom args 'RSVD1': '', # the rank of agent inside the team 'IndexInTeam': tid, # the unique identity of this agent in simulation system 'UID': uid, # show color 'Color':'(R=0,G=1,B=0,A=1)' if team==0 else '(R=0,G=0,B=1,A=1)', # initial location 'InitLocation': { 'x': x, 'y': y, 'z': z, }, # initial facing direction et.al. 'InitRotator': { 'pitch': 0, 'roll': 0, 'yaw': 0, }, }), return agent_property def init_waterdrop(self, agent_info, pos_ro): agent_class = agent_info['type'] team = agent_info['team'] tid = agent_info['tid'] uid = agent_info['uid'] x = +2000 y = (tid * 200) z = 500 # agent_property = copy.deepcopy(SubTaskConfig.AgentPropertyDefaults) agent_property.update({ 'DebugAgent': False, # max drive/fly speed 'MaxMoveSpeed': 1000, # also influence object mass, please change it with causion! 'AgentScale' : { 'x': 1, 'y': 1, 'z': 1, }, # team belonging 'AgentTeam': team, # choose ue class to init 'ClassName': agent_class, # custom args 'RSVD1': '-MyCustomArg1=abc -MyCustomArg2=12345', # the rank of agent inside the team 'IndexInTeam': tid, # the unique identity of this agent in simulation system 'UID': uid, # show color 'Color':'(R=0,G=1,B=0,A=1)' if team==0 else '(R=0,G=0,B=1,A=1)', # initial location 'InitLocation': { 'x': x, 'y': y, 'z': z, }, # initial facing direction et.al. 
'InitRotator': { 'pitch': 0, 'roll': 0, 'yaw': 0, }, }), return agent_property def extract_key_gameobj(self, resp): """ 获取非智能体的仿真物件,例如重要landmark等 """ keyObjArr = resp['dataGlobal']['keyObjArr'] return keyObjArr def gen_reward_and_win(self, resp): """ 奖励的设计在此定义, (UE端编程死板,虽然预留了相关字段, 但请不要在UE端提供奖励的定义。) 建议:在UE端定义触发奖励的事件,如智能体阵亡、战术目标完成等,见parse_event """ reward = [0]*self.n_teams events = resp['dataGlobal']['events'] WinningResult = None for event in events: event_parsed = self.parse_event(event) # if event_parsed['Event'] == 'Destroyed': # team = self.find_agent_by_uid(event_parsed['UID']).team # reward[team] -= 0.05 # this team # reward[1-team] += 0.10 # opp team if event_parsed['Event'] == 'EndEpisode': # print([a.alive * a.hp for a in self.agents]) WaterdropWin = False WaterdropRank = False WaterdropReward = 0 ShipWin = -1 ShipRank = -1 ShipReward = 0 EndReason = event_parsed['EndReason'] # According to MISSION\uhmap\SubTasks\UhmapWaterdropConf.py, team 0 is Ship team, team 1 is Waterdrop team if EndReason == "ShipNumLessThanTheshold" or EndReason == "Team_0_AllDead": WaterdropWin = True; WaterdropRank = 0; WaterdropReward = 1 ShipWin = False; ShipRank = 1; ShipReward = -1 elif EndReason == "TimeMaxCntReached" or EndReason == "Team_1_AllDead": WaterdropWin = False; WaterdropRank = 1; WaterdropReward = -1 ShipWin = True; ShipRank = 0; ShipReward = 1 else: print('unexpected end reaon:', EndReason) WinningResult = {"team_ranking": [ShipRank, WaterdropRank], "end_reason": EndReason} reward = [ShipReward, WaterdropReward] # print(reward) return reward, WinningResult @staticmethod def item_random_mv(src,dst,prob,rand=False): assert len(src.shape)==1; assert len(dst.shape)==1 if rand: np.random.shuffle(src) len_src = len(src) n_mv = (np.random.rand(len_src) < prob).sum() item_mv = src[range(len_src-n_mv,len_src)] src = src[range(0,0+len_src-n_mv)] dst = np.concatenate((item_mv, dst)) return src, dst @staticmethod def get_binary_array(n_int, n_bits=8, dtype=np.float32): arr = 
np.zeros((*n_int.shape, n_bits), dtype=dtype) for i in range(n_bits): arr[:, i] = (n_int%2==1).astype(int) n_int = n_int / 2 n_int = n_int.astype(np.int8) return arr def make_obs(self, resp=None, get_shape=False): # CORE_DIM = 38 CORE_DIM = 23 assert ScenarioConfig.obs_vec_length == CORE_DIM if get_shape: return CORE_DIM # temporary parameters OBS_RANGE_PYTHON_SIDE = 15000 MAX_NUM_OPP_OBS = 5 MAX_NUM_ALL_OBS = 5 # get and calculate distance array pos3d_arr = np.zeros(shape=(self.n_agents, 3), dtype=np.float32) for i, agent in enumerate(self.agents): pos3d_arr[i] = agent.pos3d # use the distance matrix calculated by unreal engine to accelerate # dis_mat = distance_matrix(pos3d_arr) # dis_mat is a matrix, shape = (n_agent, n_agent) dis_mat = resp['dataGlobal']['distanceMat'] alive_all = np.array([agent.alive for agent in self.agents]) try: dis_mat[~alive_all,:] = +np.inf dis_mat[:,~alive_all] = +np.inf except: pass # get team list team_belonging = np.array([agent.team for agent in self.agents]) # gather the obs arr of all known agents obs_arr = RawObsArray(key='Agent') if not hasattr(self, "uid_binary"): self.uid_binary = self.get_binary_array(np.arange(self.n_agents), 10) for i, agent in enumerate(self.agents): assert agent.location is not None assert agent.uid == i obs_arr.append( self.uid_binary[i] # 0~9 ) obs_arr.append([ agent.index, # 10 agent.team, # 11 agent.alive, # 12 agent.uid_remote, # 13 ]) obs_arr.append( #[14,15,16,17,18,19] agent.pos3d # tear_num_arr(agent.pos3d, n_digits=6, base=10, mv_left=0) # tear_num_arr(agent.pos3d, 6, ScenarioConfig.ObsBreakBase, 0) # 3 -- > 3*6 = 18 , 18-3=15, 23+15 = 38 ) obs_arr.append( agent.vel3d ) obs_arr.append([ agent.hp, agent.yaw, agent.max_speed, ]) obs_ = obs_arr.get() new_obs = my_view(obs_, [self.n_agents, -1]) assert CORE_DIM == new_obs.shape[-1] OBS_ALL_AGENTS = np.zeros(shape=(self.n_agents, MAX_NUM_OPP_OBS+MAX_NUM_ALL_OBS, CORE_DIM)) # now arranging the individual obs for i, agent in enumerate(self.agents): if 
not agent.alive: OBS_ALL_AGENTS[i, :] = np.nan continue # if alive # scope dis2all = dis_mat[i, :] is_ally = (team_belonging == agent.team) # scope a2h_dis = dis2all[~is_ally] h_alive = alive_all[~is_ally] h_feature = new_obs[~is_ally] h_iden_sort = np.argsort(a2h_dis)[:MAX_NUM_OPP_OBS] a2h_dis_sorted = a2h_dis[h_iden_sort] h_alive_sorted = h_alive[h_iden_sort] h_vis_mask = (a2h_dis_sorted <= OBS_RANGE_PYTHON_SIDE) & h_alive_sorted # scope h_vis_index = h_iden_sort[h_vis_mask] h_invis_index = h_iden_sort[~h_vis_mask] h_vis_index, h_invis_index = self.item_random_mv(src=h_vis_index, dst=h_invis_index,prob=0, rand=True) h_ind = np.concatenate((h_vis_index, h_invis_index)) h_msk = np.concatenate((h_vis_index<0, h_invis_index>=0)) # "<0" project to False; ">=0" project to True a2h_feature_sort = h_feature[h_ind] a2h_feature_sort[h_msk] = 0 if len(a2h_feature_sort) a2f_dis = dis2all[is_ally] f_alive = alive_all[is_ally] f_feature = new_obs[is_ally] f_iden_sort = np.argsort(a2f_dis)[:MAX_NUM_ALL_OBS] a2f_dis_sorted = a2f_dis[f_iden_sort] f_alive_sorted = f_alive[f_iden_sort] f_vis_mask = (a2f_dis_sorted <= OBS_RANGE_PYTHON_SIDE) & f_alive_sorted # scope f_vis_index = f_iden_sort[f_vis_mask] self_vis_index = f_vis_index[:1] # seperate self and ally f_vis_index = f_vis_index[1:] # seperate self and ally f_invis_index = f_iden_sort[~f_vis_mask] f_vis_index, f_invis_index = self.item_random_mv(src=f_vis_index, dst=f_invis_index,prob=0, rand=True) f_ind = np.concatenate((self_vis_index, f_vis_index, f_invis_index)) f_msk = np.concatenate((self_vis_index<0, f_vis_index<0, f_invis_index>=0)) # "<0" project to False; ">=0" project to True self_ally_feature_sort = f_feature[f_ind] self_ally_feature_sort[f_msk] = 0 if len(self_ally_feature_sort) parts = tear_number_apart(255, n_digit=10, base=2, mv_left=1) print(parts) comb_num_back(parts, n_digit=10, base=2, mv_left=1) test <2> parts = tear_number_apart(255.778, n_digit=10, base=10, mv_left=-1) print(parts) comb_num_back(parts, 
n_digit=10, base=10, mv_left=-1) test <3> for i in range(1000): q = (np.random.rand() - 0.5)*1e3 parts = tear_number_apart(q, n_digit=10, base=10, mv_left=0) print(q, parts) res = np.abs(comb_num_back(parts, n_digit=10, base=10, mv_left=0)-q) < 1e-6 if not res: print('??? np.abs(comb_num_back(parts, n_digit=10, base=10, mv_left=0)-q)', np.abs(comb_num_back(parts, n_digit=10, base=10, mv_left=0)-q)) assert False ''' ================================================ FILE: PythonExample/hmp_minimal_modules/MISSION/uhmap/actionset.py ================================================ import numpy as np ActDigitLen = 100 def strActionToDigits(act_string): t = [ord(c) for c in act_string] d_len = len(t) assert d_len <= ActDigitLen, ("Action string is tooo long! Don't be wordy. Or you can increase ActDigitLen above.") pad = [-1 for _ in range(ActDigitLen-d_len)] return (t+pad) def digitsToStrAction(digits): if all([a==0 for a in digits]): return 'ActionSet3::N/A;N/A' arr = [chr(d) for d in digits.astype(int) if d >= 0] return ''.join(arr) """ 'ActionSet3::ChangeHeight;100' """ ================================================ FILE: PythonExample/hmp_minimal_modules/MISSION/uhmap/actionset_v3.py ================================================ import numpy as np ActDigitLen = 100 def strActionToDigits(act_string): t = [ord(c) for c in act_string] d_len = len(t) assert d_len <= ActDigitLen, ("Action string is tooo long! Don't be wordy. 
Or you can increase ActDigitLen above.") pad = [-1 for _ in range(ActDigitLen-d_len)] return (t+pad) def digitsToStrAction(digits): if all([a==0 for a in digits]): return 'ActionSet3::N/A;N/A' arr = [chr(d) for d in digits.astype(int) if d >= 0] return ''.join(arr) """ 'ActionSet3::ChangeHeight;100' """ ================================================ FILE: PythonExample/hmp_minimal_modules/MISSION/uhmap/actset_lookup.py ================================================ import numpy as np # # # # # # # # # # # # # # # # # # # # # # # # # # # # Part 1, interface for RL # # # # # # # # # # # # # # # # # # # # # # # # # # # # dictionary_items = [ 'ActionSet2::N/A;N/A', # 0 'ActionSet2::Idle;DynamicGuard' , # 1 'ActionSet2::Idle;StaticAlert' , # 2 'ActionSet2::Idle;AggressivePersue' , # 3 'ActionSet2::SpecificMoving;Dir+X' , # 4 'ActionSet2::SpecificMoving;Dir+Y' , # 5 'ActionSet2::SpecificMoving;Dir-X' , # 6 'ActionSet2::SpecificMoving;Dir-Y' , # 7 'ActionSet2::SpecificAttacking;T1-0', # 8 'ActionSet2::SpecificAttacking;T1-1', # 9 'ActionSet2::SpecificAttacking;T1-2', # 10 'ActionSet2::SpecificAttacking;T1-3', # 11 'ActionSet2::SpecificAttacking;T1-4', # 12 'ActionSet2::SpecificAttacking;T0-0', # 13 'ActionSet2::SpecificAttacking;T0-1', # 14 'ActionSet2::SpecificAttacking;T0-2', # 15 'ActionSet2::SpecificAttacking;T0-3', # 16 'ActionSet2::SpecificAttacking;T0-4', # 17 'ActionSet2::PatrolMoving;Dir+X' , 'ActionSet2::PatrolMoving;Dir+Y' , 'ActionSet2::PatrolMoving;Dir-X' , 'ActionSet2::PatrolMoving;Dir-Y' , 'ActionSet2::Idle;AsFarAsPossible', 'ActionSet2::Idle;StayWhenTargetInRange', 'ActionSet2::Idle;StayWhenTargetInHalfRange' , ] dictionary_n_actions = len(dictionary_items) digit2act_dictionary = { i: dictionary_items[i] for i, item in enumerate(dictionary_items) } act2digit_dictionary = { dictionary_items[i]:i for i, item in enumerate(dictionary_items) } # # # # # # # # # # # # # # # # # # # # # # # # # # # # Part 2, translate actions # # # # # # # # # # # # # # # # # 
# # # # # # # # # # # agent_json2local_attrs = [ # json key -----> agent key ('agentAlive', 'alive'), ('agentTeam', 'team'), ('indexInTeam', 'index'), ('uId', 'uid_remote'), ('maxMoveSpeed', 'max_speed'), ('agentLocationArr', 'location'), ('agentRotationArr', 'rotation'), ('agentScaleArr', 'scale3'), ('agentVelocityArr', 'velocity'), ('agentHp', 'hp'), ('weaponCD', 'weapon_cd'), ('type', 'type'), ] # 'ActionSet2::Idle;AsFarAsPossible', # 'ActionSet2::Idle;StayWhenTargetInRange', # 'ActionSet2::Idle;StayWhenTargetInHalfRange' , def encode_action_as_digits(main_cmd, sub_cmd, x=None, y=None, z=None, UID=None, T=None, T_index=None): main_cmd_encoder = { "Idle" : 0, "SpecificMoving" : 1, "PatrolMoving" : 2, "SpecificAttacking" : 3, "N/A" : 4, } sub_cmd_encoder = { "DynamicGuard" : 0 , "StaticAlert" : 1 , "AggressivePersue" : 2 , "SpecificAttacking" : 3 , "AsFarAsPossible" : 4 , "StayWhenTargetInRange" : 5 , "StayWhenTargetInHalfRange" : 6 , "N/A" : 7 , 'Dir+X' : 8 , 'Dir+X+Y' : 9 , 'Dir+Y' : 10, 'Dir-X+Y' : 11, 'Dir-X' : 12, 'Dir-X-Y' : 13, 'Dir-Y' : 14, 'Dir+X-Y' : 15, } return np.array([ main_cmd_encoder[main_cmd], sub_cmd_encoder[sub_cmd], x if x is not None else np.inf, y if y is not None else np.inf, z if z is not None else np.inf, UID if UID is not None else np.inf, T if T is not None else np.inf, T_index if T_index is not None else np.inf ]) def decode_action_as_string(digits): main_cmd_decoder = { 0 :"Idle" , 1 :"SpecificMoving" , 2 :"PatrolMoving" , 3 :"SpecificAttacking" , 4 :"N/A" , } sub_cmd_decoder = { 0 : "DynamicGuard" , 1 : "StaticAlert" , 2 : "AggressivePersue" , 3 : "SpecificAttacking" , 4 : "AsFarAsPossible" , 5 : "StayWhenTargetInRange" , 6 : "StayWhenTargetInHalfRange" , 7 : "N/A" , 8 : 'Dir+X' , 9 : 'Dir+X+Y' , 10 : 'Dir+Y' , 11 : 'Dir-X+Y' , 12 : 'Dir-X' , 13 : 'Dir-X-Y' , 14 : 'Dir-Y' , 15 : 'Dir+X-Y' , } main_cmd = main_cmd_decoder[digits[0]] sub_cmd = sub_cmd_decoder[digits[1]] x = digits[2] if np.isfinite(digits[2]) else None y = digits[3] if 
np.isfinite(digits[3]) else None z = digits[4] if np.isfinite(digits[4]) else None UID = digits[5] if np.isfinite(digits[5]) else None T = digits[6] if np.isfinite(digits[6]) else None T_index = digits[7] if np.isfinite(digits[7]) else None if main_cmd == "Idle": res = 'ActionSet2::Idle;%s'%sub_cmd assert res in dictionary_items, '指令错误无法解析' elif main_cmd == "SpecificMoving": if sub_cmd == 'N/A': res = 'ActionSet2::SpecificMoving;X=%f Y=%f Z=%f'%(x,y,z) else: res = 'ActionSet2::SpecificMoving;%s'%sub_cmd elif main_cmd == "PatrolMoving": if sub_cmd == 'N/A': res = 'ActionSet2::PatrolMoving;X=%f Y=%f Z=%f'%(x,y,z) else: res = 'ActionSet2::PatrolMoving;%s'%sub_cmd elif main_cmd == "SpecificAttacking": # 'ActionSet2::SpecificAttacking;T1-3', # 'ActionSet2::SpecificAttacking;UID-4', assert sub_cmd == 'N/A', '指令错误无法解析' if UID is not None: res = 'ActionSet2::SpecificAttacking;UID-%d'%UID else: res = 'ActionSet2::SpecificAttacking;T%d-%d'%(T,T_index) elif main_cmd == "N/A": res = 'ActionSet2::N/A;N/A' else: print('指令错误无法解析') assert False return res # # # # # # # # # # # # # # # # # # # # # # # # # # # # Part 3, agent init defaults # # # # # # # # # # # # # # # # # # # # # # # # # # # AgentPropertyDefaults = { 'ClassName': 'RLA_CAR', # FString ClassName = ""; 'DebugAgent': False, 'AgentTeam': 0, # int AgentTeam = 0; 'IndexInTeam': 0, # int IndexInTeam = 0; 'UID': 0, # int UID = 0; 'MaxMoveSpeed': 600, # move speed, test ok 'InitLocation': { 'x': 0, 'y': 0, 'z': 0, }, 'InitRotation': { 'x': 0, 'y': 0, 'z': 0, }, 'AgentScale' : { 'x': 1, 'y': 1, 'z': 1, }, # agent size, test ok 'InitVelocity': { 'x': 0, 'y': 0, 'z': 0, }, 'AgentHp':100, "WeaponCD": 1, # weapon fire rate "IsTeamReward": True, "Type": "", "DodgeProb": 0.8, # probability of escaping dmg 闪避概率, test ok "ExplodeDmg": 25, # ms explode dmg. 
test ok "FireRange": 1000.0, # <= 1500 "GuardRange": 1400.0, # <= 1500 "PerceptionRange": 1500.0, # <= 1500 'Color':'(R=0,G=1,B=0,A=1)', # color "FireRange": 1000, 'RSVD1':'', 'RSVD2':'', } # # # # # # # # # # # # # # # # # # # # # # # # # # # # Part 3, framerate selection # # # # # # # # # # # # # # # # # # # # # # # # # # # # Check whether a number can be represented precisely by a float def binary_friendly(x): y_f16 = np.array(x, dtype=np.float16) y_f64 = np.array(x, dtype=np.float64) t = y_f64 - y_f16 assert t.dtype == np.float64 return (t==0) # ''' # T0-55Destroyed # T1-616Destroyed # T0-44Destroyed # T0-22Destroyed # T1-717Destroyed # T1-111Destroyed # T0-88Destroyed # T0-77Destroyed # T1-313Destroyed # T0-99Destroyed # T0-66Destroyed # T0-00Destroyed # T1-212Destroyed # T0-11Destroyed # T0-33Destroyed # EndEpisodeLose1 # ''' ################## ########################## ######################## ################## ########################## ######################## ################## ########################## ######################## ################## single digit encode, not used ######################## # h_map_center = (-7290.0, 6010.0) # h_grid_size = 400 # v_ground = 340 # v_grid_size = 1000 # x_arr = np.array([h_map_center[0]+v_grid_size*i for i in range(-20, 20)]) # 0~39, 40, 1 # y_arr = np.array([h_map_center[1]+v_grid_size*i for i in range(-20, 20)]) # 0~39, 40, 40 # z_arr = np.array([v_ground+v_grid_size*i for i in range(4)]) # 0~3, 4, 1600 # # offset # 0~1, 2, 6400 # # output $y \in [1000, 12800]$ # def _2digit(main_cmd, x, y, z): # z_logit = np.argmin(np.abs(z - z_arr)) # x_logit = np.argmin(np.abs(x - x_arr)) # y_logit = np.argmin(np.abs(y - y_arr)) # if main_cmd=='SpecificMoving': cmd_logit = 0 # elif main_cmd=='PatrolMoving': cmd_logit = 1 # ls_mod = [1,40,1600,6400] # offset = 1000 # x = np.array([x_logit, y_logit, z_logit, cmd_logit]) # print(x) # y = np.dot(x, ls_mod)+offset # return y # def _2coordinate(x): # offset = 1000 # ls_mod = 
[1,40,1600,6400] # x = x - offset # res = [] # for mod in reversed(ls_mod): # tmp = x // mod # x = x - tmp*mod # res.append(tmp) # res = list(reversed(res)) # x_logit, y_logit, z_logit, cmd_logit = res # if cmd_logit == 0 : main_cmd ='SpecificMoving' # elif cmd_logit == 1 : main_cmd ='PatrolMoving' # x = x_arr[x_logit] # y = y_arr[y_logit] # z = z_arr[z_logit] # print(main_cmd, x, y, z) # return main_cmd, x, y, z ################## ########################## ######################## ################## ########################## ######################## ################## ########################## ######################## ================================================ FILE: PythonExample/hmp_minimal_modules/MISSION/uhmap/agent.py ================================================ import numpy as np from .actset_lookup import agent_json2local_attrs class Agent(object): def __init__(self, team, team_id, uid) -> None: self.team = team self.team_id = team_id self.uid = uid self.attrs = agent_json2local_attrs for attr_json, attr_agent in self.attrs: setattr(self, attr_agent, None) self.pos3d = np.array([np.nan, np.nan, np.nan]) self.pos2d = np.array([np.nan, np.nan]) def update_agent_attrs(self, dictionary): if (not dictionary['agentAlive']): self.alive = False else: assert dictionary['valid'] for attr_json, attr_agent in self.attrs: setattr(self, attr_agent, dictionary[attr_json]) assert self.uid == self.uid_remote self.pos3d = np.array(self.location) self.pos2d = self.pos3d[:2] self.vel3d = np.array(self.velocity) self.vel2d = self.vel3d[:2] self.scale3d = np.array(self.scale3) self.scale = self.scale3[0] self.yaw = self.rotation[0] ================================================ FILE: PythonExample/hmp_minimal_modules/MISSION/uhmap/auto_download.py ================================================ import os, commentjson, shutil, subprocess, tqdm, shutil, distutils from onedrivedownloader import download try: os.makedirs('./TEMP') except: pass def 
download_from_shared_server(key = 'cat'): # download uhmap file manifest | 下载manifest目录文件 print('download uhmap file manifest | 下载manifest目录文件') manifest_url = "https://ageasga-my.sharepoint.com/:u:/g/personal/fuqingxu_yiteam_tech/EVmCQMSUWV5MgREWaxiz_GoBalBRV3DWBU3ToSJ5OTQaLQ?e=I8yjl9" try: file = download(manifest_url, filename="./TEMP/", force_download=True) except: print('failed to connect to onedrive | 连接onedrive失败, 您可能需要翻墙才能下载资源') with open("./TEMP/uhmap_manifest.jsonc", "r") as f: manifest = commentjson.load(f) if key not in key: print('The version you are looking for does not exists!') uhmap_url = manifest[key] print('download main files | 下载预定文件') try: file = download(uhmap_url, filename="./TEMP/DOWNLOAD", unzip=True, unzip_path='./TEMP/UNZIP') except: print(f'download timeout | 下载失败, 您可能需要翻墙才能下载资源。另外如果您想手动下载的话: {uhmap_url}') return file def download_client_binary_on_platform(desired_path, desired_version, is_render_client, platform): key = f"Uhmap_{platform}_Build_Version{desired_version}" print('downloading', key) download_from_shared_server(key = key) print('download and extract complete, moving files') from distutils import dir_util target_dir = os.path.abspath(os.path.dirname(desired_path) + './..') dir_util.copy_tree('./TEMP/UNZIP', target_dir) assert os.path.exists(desired_path), "unexpected path error! Are you using Linux style path on Windows?" 
return def download_client_binary(desired_path, desired_version, is_render_client): import platform plat = "Windows" if platform.system()=="Linux": plat = "Linux" download_client_binary_on_platform(desired_path, desired_version, is_render_client, platform=plat) return ================================================ FILE: PythonExample/hmp_minimal_modules/MISSION/uhmap/struct.cpp ================================================ #pragma once #include "CoreMinimal.h" #include "Containers/UnrealString.h" #include "XtensorAPIBPLibrary.h" #include "DataStruct.generated.h" USTRUCT(BlueprintType) struct FAgentProperty { GENERATED_BODY() UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) FString ClassName = ""; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) int AgentTeam = 0; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) int IndexInTeam = 0; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) int UID = 0; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) bool DebugAgent = false; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) float MaxMoveSpeed = 0; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) FVector InitLocation; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) FVector InitRotation; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) FRotator InitRotator; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) FVector AgentScale; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) FVector InitVelocity; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) float AgentHp; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) float WeaponCD = 0; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) bool IsTeamReward = false; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) FString Type = ""; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) FString WeaponType = ""; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) FString Color = ""; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) float DodgeProb = 0.0; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) float ExplodeDmg = 20.0; UPROPERTY(EditDefaultsOnly, 
BlueprintReadWrite) float FireRange = 1000.0; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) float GuardRange = 1400.0; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) float PerceptionRange = 1400.0; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) FString RSVD1 = ""; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) FString RSVD2 = ""; }; USTRUCT(BlueprintType) struct FParsedDataInput { // please change lines in // bool AHMPLevelScriptActor::ParsedTcpInData() // together with this struct GENERATED_BODY() UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) bool valid = false; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) FString DataCmd; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) int NumAgents = 0; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) TArray AgentSettingArray; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) int TimeStep = 0; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) int TimeStepMax = 0; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) TArray Actions; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) TArray StringActions; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) FString RSVD1 = ""; }; USTRUCT(BlueprintType) struct FAgentDataOutput { GENERATED_BODY() UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) bool Valid = false; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) bool AgentAlive = true; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) int AgentTeam = 0; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) int IndexInTeam = 0; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) int UID = 0; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) float MaxMoveSpeed = 0; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) FVector AgentLocation; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) FRotator AgentRotation; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) FVector AgentScale; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) FVector AgentVelocity; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) float AgentHp; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) float 
WeaponCD = 0; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) int PreviousAction; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) TArray AvailActions; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) float Reward; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) bool IsTeamReward = false; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) TArray Interaction; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) FString Type = ""; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) FString RSVD1 = ""; }; USTRUCT(BlueprintType) struct FKeyObjDataOutput { GENERATED_BODY() UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) bool Valid = false; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) int UID; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) FString ClassName; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) FVector Location; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) FRotator Rotation; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) FVector Scale; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) FVector Velocity; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) float Hp; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) FString RSVD1 = ""; }; USTRUCT(BlueprintType) struct FGlobalDataOutput { GENERATED_BODY() UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) bool Valid = false; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) float TeamReward = 0; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) bool UseTeamReward = false; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) TArray Events; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) TArray VisibleMatFlatten; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) TArray DisMatFlatten; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) float MaxEpisodeStep = 999; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) int TimeCnt = 0; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) float Time = 0; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) bool EpisodeDone = false; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) FString EpisodeEndReason = "unknown"; 
UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) int TeamWin = -1; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) TArray KeyObjArr; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) FString LevelName = ""; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) FXTensor DistanceMat; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) FString RSVD1 = ""; }; USTRUCT(BlueprintType) struct FAgentDataOutputArr { GENERATED_BODY() UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) bool Valid = false; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) TArray DataArr; UPROPERTY(EditDefaultsOnly, BlueprintReadWrite) FGlobalDataOutput DataGlobal; }; ================================================ FILE: PythonExample/hmp_minimal_modules/MISSION/uhmap/uhmap.md ================================================ # Unreal HMAP (UHMAP) 混合多智能体平台-虚幻仿真模块 ## UHMAP 中对虚幻源代码的修改 - (1) 将lz4的接口暴露在外,方便使用 ``` F:\UnrealSourceCode\UnrealEngine-4.27.2-release\Engine\Source\Runtime\Core\Public\Compression\lz4.h 新增一行 #define LZ4_DLL_EXPORT 1 ``` - (2) 将AIPerception Sight的计算量拉高 ``` F:\UnrealSourceCode\UnrealEngine-4.27.2-release\Engine\Source\Runtime\AIModule\Private\Perception\AISense_Sight.cpp ``` 修改参数,这两个参数增大,有助于尽早发现进入范围的智能体(源代码中为了运行效率牺牲了实时性,用运行时间和Trace数量加以约束) ``` static const int32 DefaultMaxTracesPerTick = 16; static const int32 DefaultMinQueriesPerTimeSliceCheck = 40; ``` ## Switching MISSION to UHMAP in Json Config 切至虚幻仿真模块 Please use following template: 请使用以下配置文件模板: ```jsonc { // config HMP core "config.py->GlobalConfig": { "note": "uhmp-dev", "env_name": "uhmap", // *** "env_path": "MISSION.uhmap", // *** "draw_mode": "Img", "num_threads": "1", // "heartbeat_on": "False", "report_reward_interval": "1", "test_interval": "128", "test_epoch": "4", "device": "cuda", "max_n_episode": 500000, "fold": "1", "backup_files": [ ] }, // config MISSION "MISSION.uhmap.uhmap_env_wrapper.py->ScenarioConfig": { // *** "N_AGENT_EACH_TEAM": [3, 2], // update N_AGENT_EACH_TEAM "MaxEpisodeStep": 30, "n_actions": 10, 
"StateProvided": false, "render": false, "SubTaskSelection": "UhmapBreakingBad", "UhmapPort": 21051, // "UhmapServerExe": "", "UhmapRenderExe": "./../../WindowsNoEditor/UHMP.exe", "UhmapServerExe": "./../../WindowsServer/UHMPServer.exe", "TimeDilation": 1.25, // 时间膨胀系数 "TEAM_NAMES": [ "ALGORITHM.script_ai.dummy_uhmap->DummyAlgorithmT1", // *** select ALGORITHMs "ALGORITHM.script_ai.dummy_uhmap->DummyAlgorithmT2" // *** select ALGORITHMs ] }, // config ALGORITHMs "ALGORITHM.script_ai.dummy_uhmap.py->DummyAlgConfig": { "reserve": "" } } ``` ## Configurations 重要配置参数 path:json配置文件 | Field | Value | Explanation | zh Explanation | | ---- | ---- | ---- | ---- | | device | ```str``` | select gpu | 选择GPU或CPU | | N_AGENT_EACH_TEAM | ```list of int``` | Agent Num in Each Team | 各队智能体数量 | | MaxEpisodeStep | ```int``` | Time Step Limit | 对战时间步数限制 | | n_actions | ```int``` | ---- | 强化学习预留 | | render | ```bool``` | use render server | 是否使用渲染 | | UhmapPort | ```int``` | | 临时,端口选择,后期将改为自动 | | TimeDilation | ```float``` | | 时间膨胀,减小实现以实现慢动作,增大可以让CPU燃烧 | | TEAM_NAMES | ```str``` | | 分别指定一队、二队策略 | ## Unreal Agent Initializing Options 智能体初始化参数 path:```MISSION\uhmap\SubTasks\UhmapBreakingBad.py``` function:```reset``` | Field | Value | Explanation | zh Explanation | | ---- | ---- | ---- | ---- | | ClassName | ```str``` | Select Agent Class in unreal engine side | | | AgentTeam | ```int``` | team belonging of an agent | 智能体的队伍归属 | | IndexInTeam | ```int``` | team index of an agent | 智能体在队伍中的编号 | | UID | ```int``` | index of an agent in the environment | 智能体在虚幻仿真中的唯一编号 | | MaxMoveSpeed | ```float``` | | 暂未接入,无效 | | AgentHp | ```int``` | | 初始生命值 | | WeaponCD | ```float``` | | 武器cooldown时间,单位秒 | | RSVD1 | ```str``` | | 智能体展现颜色 | | InitLocation | ```dict``` | | 智能体初始位置 | ## Unit conversion 单位转换 Length unit in the system is 1mm, e.g. 
800 = 800mm = 0.8m ## Algorithm For demonstration path:```ALGORITHM\script_ai\dummy_uhmap.py``` function:```interact_with_env(override)``` ### Argument: | Field | Value | Explanation | zh Explanation | | ---- | ---- | ---- | ---- | | ```State_Recall['Latest-Obs']``` | | observation array for reinforcement learning | | | ```State_Recall['ENV-PAUSE']``` | | show which thread is paused (refer to [TimeLine](./../../VISUALIZE/md_imgs/timeline.jpg)) | | | ```State_Recall['Current-Obs-Step']``` | | show time step index in an episode | | | ```State_Recall['Latest-Team-Info']``` | | interfacing with script-based AIs, including structured agent location, uid, etc. | | | ```State_Recall['Test-Flag']``` | | show whether HMP central has recommended to do a test run for RL | | | ```State_Recall['Env-Suffered-Reset']``` | | show whether a thread has been reset and started a new episode | | ### Convert Command Format: #### attack an agent with UID ```python encode_action_as_digits("SpecificAttacking", "N/A", x=None, y=None, z=None, UID=4, T=None, T_index=None) ``` #### PatrolMoving with coordinate ```python encode_action_as_digits("PatrolMoving", "N/A", x=444*5, y=444*5, z=379, UID=None, T=None, T_index=None) ``` #### PatrolMoving with direction ```python encode_action_as_digits("PatrolMoving", "Dir+X+Y", x=None, y=None, z=None, UID=None, T=None, T_index=None) encode_action_as_digits("PatrolMoving", "Dir+X-Y", x=None, y=None, z=None, UID=None, T=None, T_index=None) encode_action_as_digits("PatrolMoving", "Dir+X", x=None, y=None, z=None, UID=None, T=None, T_index=None) ``` #### SpecificMoving with coordinate ```python encode_action_as_digits("SpecificMoving", "N/A", x=444*5, y=444*5, z=379, UID=None, T=None, T_index=None) ``` #### SpecificMoving with direction ```python encode_action_as_digits("SpecificMoving", "Dir+X+Y", x=None, y=None, z=None, UID=None, T=None, T_index=None) encode_action_as_digits("SpecificMoving", "Dir+X-Y", x=None, y=None, z=None, UID=None, T=None, 
T_index=None) encode_action_as_digits("SpecificMoving", "Dir+X", x=None, y=None, z=None, UID=None, T=None, T_index=None) ``` #### Idle and change guard state ```python encode_action_as_digits("Idle", "DynamicGuard", x=None, y=None, z=None, UID=None, T=None, T_index=None) encode_action_as_digits("Idle", "StaticAlert", x=None, y=None, z=None, UID=None, T=None, T_index=None) encode_action_as_digits("Idle", "AggressivePersue", x=None, y=None, z=None, UID=None, T=None, T_index=None) ``` ================================================ FILE: PythonExample/hmp_minimal_modules/MISSION/uhmap/uhmap_env_wrapper.py ================================================ import json, os, subprocess, time, stat, platform, importlib import numpy as np from UTIL.colorful import print蓝, print靛, print亮红 from UTIL.network import TcpClientP2PWithCompress, find_free_port_no_repeat, get_host_ip from UTIL.config_args import ChainVar from config import GlobalConfig from ..common.base_env import BaseEnv from .actset_lookup import binary_friendly, dictionary_n_actions from .agent import Agent # please register this into MISSION/env_router.py def make_uhmap_env(env_id, rank): if ScenarioConfig.SubTaskSelection == 'UhmapEnv': return UhmapEnv(rank) else: ST = ScenarioConfig.SubTaskSelection assert os.path.exists(f'./MISSION/uhmap/SubTasks/{ST}.py'), "Unknown subtask!" ST_CLASS = getattr(importlib.import_module(f'.SubTasks.{ST}', package='MISSION.uhmap'), ST) return ST_CLASS(rank) def get_subtask_conf(subtask): ST = subtask assert os.path.exists(f'./MISSION/uhmap/SubTasks/{ST}Conf.py'), "Configuration not found!" 
ST_CONF_CLASS = getattr(importlib.import_module(f'.SubTasks.{ST}Conf', package='MISSION.uhmap'), 'SubTaskConfig') return ST_CONF_CLASS def usual_id_arrangment(N_AGENT_EACH_TEAM): """ e.g., input [5, 3] output [range(0,5), range(5,8)] """ AGENT_ID_EACH_TEAM = [] p = 0 for team_agent_num in N_AGENT_EACH_TEAM: AGENT_ID_EACH_TEAM.append(range(p, p + team_agent_num)) p += team_agent_num return AGENT_ID_EACH_TEAM # please register this ScenarioConfig into MISSION/env_router.py class ScenarioConfig(object): ''' ScenarioConfig: This config class will be 'injected' with new settings from JSONC. (E.g., override configs with ```python main.py --cfg example.jsonc```) (As the name indicated, ChainVars will change WITH vars it 'chained_with' during config injection) (please see UTIL.config_args to find out how this advanced trick works out.) ''' # Needed by the hmp core # N_AGENT_EACH_TEAM = [10, ] AGENT_ID_EACH_TEAM = usual_id_arrangment(N_AGENT_EACH_TEAM) N_TEAM = len(N_AGENT_EACH_TEAM) # chained parameters, will change along with 'N_AGENT_EACH_TEAM' AGENT_ID_EACH_TEAM_cv = ChainVar(lambda N_AGENT_EACH_TEAM: usual_id_arrangment(N_AGENT_EACH_TEAM), chained_with=['N_AGENT_EACH_TEAM']) N_TEAM_cv = ChainVar(lambda N_AGENT_EACH_TEAM: len(N_AGENT_EACH_TEAM), chained_with=['N_AGENT_EACH_TEAM']) # algorithm selection TEAM_NAMES = ['ALGORITHM.None->None',] ''' ## If the length of action array == the number of teams, set ActAsUnity to True ## If the length of action array == the number of agents, set ActAsUnity to False ''' ActAsUnity = False ''' ## If the length of reward array == the number of agents, set RewardAsUnity to False ## If the length of reward array == 1, set RewardAsUnity to True ''' RewardAsUnity = True ''' ## If the length of obs array == the number of agents, set ObsAsUnity to False ## If the length of obs array == the number of teams, set ObsAsUnity to True ''' ObsAsUnity = False # Needed by env itself # MaxEpisodeStep = 100 render = False TcpAddr = '127.0.0.1' 
UhmapPort = 21051 UnrealLevel = 'UhmapBreakingBad' SubTaskSelection = 'UhmapBreakingBad' SubTaskConfig = get_subtask_conf(UnrealLevel) SubTaskConfig_cv = ChainVar(lambda UnrealLevel:get_subtask_conf(UnrealLevel), chained_with=['SubTaskSelection']) UElink2editor = False AutoPortOverride = True # AutoPortOverride is usually the reverse of UElink2editor AutoPortOverride_cv = ChainVar(lambda UElink2editor:(not UElink2editor), chained_with=['UElink2editor']) # this is not going to be precise, # the precise step time will be floor(StepGameTime/TimeDilation*FrameRate)*TimeDilation/FrameRate StepGameTime = 0.5 UhmapServerExe = 'F:/UHMP/Build/WindowsServer/UHMPServer.exe' UhmapRenderExe = '' TimeDilation = 1.0 # engine calcualtion speed control FrameRate = 25.6 # must satisfy: (TimeDilation=1*n, FrameRate=25.6*n) FrameRate_cv = ChainVar(lambda TimeDilation: (TimeDilation/1 * 25.6), chained_with=['TimeDilation']) UhmapStartCmd = [] # Needed by some ALGORITHM # StateProvided = False AvailActProvided = False EntityOriented = True ActionFormat = 'ASCII' # 'ASCII'/'Multi-Digit'/'Single-Digit' n_actions = dictionary_n_actions obs_vec_length = get_subtask_conf(UnrealLevel).obs_vec_length obs_vec_length_cv = ChainVar(lambda UnrealLevel:get_subtask_conf(UnrealLevel).obs_vec_length, chained_with=['SubTaskSelection']) obs_n_entity = get_subtask_conf(UnrealLevel).obs_n_entity obs_n_entity_cv = ChainVar(lambda UnrealLevel:get_subtask_conf(UnrealLevel).obs_n_entity, chained_with=['SubTaskSelection']) # # ObsBreakBase = 1e4 UhmapVersion = '2.3' CanTurnOff = False # Hete agents HeteAgents = False # 演示demo类别 DemoType = "Default" class UhmapEnvParseHelper: def parse_response_ob_info(self, response): raise NotImplementedError def make_obs(self): raise NotImplementedError class UhmapEnv(BaseEnv, UhmapEnvParseHelper): def __init__(self, rank) -> None: super().__init__(rank) self.id = rank self.render = ScenarioConfig.render and (self.id==0) self.n_agents = sum(ScenarioConfig.N_AGENT_EACH_TEAM) 
assert self.n_agents == len(ScenarioConfig.SubTaskConfig.agent_list), 'agent number defination error' self.n_teams = ScenarioConfig.N_TEAM self.sim_thread = None self.client = None # self.observation_space = ? # self.action_space = ? if ScenarioConfig.StateProvided: # self.observation_space['state_shape'] = ? pass # Restart env, this is very fast, can be a failsafe if there is memory leaking away on UE side self.max_simulation_life = 2048 self.simulation_life = self.max_simulation_life # with a lock, we can initialize UE side one by one (not necessary though) # wait until thread 0 finish its initialization (to avoid a traffic jam in server memory) traffic_light = './TEMP/uhmap_thread_0_init_ok_%s'%GlobalConfig.machine_info['ExpUUID'][:8] if rank != 0: while not os.path.exists(traffic_light): time.sleep(1) self.activate_simulation(self.id, find_port=True) # thread 0 finish its initialization, if rank == 0: with open(traffic_light, mode='w+') as f: f.write(traffic_light) def __del__(self): self.terminate_simulation() def activate_simulation(self, rank, find_port=True): print('thread %d initializing'%rank) self.sim_thread = 'activiting' if find_port: self.render = ScenarioConfig.render # and (rank==0) self.hmp_ue_port = ScenarioConfig.UhmapPort if ScenarioConfig.AutoPortOverride: self.hmp_ue_port, release_port_fn = find_free_port_no_repeat() # port for hmp data exchanging if not ScenarioConfig.UElink2editor: self.ue_vis_port, release_port_fn = find_free_port_no_repeat() # port for remote visualizing # self.ue_vis_port = 32222 print蓝('Port %d will be used by hmp, port %d will be used by UE internally'%(self.hmp_ue_port, self.ue_vis_port)) if (not self.render) and (not ScenarioConfig.UElink2editor): print蓝('To visualize on Windows, run "./UHMP.exe -OpenLevel=%s:%d -WINDOWED -TimeDilation=%.8f -FrameRate=%.8f -IOInterval=%.8f -DebugMod=False -LockGameDuringCom=True"'%( get_host_ip(), self.ue_vis_port, ScenarioConfig.TimeDilation, ScenarioConfig.FrameRate, 
ScenarioConfig.StepGameTime)) self.ip_port = (ScenarioConfig.TcpAddr, self.hmp_ue_port) # os.system() if not ScenarioConfig.UElink2editor: assert ScenarioConfig.AutoPortOverride # * A Butterfly Effect problem *: # UE4 use float (instead of double) for time delta calculation, # causing some error calcualtion dt = 1/FrameRate # which will be enlarged due to Butterfly Effect # therefore we have to make sure that FrameRate = 16,32,64,... print('checking ScenarioConfig args problems ...') assert ScenarioConfig.TimeDilation <= 128, "* TimeDilation <= 128 *" assert binary_friendly(1/ScenarioConfig.FrameRate), "* A Butterfly Effect problem *" assert binary_friendly(ScenarioConfig.TimeDilation/256), "* A Butterfly Effect problem *" # real_step_time = # np.floor(ScenarioConfig.StepGameTime/ScenarioConfig.TimeDilation*ScenarioConfig.FrameRate) # * ScenarioConfig.TimeDilation / ScenarioConfig.FrameRate if not self.render: simulation_exe = ScenarioConfig.UhmapServerExe assert 'Server' in simulation_exe else: simulation_exe = ScenarioConfig.UhmapRenderExe assert 'NoEditor' in simulation_exe if platform.system()=="Linux": if self.render: assert False, "You really want to render on Linux? If so, remove this line." 
if simulation_exe.endswith('.exe'): simulation_exe = simulation_exe.replace('/Windows', '/Linux') simulation_exe = simulation_exe.replace('.exe','.sh') # expand '~' path simulation_exe = os.path.expanduser(simulation_exe) else: # Windows if simulation_exe.endswith('.sh'): simulation_exe = simulation_exe.replace('/Linux', '/Windows') simulation_exe = simulation_exe.replace('.sh', '.exe') if simulation_exe.startswith('/home'): simulation_exe = './TEMP' + simulation_exe if not os.path.exists(simulation_exe): if self.rank == 0: from .auto_download import download_client_binary download_client_binary(desired_path=simulation_exe, desired_version=ScenarioConfig.UhmapVersion, is_render_client=self.render) else: while True: time.sleep(60) if os.path.exists(simulation_exe): break # give execution permission if platform.system()=="Linux": st = os.stat(simulation_exe) os.chmod(simulation_exe, st.st_mode | stat.S_IEXEC) if (not self.render) and simulation_exe != '': # start child process self.sim_thread = subprocess.Popen([ simulation_exe, # '-log', '-TcpPort=%d'%self.hmp_ue_port, # port for hmp data exchanging '-Port=%d'%self.ue_vis_port, # port for remote visualizing '-OpenLevel=%s'%ScenarioConfig.UnrealLevel, '-TimeDilation=%.8f'%ScenarioConfig.TimeDilation, '-FrameRate=%.8f'%ScenarioConfig.FrameRate, '-IOInterval=%.8f'%ScenarioConfig.StepGameTime, '-Seed=%d'%int(np.random.rand()*1e5), # 如果已经设定了主线程随机数种子,这里随机出来的数字则是确定的 '-DebugMod=False', # '-LLMCSV', '-ABSLOG=%s'%os.path.abspath('./TEMP/uhmap/%s/%d.log'%(GlobalConfig.machine_info['ExpUUID'][:8], rank)), '-Version=%s'%ScenarioConfig.UhmapVersion, '-LockGameDuringCom=True', ], stdout=subprocess.DEVNULL) print('UHMAP (Headless) started ...') elif self.render and simulation_exe != '': self.sim_thread = subprocess.Popen([ simulation_exe, # '-log', '-TcpPort=%d'%self.hmp_ue_port, # port for hmp data exchanging '-Port=%d'%self.ue_vis_port, # port for remote visualizing '-OpenLevel=%s'%ScenarioConfig.UnrealLevel, 
'-TimeDilation=%.8f'%ScenarioConfig.TimeDilation, '-FrameRate=%.8f'%ScenarioConfig.FrameRate, '-IOInterval=%.8f'%ScenarioConfig.StepGameTime, '-Seed=%d'%int(np.random.rand()*1e5), # 如果已经设定了主线程随机数种子,这里随机出来的数字则是确定的 '-DebugMod=False', # '-LLMCSV', '-ABSLOG=%s'%os.path.abspath('./TEMP/uhmap/%s/%d.log'%(GlobalConfig.machine_info['ExpUUID'][:8], rank)), '-Version=%s'%ScenarioConfig.UhmapVersion, '-LockGameDuringCom=True', "-ResX=1280", "-ResY=720", "-WINDOWED" ], stdout=subprocess.DEVNULL) print('UHMAP (Render) started ...') else: print('Cannot start Headless Server Or GUI Server!') assert False, 'Cannot start Headless Server Or GUI Server!' else: print('Trying to link to unreal editor ...') assert not ScenarioConfig.AutoPortOverride time.sleep(1+np.abs(self.id)/100) self.client = TcpClientP2PWithCompress(self.ip_port) MAX_RETRY = 150 for i in range(MAX_RETRY): try: self.client.manual_connect() print('handshake complete %d'%rank) break except: if i>25: print('Thread %d: Trying to connect to unreal engine. Related library not in memory, going to take some minutes. Retry %d ...'%(rank, i)) elif i>75: print('Thread %d: Waiting too long, please reduce parallel threads (num_threads), Retry %d ... 
| 请减小num_threads运行一次, 让动态库载入内存, 然后恢复num_threads即可'%(rank, i)) elif i >= MAX_RETRY-1: assert False, ('uhmap connection timeout, please reduce parallel threads (num_threads) !') time.sleep(1) # now that port is bind, no need to hold them anymore if find_port: if ScenarioConfig.AutoPortOverride: release_port_fn(self.hmp_ue_port) if not ScenarioConfig.UElink2editor: release_port_fn(self.ue_vis_port) self.t = 0 print('thread %d initialize complete'%rank) def terminate_simulation(self): if hasattr(self,'sim_thread') and (self.sim_thread is not None) and (self.client is not None): # self.sim_thread.terminate() # send terminate command to unreal side self.client.send_dgram_to_target(json.dumps({ 'valid': True, 'DataCmd': 'end_unreal_engine', 'TimeStepMax': ScenarioConfig.MaxEpisodeStep, 'TimeStep' : 0, 'Actions': None, })) self.client.close() self.sim_thread = None self.client = None # override reset function def reset(self): self.simulation_life -= 1 if self.simulation_life < 0: print('restarting simutation') self.terminate_simulation() self.simulation_life = self.max_simulation_life self.activate_simulation(self.id, find_port=False) def sleep(self): self.simulation_life = -1 self.terminate_simulation() # override step function def step(self, act): raise NotImplementedError # return (ob, RewardForAllTeams, done, info) # choose this if RewardAsUnity ================================================ FILE: PythonExample/hmp_minimal_modules/README.md ================================================ # HMP:Hybrid Multi-agent Playground See https://github.com/binary-husky/hmp2g # Run demo in Editor mode ``` (Open map UhmapLargeScale in Unreal Editor) cd PythonExample/hmp_minimal_modules python main.py -c ZHECKPOINT/uhmap_hete10vs10/render_result_editor.jsonc ``` # Run tutorial of designing custom actions in Editor mode ``` (Open map UhmapWaterdrop or UhmapLargeScale in Unreal Editor) cd PythonExample/hmp_minimal_modules python main.py -c 
ZDOCS/examples/uhmap/random_waterdrop.jsonc ``` ================================================ FILE: PythonExample/hmp_minimal_modules/UTIL/__init__.py ================================================ ================================================ FILE: PythonExample/hmp_minimal_modules/UTIL/auto_gpu.py ================================================ """ Created on Tue Aug 22 19:41:55 2017 @author: Quantum Liu """ ''' Example: gm=GPUManager() with torch.cuda.device(gm.auto_choice()): blabla Or: gm=GPUManager() torch.cuda.set_device(gm.auto_choice()) ''' import os, time from UTIL.colorful import print黄 class sel_gpu(): ''' qargs: query arguments A manager which can list all available GPU devices and sort them and choice the most free one.Unspecified ones pref. GPU设备管理器, 考虑列举出所有可用GPU设备, 并加以排序, 自动选出 最空闲的设备。在一个GPUManager对象内会记录每个GPU是否已被指定, 优先选择未指定的GPU。 ''' def __init__(self,qargs=[]): ''' ''' self.qargs=qargs def _sort_by_memory(self,gpus,by_size=False): for gpu in gpus: # 优先使用A100显卡 if 'A100' in gpu['gpu_name']: gpu['memory.free'] *= 1.25 gpu['memory.total'] *= 1.25 if by_size: print黄('Sorted by free memory size') res = sorted(gpus,key=lambda d:d['memory.free'],reverse=True) return res else: print黄('Sorted by free memory rate') return sorted(gpus,key=lambda d:float(d['memory.free'])/ d['memory.total'],reverse=True) def _sort_by_power(self,gpus): return sorted(gpus,key='by_power') def _sort_by_custom(self,gpus,key,reverse=False,qargs=[]): if isinstance(key,str) and (key in qargs): return sorted(gpus,key=lambda d:d[key],reverse=reverse) if isinstance(key,type(lambda a:a)): return sorted(gpus,key=key,reverse=reverse) raise ValueError("The argument 'key' must be a function or a key in query args,please read the documention of nvidia-smi") def auto_choice(self,mode=0): ''' mode: 0:(default)sorted by free memory size return: a TF device object Auto choice the freest GPU device,not specified ones 自动选择最空闲GPU,返回索引 ''' from UTIL.colorful import print黄 
self.gpus=self.query_gpu(self.qargs) for gpu in self.gpus: gpu['specified']=False self.gpu_num=len(self.gpus) # if not self.check_gpus(): # raise ImportError('GPU available check failed') for old_infos,new_infos in zip(self.gpus,self.query_gpu(self.qargs)): old_infos.update(new_infos) unspecified_gpus=[gpu for gpu in self.gpus if not gpu['specified']] or self.gpus if mode==0: chosen_gpu=self._sort_by_memory(unspecified_gpus,True)[0] print黄('Choosing the GPU device has largest free memory...\n') elif mode==1: chosen_gpu=self._sort_by_power(unspecified_gpus)[0] print黄('Choosing the GPU device has highest free memory rate...\n') elif mode==2: chosen_gpu=self._sort_by_power(unspecified_gpus)[0] print黄('Choosing the GPU device by power...\n') else: chosen_gpu=self._sort_by_memory(unspecified_gpus)[0] print黄('Given an unaviliable mode,will be chosen by memory\n') chosen_gpu['specified']=True index=chosen_gpu['index'] print黄('Using GPU {i}:\n{info}'.format(i=index,info='\n'.join([str(k)+':'+str(v) for k,v in chosen_gpu.items()]))) return int(index) @staticmethod def check_gpus(): ''' GPU available check http://pytorch-cn.readthedocs.io/zh/latest/package_references/torch-cuda/ ''' import torch if not torch.cuda.is_available(): print黄('This script could only be used to manage NVIDIA GPUs,but no GPU found in your device') return False with os.popen('nvidia-smi -h') as f: if not 'NVIDIA System Management' in f.read(): print黄("'nvidia-smi' tool not found.") f.close() return False f.close() return True @staticmethod def parse(line,qargs): ''' line: a line of text qargs: query arguments return: a dict of gpu infos Pasing a line of csv format text returned by nvidia-smi 解析一行nvidia-smi返回的csv格式文本 ''' numberic_args = ['memory.free', 'memory.total', 'power.draw', 'power.limit'] #可计数的参数 power_manage_enable=lambda v:(('Not Support' not in v) and ('[N/A]' not in v)) #lambda表达式,显卡是否支持power management(笔记本可能不支持) to_numberic=lambda 
v:float(v.upper().strip().replace('MIB','').replace('W','')) #带单位字符串去掉单位 process = lambda k,v:((int(to_numberic(v)) if power_manage_enable(v) else 1) if k in numberic_args else v.strip()) return {k:process(k,v) for k,v in zip(qargs,line.strip().split(','))} def query_gpu(self, qargs=[]): ''' qargs: query arguments return: a list of dict Querying GPUs infos 查询GPU信息 ''' qargs =['index','gpu_name', 'memory.free', 'memory.total', 'power.draw', 'power.limit']+ qargs cmd = 'nvidia-smi --query-gpu={} --format=csv,noheader'.format(','.join(qargs)) results = os.popen(cmd).readlines() return [self.parse(line,qargs) for line in results] @staticmethod def by_power(d): ''' helper function fo sorting gpus by power ''' power_infos=(d['power.draw'],d['power.limit']) if any(v==1 for v in power_infos): print黄('Power management unable for GPU {}'.format(d['index'])) return 1 return float(d['power.draw'])/d['power.limit'] ================================================ FILE: PythonExample/hmp_minimal_modules/UTIL/batch_exp.py ================================================ import subprocess import threading import copy, os import time import json from UTIL.network import get_host_ip from UTIL.colorful import * def get_info(script_path): info = { 'HostIP': get_host_ip(), 'RunPath': os.getcwd(), 'ScriptPath': os.path.abspath(script_path), 'StartDateTime': time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) } try: info['DockerContainerHash'] = subprocess.getoutput(r'cat /proc/self/cgroup | grep -o -e "docker/.*"| head -n 1 |sed "s/docker\\/\\(.*\\)/\\1/" |cut -c1-12') except: info['DockerContainerHash'] = 'None' return info def run_batch_exp(sum_note, n_run, n_run_mode, base_conf, conf_override, script_path): arg_base = ['python', 'main.py'] time_mark_only = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) time_mark = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + '-' + sum_note log_dir = '%s/'%time_mark exp_log_dir = log_dir+'exp_log' if not 
os.path.exists('PROFILE/%s'%exp_log_dir): os.makedirs('PROFILE/%s'%exp_log_dir) exp_json_dir = log_dir+'exp_json' if not os.path.exists('PROFILE/%s'%exp_json_dir): os.makedirs('PROFILE/%s'%exp_json_dir) conf_list = [] new_json_paths = [] for i in range(n_run): conf = copy.deepcopy(base_conf) new_json_path = 'PROFILE/%s/run-%d.json'%(exp_json_dir, i+1) for key in conf_override: assert n_run == len(conf_override[key]), ('检查!n_run是否对应') tree_path, item = key.split('-->') conf[tree_path][item] = conf_override[key][i] with open(new_json_path,'w') as f: json.dump(conf, f, indent=4) # print(conf) conf_list.append(conf) new_json_paths.append(new_json_path) print红('\n') print红('\n') print红('\n') printX = [ print亮红, print亮绿, print亮黄, print亮蓝, print亮紫, print亮靛, print红, print绿, print黄, print蓝, print紫, print靛, print亮红, print亮绿, print亮黄, print亮蓝, print亮紫, print亮靛, print红, print绿, print黄, print蓝, print紫, print靛, print亮红, print亮绿, print亮黄, print亮蓝, print亮紫, print亮靛, print红, print绿, print黄, print蓝, print紫, print靛, print亮红, print亮绿, print亮黄, print亮蓝, print亮紫, print亮靛, print红, print绿, print黄, print蓝, print紫, print靛, print亮红, print亮绿, print亮黄, print亮蓝, print亮紫, print亮靛, print红, print绿, print黄, print蓝, print紫, print靛, ] conf_base_ = conf_list[0] for k_ in conf_base_: conf_base = conf_base_[k_] for key in conf_base: different = False for i in range(len(conf_list)): if conf_base[key]!=conf_list[i][k_][key]: different = True break # if different: for i in range(len(conf_list)): printX[i](key, conf_list[i][k_][key]) else: print(key, conf_base[key]) final_arg_list = [] for ith_run in range(n_run): final_arg = copy.deepcopy(arg_base) final_arg.append('--cfg') final_arg.append(new_json_paths[ith_run]) final_arg_list.append(final_arg) print('') def local_worker(ith_run): log_path = open('PROFILE/%s/run-%d.log'%(exp_log_dir, ith_run+1), 'w+') printX[ith_run%len(printX)](final_arg_list[ith_run]) subprocess.run(final_arg_list[ith_run], stdout=log_path, stderr=log_path) def remote_worker(ith_run): 
# step 1: transfer all files from UTIL.exp_helper import get_ssh_sftp addr = n_run_mode[ith_run]['addr'] if 'exe_here' in addr: _, addr = addr.split('=>') usr = n_run_mode[ith_run]['usr'] pwd = n_run_mode[ith_run]['pwd'] ssh, sftp = get_ssh_sftp(addr, usr, pwd) src_path = os.getcwd() else: # assert False usr = n_run_mode[ith_run]['usr'] pwd = n_run_mode[ith_run]['pwd'] ssh, sftp = get_ssh_sftp(addr, usr, pwd) sftp.mkdir('/home/%s/MultiServerMission'%(usr), ignore_existing=True) sftp.mkdir('/home/%s/MultiServerMission/%s'%(usr, time_mark), ignore_existing=True) src_path = '/home/%s/MultiServerMission/%s/src'%(usr, time_mark) try: sftp.mkdir(src_path, ignore_existing=False) sftp.put_dir('./', src_path, ignore_list=['__pycache__','TEMP','ZHECKPOINT']) sftp.close() print紫('upload complete') except: sftp.close() print紫('do not need upload') print('byobu attach -t %s'%time_mark_only) addr_ip, addr_port = addr.split(':') print亮蓝("Attach cmd: ssh %s@%s -p %s -t \"byobu attach -t %s\""%(usr, addr_ip, addr_port, time_mark_only)) stdin, stdout, stderr = ssh.exec_command(command='byobu new-session -d -s %s'%time_mark_only, timeout=1) print亮紫('byobu new-session -d -s %s'%time_mark_only) time.sleep(1) byobu_win_name = '%s--run-%d'%(time_mark_only, ith_run) byobu_win_name = byobu_win_name stdin, stdout, stderr = ssh.exec_command(command='byobu new-window -t %s'%time_mark_only, timeout=1) print亮紫('byobu new-window -t %s'%time_mark_only) time.sleep(1) cmd = 'cd ' + src_path stdin, stdout, stderr = ssh.exec_command(command='byobu send-keys -t %s "%s" C-m'%(time_mark_only, cmd), timeout=1) print亮紫('byobu send-keys "%s" C-m'%cmd) time.sleep(1) cmd = ' '.join(['echo', str(get_info(script_path)) ,'>>', './private_remote_execution.log']) stdin, stdout, stderr = ssh.exec_command(command='byobu send-keys -t %s "%s" C-m'%(time_mark_only, cmd), timeout=1) print亮紫('byobu send-keys "%s" C-m'%cmd) time.sleep(1) cmd = ' '.join(final_arg_list[ith_run]) stdin, stdout, stderr = 
ssh.exec_command(command='byobu send-keys -t %s "%s" C-m'%(time_mark_only, cmd), timeout=1) print亮紫('byobu send-keys "%s" C-m'%cmd) time.sleep(1) print亮蓝("command send is done!") time.sleep(2) # 杀死 # stdin, stdout, stderr = ssh.exec_command(command='byobu kill-session -t %s'%byobu_win_name, timeout=1) pass def worker(ith_run): if n_run_mode[ith_run] is None: local_worker(ith_run) else: remote_worker(ith_run) def clean_process(pid): import psutil parent = psutil.Process(pid) for child in parent.children(recursive=True): try: print亮红('sending Terminate signal to', child) child.terminate() time.sleep(5) print亮红('sending Kill signal to', child) child.kill() except: pass parent.kill() def clean_up(): print亮红('clean up!') parent_pid = os.getpid() # my example clean_process(parent_pid) input('Confirm execution? 确认执行?') input('Confirm execution! 确认执行!') t = 0 while (t >= 0): print('Counting down ', t) time.sleep(1) t -= 1 DELAY = 60 for ith_run in range(n_run): worker(ith_run) for i in range(DELAY): time.sleep(1) print('all submitted') ================================================ FILE: PythonExample/hmp_minimal_modules/UTIL/colorful.py ================================================ import platform from sys import stdout if platform.system()=="Linux": pass else: from colorama import init init() # Do you like the elegance of Chinese characters? 
def print红(*kw,**kargs): print("\033[0;31m",*kw,"\033[0m",**kargs) def print绿(*kw,**kargs): print("\033[0;32m",*kw,"\033[0m",**kargs) def print黄(*kw,**kargs): print("\033[0;33m",*kw,"\033[0m",**kargs) def print蓝(*kw,**kargs): print("\033[0;34m",*kw,"\033[0m",**kargs) def print紫(*kw,**kargs): print("\033[0;35m",*kw,"\033[0m",**kargs) def print靛(*kw,**kargs): print("\033[0;36m",*kw,"\033[0m",**kargs) def print亮红(*kw,**kargs): print("\033[1;31m",*kw,"\033[0m",**kargs) def print亮绿(*kw,**kargs): print("\033[1;32m",*kw,"\033[0m",**kargs) def print亮黄(*kw,**kargs): print("\033[1;33m",*kw,"\033[0m",**kargs) def print亮蓝(*kw,**kargs): print("\033[1;34m",*kw,"\033[0m",**kargs) def print亮紫(*kw,**kargs): print("\033[1;35m",*kw,"\033[0m",**kargs) def print亮靛(*kw,**kargs): print("\033[1;36m",*kw,"\033[0m",**kargs) def print亮红(*kw,**kargs): print("\033[1;31m",*kw,"\033[0m",**kargs) def print亮绿(*kw,**kargs): print("\033[1;32m",*kw,"\033[0m",**kargs) def print亮黄(*kw,**kargs): print("\033[1;33m",*kw,"\033[0m",**kargs) def print亮蓝(*kw,**kargs): print("\033[1;34m",*kw,"\033[0m",**kargs) def print亮紫(*kw,**kargs): print("\033[1;35m",*kw,"\033[0m",**kargs) def print亮靛(*kw,**kargs): print("\033[1;36m",*kw,"\033[0m",**kargs) print_red = print红 print_green = print绿 print_yellow = print黄 print_blue = print蓝 print_purple = print紫 print_indigo = print靛 print_bold_red = print亮红 print_bold_green = print亮绿 print_bold_yellow = print亮黄 print_bold_blue = print亮蓝 print_bold_purple = print亮紫 print_bold_indigo = print亮靛 if not stdout.isatty(): # redirection, avoid a fucked up log file print红 = print print绿 = print print黄 = print print蓝 = print print紫 = print print靛 = print print亮红 = print print亮绿 = print print亮黄 = print print亮蓝 = print print亮紫 = print print亮靛 = print print_red = print print_green = print print_yellow = print print_blue = print print_purple = print print_indigo = print print_bold_red = print print_bold_green = print print_bold_yellow = print print_bold_blue = print print_bold_purple = 
print print_bold_indigo = print ================================================ FILE: PythonExample/hmp_minimal_modules/UTIL/config_args.py ================================================ import argparse, os, time, func_timeout from ast import Global from shutil import copyfile, copytree, ignore_patterns, rmtree from .colorful import * from .data_struct import remove_prefix, remove_suffix ''' This a chained var class, it deal with hyper-parameters that are bound together, e.g. number of threads and test episode interval. ChainVars are handled in utils.config_args.py ''' class ChainVar(object): def __init__(self, chain_func, chained_with): self.chain_func = chain_func self.chained_with = chained_with # ChainVar relationship must end with '_cv' or '_CV' def is_chained_key(key): if key.endswith('_cv'): return True, remove_suffix(key, '_cv') elif key.endswith('_CV'): return True, remove_suffix(key, '_CV') else: return False, key ''' Load all parameters in place ''' def prepare_args(vb=True): if vb: prepare_tmp_folder() parser = argparse.ArgumentParser(description='HMP') parser.add_argument('-c', '--cfg', help='Path of the configuration file') parser.add_argument('-s', '--skip', action='store_true', help='skip logdir check') args, unknown = parser.parse_known_args() load_via_json = (hasattr(args, 'cfg') and args.cfg is not None) assert load_via_json skip_logdir_check = (hasattr(args, 'skip') and (args.skip is not None) and args.skip) or (not vb) if len(unknown) > 0 and vb: print亮红('Warning! 
In json setting mode, %s is ignored'%str(unknown)) # load configuration from file import commentjson as json if vb: print亮绿('reading configuration at', args.cfg) # inject configuration into place with open(args.cfg, encoding='utf8') as f: json_data = json.load(f) # check and process tmp alg folder if vb: prepare_alg_tmp_folder(json_data) # inject configuration into place load_config_via_json(json_data, vb) # read the new global configuration from config import GlobalConfig as cfg # check log path conflict, change note name if required note_name_overide = None if not skip_logdir_check: note_name_overide = check_experiment_log_path(cfg.logdir) if note_name_overide is not None: override_config_file('config.py->GlobalConfig', {'note':note_name_overide}, vb) # create log path if not os.path.exists(cfg.logdir): os.makedirs(cfg.logdir) # back up essiential files if (not cfg.recall_previous_session) and vb: copyfile(args.cfg, '%s/experiment.jsonc'%cfg.logdir) if not os.path.exists('%s/raw_exp.jsonc'%cfg.logdir): copyfile(args.cfg, '%s/raw_exp.jsonc'%cfg.logdir) backup_files(cfg.backup_files, cfg.logdir, args.cfg) cfg.machine_info = register_machine_info(cfg.logdir) # light up the ready flag cfg.cfg_ready = True # finish return cfg def load_config_via_json(json_data, vb): for cfg_group in json_data: if cfg_group == 'config.py->GlobalConfig': random_seed_warning(json_data[cfg_group]) dependency = override_config_file(cfg_group, json_data[cfg_group], vb) if dependency is not None: for dep in dependency: assert any([dep in k for k in json_data.keys()]), 'Arg check failure, There is something missing!' 
check_config_relevence(json_data) return None def override_config_file(cfg_group, new_cfg, vb): import importlib assert '->' in cfg_group str_pro = '------------- %s -------------'%cfg_group if vb: print绿(str_pro) file_, class_ = cfg_group.split('->') if '.py' in file_: # replace it with removesuffix('.py') if you have python>=3.9 if file_.endswith('.py'): file_ = file_[:-3] default_configs = getattr(importlib.import_module(file_), class_) for key in new_cfg: if new_cfg[key] is None: continue my_setattr(conf_class=default_configs, key=key, new_value=new_cfg[key], vb=vb) altered_cv = secure_chained_vars(default_configs, new_cfg, vb) if vb: print绿(''.join(['-']*len(str_pro)),) arg_summary(default_configs, new_cfg, altered_cv) print绿(''.join(['-']*len(str_pro)),'\n\n\n') if 'TEAM_NAMES' in new_cfg: return [item.split('->')[0] for item in new_cfg['TEAM_NAMES'] if not item.startswith('TEMP')] return None def secure_chained_vars(default_cfg, new_cfg, vb): default_cfg_dict = default_cfg.__dict__ altered_cv = [] for key in default_cfg_dict: is_chain, o_key = is_chained_key(key) if not is_chain: continue if o_key in new_cfg: continue assert hasattr(default_cfg, o_key), ('twin var does not have original') # get twin chain_var = getattr(default_cfg, key) need_reflesh = False for chain_by_var in chain_var.chained_with: if chain_by_var in new_cfg: need_reflesh = True if not need_reflesh: continue replace_item = chain_var.chain_func(*[getattr(default_cfg, v) for v in chain_var.chained_with]) original_item = getattr(default_cfg, o_key) if vb: print靛('[config] warning, %s is chained by %s, automatic modifying:'%(o_key, str(chain_var.chained_with)), original_item, '-->', replace_item) setattr(default_cfg, o_key, replace_item) altered_cv.append(o_key) return altered_cv """ make sure that env selection Matches env configuration """ def check_config_relevence(json_data): env_name = json_data['config.py->GlobalConfig']['env_name'] env_path = 
json_data['config.py->GlobalConfig']['env_path'] for key in json_data.keys(): if 'MISSION' in key: assert env_path in key, ('configering wrong env!') """ Warn user if the random seed is not given """ def random_seed_warning(json_data): if 'seed' not in json_data: from config import GlobalConfig as cfg print亮红('Random seed not given, using %d'%cfg.seed) time.sleep(5) def prepare_tmp_folder(): def init_dir(dir): if not os.path.exists(dir): os.makedirs(dir) local_temp_folder = './TEMP' global_temp_folder = os.path.expanduser('~/HmapTemp') init_dir(local_temp_folder) init_dir(global_temp_folder+'/GpuLock') init_dir(global_temp_folder+'/PortFinder') def prepare_alg_tmp_folder(json_data): try: # scan mission conf mission_key = [k for k in json_data.keys() if k.startswith('MISSION')][0] # obtain algorithm assignment TEAM_NAMES = json_data[mission_key]['TEAM_NAMES'] for tname in TEAM_NAMES: if not tname.startswith('TEMP'): continue # obtain the path of algorithm to be mirrored path = tname.split('->')[0].replace('.','/') # trace path parent to algorithm folder. 
trace_success = False max_depth = 5 for _ in range(max_depth): parent = os.path.relpath(path+'/..') if os.path.basename(parent) == 'ALGORITHM': src_path = os.path.relpath(path, start=os.path.relpath(parent+'/..')) trace_success = True break path = parent # transmit temp algorithm if trace_success: import glob from stat import S_IREAD, S_IRGRP, S_IROTH, S_IWRITE def readonly_handler(func, path, execinfo): try: os.chmod(path, S_IWRITE) func(path) except: pass return rmtree(path, onerror=readonly_handler) # src_path = remove_prefix(path, 'TEMP/') print亮绿(f'[config] Copying mirror algorithm from {src_path} to {path}') copytree(src_path, path) # make these temp files read only for f in glob.glob(path+'/**/*.py', recursive=True): os.chmod(f, S_IREAD|S_IRGRP|S_IROTH) except: print亮红('[config] Errors occurs when executing prepare_alg_tmp_folder') time.sleep(5) return def register_machine_info(logdir): import socket, json, subprocess, uuid from .network import get_host_ip info = { 'HostIP': get_host_ip(), 'ExpUUID': uuid.uuid1().hex, 'RunPath': os.getcwd(), 'StartDateTime': time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) } try: info['DockerContainerHash'] = subprocess.getoutput(r'cat /proc/self/cgroup | grep -o -e "docker/.*"| head -n 1 |sed "s/docker\\/\\(.*\\)/\\1/" |cut -c1-12') except: info['DockerContainerHash'] = 'None' with open('%s/info.json'%logdir, 'w+') as f: json.dump(info, f, indent=4) return info def backup_files(files, logdir, jsonfile): from config import GlobalConfig as cfg if cfg.remote_server_ops != "": remote_server_ops = cfg.remote_server_ops.replace("LOCALFILE", jsonfile).replace( "REMOTEFILE", time.strftime("%Y_%m_%d_%H_%M_%S__", time.localtime())+ cfg.note + '__' + os.path.basename(jsonfile)) os.popen(remote_server_ops) for file in files: if os.path.isfile(file): print绿('[config] Backup File:',file) bkdir = '%s/backup_files/'%logdir if not os.path.exists(bkdir): os.makedirs(bkdir) copyfile(file, '%s/%s'%(bkdir, os.path.basename(file))) else: 
print亮绿('[config] Backup Folder:',file) assert os.path.isdir(file), ('cannot find', file) copytree(file, '%s/backup_files/%s'%(logdir, os.path.basename(file)), dirs_exist_ok=True, ignore=ignore_patterns("__pycache__")) return def check_experiment_log_path(logdir): res = None if os.path.exists(logdir): if os.path.exists(logdir+'test_stage'): return None print亮红('Current log path:', logdir) print亮红('Warning! you will overwrite old logs if continue!') print亮红("Pause for 60 seconds before continue (or press Enter to confirm!)") try: res = askChoice() if res == '': res = None except func_timeout.exceptions.FunctionTimedOut as e: res = None return res @func_timeout.func_set_timeout(60) def askChoice(): return input('>>') def arg_summary(config_class, modify_dict = {}, altered_cv = []): for key in config_class.__dict__: if '__' in key: continue is_chain, _ = is_chained_key(key) if is_chain: continue if (not key in modify_dict) or (modify_dict[key] is None): if key not in altered_cv: print绿(key.center(25), '-->', str(getattr(config_class,key))) else: print靛(key.center(25), '-->', str(getattr(config_class,key))) else: print红(key.center(25), '-->', str(getattr(config_class,key))) def my_setattr(conf_class, key, new_value, vb): assert hasattr(conf_class, key), (conf_class, 'has no such config item: **%s**'%key) setting_name = key replace_item = new_value original_item = getattr(conf_class, setting_name) if vb: print绿('[config] override %s:'%setting_name, original_item, '-->', replace_item) if isinstance(original_item, float): replace_item = float(replace_item) elif isinstance(original_item, bool): if replace_item == 'True': replace_item = True elif replace_item == 'False': replace_item = False elif isinstance(replace_item, bool): replace_item = replace_item else: assert False, ('enter True or False, but have:', replace_item) elif isinstance(original_item, int): assert int(replace_item) == float(replace_item), ("warning, this var **%s** has an int default, but given a float 
override!"%key) replace_item = int(replace_item) elif isinstance(original_item, str): replace_item = replace_item elif isinstance(original_item, list): assert isinstance(replace_item, list) elif isinstance(original_item, dict): assert isinstance(replace_item, dict) else: assert False, ('not support this type') setattr(conf_class, setting_name, replace_item) return def find_all_conf(): import glob py_script_list = glob.glob('./**/*.py', recursive=True) conf_class_gather = [] for python_file in py_script_list: with open(python_file,encoding='UTF-8') as f: lines = f.readlines() for line in lines: if 'ADD_TO_CONF_SYSTEM' not in line: continue if 'class ' not in line: continue conf_class_gather.append({'line':line, 'file':python_file}) def getBetween(str, str1, str2): strOutput = str[str.find(str1)+len(str1):str.find(str2)] return strOutput for target in conf_class_gather: class_name = getBetween(target['line'], 'class ', '(') target['class_name'] = class_name target['file'] = target['file'].replace('/', '.').replace('..', '') import importlib target['class'] = getattr(importlib.import_module(target['file'].replace('.py', '')), class_name) return conf_class_gather def make_json(conf_list): import json out = {} for conf in conf_list: local_conf = {} config_class = conf['class'] for key in config_class.__dict__: if '__' in key: continue is_chain, _ = is_chained_key(key) if is_chain: continue item_to_be_serialize = getattr(config_class, key) try: json.dumps(item_to_be_serialize) except: item_to_be_serialize = '[cannot be json]' + str(item_to_be_serialize) local_conf[key] = item_to_be_serialize out[conf['file']] = local_conf # json_str = json.dumps(out) with open('all_conf.json', 'w') as f: json.dump(out, f, indent=4) print亮紫('the conf summary is successfully saved to all_conf.json') if __name__ == '__main__': conf_list = find_all_conf() res_json = make_json(conf_list) ================================================ FILE: 
PythonExample/hmp_minimal_modules/UTIL/data_struct.py ================================================ class UniqueList(): def __init__(self, list_input=None): self._list = [] if list_input is not None: self.extend_unique(list_input) def append_unique(self, item): if item in self._list: return False else: self._list.append(item) def extend_unique(self, list_input): for item in list_input: self.append_unique(item) def has(self, item): return (item in self._list) def len(self): return len(self._list) def __len__(self): return len(self._list) def get(self): return self._list def __iter__(self): return self._list.__iter__() # # https://stackoverflow.com/questions/16891340/remove-a-prefix-from-a-string # def remove_prefix(text, prefix): # return text[text.startswith(prefix) and len(prefix):] # https://stackoverflow.com/questions/3663450/remove-substring-only-at-the-end-of-string def remove_suffix(s, sub): return s[:-len(sub)] if s.endswith(sub) else s def remove_prefix(s, sub): return s[len(sub):] if s.startswith(sub) else s ================================================ FILE: PythonExample/hmp_minimal_modules/UTIL/exp_helper.py ================================================ import paramiko, os, time from UTIL.colorful import print亮紫, print亮靛 from UTIL.tensor_ops import __hash__ def singleton(cls): _instance = {} def inner(*args, **kwargs): if cls not in _instance: _instance[cls] = cls(*args, **kwargs) return _instance[cls] return inner class ChainVar(object): def __init__(self, chain_func, chained_with): self.chain_func = chain_func self.chained_with = chained_with class DataCentralServer(object): # ADD_TO_CONF_SYSTEM //DO NOT remove this comment// addr = 'None' usr = 'None' pwd = 'None' @singleton class changed(): def __init__(self): self._storage = {} def check(self, value, key): if key in self._storage: new_hash = __hash__(value) if self._storage[key] == new_hash: return False else: self._storage[key] = new_hash return True else: self._storage[key] = 
__hash__(value) return True  # NOTE(review): truncated fragment of a definition that starts before this chunk — left verbatim

from stat import S_ISDIR

# great thank to skoll for sharing this at stackoverflow:
# https://stackoverflow.com/questions/4409502/directory-transfers-with-paramiko
class MySFTPClient(paramiko.SFTPClient):
    # paramiko SFTPClient extended with recursive directory upload/download helpers.

    def put_dir(self, source, target, ignore_list=[]):
        ''' Uploads the contents of the source directory to the target path. The
            target directory needs to exists. All subdirectories in source are
            created under target.
            NOTE(review): `ignore_list=[]` is a mutable default argument; it is
            never mutated here, but a tuple default would be the safer idiom.
        '''
        for item in os.listdir(source):
            if item in ignore_list: continue
            if os.path.isfile(os.path.join(source, item)):
                # print亮靛('uploading: %s --> %s'%(os.path.join(source, item),'%s/%s' % (target, item)))
                self.put(os.path.join(source, item), '%s/%s' % (target, item))
            else:
                self.mkdir('%s/%s' % (target, item), ignore_existing=True)
                self.put_dir(os.path.join(source, item), '%s/%s' % (target, item), ignore_list)

    def isfile(self, path):
        # True when the remote path is not a directory (per its stat mode).
        try:
            return not S_ISDIR(self.stat(path).st_mode)
        except IOError:
            # Path does not exist, so by definition not a directory
            return True

    def get_dir(self, source, target, ignore_list=[]):
        ''' Download the contents of the source directory to the target path. The
            target directory needs to exists. All subdirectories in source are
            created under target.
        '''
        for item in self.listdir(source):
            if item in ignore_list: continue
            if self.isfile(os.path.join(source, item).replace('\\','/')):
                # print亮靛('uploading: %s --> %s'%(os.path.join(source, item),'%s/%s' % (target, item)))
                self.get(os.path.join(source, item).replace('\\','/'), '%s/%s' % (target, item))
            else:
                if os.path.exists('%s/%s' % (target, item)):
                    print('local dir already exists:', '%s/%s' % (target, item))
                    continue
                os.mkdir('%s/%s' % (target, item))
                self.get_dir(os.path.join(source, item).replace('\\','/'), '%s/%s' % (target, item), ignore_list)

    def mkdir(self, path, mode=511, ignore_existing=False):
        ''' Augments mkdir by adding an option to not fail if the folder exists '''
        try:
            super(MySFTPClient, self).mkdir(path, mode)
        except IOError as e:
            # re-raise when the parent directory is missing; that is a real error
            if e.__class__ == FileNotFoundError:
                raise
            if ignore_existing:
                pass
            else:
                raise

def get_ssh_sftp(addr, usr, pwd):
    # Open a password-auth SSH connection and wrap its transport in MySFTPClient.
    # `addr` may be 'host' or 'host:port'.
    # NOTE(review): when 'host:port' is given, `port` stays a str after split —
    # paramiko documents `port` as int; confirm this works for all callers.
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.load_host_keys(os.path.expanduser(os.path.join("~", ".ssh", "known_hosts")))
    port = 22
    if ':' in addr:
        addr, port = addr.split(':')
    ssh.connect(addr, username=usr, password=pwd, port=port)
    sftp = MySFTPClient.from_transport(ssh.get_transport())
    return ssh, sftp

def upload_exp(cfg):
    # shell it to catch error — the upload is best-effort and must never crash the caller
    try:
        upload_exp_(cfg)
    except:
        pass

def upload_exp_(cfg):
    # Upload the experiment log directory (cfg.logdir) to the configured central
    # data server under /home/<usr>/CenterHmp/<cfg.note>, and append one line to
    # the server-side active.log.
    path = cfg.logdir
    name = cfg.note
    try:
        addr = DataCentralServer.addr   # ssh ubuntu address
        usr = DataCentralServer.usr     # ubuntu user
        pwd = DataCentralServer.pwd     # ubuntu password
        assert addr != 'None' and (addr is not None)
        assert usr != 'None' and (usr is not None)
        assert pwd != 'None' and (pwd is not None)
    except:
        print('No experiment data central server is configured, 没有配置中央日志服务器')
        return
    remote_path = '/home/%s/CenterHmp/'%usr
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.load_host_keys(os.path.expanduser(os.path.join("~", ".ssh", "known_hosts")))
    ssh.connect(addr, username=usr, password=pwd)
    put_str = '[%s] [%s] %s'%(cfg.note, time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime()), str(cfg.machine_info).replace('\'',''))
    ssh.exec_command(command='echo -e "%s" >> %s/active.log'%(put_str, remote_path), timeout=1)
    sftp = MySFTPClient.from_transport(ssh.get_transport())
    print亮紫('uploading results: %s --> %s'%(path, '%s/%s'%(remote_path, name)))
    sftp.mkdir(remote_path, ignore_existing=True)
    sftp.mkdir('%s/%s'%(remote_path, name), ignore_existing=True)
    sftp.put_dir(path, '%s/%s'%(remote_path, name))
    sftp.close()
    print亮紫('upload complete')

================================================ FILE: PythonExample/hmp_minimal_modules/UTIL/fetch_multiserver.py ================================================
from UTIL.exp_helper import get_ssh_sftp
from UTIL.colorful import *
import time,os
'''
Fetch experiment results from worker servers
'''
n_run_mode = [
    # { # @1
    #     "addr": "172.18.116.149:2233",
    #     "usr": "hmp",
    #     "pwd": "hmp"
    # },
    # { # @2
    #     "addr": "172.18.116.150:2233",
    #     "usr": "fuqingxu",
    #     "pwd": "clara"
    # },
    { # @3
        "addr": "172.18.116.149:2233",
        "usr": "fuqingxu",
        "pwd": "clara"
    }
]
download_dir = './fetch/'
after_date = '2022-03-22-17-22-00'
consider_days = None

info_list = {}
to_download = {}
for ith_run in range(len(n_run_mode)):
    addr = n_run_mode[ith_run]['addr']
    usr = n_run_mode[ith_run]['usr']
    pwd = n_run_mode[ith_run]['pwd']
    ssh, sftp = get_ssh_sftp(addr, usr, pwd)
    experiments_path = sftp.listdir(path='./MultiServerMission/')
    # reorder from earliest to latest (translated)
    experiments_path = reversed(sorted(experiments_path))
    for index, exp_time in enumerate(experiments_path):
        time_then = time.mktime(time.strptime(exp_time,"%Y-%m-%d-%H:%M:%S"))
        time_now = time.mktime(time.localtime())
        diff_time_days = (time_now - time_then)/3600/24
        # on first pass derive the window from `after_date`; note the remote dirs
        # use %H:%M:%S while after_date uses %H-%M-%S — both formats are intentional
        if consider_days is None:
            consider_days = (time_now - time.mktime(time.strptime(after_date,"%Y-%m-%d-%H-%M-%S")))/3600/24
        if diff_time_days > consider_days: continue
        path_ckpt = './MultiServerMission/%s/src/ZHECKPOINT/'%exp_time
        try:
            list_of_sub_exp = sftp.listdir(path=path_ckpt)
        except:
            print('No ZHECKPOINT directory found!')
            continue
        key = str(ith_run)+'-'+str(index)
        print亮绿(key,':',exp_time)
        for sep in list_of_sub_exp:
            print亮紫('\t- ',sep)
        info_list[key] = {'ith_run':ith_run, 'index':index, 'path':path_ckpt}
        target_path = (download_dir+'/%s/'%exp_time.replace(':','-'))
        try:
            os.mkdir(target_path)
            sftp.get_dir(source=path_ckpt,target=target_path)  # download! (translated)
        except BaseException as e:
            # os.mkdir raising is used as the "already fetched" signal
            print('This directory already exists, skipping:', target_path)

print('download complete')

================================================ FILE: PythonExample/hmp_minimal_modules/UTIL/file_lock.py ================================================
# pip install filelock
from filelock import FileLock as FileLockBase

class FileLock(FileLockBase):
    # Thin wrapper enforcing the project-wide '.lock' filename suffix convention.
    def __init__(self, lock_file, timeout: float = -1) -> None:
        assert lock_file.endswith('.lock')
        super().__init__(lock_file, timeout)

def is_file_empty(file_path):
    # True when the file contains nothing, or only a single newline.
    with open(file_path, 'r') as f:
        file_content = f.read()
    if file_content == '' or file_content == '\n':
        return True
    else:
        return False

================================================ FILE: PythonExample/hmp_minimal_modules/UTIL/gpu_eater.py ================================================
def validate_path():
    # Make the repository root the CWD and importable, so this file runs standalone.
    import os, sys
    dir_name = os.path.dirname(__file__)
    root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..')
    os.chdir(root_dir_assume)
    sys.path.append(root_dir_assume)

if __name__ == '__main__':
    validate_path()

from multiprocessing import Process
from UTIL.network import UnixTcpServerMultiClient
import os, time, re, torch
import threading

def check_devices_mem():
    # Query per-GPU [total, used] memory (MiB strings) via nvidia-smi;
    # filtered down to CUDA_VISIBLE_DEVICES when that variable is set.
    devices_info = os.popen(
        '"/usr/bin/nvidia-smi"'
        + ' --query-gpu=memory.total,memory.used'
        + ' --format=csv,nounits,noheader'
    ).read().strip().split("\n")
    divices_mem_info = [x.split(',') for x in devices_info]
    divices = os.environ.get("CUDA_VISIBLE_DEVICES")
    if divices is None:
        return divices_mem_info
    else:
        device_list = []
        for i in [int(x) for x in divices.split(',')]:
            device_list.append(divices_mem_info[i])
        return device_list

def occupy_device_mem(cuda_device, mem_info, free=1024):
    # Allocate (total - used - free) MiB on the device so other jobs cannot grab it.
    # The tensor is deleted immediately; torch's caching allocator keeps the
    # memory reserved for this process.
    total, used = int(mem_info[0]), int(mem_info[1])
    block_mem = total - used - free
    if block_mem > 0:
        print('Occupy device_{}\'s mem ...'.format(cuda_device))
        x = torch.zeros(
            (256, 1024, block_mem),
            dtype=torch.float32,
            device='cuda:{}'.format(cuda_device)
        )
        del x
        print('Occupy device_{}\'s mem finished'.format(cuda_device))
    else:
        print('Device_{}\'s out of memory'.format(cuda_device))

def occupy_gpus_mem(free=4096):
    # Occupy every visible GPU, leaving `free` MiB headroom on each.
    for i, mem_info in enumerate(check_devices_mem()):
        occupy_device_mem(i, mem_info, free)
    print('Occupy all device\'s mem finished')

class GPU_Eater(Process):
    # Daemon process holding GPU memory for one 'party'; serves a unix-socket
    # command protocol: link / need_gpu / giveup_gpu / offline.
    def __init__(self, unix_path, party):
        super(GPU_Eater, self).__init__()
        self.unix_path = unix_path
        self.server = None
        self.party = party
        # party string format: 'cuda<X>_party<N>'; only party 0 is supported
        match_res = re.match(pattern=r'cuda(.)_party(.)', string=party)
        cudax, self.party_index = match_res[1], match_res[2]
        assert self.party_index == '0'
        self.device = f'cuda:{cudax}'
        cudax_int = int(cudax)
        os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
        os.environ["CUDA_VISIBLE_DEVICES"] = cudax
        self.previous_req = time.time()

    def __del__(self):
        if self.server is not None:
            self.server.close()
        self.terminate()

    def run_timer(self):
        # Watchdog thread: self-terminate after one hour without any client request.
        while True:
            time.sleep(60)
            delta_time = time.time() - self.previous_req
            print(f'inactive for {delta_time} seconds')
            if delta_time > 3600:
                self.__del__()
                break

    def release_gpu(self):
        # Drop cached blocks so the requesting trainer can use the memory.
        torch.cuda.empty_cache()
        pass

    def hold_gpu(self):
        # Re-occupy the GPU, keeping 2048 MiB free.
        occupy_gpus_mem(free=2048)
        pass

    def on_receive_data(self, data):
        # Protocol handler; every valid message refreshes the inactivity watchdog.
        print('data incoming')
        if data == 'link':
            self.previous_req = time.time()
            reply = 'success'
        elif data == 'need_gpu':
            self.release_gpu()
            self.previous_req = time.time()
            reply = 'ok'
        elif data == 'giveup_gpu':
            self.hold_gpu()
            self.previous_req = time.time()
            reply = 'ok'
        elif data == 'offline':
            self.previous_req = time.time()
            reply = 'ok'
        else:
            assert False
        print(data)
        return reply

    def run(self):
        print('started')
        try:
            # remove a stale socket file left by a previous run
            os.unlink(self.unix_path)
        except:
            pass
        t = threading.Thread(target=self.run_timer)
        t.daemon = True
        t.start()
        self.server = UnixTcpServerMultiClient(self.unix_path, obj='str')
        self.server.on_receive_data = lambda data: self.on_receive_data(data)
        self.server.be_online()

if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='gpu_party')
    parser.add_argument('--party', type=str)
    args = parser.parse_args()
    party = args.party
    unix_path = os.path.expanduser(f'~/HmapTemp/GpuLock/GpuEater_{party}')
    o = GPU_Eater(unix_path, party)
    o.run()

================================================ FILE: PythonExample/hmp_minimal_modules/UTIL/gpu_share.py ================================================
import platform, os, torch, uuid, time, psutil, json, random
from UTIL.network import UnixTcpClientP2P, UnixTcpServerP2P
from atexit import register
from .file_lock import FileLock

def pid_exist(pid_str):
    # True when a process with this (string) pid is alive.
    pid = int(pid_str)
    return psutil.pid_exists(pid)

def read_json(fp):
    # create if not exist
    if not os.path.exists(fp):
        with open(fp, "w") as f:
            pass
    # try to read, otherwise reset
    try:
        with open(fp, "r+") as f:
            json_data = json.load(f)
    except:
        json_data = {}
    return json_data

def write_json(fp, buf):
    # Overwrite fp with buf serialized as JSON.
    with open(fp, "w") as f:
        json.dump(buf, fp=f)
    return

def create_eater(unix_path):
    # Spawn a background GPU_Eater daemon process.
    # NOTE(review): GPU_Eater.__init__ takes (unix_path, party); this call passes
    # only unix_path and would raise TypeError if reached — verify against
    # UTIL/gpu_eater.py (currently only reachable from GpuHolder's dead branch).
    from .gpu_eater import GPU_Eater
    proc = GPU_Eater(unix_path)
    proc.daemon = True
    proc.start()

class GpuHolder():
    # Client-side handle to the per-device GPU_Eater daemon.
    def __init__(self, device) -> None:
        # try to communicate with gpu holder
        unix_path = os.path.expanduser(f'~/HmapTemp/GpuLock/GpuEater_{device}')
        try:
            self.client = UnixTcpClientP2P(unix_path, obj='str')
            success = self.client.send_and_wait_reply('link')
            print('already have a GpuHolder online')
        except:
            assert False
            # NOTE(review): everything below is unreachable because of the
            # `assert False` above — confirm whether auto-creation was meant to stay disabled.
            print('creating GpuHolder')
            create_eater(unix_path)
            time.sleep(3)
            print('creating Finished')
            self.client = UnixTcpClientP2P(unix_path, obj='str')
            success = self.client.send_and_wait_reply('link')
            assert success == 'success'

    def __del__(self):
        if self.client is not None:
            self.client.send_and_wait_reply('offline')
            self.client.__del__()

    def need_gpu(self):
        # Ask the eater to release its held memory to us.
        ok = self.client.send_and_wait_reply('need_gpu')
        assert ok == 'ok'

    def giveup_gpu(self):
        # Tell the eater to re-occupy the GPU memory.
        ok = self.client.send_and_wait_reply('giveup_gpu')
        assert ok == 'ok'

class GpuShareUnit():
    # Coordinates GPU time-sharing between training processes via file locks;
    # a process holds the GPU only while inside the `with` block.
    flesh = True   # class flag: force a dead-pid sweep on the first registration

    def __init__(self, which_gpu, lock_path=None, manual_gpu_ctl=True, gpu_party='', gpu_ensure_safe=False):
        # NOTE(review): the `manual_gpu_ctl` parameter is ignored — the attribute
        # is hard-coded True below (switched off only when gpu_party == 'off').
        self.device = which_gpu
        self.manual_gpu_ctl = True
        self.lock_path=lock_path
        self.gpu_party = gpu_party
        self.gpu_lock = None
        self.ensure_gpu_safe = gpu_ensure_safe
        self.pid_str = str(os.getpid())
        self.n_gpu_process_online = 1
        if self.ensure_gpu_safe:
            assert 'party0' in self.gpu_party; assert 'cuda' in self.gpu_party
            self.gpu_eater = GpuHolder(device=self.gpu_party)
        if gpu_party == 'off':
            self.manual_gpu_ctl = False
        # the default file lock path
        if self.lock_path is None:
            self.lock_path = os.path.expanduser('~/HmapTemp/GpuLock')
        # create a folder if the path is invalid
        if not os.path.exists(self.lock_path):
            os.makedirs(self.lock_path)
        # gpu party register file
        self.register_file = self.lock_path+'/lock_gpu_%s_%s.json'%(self.device, self.gpu_party)
        register(self.__del__)

    def __del__(self):
        if hasattr(self,'_deleted_'):
            # avoid exit twice
            return
        else:
            self._deleted_ = True   # avoid exit twice
        try:
            with FileLock(self.register_file+'.lock'):
                self.unregister_pid()
        except:
            pass
        try:
            self.gpu_lock.__exit__(None,None,None)
        except:pass

    def __enter__(self):
        self.get_gpu_lock()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release_gpu_lock()

    def get_gpu_lock(self):
        # Register our pid, then block until the shared gpu file lock is acquired.
        if self.manual_gpu_ctl:
            print('Waiting for GPU %s %s...'%(self.device, self.gpu_party), end='', flush=True)
            with FileLock(self.register_file+'.lock'):
                self.n_gpu_process_online = self.register_pid()
            fp = self.lock_path+'/gpu_lock_%s_%s'%(self.device, self.gpu_party)
            self.gpu_lock = FileLock(fp+'.lock')
            self.gpu_lock.__enter__()
            if self.ensure_gpu_safe:
                self.gpu_eater.need_gpu()
            print('Get GPU, currently shared with %d process!'%self.n_gpu_process_online)
        return

    def release_gpu_lock(self):
        # Empty the CUDA cache and hand the file lock to the next waiter.
        if self.manual_gpu_ctl:
            # if self.n_gpu_process_online > 1:
            torch.cuda.empty_cache()
            if self.ensure_gpu_safe:
                self.gpu_eater.giveup_gpu()
            self.gpu_lock.__exit__(None,None,None)
        # else:
        #     print('GPU not shared')
        return

    def register_pid(self):
        # Add our pid to the party's register file; returns the sharer count.
        # Caller must already hold the register-file lock.
        all_pids = read_json(self.register_file)
        need_write = False
        # check all pid alive occasionally
        if GpuShareUnit.flesh or random.random() < 0.05:
            for pid in list(all_pids.keys()):
                if not pid_exist(pid):
                    all_pids.pop(pid); print('removing dead item', pid)
                    need_write = True
            GpuShareUnit.flesh = False
        # add entry if not exist
        if self.pid_str not in all_pids:
            all_pids[self.pid_str] = {}
            need_write = True
        # write back if needed
        if need_write:
            write_json(self.register_file, all_pids)
        return len(all_pids)

    def unregister_pid(self):
        # Remove our pid (and any dead pids) from the register file.
        all_pids = read_json(self.register_file)
        # check all pid alive
        for pid in list(all_pids.keys()):
            if not pid_exist(pid):
                all_pids.pop(pid); print('removing dead item', pid)
        try:
            all_pids.pop(self.pid_str)
        except:
            pass
        # write back if needed
        write_json(self.register_file, all_pids)

================================================ FILE: PythonExample/hmp_minimal_modules/UTIL/hidden_print.py ================================================
import sys, os

class HiddenPrints:
    # Context manager that silences stdout by redirecting it to os.devnull.
    def __enter__(self):
        self._original_stdout = sys.stdout
        sys.stdout = open(os.devnull, 'w')

    def __exit__(self, exc_type, exc_val, exc_tb):
        sys.stdout.close()
        sys.stdout = self._original_stdout

================================================ FILE: PythonExample/hmp_minimal_modules/UTIL/hmp_daemon.py ================================================
import time, requests, threading, os, atexit, psutil
from UTIL.colorful import *

def kill_process(p):
    # Politely terminate a psutil process; escalate to SIGKILL if it survives
    # two short grace periods (10 ms, then 100 ms).
    try:
        # (translated) print: sending terminate command to process: os.getpid() --> p.pid
        p.terminate()
        _, alive = psutil.wait_procs([p,], timeout=0.01)   # wait 10 ms first (translated)
        if len(alive):
            _, alive = psutil.wait_procs(alive, timeout=0.10)   # then wait 100 ms (translated)
            if len(alive):
                # (translated) (R1) process ignored terminate — sending kill -9
                for p in alive: p.kill()
            else:
                # (translated) (R2) process ended successfully
                pass
        else:
            # (translated) (R2) process ended successfully
            pass
    except Exception as e:
        print(e)

def kill_process_and_its_children(p):
    # Recursively kill a process tree: children first, then `p` itself.
    p = psutil.Process(p.pid)   # p might be Python's process, convert to psutil's process
    if len(p.children())>0:
        # (translated) has child processes
        for child in p.children():
            if hasattr(child,'children') and len(child.children())>0:
                kill_process_and_its_children(child)
            else:
                kill_process(child)
    else:
        pass
        # (translated) no child processes
    kill_process(p)

def kill_process_children(p):
    # Kill only the children (recursively); leave `p` itself running.
    p = psutil.Process(p.pid)   # p might be Python's process, convert to psutil's process
    if len(p.children())>0:
        # (translated) has child processes
        for child in p.children():
            if hasattr(child,'children') and len(child.children())>0:
                kill_process_and_its_children(child)
            else:
                kill_process(child)
    else:
        pass
        # (translated) no child processes

def clean_child_process(pid):
    # Kill every descendant of `pid` without touching `pid` itself.
    parent = psutil.Process(pid)
    kill_process_children(parent)

def hmp_clean_up():
    # Exit handler: best-effort result upload, then kill children and terminate.
    from UTIL.exp_helper import upload_exp
    from config import GlobalConfig as cfg
    print亮黄('[main.py] upload results to storage server via SSH')
    if cfg.allow_res_upload: upload_exp(cfg)
    print亮黄('[main.py] kill all children process, then self-terminate.')
    clean_child_process(os.getpid())

def start_periodic_daemon(cfg):
    # NOTE(review): the early return below disables the daemon; everything after
    # it is dead code kept for debugging.
    print('[hmp_daemon.py] Disable periodic daemon to debug.')
    return
    periodic_thread = threading.Thread(target=periodic_daemon,args=(cfg,))
    periodic_thread.setDaemon(True)
    periodic_thread.start()
    for i in range(100):
        time.sleep(1)
        print(i)
    atexit.register(hmp_clean_up)

def periodic_daemon(cfg):
    # Loop forever: report status every 15 min; clean up on the center's stop signal.
    while True:
        try:
            print('start periodic_daemon_(cfg)')
            periodic_daemon_(cfg)
            print('end periodic_daemon_(cfg)')
        except AssertionError:
            # raised by periodic_daemon_ when the center replies 'Stop_Now'
            hmp_clean_up()
        except BaseException:
            print('hmp server failed')
            break
        time.sleep(15*60)

def periodic_daemon_(cfg):
    # Post this client's status to the HMP center; raise AssertionError when the
    # center replies 'Stop_Now'.
    report = {
        'type': 'hmp-client',
        'note': cfg.note,
        'time': time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime()),
        'client_status': 'Running',
        'StartingTime': cfg.machine_info['StartDateTime'],
        'HostIP': cfg.machine_info['HostIP'],
        'ExpUUID': cfg.machine_info['ExpUUID'],
        'RunPath':cfg.machine_info['RunPath'],
        'DockerContainerHash':cfg.machine_info['DockerContainerHash']
    }
    res = requests.post('http://linux.ipv4.fuqingxu.top:11511/',data = report)
    if res.text=='Stop_Now':
        report['client_status'] = 'Terminate'
        requests.post('http://linux.ipv4.fuqingxu.top:11511/',data = report)
        raise AssertionError('HMP-Center Has Given Terminate Signal!')

================================================ FILE: PythonExample/hmp_minimal_modules/UTIL/legacy/gpu_share_unfin.py ================================================
# NOTE(review): legacy, UNFINISHED module ('unfin' in the filename); it contains
# a syntactically broken method further down — kept for reference, do not import.
import flock, os, torch, uuid, time, glob
from atexit import register

class GpuShareUnit():
    # flock-based predecessor of UTIL/gpu_share.py's GpuShareUnit.
    def __init__(self, which_gpu, lock_path=None, manual_gpu_ctl=True, gpu_party=''):
        self.device = which_gpu
        self.manual_gpu_ctl = True
        self.lock_path=lock_path
        self.gpu_party = gpu_party
        self.experiment_uuid = uuid.uuid1().hex + '\n'
        self.n_gpu_process_online = 1
        self.flag_req_all_party = False
        self.parties_req = None
        # (translated) if a single party's VRAM is insufficient, queue several parties at once to obtain memory
        if gpu_party == 'off' or gpu_party == 'OFF' or gpu_party<0:
            # NOTE(review): `gpu_party<0` compares str with int — TypeError on Python 3 if evaluated
            self.manual_gpu_ctl = False
        if self.lock_path is None:
            self.lock_path = os.path.expanduser('~/GpuLock')
        if not os.path.exists(self.lock_path):
            os.makedirs(self.lock_path)
        register(self.unregister_uuids_)

    def __exit__(self, exc_type, exc_value, traceback):
        if not self.flag_req_all_party:
            self.release_gpu_lock()

    def __enter__(self):
        self._get_gpu_locks()
        return self

    def __del__(self):
        self.unregister_uuids_()

    def _get_gpu_locks(self):
        # Acquire the lock of one party, or of every active party when
        # flag_req_all_party has been set via req_all_party().
        if not self.flag_req_all_party:
            self.parties_req = None
            self.__get_gpu_lock(self.device, self.gpu_party)
        else:
            self.parties_req = self.__find_all_active_party(self.device)
            if not (self.gpu_party in self.parties_req):
                self.parties_req.append(self.gpu_party)
            for each_party in self.parties_req:
                self.__get_gpu_lock(self.device, each_party)

    def __find_all_active_party(self, device):
        # Probe party register files 0..63; stop at the first absent/empty one.
        list_of_active_parties = []
        for indx in range(64):
            res = self.___get_party_n_share(device, gpu_party=str(indx))
            if res is None: break
            if res == 0: break
            if res >0: list_of_active_parties.append(str(indx))
        return list_of_active_parties

    def __get_gpu_lock(self, device, gpu_party):
        # Register our uuid then take the exclusive flock for this (device, party).
        if self.manual_gpu_ctl:
            print('Waiting for GPU %s %s...'%(device, gpu_party), end='', flush=True)
            gpu_lock, gpu_lock_file = (None, None)
            self.n_gpu_process_online = self.register_uuid_(device, gpu_party)
            self.gpu_lock_file = open(self.lock_path+'/lock_gpu_%s_%s.glock'%(device, gpu_party), 'w+')
            self.gpu_lock = flock.Flock(self.gpu_lock_file, flock.LOCK_EX)
            self.gpu_lock.__enter__()
            print('Get GPU, currently shared with %d process!'%self.n_gpu_process_online)
        return

    def release_gpu_lock(self):
        self.flag_req_all_party = False
        if self.manual_gpu_ctl:
            if self.n_gpu_process_online >1: torch.cuda.empty_cache()
            self.gpu_lock.__exit__(None,None,None)
            self.gpu_lock_file.close()
        else:
            print('不共享GPU')
        return

    def ___get_party_n_share(self, device, gpu_party):
        # Count registered uuids for a party; None when the register file is absent.
        try:
            flag = 'r'
            with open(self.lock_path+'/lock_gpu_%s_%s.register'%(device, gpu_party), mode=flag) as gpu_register_file:
                _lock = flock.Flock(gpu_register_file, flock.LOCK_EX); _lock.__enter__()
                lines = gpu_register_file.readlines()
                _lock.__exit__(None,None,None)
            return len(lines)
        except:
            return None

    def register_uuid_(self, device, gpu_party):
        # Append our uuid to the party register (under flock); returns sharer count.
        try:
            flag = 'w+' if not os.path.exists(self.lock_path+'/lock_gpu_%s_%s.register'%(device, gpu_party)) else 'r+'
            with open(self.lock_path+'/lock_gpu_%s_%s.register'%(device, gpu_party), mode=flag) as gpu_register_file:
                _lock = flock.Flock(gpu_register_file, flock.LOCK_EX); _lock.__enter__()
                lines = gpu_register_file.readlines()
                if not any([line==self.experiment_uuid for line in lines]):
                    lines.append(self.experiment_uuid)
                    gpu_register_file.seek(0); gpu_register_file.truncate(0)
                    gpu_register_file.writelines(lines)
                    gpu_register_file.flush()
                _lock.__exit__(None,None,None)
            return len(lines)
        except:
            print('GPU 队列异常!')
            return 999
    def unregister_uuids_(self, device, gpu_party):
        # NOTE(review): syntactically broken — 'for __' is an incomplete statement
        # and it calls unregister_uuid_ while only unregister_uuid__ is defined.
        # This is the unfinished part of this legacy module; fragment kept verbatim.
        for __ self.unregister_uuid_(device, gpu_party)
        try:
            self.gpu_lock.__exit__(None,None,None)
        except:pass
        try:
            self.gpu_lock_file.close()
        except:pass

    def unregister_uuid__(self, device, gpu_party):
        # Remove our uuid from the party register (under flock).
        flag = 'w+' if not os.path.exists(self.lock_path+'/lock_gpu_%s_%s.register'%(device, gpu_party)) else 'r+'
        with open(self.lock_path+'/lock_gpu_%s_%s.register'%(device, gpu_party), mode=flag) as gpu_register_file:
            _lock = flock.Flock(gpu_register_file, flock.LOCK_EX); _lock.__enter__()
            lines = gpu_register_file.readlines()
            gpu_register_file.seek(0); gpu_register_file.truncate(0)
            gpu_register_file.writelines([line for line in lines if line!=self.experiment_uuid])
            gpu_register_file.flush()
            _lock.__exit__(None,None,None)
        print('unregister')

    def req_all_party(self):
        # Request that the next _get_gpu_locks() queues on every active party.
        self.flag_req_all_party = True

================================================ FILE: PythonExample/hmp_minimal_modules/UTIL/mem_watcher_ue.py ================================================
def validate_path():
    import os, sys
    # '/home/hmp/xx/hmp2g-heterogeneous-phase2/UTIL'
    dir_name = os.path.dirname(__file__)
    # '/home/hmp/xx/hmp2g-heterogeneous-phase2'
    root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..')
    # change working dir
    os.chdir(root_dir_assume)
    # import root
    sys.path.append(root_dir_assume)
validate_path()

import time, requests, threading, os, atexit, psutil
from UTIL.colorful import *

def thread_dfs(p, depth=0, info=None):
    # Depth-first walk over a process tree, folding per-process stats into `info`
    # via print_info; swallows all errors (processes may vanish mid-walk).
    try:
        if isinstance(p, int): p = psutil.Process(p)
        elif isinstance(p, psutil.Process): pass
        else: p = psutil.Process(p.pid)
        pp = p
        print_info(depth, pp, info)
        if len(p.children())>0:
            # (translated) has child processes
            for child in p.children():
                if hasattr(child,'children') and len(child.children())>0:
                    thread_dfs(child, depth = depth+1, info=info)
                else:
                    pp = child
                    print_info(depth+1, pp, info)
        else:
            pass
    except:
        return

def print_info(depth, pp, info=None):
    # Fold one process's RSS (GiB) into the `info` accumulator, grouped by name;
    # all HmapShmPoolWorker* processes are merged into one bucket.
    pid = pp.pid
    name = pp.name()
    name_trim = 'HmapShmPoolWorker' if name.startswith('HmapShmPoolWorker') else name
    mem = (psutil.Process(pid).memory_info().rss / 1024 / 1024 / 1024)
    info['tot_mem'] += mem
    info['tot_procs'] += 1
    if name_trim not in info:
        info[name_trim] = {
            'mem':0,
            'procs':0,
        }
    info[name_trim]['mem'] += mem
    info[name_trim]['procs'] += 1

def find_procs_by_name(name):
    "Return a list of processes matching 'name'."
    # NOTE(review): despite the docstring, this returns only the FIRST match and
    # raises IndexError when there is none.
    ls = []
    for p in psutil.process_iter(["name", "exe", "cmdline"]):
        if name == p.info['name'] or \
                p.info['exe'] and os.path.basename(p.info['exe']) == name or \
                p.info['cmdline'] and p.info['cmdline'][0] == name:
            ls.append(p)
    return ls[0]

if __name__ == "__main__":
    from VISUALIZE.mcom import mcom
    mcv = mcom( path='TEMP', digit=-1, rapid_flush=True, draw_mode='Img' )
    def main(root_name = 'UE4Editor.exe'):
        # Sample the UE editor's RSS (GiB) and record it to the mcom plotter.
        pid = find_procs_by_name(root_name)
        mem = (pid.memory_info().rss / 1024 / 1024 / 1024)
        mcv.rec(mem, 'mem')
        mcv.rec_show()

if __name__ == "__main__":
    while True:
        main()
        time.sleep(10)   # (translated) original comment says "once every ten minutes", actual sleep is 10 s
        # time.sleep(300) # (translated) once every ten minutes

================================================ FILE: PythonExample/hmp_minimal_modules/UTIL/memleak_finder.py ================================================
from pympler import tracker
tr = tracker.SummaryTracker()

def memdb_print_diff():
    # Print object-count/size deltas since the previous call (pympler tracker).
    tr.print_diff()

================================================ FILE: PythonExample/hmp_minimal_modules/UTIL/mprofile.py ================================================
import subprocess
import threading
import copy, os
import time
import json
from UTIL.colorful import *
# test sync to github
# ubuntu command to kill process: kill -9 $(ps -ef | grep fuqingxu |grep python | grep -v grep | awk '{print $ 2}')

arg_base = ['python', 'main.py']
log_dir = '%s/'%time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime())
run_group = "bench"
# base_conf = 'train.json'
n_run = 4
n_run_mode = ['local', 'remote']
# per-run overrides; each list must have exactly n_run entries
conf_override = {
    "config.py->GlobalConfig-->note": [
        "train_origin_T(5itf) t5",
        "train_origin_T(5itf) t6",
        "train_origin_T(5itf) t7",
        "train_origin_T(5itf) t8",
    ],
    "MISSION.collective_assult.collective_assult_parallel_run.py->ScenarioConfig-->random_jam_prob": [
        0.05,
        0.05,
        0.05,
        0.05,
    ],
    "config.py->GlobalConfig-->seed": [
        22222221,
        22222222,
        22222223,
        22222224,
    ],
    "config.py->GlobalConfig-->device": [
        "cuda:0",
        "cuda:1",
        "cuda:2",
        "cuda:3",
    ],
    "config.py->GlobalConfig-->gpu_party": [
        "off",
        "off",
        "off",
        "off",
    ],
}
# template config; conf_override entries are patched in per run below
base_conf = {
    "config.py->GlobalConfig": {
        # please checkout config.py for information
        "note": "example experiment",               # in case you forget the purpose of this trainning session, write a note
        "env_name": "collective_assult",            # which environment, see ./MISSION/env_router.py
        "env_path": "MISSION.collective_assult",    # path of environment
        "draw_mode": "Img",                         # activate data plotting (Tensorboard is not used because I do not like it)
        "num_threads": "64",                        # run N parallel envs, a 'env' is refered to as a 'thread'
        "report_reward_interval": "64",             # reporting interval
        "test_interval": "2048",                    # test every $test_interval episode
        "fold": "1",                                # this 'folding' is designed for IPC efficiency, you can thank python GIL for such a strange design...
        "seed": 22222222,                           # seed controls pytorch and numpy
        "backup_files": [
            # backup files, pack them up
            "example.jsonc",
            "ALGORITHM/conc",
            "MISSION/collective_assult/envs/collective_assult_env.py"
        ],
        "device": "cuda:0",                         # choose from 'cpu' (no GPU), 'cuda' (auto select GPU), 'cuda:3' (manual select GPU)
        # GPU memory is precious! assign multiple training process to a 'party', then they will share GPU memory
        "gpu_party": "Cuda0-Party0",                # default is 'off',
        "upload_after_test": True
    },
    "UTIL.exp_upload.py->DataCentralServer": {
        "addr": "172.18.112.16",
        "usr": "fuqingxu",
        "pwd": "clara"
    },
    "MISSION.collective_assult.collective_assult_parallel_run.py->ScenarioConfig": {
        # please checkout ./MISSION/collective_assult/collective_assult_parallel_run.py for information
        "size": "5",
        "random_jam_prob": 0.05,
        "introduce_terrain": "True",
        "terrain_parameters": [ 0.05, 0.2 ],
        "num_steps": "180",
        "render": "False",
        "render_with_unity": "False",
        "MCOM_DEBUG": "False",
        "render_ip_with_unity": "cn-cd-dx-1.natfrp.cloud:55861",
        "half_death_reward": "True",
        "TEAM_NAMES": [ "ALGORITHM.conc.foundation->ReinforceAlgorithmFoundation" ]
    },
    "ALGORITHM.conc.foundation.py->AlgorithmConfig": {
        "n_focus_on": 2,
        "actor_attn_mod": "False",
        "extral_train_loop": "False",
        "lr": 0.0005,
        "ppo_epoch": 24,
        "train_traj_needed": "64",
        "load_checkpoint": False
    }
}

# (translated assert message: "underscores do not display well")
assert '_' not in run_group, ('下划线的显示效果不好')

exp_log_dir = log_dir+'exp_log'
if not os.path.exists('PROFILE/%s'%exp_log_dir):
    os.makedirs('PROFILE/%s'%exp_log_dir)
exp_json_dir = log_dir+'exp_json'
if not os.path.exists('PROFILE/%s'%exp_json_dir):
    os.makedirs('PROFILE/%s'%exp_json_dir)

# materialize one patched json config per run
new_json_paths = []
for i in range(n_run):
    conf = copy.deepcopy(base_conf)
    new_json_path = 'PROFILE/%s/run-%d.json'%(exp_json_dir, i+1)
    for key in conf_override:
        # (translated assert message: "check whether n_run matches")
        assert n_run == len(conf_override[key]), ('检查!n_run是否对应')
        tree_path, item = key.split('-->')
        conf[tree_path][item] = conf_override[key][i]
    with open(new_json_path,'w') as f:
        json.dump(conf, f, indent=4)
        print(conf)
    new_json_paths.append(new_json_path)

# build the full command line for each run
final_arg_list = []
printX = [print红,print绿,print黄,print蓝,print紫,print靛,print亮红,print亮绿,print亮黄,print亮蓝,print亮紫,print亮靛]
for ith_run in range(n_run):
    final_arg = copy.deepcopy(arg_base)
    final_arg.append('--cfg')
    final_arg.append(new_json_paths[ith_run])
    final_arg_list.append(final_arg)
print('')

def worker(ith_run):
    # Launch one training run as a subprocess; redirect stdout/stderr into its log file.
    log_path = open('PROFILE/%s/run-%d.log'%(exp_log_dir, ith_run+1), 'w+')
    printX[ith_run%len(printX)](final_arg_list[ith_run])
    res = subprocess.run(final_arg_list[ith_run], stdout=log_path, stderr=log_path)
    print('worker end')

def clean_process(pid):
    # Terminate (then kill) every descendant of `pid`, then kill `pid` itself.
    import psutil
    parent = psutil.Process(pid)
    for child in parent.children(recursive=True):
        try:
            print亮红('sending Terminate signal to', child)
            child.terminate()
            time.sleep(5)
            print亮红('sending Kill signal to', child)
            child.kill()
        except: pass
    parent.kill()

def clean_up():
    print亮红('clean up!')
    parent_pid = os.getpid()   # my example
    clean_process(parent_pid)

if __name__ == '__main__':
    # (translated prompts: "confirm execution?")
    input('确认执行?')
    input('确认执行!')
    t = 0
    while (t >= 0):
        # (translated: "countdown to start")
        print('运行倒计时:', t)
        time.sleep(1)
        t -= 1
    threads = [ threading.Thread( target=worker,args=(ith_run,) ) for ith_run in range(n_run) ]
    for thread in threads:
        thread.setDaemon(True)
        thread.start()
        # (translated: staggered start)
        print('错峰执行,启动', thread)
        DELAY = 3
        for i in range(DELAY):
            print('\r 错峰执行,启动倒计时%d '%(DELAY-i), end='', flush=True)
            time.sleep(1)
    from atexit import register
    register(clean_up)
    while True:
        is_alive = [thread.is_alive() for thread in threads]
        if any(is_alive):
            time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
            print(time_now, 'I am still running!', is_alive)
            print靛('current scipt:%s, current log:%s'%(os.path.abspath(__file__), 'PROFILE/%s/run-%d.log'%(exp_log_dir, ith_run+1)))
            time.sleep(60)
        else:
            break
    print('[profile] All task done!')
    for thread in threads:
        thread.join()

================================================ FILE: PythonExample/hmp_minimal_modules/UTIL/mserver_launcher.sh ================================================
byobu new-session -d -s $USER

================================================ FILE: PythonExample/hmp_minimal_modules/UTIL/network.py ================================================
import socket, threading, pickle, uuid, os, atexit, time, json, psutil
from UTIL.file_lock import FileLock

# registry file recording ports handed out by find_free_port_no_repeat()
port_finder = os.path.expanduser('~/HmapTemp') + '/PortFinder/find_free_port_no_repeat.json'

def check_pid(pid):
    # True when a process with this pid exists.
    return psutil.pid_exists(pid)
    # return True
    # """ Check For the existence of a unix pid. """
    # try:
    #     os.kill(pid, 0)
    # except OSError:
    #     return False
    # else:
    #     return True

def find_free_port():
    # Ask the OS for an ephemeral free port; note it may be re-taken before use.
    from contextlib import closing
    with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
        s.bind(('', 0))
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        return s.getsockname()[1]

def find_free_port_no_repeat():
    # Find a free port and record it (with our pid) in a lock-protected JSON
    # registry, so concurrent processes never hand out the same port.
    # Returns (port, release_fn); release_fn is also registered via atexit.
    fp = port_finder
    def read():
        # load the registry, creating/resetting it when missing or corrupt
        if not os.path.exists(fp):
            with open(fp, "w") as f:
                pass
        try:
            with open(fp, "r+") as f:
                ports_to_be_taken = json.load(f)
        except:
            ports_to_be_taken = {}
        return ports_to_be_taken
    def write(ports_to_be_taken):
        # clean outdated
        for port in list(ports_to_be_taken.keys()):
            if not check_pid(ports_to_be_taken[port]['pid']):
                ports_to_be_taken.pop(port)
                print('removing dead item', port)
        with open(fp, "w") as f:
            json.dump(ports_to_be_taken, fp=f)
    with FileLock(fp+'.lock'):
        ports_to_be_taken = read()
        while True:
            new_port = find_free_port()
            if str(new_port) not in ports_to_be_taken: break
            else: print('port taken, change another')
        print('find port:', new_port)
        ports_to_be_taken[str(new_port)] = {
            'time': time.time(),
            'pid': os.getpid(),
        }
        write(ports_to_be_taken)
    def release_fn(port):
        # remove our reservation from the registry (under the file lock)
        with FileLock(fp+'.lock'):
            ports_to_be_taken = read()
            if str(port) in ports_to_be_taken:
                ports_to_be_taken.pop(str(port))
            else:
                pass
            write(ports_to_be_taken)
        return release_fn
    import atexit
    atexit.register(release_fn, port=new_port)
    return new_port, release_fn

def get_host_ip():
    # Discover the outbound IP by opening a UDP socket toward a public resolver.
    ip = None
    try:
        s=socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
        s.connect(('8.8.8.8',80))
        # if fail here, please connect Internet to get IP?
        ip=s.getsockname()[0]
    finally:
        s.close()
    return ip

BUFSIZE = 10485760
# ip_port = ('127.0.0.1', 9999)
DEBUG_NETWORK = False

class UdpServer:
    # Minimal UDP server; obj selects payload handling: 'bytes', 'str', or 'pickle'.
    def __init__(self, ip_port, obj='bytes') -> None:
        self.ip_port = ip_port
        self.server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.server.bind(self.ip_port)
        self.most_recent_client = None
        self.use_pickle = (obj=='pickle')
        self.convert_str = (obj=='str')
        return

    def wait_next_dgram(self):
        # Block for one datagram; remember its sender for reply_last_client().
        data, self.most_recent_client = self.server.recvfrom(BUFSIZE)
        if self.convert_str: data = data.decode('utf8')
        if self.use_pickle: data = pickle.loads(data)
        if DEBUG_NETWORK: print('recv from :', self.most_recent_client, ' data :', data)
        return data

    def reply_last_client(self, data):
        # Encode per the configured mode and send back to the last sender.
        assert self.most_recent_client is not None
        if DEBUG_NETWORK: print('reply_last_client :', self.most_recent_client, ' data :', data)
        if self.use_pickle: data = pickle.dumps(data)
        if self.convert_str: data = bytes(data, encoding='utf8')
        self.server.sendto(data, self.most_recent_client)
        return

    def __del__(self):
        self.server.close()
        return

class UdpTargetedClient:
    # UDP client bound to one fixed target address (same obj modes as UdpServer).
    def __init__(self, target_ip_port, obj='bytes') -> None:
        self.target_ip_port = target_ip_port
        self.client = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
        self.use_pickle = (obj=='pickle')
        self.convert_str = (obj=='str')
        return

    def send_dgram_to_target(self, data):
        if self.use_pickle: data = pickle.dumps(data)
        if self.convert_str: data = bytes(data, encoding='utf8')
        self.client.sendto(data, self.target_ip_port)
        if DEBUG_NETWORK: print('send_targeted_dgram :', self.target_ip_port, ' data :', data)
        return

    def send_and_wait_reply(self, data):
        # Send one datagram then block for the reply (same encode/decode rules).
        if self.use_pickle: data = pickle.dumps(data)
        if self.convert_str: data = bytes(data, encoding='utf8')
        self.client.sendto(data, self.target_ip_port)
        data, _ = self.client.recvfrom(BUFSIZE)
        if self.convert_str: data = data.decode('utf8')
        if self.use_pickle: data = pickle.loads(data)
        if DEBUG_NETWORK: print('get_reply :', self.target_ip_port, ' data :', data)
        return data
data # /////// test ipv4 udp # import numpy as np # server = UdpServer(ip_port, obj='pickle') # client = UdpTargetedClient(ip_port, obj='pickle') # def server_fn(): # data = server.wait_next_dgram() # server.reply_last_client(np.array([4,5,6])) # def client_fn(): # rep = client.send_and_wait_reply(np.array([1,2,3])) # thread_hi = threading.Thread(target=server_fn) # thread_hello = threading.Thread(target=client_fn) # # 启动线程 # thread_hi.start() # thread_hello.start() class UnixUdpServer: def __init__(self, unix_path, obj='bytes') -> None: try: os.makedirs(os.path.dirname(unix_path)) except: pass self.unix_path = unix_path self.server = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) self.server.bind(self.unix_path) self.most_recent_client = None self.use_pickle = (obj=='pickle') self.convert_str = (obj=='str') return def wait_next_dgram(self): data, self.most_recent_client = self.server.recvfrom(BUFSIZE) if DEBUG_NETWORK: print('self.most_recent_client',self.most_recent_client) if self.convert_str: data = data.decode('utf8') if self.use_pickle: data = pickle.loads(data) if DEBUG_NETWORK: print('recv from :', self.most_recent_client, ' data :', data) return data def reply_last_client(self, data): assert self.most_recent_client is not None if DEBUG_NETWORK: print('reply_last_client :', self.most_recent_client, ' data :', data) if self.use_pickle: data = pickle.dumps(data) if self.convert_str: data = bytes(data, encoding='utf8') self.server.sendto(data, self.most_recent_client) return def __del__(self): self.server.close() os.unlink(self.unix_path) return class UnixUdpTargetedClient: def __init__(self, target_unix_path, self_unix_path=None, obj='bytes') -> None: self.target_unix_path = target_unix_path if self_unix_path is not None: self.self_unix_path = self_unix_path else: self.self_unix_path = target_unix_path+'_client_'+uuid.uuid1().hex[:5] self.client = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) self.client.bind(self.self_unix_path) self.use_pickle = 
(obj=='pickle') self.convert_str = (obj=='str') return def send_dgram_to_target(self, data): if self.use_pickle: data = pickle.dumps(data) if self.convert_str: data = bytes(data, encoding='utf8') self.client.sendto(data, self.target_unix_path) if DEBUG_NETWORK: print('send_targeted_dgram :', self.target_unix_path, ' data :', data) return def send_and_wait_reply(self, data): if self.use_pickle: data = pickle.dumps(data) if self.convert_str: data = bytes(data, encoding='utf8') self.client.sendto(data, self.target_unix_path) data, _ = self.client.recvfrom(BUFSIZE) if self.convert_str: data = data.decode('utf8') if self.use_pickle: data = pickle.loads(data) if DEBUG_NETWORK: print('get_reply :', self.target_unix_path, ' data :', data) return data def __del__(self): self.client.close() os.unlink(self.self_unix_path) return # /////// test unix udp # remote_uuid = uuid.uuid1().hex # use uuid to identify threads # unix_path = 'TEMP/Sockets/unix/%s'%remote_uuid # server = UnixUdpServer(unix_path, obj='pickle') # client = UnixUdpTargetedClient(unix_path, obj='pickle') # def server_fn(): # data = server.wait_next_dgram() # server.reply_last_client(np.array([4,5,6])) # def client_fn(): # rep = client.send_and_wait_reply(np.array([1,2,3])) # thread_hi = threading.Thread(target=server_fn) # thread_hello = threading.Thread(target=client_fn) # # 启动线程 # thread_hi.start() # thread_hello.start() class StreamingPackageSep: def __init__(self): self.buff = [b''] self.myEOF = b'\xaa\x55\xaaHMP\xaa\x55' # those bytes follow 010101 or 101010 pattern # self.myEOF = b'#A5@5A#' # the EOF string for frame seperation def lower_send(self, data, connection): if DEBUG_NETWORK: assert self.myEOF not in data, 'This is (almost) not possible!' 
# --- continuation of StreamingPackageSep.lower_send (header in previous chunk) ---
        data = data + self.myEOF
        if DEBUG_NETWORK: print('data length:', len(data))
        connection.send(data)

    def lowest_recv(self, connection):
        """Blocking receive loop: accumulate raw stream bytes into self.buff,
        split on self.myEOF, and return a list of complete frames once at
        least one frame has been fully assembled."""
        while True:
            recvData = connection.recv(BUFSIZE)
            # ends_with_mark = recvData.endswith(self.myEOF)
            split_res = recvData.split(self.myEOF)
            assert len(split_res) != 0
            if len(split_res) == 1:
                # no EOF marker in this chunk: glue everything onto the open entry
                self.buff[-1] = self.buff[-1] + split_res[0]
                if self.myEOF in self.buff[-1]:
                    # an EOF marker that was split across recv() calls is now whole
                    self.handle_flag_breakdown()
            else:
                n_split = len(split_res)
                for i, r in enumerate(split_res):
                    self.buff[-1] = self.buff[-1] + r  # append fragment to the open entry
                    if i == 0 and (self.myEOF in self.buff[-1]):
                        # after the first append, a marker appeared at the repaired seam!
                        self.handle_flag_breakdown()
                    if i != n_split-1:
                        # starts a new entry
                        self.buff.append(b'')
                    else:
                        # i == n_split-1, which is the last item
                        if r == b'': continue
            if len(self.buff)>=2:
                # frames have matured: hand out all finished entries, keep the open one
                buff_list = self.buff[:-1]
                self.buff = self.buff[-1:]
                return buff_list

    # Fox-Protocal
    def lower_recv(self, connection, expect_single=True):
        """Receive one frame (expect_single=True) or a list of frames. Returns (data, connection)."""
        buff_list = self.lowest_recv(connection)
        if expect_single:
            assert len(buff_list) == 1, ('一次拿到了多帧数据, 但expect_single=True, 触发错误.', buff_list)
            return buff_list[0], connection
        else:
            return buff_list, connection

    def handle_flag_breakdown(self):
        """Repair the case where an EOF marker was split across two recv() chunks:
        the open entry now contains exactly one whole marker — split on it."""
        split_ = self.buff[-1].split(self.myEOF)
        assert len(split_)==2
        self.buff[-1] = split_[0]
        # starts a new entry
        self.buff.append(b'')
        self.buff[-1] = split_[1]
        return

# send() is used for TCP SOCK_STREAM connected sockets, and sendto() is used for UDP SOCK_DGRAM unconnected datagram sockets
class UnixTcpServerP2P(StreamingPackageSep):
    """Point-to-point TCP server over a unix domain socket (serves a single client)."""
    def __init__(self, unix_path, obj='bytes') -> None:
        super().__init__()
        # best-effort create the parent directory for the socket file
        try: os.makedirs(os.path.dirname(unix_path))
        except: pass
        self.unix_path = unix_path
        self.server = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self.server.bind(self.unix_path)
        self.server.listen()
        self.most_recent_client = None  # the accepted connection, filled lazily
        self.use_pickle = (obj=='pickle')
        self.convert_str = (obj=='str')
        atexit.register(self.__del__)  # make sure the socket file is removed on exit

    def accept_conn(self):
        """Accept and return a raw connection (bypasses most_recent_client bookkeeping)."""
        conn, _ = self.server.accept()
# --- continuation of UnixTcpServerP2P.accept_conn (header in previous chunk) ---
        return conn

    def wait_next_dgram(self):
        """Accept the client on first call, then block for the next framed message and decode it."""
        if self.most_recent_client is None:
            self.most_recent_client, _ = self.server.accept()
        data, self.most_recent_client = self.lower_recv(self.most_recent_client)
        if DEBUG_NETWORK: print('self.most_recent_client',self.most_recent_client)
        if self.convert_str: data = data.decode('utf8')
        if self.use_pickle: data = pickle.loads(data)
        if DEBUG_NETWORK: print('recv from :', self.most_recent_client, ' data :', data)
        return data

    def reply_last_client(self, data):
        """Encode per codec mode and send one framed reply to the connected client."""
        assert self.most_recent_client is not None
        if DEBUG_NETWORK: print('reply_last_client :', self.most_recent_client, ' data :', data)
        if self.use_pickle: data = pickle.dumps(data)
        if self.convert_str: data = bytes(data, encoding='utf8')
        self.lower_send(data, self.most_recent_client)
        return

    def __del__(self):
        # close the listening socket and best-effort remove the socket file
        self.server.close()
        try: os.unlink(self.unix_path)
        except: pass
        return

class UnixTcpServerMultiClient(StreamingPackageSep):
    """Unix-socket TCP server that serves each accepted client in its own daemon thread.

    NOTE(review): all client threads share the single self.buff inherited from
    StreamingPackageSep (used inside lower_recv), so concurrent clients may
    interleave frame reassembly — confirm intended usage is effectively one
    active client at a time.
    """
    def __init__(self, unix_path, obj='bytes') -> None:
        super().__init__()
        # best-effort create the parent directory for the socket file
        try: os.makedirs(os.path.dirname(unix_path))
        except: pass
        self.unix_path = unix_path
        self.server = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self.server.bind(self.unix_path)
        self.server.listen()
        self.most_recent_client = None
        self.use_pickle = (obj=='pickle')
        self.convert_str = (obj=='str')
        # request hook: receives the decoded payload, returns the reply (identity by default)
        self.on_receive_data = lambda data: data
        atexit.register(self.__del__)

    def serve_clients(self, most_recent_client):
        """Per-client loop: recv -> decode -> on_receive_data -> encode -> reply."""
        while True:
            data, most_recent_client = self.lower_recv(most_recent_client)
            if self.convert_str: data = data.decode('utf8')
            if self.use_pickle: data = pickle.loads(data)
            reply = self.on_receive_data(data)
            if self.use_pickle: reply = pickle.dumps(reply)
            if self.convert_str: reply = bytes(reply, encoding='utf8')
            self.lower_send(reply, most_recent_client)
            if data == 'offline': break  # client requested disconnect

    def be_online(self):
        """Accept loop: spawn one daemon thread per incoming connection (never returns)."""
        while True:
            most_recent_client, _ = self.server.accept()
            t = threading.Thread(target=self.serve_clients, args=(most_recent_client, ))
            t.daemon = True
            t.start()

    def __del__(self):
self.server.close() try: os.unlink(self.unix_path) except: pass return class UnixTcpClientP2P(StreamingPackageSep): def __init__(self, target_unix_path, self_unix_path=None, obj='bytes') -> None: super().__init__() self.target_unix_path = target_unix_path if self_unix_path is not None: self.self_unix_path = self_unix_path else: self.self_unix_path = target_unix_path+'_client_'+uuid.uuid1().hex[:5] self.client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) self.client.bind(self.self_unix_path) self.use_pickle = (obj=='pickle') self.convert_str = (obj=='str') self.connected = False atexit.register(self.__del__) def send_dgram_to_target(self, data): if self.use_pickle: data = pickle.dumps(data) if self.convert_str: data = bytes(data, encoding='utf8') if not self.connected: self.client.connect(self.target_unix_path); self.connected = True self.lower_send(data, self.client) if DEBUG_NETWORK: print('send_targeted_dgram :', self.client, ' data :', data) return def send_and_wait_reply(self, data): if self.use_pickle: data = pickle.dumps(data) if self.convert_str: data = bytes(data, encoding='utf8') if not self.connected: self.client.connect(self.target_unix_path); self.connected = True self.lower_send(data, self.client) data, _ = self.lower_recv(self.client) if self.convert_str: data = data.decode('utf8') if self.use_pickle: data = pickle.loads(data) if DEBUG_NETWORK: print('get_reply :', self.client, ' data :', data) return data def __del__(self): self.client.close() os.unlink(self.self_unix_path) return ''' remote_uuid = uuid.uuid1().hex # use uuid to identify threads unix_path = 'TEMP/Sockets/unix/%s'%remote_uuid server = UnixTcpServerP2P(unix_path, obj='pickle') client = UnixTcpClientP2P(unix_path, obj='pickle') def server_fn(): # data = server.wait_next_dgram() # server.reply_last_client(np.array([4,5,6])) while 1: data = server.wait_next_dgram() server.reply_last_client(data) def client_fn(): # rep = client.send_and_wait_reply(np.array([1,2,3])) while True: buf 
= np.random.rand(100,1000) rep = client.send_and_wait_reply(buf) assert (buf==rep).all() print('成功') thread_hi = threading.Thread(target=server_fn) thread_hello = threading.Thread(target=client_fn) # 启动线程 thread_hi.start() thread_hello.start() ''' # send() is used for TCP SOCK_STREAM connected sockets, and sendto() is used for UDP SOCK_DGRAM unconnected datagram sockets class TcpServerP2P(StreamingPackageSep): def __init__(self, ip_port, obj='bytes') -> None: super().__init__() self.ip_port = ip_port self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.server.bind(self.ip_port) self.server.listen() self.most_recent_client = None self.use_pickle = (obj=='pickle') self.convert_str = (obj=='str') atexit.register(self.__del__) def accept_conn(self): conn, _ = self.server.accept() return conn def manual_wait_connection(self): if self.most_recent_client is None: self.most_recent_client, _ = self.server.accept() return def wait_next_dgram(self): if self.most_recent_client is None: self.most_recent_client, _ = self.server.accept() data, self.most_recent_client = self.lower_recv(self.most_recent_client) if DEBUG_NETWORK: print('self.most_recent_client',self.most_recent_client) if self.convert_str: data = data.decode('utf8') if self.use_pickle: data = pickle.loads(data) if DEBUG_NETWORK: print('recv from :', self.most_recent_client, ' data :', data) return data def wait_multi_dgrams(self): if self.most_recent_client is None: self.most_recent_client, _ = self.server.accept() data_list, self.most_recent_client = self.lower_recv(self.most_recent_client, expect_single=False) if DEBUG_NETWORK: print('self.most_recent_client',self.most_recent_client) if self.convert_str: data_list = [data.decode('utf8') for data in data_list] if self.use_pickle: data_list = [pickle.loads(data) for data in data_list] if DEBUG_NETWORK: print('recv from :', self.most_recent_client, ' data_list :', data_list) return data_list def reply_last_client(self, data): assert 
self.most_recent_client is not None if DEBUG_NETWORK: print('reply_last_client :', self.most_recent_client, ' data :', data) if self.use_pickle: data = pickle.dumps(data) if self.convert_str: data = bytes(data, encoding='utf8') self.lower_send(data, self.most_recent_client) return def __del__(self): self.close() return def close(self): self.server.close() class TcpClientP2P(StreamingPackageSep): def __init__(self, target_ip_port, self_ip_port=None, obj='bytes') -> None: super().__init__() self.target_ip_port = target_ip_port self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.use_pickle = (obj=='pickle') self.convert_str = (obj=='str') self.connected = False atexit.register(self.__del__) def send_dgram_to_target(self, data): if self.use_pickle: data = pickle.dumps(data) if self.convert_str: data = bytes(data, encoding='utf8') if not self.connected: self.client.connect(self.target_ip_port); self.connected = True self.lower_send(data, self.client) if DEBUG_NETWORK: print('send_targeted_dgram :', self.client, ' data :', data) return def manual_connect(self): if not self.connected: self.client.connect(self.target_ip_port); self.connected = True def send_and_wait_reply(self, data): if self.use_pickle: data = pickle.dumps(data) if self.convert_str: data = bytes(data, encoding='utf8') if not self.connected: self.client.connect(self.target_ip_port); self.connected = True self.lower_send(data, self.client) data, _ = self.lower_recv(self.client) if self.convert_str: data = data.decode('utf8') if self.use_pickle: data = pickle.loads(data) if DEBUG_NETWORK: print('get_reply :', self.client, ' data :', data) return data def __del__(self): self.close() return def close(self): self.client.close() ''' ipport = ('127.0.0.1', 25453) server = TcpServerP2P(ipport, obj='pickle') client = TcpClientP2P(ipport, obj='pickle') def server_fn(): data = server.wait_next_dgram() server.reply_last_client(np.array([4,5,6])) def client_fn(): rep = 
client.send_and_wait_reply(np.array([1,2,3])) thread_hi = threading.Thread(target=server_fn) thread_hello = threading.Thread(target=client_fn) # 启动线程 thread_hi.start() thread_hello.start() ''' class TcpClientP2PWithCompress(StreamingPackageSep): def __init__(self, target_ip_port, self_ip_port=None, obj='bytes') -> None: import lz4.block as lz4block self.lz4block = lz4block self.try_decom_usize = 255 super().__init__() self.target_ip_port = target_ip_port self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.use_pickle = (obj=='pickle') assert not (obj=='str') self.connected = False atexit.register(self.__del__) def decompress(self, data): while True: try: decompressed = self.lz4block.decompress(data, uncompressed_size=self.try_decom_usize) return decompressed except: self.try_decom_usize *= 2 if self.try_decom_usize > 10485760: # 10 MB assert False, "compression failure" return None def compress(self, data): compressed = self.lz4block.compress(data, store_size=False) return compressed def send_dgram_to_target(self, data): if self.use_pickle: data = pickle.dumps(data) data = bytes(data, encoding='utf8') if not self.connected: self.client.connect(self.target_ip_port); self.connected = True data = self.compress(data) self.lower_send(data, self.client) if DEBUG_NETWORK: print('send_targeted_dgram :', self.client, ' data :', data) return def manual_connect(self): if not self.connected: self.client.connect(self.target_ip_port); self.connected = True def send_and_wait_reply(self, data): if self.use_pickle: data = pickle.dumps(data) data = bytes(data, encoding='utf8') if not self.connected: self.client.connect(self.target_ip_port); self.connected = True data = self.compress(data) self.lower_send(data, self.client) data, _ = self.lower_recv(self.client) data = self.decompress(data) if self.use_pickle: data = pickle.loads(data) if DEBUG_NETWORK: print('get_reply :', self.client, ' data :', data) return data def __del__(self): self.close() return def 
close(self): self.client.close() class QueueOnTcpClient(): def __init__(self, ip): TCP_IP, TCP_PORT = ip.split(':') TCP_PORT = int(TCP_PORT) ip_port = (TCP_IP, TCP_PORT) self.tcpClientP2P = TcpClientP2P(ip_port, obj='str') self.tcpClientP2P.manual_connect() def send_str(self, b_msg): self.tcpClientP2P.send_dgram_to_target(b_msg) def close(self): self.tcpClientP2P.close() def __del__(self): self.close() class QueueOnTcpServer(): def __init__(self, ip_port): from UTIL.network import TcpServerP2P self.tcpServerP2P = TcpServerP2P(ip_port, obj='str') self.handler = None self.queue = None self.buff = [''] def wait_connection(self): self.tcpServerP2P.manual_wait_connection() t = threading.Thread(target=self.listening_thread) t.daemon = True t.start() def listening_thread(self): while True: buff_list = self.tcpServerP2P.wait_multi_dgrams() if self.handler is not None: self.handler(buff_list) if self.queue is not None: self.queue.put(buff_list) def set_handler(self, handler): self.handler = handler def get_queue(self): import queue self.queue = queue.Queue() return self.queue def recv(self): return def close(self): self.tcpServerP2P.close() def __del__(self): self.close() ================================================ FILE: PythonExample/hmp_minimal_modules/UTIL/pip_find_missing.py ================================================ #coding=utf-8 import glob,os,sys,re,subprocess,platform def print红(*kw): print("\033[0;31m",*kw,"\033[0m") def print绿(*kw): print("\033[0;32m",*kw,"\033[0m") def print黄(*kw): print("\033[0;33m",*kw,"\033[0m") def print蓝(*kw): print("\033[0;34m",*kw,"\033[0m") def print紫(*kw): print("\033[0;35m",*kw,"\033[0m") def print靛(*kw): print("\033[0;36m",*kw,"\033[0m") def printX(*kw): print("\033[0;38m",*kw,"\033[0m") # 用pip执行安装指令 def install(package): try: subprocess.check_call([sys.executable, "-m", "pip", "install", "-i","https://pypi.tuna.tsinghua.edu.cn/simple", "--progress-bar","emoji", "--prefer-binary", package]) except: print红("执行命令 ", "pip", 
"install","-i","https://pypi.tuna.tsinghua.edu.cn/simple", package, "时,抛出错误") pass sys_name = platform.system() if sys_name == "Windows": try: from colorama import init,Fore,Back,Style init(autoreset=False) def print红(*kw): print(Fore.RED,*kw) def print绿(*kw): print(Fore.GREEN,*kw) def print黄(*kw): print(Fore.YELLOW,*kw) def print蓝(*kw): print(Fore.BLUE,*kw) def print紫(*kw): print(Fore.MAGENTA,*kw) def print靛(*kw): print(Fore.CYAN,*kw) except: install('colorama') print('颜色组件安装完成!现在请重新运行!') sys_name.exit(0) """ # step 1, 查询所有子路径.py脚本文件,列表 """ py_script_list = glob.glob('./**/*.py', recursive=True) required = [] local_name_list = {"None":False} 引发连锁错误的包_列表 = {"None":False} """ # step 2, 提取 import 以及 from *** import """ def 是否为工程内的文件交叉调用(包,python_file): 包_org = 包 if '.' not in 包: res = os.path.exists("./"+包+".py") if res: return True,包_org else: return False, 包_org if 包.startswith('.'): 包 = os.path.dirname(python_file).replace("/", ".").replace("..", ".") + 包 包_org = 包 包 = 包.replace(".", "/") res = os.path.exists("./"+包+".py") if res: tmp = 包_org.split(".") if tmp[0]!='': local_name_list[tmp[0]] = True return True, 包_org else: return False, 包_org for python_file in py_script_list: with open(python_file,encoding='UTF-8') as f: lines = f.readlines() for line in lines: if "import" in line or "from" in line: t = line.split() # from 开头 或者 import开头 if t[0] == "import" or t[0] == "from": i = 1 包 = "" for ti in t[1:]: if (ti!="import") and (ti!="as"): 包 = 包 + ti else: break if "," in 包: 包_l = 包.split(",") else: 包_l = [包] for 包 in 包_l: 包_debug = 包 if 包_debug == '.': continue res,包 = 是否为工程内的文件交叉调用(包,python_file) if not res: required.append(包) required = set(required) required = sorted(required) """ # step 3, 尝试import,筛查缺失的包 """ print黄("**************************************************************") print黄("尝试import") # 使用清华镜像 need_fix_cmd_orig = "pip install -i https://pypi.tuna.tsinghua.edu.cn/simple " need_fix_cmd = "pip install -i https://pypi.tuna.tsinghua.edu.cn/simple " 
need_fix_list = [] failed_cmd = [] chain_failed = [] for 包 in required: cmd = "import "+包 try: # 如果这里罕见地报错, # 说明该文件有 import开头的、被“”“包裹的注释, # 找到它,然后删除这个奇葩注释 exec(cmd) except ImportError as error: print红("error trying to do:",cmd,error.msg) error_str = error.msg.split('\'') package_import_error = (len(error_str) >= 2) if not package_import_error: continue 包_error = error_str[1] if '.' in 包: 包_l = 包.split('.') 包_tmp = 包_l[0] # 引发问题的不是这个包本身,而是这个包import其他包,但这个包内引包失败了 # 仅仅是一个连锁错误而已,无需处理 if 包_error != 包: print红("发生连锁引包错误: ",error.msg) chain_failed.append(error.msg) cmd = cmd + "\t\t此项仅仅由连锁引包错误导致: " + error.msg if 包_tmp not in 引发连锁错误的包_列表: 引发连锁错误的包_列表[包_tmp]=True else: # 非连锁,一定是真的缺 引发连锁错误的包_列表[包]=False failed_cmd.append(cmd) if '.' in 包: 包_l = 包.split('.') 包 = 包_l[0] if len(包)>19: # some comment mixed in somehow continue need_fix_list.append(包) except BaseException as error: print红(error) else: print绿("this package is ok:",cmd) need_fix_list = set(need_fix_list) need_fix_list = sorted(need_fix_list) if len(failed_cmd) > 0: print红("以下的包import操作失败") for cmd in failed_cmd: print红(cmd) """ # step 4, 处理缺失的包,并找到对应的pip安装指令 """ term_replace_dict = { "cv2":"opencv-python", "torch":"torch", "mpi":"mpi4py", "MPI":"mpi4py", "mujoco_py":"None", # pip cannot install this???? 
"pybullet_envs":"None", "stable_baselines3":"None", "pyximport":"cython", "PIL":"None", "collective_assult":"None", "gym_fortattack":"None", "multiagent":"None", "z_config":"None", "gym_vecenv":"None" } PIL for inx, 包 in enumerate(need_fix_list): if 包 in term_replace_dict: 包 = term_replace_dict[包] need_fix_list[inx] = 包 if (包 in local_name_list) or (包 in 引发连锁错误的包_列表 and 引发连锁错误的包_列表[包]==True): need_fix_list[inx] = "None" need_fix_list = set(need_fix_list) need_fix_list = sorted(need_fix_list) if len(need_fix_list) == 0: print绿("所有依赖已就绪") exit(0) """ # step 5, 如果有requirement.txt,从中提取出有用的版本信息 """ print黄("**************************************************************") print蓝("requirement.txt中的相关信息") execute_fix = [] if os.path.exists("./requirements.txt"): with open("./requirements.txt",encoding='UTF-8') as f: lines = f.readlines() for line in lines: if line.startswith("-"): print蓝("requirement.txt要求以下版本: --> "+"pip install "+line[:-1]) print蓝("首先git clone,然后找到setup.py的路径,然后执行 pip install --no-deps -e .") continue line_split = line.split("==") if (len(line_split)==2) and (line_split[0] in need_fix_list): print蓝("requirement.txt要求以下版本: --> "+"pip install "+line[:-1]) """ # step 6, 如果需要安装pytorch,gym等特殊包,对应给出安装建议 """ def config_anaconda(): with open(__file__,'r') as f: conda_cmd = f.readlines() condarc_lines = conda_cmd[-18:-2] f = open('./.condarc','w+') f.writelines(condarc_lines) f.close() print黄("**************************************************************") try: conda_env_name = sys.executable.split('/')[-3] except: conda_env_name = sys.executable.split('\\')[-2] for 包 in need_fix_list: if 包 == "torch": print蓝("pytorch需要手动安装,pytorch 的安装方法(选择其一),然后重新运行该脚本:") print蓝("conda install -n %s pytorch torchvision torchaudio cudatoolkit=10.2 -c https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/pytorch/"%conda_env_name) print蓝("conda install -n %s pytorch torchvision torchaudio cudatoolkit=11.0 -c 
https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/pytorch/"%conda_env_name) print蓝("") sys.exit(0) # if 包 == "tensorflow": # print靛("Tensorflow需要手动安装,首先,更换conda源的指令") # config_anaconda() # print靛("cp",os.getcwd()+"/.condarc","~/.condarc") # print靛("然后,安装TF一代的指令") # print靛("conda install -n %s tensorflow-gpu=1.*"%conda_env_name) # sys.exit(0) if (包 is not "None"): need_fix_cmd = need_fix_cmd + 包 + " " execute_fix.append(包) print黄("**************************************************************") print绿(need_fix_cmd) print黄("**************************************************************") """ # step 7, 对于除了特殊包之外的其他软件包,调用pip直接安装 """ print绿("注意!当前的conda环境是:",conda_env_name," 所有操作都将只在该conda环境内生效") input("执行自动安装?") if input("确定执行自动安装?(y/n)")=='y': for 包 in execute_fix: install(包) """ # step 8, 完成任务,取消以下代码的注释,测试pytorch是否工作 """ # import torch # flag = torch.cuda.is_available() # print(flag) # ngpu= 1 # # Decide which device we want to run on # device = torch.device("cuda:0" if (torch.cuda.is_available() and ngpu > 0) else "cpu") # print(device) # print(torch.cuda.get_device_name(0)) # print(torch.rand(3,3).cuda()) ''' 不要修改或者删除以下内容!!有用!! 
channels: - defaults show_channel_urls: true channel_alias: https://mirrors.tuna.tsinghua.edu.cn/anaconda default_channels: - https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/main - https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/free - https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/r - https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/pro - https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/msys2 custom_channels: conda-forge: https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud msys2: https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud bioconda: https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud menpo: https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud pytorch: https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud simpleitk: https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud ''' ================================================ FILE: PythonExample/hmp_minimal_modules/UTIL/shm_env.py ================================================ import numpy as np import time from MISSION.env_router import make_env_function from UTIL.colorful import print亮红 N = lambda x: np.array(x) # Here use a pool of multiprocess workers to control a bundle of environment to sync step # SuperPool.add_target: in each process, initiate a class object named xxxx, # example: # self.SuperPool.add_target(name='env', lam=EnvWithRay, args_list=env_args_dict_list) # SuperPool.exec_target: in each process, make the object (id by name) to call its method # example: # self.SuperPool.exec_target(name='env', dowhat='step', args_list=actions) # self.SuperPool.exec_target(name='env', dowhat='reset') # ! 
# this class executes in the child process
# Ray is much slower compare to our shm/pipe solution,
# we don't use it any more despite the class name
class EnvWithRay(object):
    """Child-process wrapper around one environment instance.

    Responsibilities visible in this code:
    - build the env via make_env_function(env_name, rank=proc_index)
    - auto-reset the env on termination, stashing the terminal obs into info['obs-echo']
    - replay the previous transition (self.echo) when the incoming action is all-NaN
      (the pool's convention for a paused env thread)
    """
    def __init__(self, env_args_dict):
        env_name = env_args_dict['env_name']
        proc_index = env_args_dict['proc_index']
        env_init_fn = make_env_function(env_name=env_name, rank=proc_index)
        self.env = env_init_fn()    # finally the env is initialized
        self.observation_space = self.env.observation_space
        self.action_space = self.env.action_space
        # last (ob, reward, done, info) produced by step(); replayed when paused
        self.echo = None

    def __del__(self):
        # print亮红('[shm_env.py] exec EnvWithRay exit')
        if hasattr(self,'env'): del self.env

    def step(self, act):
        """Step the wrapped env; handle pause (NaN action) and auto-reset on done."""
        if np.isnan(act).any():
            # env is paused, skip by returning previous obs
            assert self.echo is not None
            return self.echo
        # ! step here
        ob, reward, done, info = self.env.step(act)
        if isinstance(ob, list):
            print('warning, ob is list, which is low-efficient')
            ob = np.array(ob, dtype=object)
        if np.any(done):
            # if the environment is terminated,
            # first, put terminal obs into 'info'
            if info is None:
                info = {'obs-echo':ob}
            else:
                assert isinstance(info, dict), ('oh? info is not dictionary? did not expect that...')
                info.update({'obs-echo': ob.copy()})
            # second, automatically reset env
            ob = self.env.reset()
            if isinstance(ob, tuple):
                # some env like starcraft return (ob, info) tuple at reset
                # have info, then update info
                ob, info_reset = ob
                info = self.dict_update(info, info_reset)
        # preserve an echo here,
        # will be use to handle unexpected env pause
        self.echo = [ob, reward, done, info]
        # give everything back to main process
        return (ob, reward, done, info)

    def dict_update(self, info, info_reset):
        """Merge reset-info into step-info; colliding keys from the step are
        preserved under a '<key>-echo' alias before being overwritten."""
        for key in info_reset:
            if key in info:
                info[key+'-echo'] = info.pop(key)
        info.update(info_reset)
        return info

    # thin pass-through delegates to the wrapped env
    def reset(self):
        return self.env.reset()
    def sleep(self):
        return self.env.sleep()
    def render(self):
        return self.env.render()
    def close(self):
        return None
    def get_act_space(self):
        return self.action_space
    def get_obs_space(self):
        return self.observation_space
    # string fallbacks for spaces that cannot be pickled across processes
    def get_act_space_str(self):
        return str(self.action_space)
    def get_obs_space_str(self):
        return str(self.observation_space)
# !
# this class executes in the main process
class SuperpoolEnv(object):
    """Main-process facade over a SuperPool of EnvWithRay workers: vectorized
    step/reset across num_envs environments living in child processes."""
    def __init__(self, process_pool, env_args_dict_list, spaces=None):
        self.SuperPool = process_pool
        self.num_envs = len(env_args_dict_list)
        self.env_name_marker = env_args_dict_list[0][0]['marker']
        self.env = 'env' + self.env_name_marker  # target name registered in the pool
        self.SuperPool.add_target(name=self.env, lam=EnvWithRay, args_list=env_args_dict_list)
        try:
            self.observation_space = self.SuperPool.exec_target(name=self.env, dowhat='get_obs_space')[0]
            self.action_space = self.SuperPool.exec_target(name=self.env, dowhat='get_act_space')[0]
        except:
            # some gym spaces are not picklable; fall back to their str() form
            print亮红('Gym Space is unable to transfer between processes, using string instead')
            self.observation_space = self.SuperPool.exec_target(name=self.env, dowhat='get_obs_space_str')[0]
            self.action_space = self.SuperPool.exec_target(name=self.env, dowhat='get_act_space_str')[0]
        # self.observation_space = self.SuperPool.exec_target(name=self.env, dowhat='get_obs_space_str')[0]
        # self.action_space = self.SuperPool.exec_target(name=self.env, dowhat='get_act_space_str')[0]
        return

    def get_space(self):
        return {'obs_space': self.observation_space, 'act_space': self.action_space}

    def step(self, actions):
        """Step all envs in parallel; returns stacked (obs, rews, dones, infos) arrays."""
        # ENV_PAUSE = [np.isnan(thread_act).any() for thread_act in actions]
        results = self.SuperPool.exec_target(name=self.env, dowhat='step', args_list=actions)
        obs, rews, dones, infos = zip(*results)
        # if any(ENV_PAUSE):
        #     assert not all(ENV_PAUSE)
        #     return self.stack(ENV_PAUSE, obs, rews, dones, infos)
        # else:
        try:
            return np.stack(obs), np.stack(rews), np.stack(dones), np.stack(infos)
        except:
            assert False, ('unalign! ',obs, rews, dones)

    def reset(self):
        """Reset all envs; supports envs whose reset returns (ob, info) tuples."""
        results = self.SuperPool.exec_target(name=self.env, dowhat='reset')
        # [ env.reset.remote() for env in self.ray_env_vector])
        if isinstance(results[0], tuple):
            obs, infos = zip(*results)
            return np.stack(obs), np.stack(infos)
        else:
            return np.stack(results)

    def sleep(self):
        self.SuperPool.exec_target(name=self.env, dowhat='sleep')



================================================
FILE: PythonExample/hmp_minimal_modules/UTIL/shm_pool.pyx
================================================
"""
    Author: Fu Qingxu,CASIA
    Description: Efficient parallel execting tool, it resembles Ray but:
        1.optimized for single machine using shared memory
        2.optimized for numpy ndarray
        3.use semaphore for IPC
        4.faster!
    Note: SHARE_BUF_SIZE: shared memory size, 10MB per process
"""
import time, pickle, platform, setproctitle, numpy, copy, traceback
from multiprocessing import Process, RawValue, Semaphore
from multiprocessing import shared_memory
from .hmp_daemon import kill_process_and_its_children
from ctypes import c_bool, c_uint32
from sys import stdout

SHARE_BUF_SIZE = 10485760   # 10 MB for parameter buffer
REGULAR_BUF_SIZE = 500000   # The non-numpy content max buffer size
# traffic-light states for the shared-memory handshake between parent and child
TRAFFIC_LIGHT_ERROR = 2
TRAFFIC_LIGHT_CHILD_BUSY = 1
TRAFFIC_LIGHT_CHILD_FREE = 0

# define Python user-defined exceptions
class ChildExitException(Exception):
    pass

def print_red(*kw,**kargs):
    print("\033[1;31m",*kw,"\033[0m",**kargs)
def print_green(*kw,**kargs):
    print("\033[1;32m",*kw,"\033[0m",**kargs)
# no color codes when stdout is not a terminal
if not stdout.isatty():
    print_green = print_red = print

# optimize share mem IO for numpy ndarray:
# placeholder left in the pickled object tree pointing at a shm byte range
class ndarray_indicator():
    def __init__(self, shape, dtype, shm_start, shm_end):
        self.shape = shape
        self.dtype = dtype
        self.shm_start = shm_start
        self.shm_end = shm_end
        self.count = (self.shm_end-self.shm_start)//self.dtype.itemsize

# optimize share mem IO for numpy ndarray:
# copy one array into shm at shm_pointer; return its indicator and the new pointer
def convert_ndarray(numpy_ndarray, shm_pointer, shm):
    nbyte = numpy_ndarray.nbytes
    shape = numpy_ndarray.shape
    dtype = numpy_ndarray.dtype
    assert shm_pointer+nbyte < SHARE_BUF_SIZE, ('share memory overflow, need at least %d, yet only have %d'%(shm_pointer+nbyte, SHARE_BUF_SIZE))
    shm_array_object = numpy.ndarray(shape, dtype=dtype, buffer=shm[shm_pointer:shm_pointer+nbyte])
    shm_array_object[:] = numpy_ndarray[:]
    NID = ndarray_indicator(shape, dtype, shm_pointer, shm_pointer+nbyte)
    shm_pointer = shm_pointer+nbyte
    return NID, shm_pointer

# optimize share mem IO for numpy ndarray:
# recursively walk lists/dicts/object-arrays, replacing large ndarrays
# (>= 64 bytes, non-object dtype) with ndarray_indicator placeholders
def deepin(obj, shm, shm_pointer):
    if isinstance(obj, list): iterator_ = enumerate(obj)
    elif isinstance(obj, dict): iterator_ = obj.items()
    elif isinstance(obj, numpy.ndarray) and obj.dtype=='object': iterator_ = enumerate(obj)
    else:
        assert not isinstance(obj, tuple)
        return shm_pointer
    for k, v in iterator_:
        if isinstance(v, (list,dict)) and len(v)>0:
            shm_pointer = deepin(v, shm, shm_pointer)
        elif isinstance(v, tuple):
            # tuples are immutable: convert to list, recurse, convert back
            item2 = list(v)
            shm_pointer = deepin(item2, shm, shm_pointer)
            obj[k] = tuple(item2)
        elif isinstance(v, numpy.ndarray) and len(v)>0:
            if v.dtype == 'object':
                shm_pointer = deepin(v, shm, shm_pointer)
            elif v.nbytes < 64:
                pass  # tiny arrays travel cheaper inside the pickle itself
            else:
                NID, shm_pointer = convert_ndarray(v, shm_pointer, shm)
                obj[k] = NID
        else:
            continue
    return shm_pointer

# optimize share mem IO for numpy ndarray (entry point for the send side)
def opti_numpy_object(obj, shm, shm_pointer=REGULAR_BUF_SIZE):
    shm_pointer_terminal = deepin(obj, shm, shm_pointer)
    return obj, shm_pointer_terminal

# optimize share mem IO for numpy ndarray:
# inverse walk — materialize ndarray_indicator placeholders back into arrays
# viewing the shared-memory buffer (zero-copy via numpy.frombuffer)
def reverse_deepin(obj, shm):
    if isinstance(obj, list): iterator_ = enumerate(obj)
    elif isinstance(obj, dict): iterator_ = obj.items()
    elif isinstance(obj, numpy.ndarray) and obj.dtype == 'object': iterator_ = enumerate(obj)
    else: return
    for k, v in iterator_:
        if isinstance(v, (list,dict)) and len(v)>0:
            reverse_deepin(v, shm)
        if isinstance(v, numpy.ndarray) and v.dtype == 'object' and len(v)>0:
            reverse_deepin(v, shm)
        elif isinstance(v, tuple):
            item2 = list(v)
            reverse_deepin(item2, shm)
            obj[k] = tuple(item2)
        elif isinstance(v, ndarray_indicator):
            obj[k] = numpy.frombuffer(shm, dtype=v.dtype, offset=v.shm_start, count=v.count).reshape(v.shape)
    return

# optimize share mem IO for numpy ndarray (entry point for the receive side)
def reverse_opti_numpy_object(obj, shm):
    reverse_deepin(obj, shm)
    return obj

class SuperProc(Process):
    """ Child process worker (efficient distributed worker) """
    # initialize traffic IO
    def __init__(self, index, smib, smiobli, smtl, buf_size_limit, base_seed, sem_push, sem_pull):
        super(SuperProc, self).__init__()
        self.shared_memory = smib                       # SharedMemory handle
        self.shared_memory_io_buffer = smib.buf         # raw byte buffer view
        self.shared_memory_io_buffer_len_indicator = smiobli  # pickled-payload length
        self.shared_memory_traffic_light = smtl         # handshake flag (see TRAFFIC_LIGHT_*)
        self.buf_size_limit = buf_size_limit
        self.local_seed = index + base_seed             # per-worker numpy seed
        self.index = index
        self.sem_push = sem_push                        # parent -> child "request ready"
        self.sem_pull = sem_pull                        # child -> parent "result ready"
        self.target_tracker = []                        # names of objects hosted in this worker

    # on parent exit
    def __del__(self):
        if hasattr(self,'_deleted_'): return # avoid exit twice
        else: self._deleted_ = True # avoid exit twice
        self.shared_memory.close()
        for target_name in self.target_tracker:
            setattr(self, target_name, None) # GC by clearing the pointer.
# force terminate all child process try: kill_process_and_its_children(self) except Exception as e: print_red('[shm_pool]: error occur when kill_process_and_its_children:\n', e) # add any class level objects def automatic_generation(self, name, gen_fn, *arg): setattr(self, name, gen_fn(*arg)) # add any class level objects def add_targets(self, new_tarprepare_args): for new_target_arg in new_tarprepare_args: name, gen_fn, arg = new_target_arg if name not in self.target_tracker: self.target_tracker.append(name) if arg is None: self.automatic_generation(name, gen_fn) elif isinstance(arg, tuple): self.automatic_generation(name, gen_fn, *arg) else: self.automatic_generation(name, gen_fn, arg) # execute any class method, return the results def execute_target(self, recv_args): res_list = [None] * len(recv_args) for i, recv_arg in enumerate(recv_args): name, dowhat, arg = recv_arg if dowhat == 'None': continue if arg is None: res = getattr(getattr(self, name), dowhat)() elif isinstance(arg, tuple): res = getattr(getattr(self, name), dowhat)(*arg) else: res = getattr(getattr(self, name), dowhat)(arg) res_list[i] = res return res_list # inf loop, controlled / blocked by semaphore def run(self): # reset numpy seed import numpy; numpy.random.seed(self.local_seed) # set top process title setproctitle.setproctitle('HmapShmPoolWorker_%d'%self.index) try: while True: recv_args = self._recv_squence() # block and wait incoming req if not isinstance(recv_args, list): # not list object, switch to helper channel if recv_args == 0: self._set_done() self.add_targets(self._recv_squence()) self._set_done() elif recv_args == -1: self._set_done() # termination signal break else: assert False, "unknown command" continue else: # if list, execute target result = self.execute_target(recv_args) # return the results (self._set_done() is called inside) self._send_squence(result) except KeyboardInterrupt: # 'child KeyboardInterrupt: close unlink' self._demand_exit() self.__del__() except: 
print_red(traceback.format_exc(), flush=True) self._demand_exit() self.__del__() def _demand_exit(self): self.shared_memory_traffic_light.value = TRAFFIC_LIGHT_ERROR # CORE! the job is done, waiting for next one self.sem_pull.release() # block and wait incoming req def _recv_squence(self): self.sem_push.acquire() assert self.shared_memory_traffic_light.value == TRAFFIC_LIGHT_CHILD_BUSY bufLen = self.shared_memory_io_buffer_len_indicator.value recv_args = pickle.loads(self.shared_memory_io_buffer[:bufLen]) recv_args = reverse_opti_numpy_object(recv_args, shm=self.shared_memory_io_buffer) return recv_args # return results def _send_squence(self, send_obj): assert self.shared_memory_traffic_light.value == TRAFFIC_LIGHT_CHILD_BUSY # second prepare parameter send_obj, _ = opti_numpy_object(send_obj, shm=self.shared_memory_io_buffer) picked_obj = pickle.dumps(send_obj, protocol=pickle.HIGHEST_PROTOCOL) lenOfObj = len(picked_obj) assert lenOfObj <= REGULAR_BUF_SIZE, ('The non-numpy content size > 0.5MB, please check!', lenOfObj) self.shared_memory_io_buffer_len_indicator.value = lenOfObj self.shared_memory_io_buffer[:lenOfObj] = picked_obj # then light up the work flag, turn off the processed flag self.shared_memory_traffic_light.value = TRAFFIC_LIGHT_CHILD_FREE # CORE! the job is done, waiting for next one self.sem_pull.release() # set traffic IO flag def _set_done(self): self.shared_memory_traffic_light.value = TRAFFIC_LIGHT_CHILD_FREE # CORE! 
the job is done, waiting for next one self.sem_pull.release() class SmartPool(object): """ Main parallel runner / coodinator """ # setup and spawn workers def __init__(self, proc_num, fold, base_seed=None): self.proc_num = proc_num self.task_fold = fold self.base_seed = int(numpy.random.rand()*1e5) if base_seed is None else base_seed self.buf_size_limit = SHARE_BUF_SIZE # 10 MB for parameter buffer print_green('Linux multi-env using share memory') setproctitle.setproctitle('HmapRootProcess') self.shared_memory_io_buffer_handle = [shared_memory.SharedMemory(create=True, size=SHARE_BUF_SIZE) for _ in range(proc_num)] self.shared_memory_io_buffer_len_indicator = [RawValue(c_uint32, 0) for _ in range(proc_num)] self.shared_memory_traffic_light = [RawValue(c_uint32, False) for _ in range(proc_num)] # time to work flag self.last_time_response_handled = [True for _ in range(proc_num)] # time to work flag self.semaphore_push = [Semaphore(value=0) for _ in range(proc_num)] # time to work flag self.semaphore_pull = Semaphore(value=0) # time to work flag self.proc_pool = [SuperProc(cnt, smib, smiobli, smtl, SHARE_BUF_SIZE, self.base_seed, sem_push, self.semaphore_pull) for cnt, smib, smiobli, smtl, sem_push in zip(range(proc_num), self.shared_memory_io_buffer_handle, self.shared_memory_io_buffer_len_indicator, self.shared_memory_traffic_light, self.semaphore_push )] self.shared_memory_io_buffer = [shm.buf for shm in self.shared_memory_io_buffer_handle] self.t_profile = 0 for proc in self.proc_pool: # proc.daemon = True proc.start() # add class level targets in each worker def add_target(self, name, lam, args_list=None): lam_list = None if isinstance(lam, list): lam_list = lam # send command for workers to wait appending new target for j in range(self.proc_num): self._send_squence(send_obj=0, target_proc=j) self.notify_all_children() for j in range(self.proc_num): self._wait_done(j) for j in range(self.proc_num): tuple_list_to_be_send = [] for i in range(self.task_fold): 
name_fold = name + str(i) args = None if args_list is None else args_list[i + j*self.task_fold] if lam_list is not None: lam = lam_list[i + j*self.task_fold] tuple_list_to_be_send.append((name_fold, lam, args)) self._send_squence(send_obj=tuple_list_to_be_send, target_proc=j) self.notify_all_children() for j in range(self.proc_num): self._wait_done(j) # run class method in each worker def exec_target(self, name, dowhat, args_list = None, index_list = None, ensure_safe = False): if index_list is not None: for j in range(self.proc_num): tuple_list_to_be_send = [] for i in range(self.task_fold): n_thread = i + j*self.task_fold name_fold = name + str(i) if n_thread in index_list: args = None if args_list is None else args_list[index_list.index(n_thread)] tuple_list_to_be_send.append((name_fold, dowhat, args)) else: tuple_list_to_be_send.append((name_fold, 'None', 'None')) self._send_squence(send_obj=tuple_list_to_be_send, target_proc=j, ensure_safe=ensure_safe) self.semaphore_push[j].release() else: # if index_list is None: for j in range(self.proc_num): tuple_list_to_be_send = [] for i in range(self.task_fold): name_fold = name + str(i) args = None if args_list is None else args_list[i + j*self.task_fold] tuple_list_to_be_send.append((name_fold, dowhat, args)) self._send_squence(send_obj=tuple_list_to_be_send, target_proc=j, ensure_safe=ensure_safe) self.semaphore_push[j].release() res_sort = self._recv_squence_all() return res_sort # low-level send def _send_squence(self, send_obj, target_proc, ensure_safe=False): assert self.last_time_response_handled[target_proc] == True send_obj, shm_pointer = opti_numpy_object(send_obj, shm=self.shared_memory_io_buffer[target_proc]) picked_obj = pickle.dumps(send_obj, protocol=pickle.HIGHEST_PROTOCOL) lenOfObj = len(picked_obj) assert lenOfObj <= REGULAR_BUF_SIZE, ('The non-numpy content size > 0.5MB, please check!', lenOfObj) self.shared_memory_io_buffer_len_indicator[target_proc].value = lenOfObj 
self.shared_memory_io_buffer[target_proc][:lenOfObj] = picked_obj self.last_time_response_handled[target_proc] = False # then light up the work flag, turn off the processed flag if ensure_safe and shm_pointer != REGULAR_BUF_SIZE: send_obj = reverse_opti_numpy_object(send_obj, shm=self.shared_memory_io_buffer[target_proc]) self.shared_memory_traffic_light[target_proc].value = TRAFFIC_LIGHT_CHILD_BUSY # low-level recv def _recv_squence_all(self): res_sort = [None] * (self.proc_num*self.task_fold) not_ready = [True] * self.proc_num n_acq = 0 ready_n = 0 while True: self.semaphore_pull.acquire() # wait child process and OS coordination, it will take a moment n_acq += 1 for target_proc, not_r in enumerate(not_ready): if not not_r: continue # finish already if self.shared_memory_traffic_light[target_proc].value == TRAFFIC_LIGHT_CHILD_BUSY: continue # not ready if self.shared_memory_traffic_light[target_proc].value == TRAFFIC_LIGHT_ERROR: raise ChildExitException bufLen = self.shared_memory_io_buffer_len_indicator[target_proc].value recv_obj = pickle.loads(self.shared_memory_io_buffer[target_proc][:bufLen]) recv_obj = reverse_opti_numpy_object(recv_obj, shm=self.shared_memory_io_buffer[target_proc]) self.last_time_response_handled[target_proc] = True res_sort[target_proc*self.task_fold: (target_proc+1)*self.task_fold] = recv_obj not_ready[target_proc] = False ready_n += 1 if ready_n == self.proc_num: break for _ in range(self.proc_num-n_acq): self.semaphore_pull.acquire() # clear semaphore_pull return res_sort # low-level wait def _wait_done(self, target_proc): # used only in add_target self.semaphore_pull.acquire() if self.shared_memory_traffic_light[target_proc].value == TRAFFIC_LIGHT_ERROR: raise ChildExitException self.last_time_response_handled[target_proc] = True # let all workers know about incomming req def notify_all_children(self): for j in range(self.proc_num): self.semaphore_push[j].release() # notify all child process # exit and clean up carefully def 
party_over(self): self.__del__() # exit and clean up carefully def __del__(self): if hasattr(self, 'terminated'): return # traceback.print_exc() print_green('[shm_pool]: executing superpool del') try: for i in range(self.proc_num): self._send_squence(send_obj=-1, target_proc=i) self.notify_all_children() # print('[shm_pool]: self.notify_all_children()') except: pass # print('[shm_pool]: shm.close(); shm.unlink()') for shm in self.shared_memory_io_buffer_handle: try: shm.close(); shm.unlink() except: pass N_SEC_WAIT = 2 for i in range(N_SEC_WAIT): print_red('[shm_pool]: terminate in %d'%(N_SEC_WAIT-i));time.sleep(1) # kill shm_pool's process tree # print_red('[shm_pool]: kill_process_and_its_children(proc)') for proc in self.proc_pool: try: kill_process_and_its_children(proc) except Exception as e: pass # print_red('[shm_pool]: error occur when kill_process_and_its_children:\n', e) print_green('[shm_pool]: __del__ finish') self.terminated = True # To compat Windows, redirect to pipe solution if not platform.system()=="Linux": from UTIL.win_pool import SmartPool ================================================ FILE: PythonExample/hmp_minimal_modules/UTIL/sync_exp.py ================================================ import torch, time import pickle, os from UTIL.colorful import print亮红 from .tensor_ops import __hash__ from UTIL.exp_helper import singleton @singleton class SynWorker: def __init__(self, mod) -> None: self.sychronize_FILE_hashdict = 'TEMP/sychronize_hashdict' self.sychronize_FILE_cnt = 'TEMP/sychronize_cnt' self.mod = mod self.sychronize_internal_hashdict = {} self.sychronize_internal_cnt = {} self.follow_cnt = {} print亮红('warning, SynWorker init, mod is', mod) time.sleep(5) if mod == 'follow': with open(self.sychronize_FILE_hashdict, 'rb') as f: self.sychronize_internal_hashdict = pickle.load(f) with open(self.sychronize_FILE_cnt, 'rb') as f: self.sychronize_internal_cnt = pickle.load(f) else: try: os.remove(self.sychronize_FILE_hashdict) 
os.remove(self.sychronize_FILE_cnt) except: pass def dump_sychronize_data(self): if self.mod == 'follow': return with open(self.sychronize_FILE_hashdict, 'wb+') as f: pickle.dump(self.sychronize_internal_hashdict, f) with open(self.sychronize_FILE_cnt, 'wb+') as f: pickle.dump(self.sychronize_internal_cnt, f) def sychronize_experiment(self, key, data, reset_when_close=False): if self.mod == 'lead': hash_code = __hash__(data) if key not in self.sychronize_internal_hashdict: self.sychronize_internal_cnt[key] = 0 self.sychronize_internal_hashdict[key] = [ { 'hash_code':hash_code, 'data': data, } , ] else: self.sychronize_internal_hashdict[key].append({ 'hash_code':hash_code, 'data': data, }) self.sychronize_internal_cnt[key] += 1 if self.mod == 'follow': hash_code = __hash__(data) if key not in self.follow_cnt: self.follow_cnt[key] = 0 if hash_code != self.sychronize_internal_hashdict[key][self.follow_cnt[key]]['hash_code']: if not (torch.isclose(self.sychronize_internal_hashdict[key][self.follow_cnt[key]]['data'],data).all()) or (not isinstance(data, torch.Tensor)): print('%s: error expected hash: %s, get hash %s, data %s'%(key, self.sychronize_internal_hashdict[key][self.follow_cnt[key]]['hash_code'], hash_code, str(data) )) else: print('%s: error expected hash, but very very close (<1e-5)'%key) if reset_when_close: return data self.follow_cnt[key] += 1 ================================================ FILE: PythonExample/hmp_minimal_modules/UTIL/tensor_ops.py ================================================ import copy, json import numpy as np from functools import lru_cache try: import torch import torch.nn.functional as F except: print('warning, pytorch not installed!') print('警告, 没有安装pytorch, 所有pytorch相关函数不可用!') class torch(): Tensor = Exception from functools import wraps class ConfigCache(object): def __init__(self) -> None: super().__init__() self.init = False def read_cfg(self): from config import GlobalConfig if GlobalConfig.cfg_ready: self.device_ = 
GlobalConfig.device self.use_float64_ = GlobalConfig.use_float64 self.init = True @property def device(self): if not self.init: self.read_cfg() assert self.init, ('cuda_cfg not ready!') return self.device_ @property def use_float64(self): if not self.init: self.read_cfg() assert self.init, ('cuda_cfg not ready!') return self.use_float64_ cuda_cfg = ConfigCache() def pt_inf(): # if not cuda_cfg.init: cuda_cfg.read_cfg() pt_dtype = torch.float64 if cuda_cfg.use_float64 else torch.float32 return torch.tensor(np.inf, dtype=pt_dtype, device=cuda_cfg.device) def pt_nan(): # if not cuda_cfg.init: cuda_cfg.read_cfg() pt_dtype = torch.float64 if cuda_cfg.use_float64 else torch.float32 return torch.tensor(np.nan, dtype=pt_dtype, device=cuda_cfg.device) def vis_mat(mat): mat = mat.astype(np.float) mat = mat - mat.min() mat = mat / mat.max() import matplotlib.pyplot as plt import matplotlib.image as mpimg imgplot = plt.imshow(mat) plt.xlabel("cols, 2rd dim") plt.ylabel("lines, 1st dim") plt.show() """ improve torch.repeat / torch.expand function eg.1 x.shape = (4, 5, 6, 7); insert_dim = -1; n_times=666 y = repeat_at(x, insert_dim, n_times) y.shape = (4, 5, 6, 7, 666) eg.2 x.shape = (4, 5, 6, 7); insert_dim = +1; n_times=666 y = repeat_at(x, insert_dim, n_times) y.shape = (4, 666, 5, 6, 7) """ def repeat_at(tensor, insert_dim, n_times, copy_mem=False): if not isinstance(tensor, torch.Tensor): return np_repeat_at(tensor, insert_dim, n_times) tensor = tensor.unsqueeze(insert_dim) shape = list(tensor.shape) assert shape[insert_dim] == 1 shape[insert_dim] = n_times if copy_mem: tensor.repeat(*shape) return tensor.expand(*shape) def np_repeat_at(array, insert_dim, n_times): array = np.expand_dims(array, insert_dim) return array.repeat(axis=insert_dim, repeats=n_times) def copy_clone(x): if x is None: return None return ( x.clone() if hasattr(x, "clone") else x.copy() if hasattr(x, "copy") else copy.deepcopy(x) ) """ improve np.reshape and torch.view function If a dim is assigned 
with 0, it will keep its original dimension eg.1 x.shape = (4, 5, 6, 7); new_shape = [0, 0, -1] y = my_view(x, new_shape) y.shape = (4, 5, 6*7) eg.2 x.shape = (4, 5, 6, 7); new_shape = [-1, 0, 0] y = my_view(x, new_shape) y.shape = (4*5, 6, 7) eg.3 x.shape = (4, 5, 6); new_shape = [0, 0, -1, 3] y = my_view(x, new_shape) y.shape = [4, 5, 2, 3] eg.4 x.shape = (3, 4, 5, 6); new_shape = [0, 2, -1, 0, 0] y = my_view(x, new_shape) y.shape = [3, 2, 2, 5, 6] eg.5 x.shape = (32, 10, 24); new_shape = [32, 10, 24, 1] y = my_view(x, new_shape) y.shape = [32, 10, 24, 1] Error eg.1 x.shape = (3, 4, 5, 6); new_shape = [0, 2, 0, -1, 0] Error: 2(!=4) and -1 must stick together! Fix 1: new_shape = [0, 2, 2, 0, 0] Fix 2: new_shape = [0, 2, -1, 0, 0] Fix 3: new_shape = [0, -1, 2, 0, 0] After Fix: y.shape = [3, 2, 2, 5, 6] Error eg.2 x.shape = (3, 4, 5, 6); new_shape = [12, 0, -1] Error: 12(!=3) and -1 must stick together! Fix 1: new_shape = [12, 0, 0] Fix 2: new_shape = [12, -1, 6] Fix 3: new_shape = [12, -1, 0] Fix 4: new_shape = [-1, 0, 0] After Fix: y.shape = [12, 5, 6] """ def my_view(x, shape): # fill both way until meet -1 for i, dim in enumerate(shape): if dim == 0: shape[i] = x.shape[i] elif dim == -1: break elif i >= len(x.shape): break # prevent x.shape[i] out of range elif dim != x.shape[i]: break for i in range(len(shape)): if i >= len(x.shape): break # prevent x.shape[ni] out of range ni = -(i + 1) dim = shape[ni] if dim == 0: shape[ni] = x.shape[ni] elif dim == -1: break # print(shape) if isinstance(x, np.ndarray): return x.reshape(*shape) return x.view(*shape) def add_onehot_id_at_last_dim(x): if isinstance(x, np.ndarray): return np_add_onehot_id_at_last_dim(x) _hot_dim = x.shape[-2] _identity = torch.tile(torch.eye(_hot_dim, device=x.device), (*x.shape[:-2], 1, 1)) return torch.cat((x, _identity), -1) def np_add_onehot_id_at_last_dim(x): _hot_dim = x.shape[-2] _identity = np.tile(np.eye(_hot_dim), (*x.shape[:-2], 1, 1)) return np.concatenate((x, _identity), -1) # x. 
shape = (..., core_dim) # agent_ids.shape = (..., null) # output. shape = (..., core_dim+fixlen) def add_onehot_id_at_last_dim_fixlen(x, fixlen, agent_ids): if agent_ids is None: return add_onehot_id_at_last_dim(x) # if isinstance(x, np.ndarray): # return np_add_onehot_id_at_last_dim_fixlen(x, fixlen) # manually control output vector length # or # adjust output vector length according to -2 dim _identity = torch.eye(fixlen, device=x.device)[agent_ids] return torch.cat((x, _identity), -1) # def np_add_onehot_id_at_last_dim_fixlen(x, fixlen, agent_ids): # _identity = np.tile(np.eye(fixlen), (*x.shape[:-2], 1, 1)) # return np.concatenate((x, _identity[..., :x.shape[-2], :]), -1) """ numpy corresponding to torch.nn.functional.one_hot x is array, e.g. x = [4,2,3,1] n is int, e.g. n=5 >> np_one_hot( np.array([4,2,3,1]), n=5) np.array([ [0,0,0,0,1], [0,0,1,0,0], [0,0,0,1,0], [0,1,0,0,0], ]) """ def np_one_hot(x, n): return np.eye(n)[x] def add_obs_container_subject(container_emb, subject_emb, div): # for subject, add one-hot embedding of its group n_container = container_emb.shape[1] subject_belonging_info = np_one_hot(div, n_container) subject_out_emb = np.concatenate((subject_emb, subject_belonging_info), -1) # for container, add add multi-hot embedding of its subjects container_multihot = np.concatenate( [np.expand_dims((div == nth_container).astype(np.long), 1) for nth_container in range(n_container)], 1, ) container_out_emb = np.concatenate((container_emb, container_multihot), -1) return container_out_emb, subject_out_emb def MayGoWrong(f): @wraps(f) def decorated(*args, **kwargs): try: return f(*args, **kwargs) except: print('going wrong!') return f(*args, **kwargs) return decorated def dummy_decorator(f=None): if callable(f): @wraps(f) def decorated(*args, **kwargs): return f(*args, **kwargs) return decorated else: def actual_decorator(func): @wraps(func) def wrapper(*args, **kwargs): return func(*args, **kwargs) return wrapper return actual_decorator """ Function 
decorate, Turning numpy array to torch.Tensor, then put it on the right GPU / CPU """ def Args2tensor(f): # if not cuda_cfg.init: cuda_cfg.read_cfg() def _2tensor(x): if isinstance(x, torch.Tensor): return x.to(cuda_cfg.device) elif isinstance(x, np.ndarray): if (not cuda_cfg.use_float64) and x.dtype == np.float64: x = x.astype(np.float32) if cuda_cfg.use_float64 and x.dtype == np.float32: x = x.astype(np.float64) return torch.from_numpy(x).to(cuda_cfg.device) elif isinstance(x, dict): y = {} for key in x: y[key] = _2tensor(x[key]) return y else: return x @wraps(f) def decorated(*args, **kwargs): for key in kwargs: kwargs[key] = _2tensor(kwargs[key]) return f(*(_2tensor(arg) for arg in args), **kwargs) return decorated def Return2numpy(f): def _2cpu2numpy(x): return ( None if x is None else x if not isinstance(x, torch.Tensor) else x.detach().cpu().numpy() if x.requires_grad else x.cpu().numpy() ) @wraps(f) def decorated(*args, **kwargs): ret_tuple = f(*args, **kwargs) if isinstance(ret_tuple, tuple): return (_2cpu2numpy(ret) for ret in ret_tuple) else: return _2cpu2numpy(ret_tuple) return decorated """ Function decorate, Turning numpy array to torch.Tensor, then put it on the right GPU / CPU, When returning, convert all torch.Tensor to numpy array """ def Args2tensor_Return2numpy(f): def _2tensor(x): if isinstance(x, torch.Tensor): return x.to(cuda_cfg.device) elif isinstance(x, np.ndarray) and x.dtype != 'object': if (not cuda_cfg.use_float64) and x.dtype == np.float64: x = x.astype(np.float32) if cuda_cfg.use_float64 and x.dtype == np.float32: x = x.astype(np.float64) return torch.from_numpy(x).to(cuda_cfg.device) elif isinstance(x, dict): y = {} for key in x: y[key] = _2tensor(x[key]) return y else: return x def _2cpu2numpy(x): return ( None if x is None else x if not isinstance(x, torch.Tensor) else x.detach().cpu().numpy() if x.requires_grad else x.cpu().numpy() ) @wraps(f) def decorated(*args, **kwargs): for key in kwargs: kwargs[key] = _2tensor(kwargs[key]) 
ret_tuple = f(*(_2tensor(arg) for arg in args), **kwargs) if not isinstance(ret_tuple, tuple): return _2cpu2numpy(ret_tuple) return (_2cpu2numpy(ret) for ret in ret_tuple) return decorated """ Turning torch.Tensor to numpy array, put it on CPU, """ def _2cpu2numpy(x): return ( None if x is None else x if not isinstance(x, torch.Tensor) else x.detach().cpu().numpy() if x.requires_grad else x.cpu().numpy() ) """ Convert torch.Tensor to numpy array. Turning numpy array to torch.Tensor, then put it on the right GPU / CPU. """ def _2tensor(x): # if not cuda_cfg.init: cuda_cfg.read_cfg() if isinstance(x, torch.Tensor): return x.to(cuda_cfg.device) elif isinstance(x, np.ndarray): if (not cuda_cfg.use_float64) and x.dtype == np.float64: x = x.astype(np.float32) if cuda_cfg.use_float64 and x.dtype == np.float32: x = x.astype(np.float64) return torch.from_numpy(x).to(cuda_cfg.device) elif isinstance(x, dict): y = {} for key in x: y[key] = _2tensor(x[key]) return y elif isinstance(x, torch.nn.Module): x.to(cuda_cfg.device) return x else: return x """ Stack an array whose elements with different len, pad empty place with with NaN """ def pad_vec_array(arr_list, max_len): # init to NaNs res = np.zeros(shape=(len(arr_list), max_len), dtype=np.double) + np.nan for i in range(len(arr_list)): if arr_list[i] is None: continue res[i, : len(arr_list[i])] = arr_list[i] return res def one_hot_with_nan_np(tensr, num_classes): tensr = tensr.copy() tensr[np.isnan(tensr)] = num_classes Res_1MoreCol = np_one_hot(tensr.astype(np.long), num_classes + 1) return Res_1MoreCol[..., :-1] def one_hot_with_nan(tensr, num_classes): if isinstance(tensr, np.ndarray): return one_hot_with_nan_np(tensr, num_classes) tensr = tensr.clone() tensr[torch.isnan(tensr)] = num_classes Res_1MoreCol = F.one_hot(tensr.long(), num_classes + 1) return Res_1MoreCol[..., :-1] def scatter_with_nan(tensr, num_classes, out_type="binary"): res = one_hot_with_nan(tensr, num_classes) res = res.sum(-2) if out_type == "bool": 
res = res != 0 return res """ Not used anymore """ def process_space(space): # starcraft 环境无须特殊处理 if not ("Box" in space["obs_space"] or "Discrete" in space["act_space"]): return space # 其他环境需要进行格式转换 import re obs_dim = int( re.findall( re.compile(r"Box[(]-inf, inf, [(](.*?)[,)]", re.S), space["obs_space"] )[0] ) print(space["obs_space"]) space_ = {} space_["obs_space"] = {} space_["act_space"] = {} space_["obs_space"]["state_shape"] = 8 space_["obs_space"]["obs_shape"] = obs_dim space_["act_space"]["n_actions"] = 8 space_["obs_space"] = str(space_["obs_space"]) space_["act_space"] = str(space_["act_space"]) return space_ """ Not used anymore """ class Policy_shift_observer(object): def __init__(self, act_range, act_num): self.act_range = act_range # 15 self.act_num = act_num # 3 self.act_cnt_array = np.zeros(shape=(act_num, act_range)) self.rate = None self.rate_history = None def new_sample(self, act): act_rec = act.shape[0] for act_index in range(self.act_num): for act_nth in range(self.act_range): self.act_cnt_array[act_index, act_nth] = torch.sum( (act[:, act_index] == act_nth).long() ) self.rate = self.act_cnt_array / act_rec if self.rate_history is None: self.rate_history = self.rate else: self.rate_history = self.rate_history * 0.9 + self.rate * 0.1 print("rate", self.rate) # conclusion: the action distribution is not reinforced because the rewards signal is too weak. 
""" Get the hash code string of an array, compatable for numpy array and torch.tensor """ def __hash__(x): import hashlib md5 = hashlib.md5() # ignore # if isinstance(x, str): # md5.update(x) # return md5.hexdigest() if hasattr(x, "cpu"): md5.update(x.detach().cpu().numpy().data.tobytes()) return md5.hexdigest() elif hasattr(x, "numpy"): md5.update(x.numpy().data.tobytes()) return md5.hexdigest() elif hasattr(x, "data"): md5.update(x.data.tobytes()) return md5.hexdigest() else: try: md5.update(x.encode("utf-8")) return md5.hexdigest() except: return str(x) def __hashm__(*args): import hashlib md5 = hashlib.md5() # ignore for arg in args: x = arg if hasattr(x, "cpu"): md5.update(x.detach().cpu().numpy().data.tobytes()) elif hasattr(x, "numpy"): md5.update(x.numpy().data.tobytes()) elif hasattr(x, "data"): md5.update(x.data.tobytes()) else: try: md5.update(x.encode("utf-8")) except: md5.update(str(x).encode("utf-8")) return md5.hexdigest() """ Get the hash code string of the pytorch network parameters eg. 
__hashn__(mlp_module.parameters()) """ def __hashn__(generator): import hashlib md5 = hashlib.md5() # ignore for arg in generator: x = arg.data if hasattr(x, "cpu"): md5.update(x.detach().cpu().numpy().data.tobytes()) elif hasattr(x, "numpy"): md5.update(x.numpy().data.tobytes()) elif hasattr(x, "data"): md5.update(x.data.tobytes()) else: try: md5.update(x.encode("utf-8")) except: md5.update(str(x).encode("utf-8")) return md5.hexdigest() """ numpy version of softmax """ def np_softmax(x, axis=None): # compute in log space for numerical stability return np.exp(x - logsumexp(x, axis=axis, keepdims=True)) """ numpy version of logsumexp """ def logsumexp(a, axis=None, keepdims=False, return_sign=False): a_max = np.amax(a, axis=axis, keepdims=True) if a_max.ndim > 0: a_max[~np.isfinite(a_max)] = 0 elif not np.isfinite(a_max): a_max = 0 tmp = np.exp(a - a_max) # suppress warnings about log of zero with np.errstate(divide="ignore"): s = np.sum(tmp, axis=axis, keepdims=keepdims) if return_sign: sgn = np.sign(s) s *= sgn # /= makes more sense but we need zero -> zero out = np.log(s) if not keepdims: a_max = np.squeeze(a_max, axis=axis) out += a_max if return_sign: return out, sgn else: return out """ 函数说明:在有限的、不均衡的多标签数据集中,按照预设的比例,取出尽可能多的样本 """ def sample_balance(x, y, n_class, weight=None): if weight is None: weight = torch.ones(n_class, device=x.device) else: weight = torch.Tensor(weight).to(x.device) n_instance = torch.zeros(n_class, device=x.device) indices = [None] * n_class for i in range(n_class): indices[i] = torch.where(y == i)[0] n_instance[i] = len(indices[i]) ratio = n_instance / weight bottle_neck = torch.argmin(n_instance / weight) r = ratio[bottle_neck] n_sample = (r * weight).long() # print(n_instance, n_sample) new_indices = [indices[i][torch.randperm(n_sample[i])] for i in range(n_class)] # print(new_indices) new_indices_ = torch.cat(new_indices) assert len(new_indices_) == sum(n_sample) return x[new_indices_], y[new_indices_] """ gather tensor with index, 
regarding all right hand dimensions as dimensions need to be gathered eg.1 src = torch.Tensor([[[ 0, 1, 2], [ 3, 4, 5]], [[ 6, 7, 8], [ 9, 10, 11]], [[12, 13, 14], [15, 16, 17]]]) index = torch.Tensor([[0], [1], [0]]) src.shape = (3, 2, 3) src.shape = (3, 1) >> res = gather_righthand(src,index) res.shape = (3, 1, 3) res= tensor([[[ 0., 1., 2.]], [[ 9., 10., 11.]], [[12., 13., 14.]]]) eg.2 src.shape = (64, 16, 8, 88, 888) index.shape = (64, 5) >> res = gather_righthand(src,index) res.shape = (64, 5, 8, 88, 888) eg.3 src.shape = (64, 16, 88, 888) index.shape = (64, 777) >> res = gather_righthand(src,index) res.shape = (64, 777, 88, 888) """ def gather_righthand(src, index, check=True): if not isinstance(src, torch.Tensor): return np_gather_righthand(src, index, check) index = index.long() i_dim = index.dim() s_dim = src.dim() t_dim = i_dim - 1 if check: assert s_dim >= i_dim assert index.max() <= src.shape[t_dim] - 1 if index.max() != src.shape[t_dim] - 1: print( "[gather_righthand] warning, index max value does not match src target dim" ) assert ( src.shape[t_dim] != index.shape[t_dim] ), "Do you really want to select %d item out of %d?? If so, please set check=False." 
% ( index.shape[t_dim], src.shape[t_dim], ) for d in range(0, t_dim): assert src.shape[d] == index.shape[d] index_new_shape = list(src.shape) index_new_shape[t_dim] = index.shape[t_dim] for _ in range(i_dim, s_dim): index = index.unsqueeze(-1) index_expand = index.expand(index_new_shape) # only this two line matters return torch.gather( src, dim=t_dim, index=index_expand ) # only this two line matters """ numpy version of 'gather_righthand' """ def np_gather_righthand(src, index, check=True): index = index.astype(np.long) dim = lambda x: len(x.shape) i_dim = dim(index) s_dim = dim(src) t_dim = i_dim - 1 if check: assert s_dim >= i_dim assert index.max() <= src.shape[t_dim] - 1, ("\tindex.max()=", index.max(), "\tsrc.shape[t_dim]-1=", src.shape[t_dim] - 1) if index.max() != src.shape[t_dim] - 1: print( "[gather_righthand] warning, index max value does not match src target dim" ) assert ( src.shape[t_dim] != index.shape[t_dim] ), "you really want to select %d item out of %d?" % ( index.shape[t_dim], src.shape[t_dim], ) for d in range(0, t_dim): assert src.shape[d] == index.shape[d] tile_shape = np.array(src.shape) # warning: careful when moving to pytorch tile_shape[: (t_dim + 1)] = 1 for _ in range(i_dim, s_dim): index = np.expand_dims(index, -1) index_expand = np.tile( index, tile_shape ) # index.expand(index_new_shape) # only this two line matters return np.take_along_axis(arr=src, indices=index_expand, axis=t_dim) # return torch.gather(src, dim=t_dim, index=index_expand) # only this two line matters """ reverse operation of 'gather_righthand' """ def scatter_righthand(scatter_into, src, index, check=True): index = index.long() i_dim = index.dim() s_dim = src.dim() t_dim = i_dim - 1 index_new_shape = list(src.shape) index_new_shape[t_dim] = index.shape[t_dim] for _ in range(i_dim, s_dim): index = index.unsqueeze(-1) index_expand = index.expand(index_new_shape) # only this two line matters return scatter_into.scatter(t_dim, index_expand, src) """ calculate distance 
matrix between two position vector array A and B, support 3d and 2d test >> A = np.array([ [0,0], [1,1],]) B = np.array([ [0,-1], [1, 0], [0, 1],]) distance_mat_between(A, B) == [ [ 1 1 1 ], [sqrt(5), 1, 1 ]] => shape = (2,3) """ def distance_mat_between(A, B): n_subject_a = A.shape[-2] # A (64, 3) n_subject_b = B.shape[-2] # B (28, 3) A = np.repeat(np.expand_dims(A, -2), n_subject_b, axis=-2) # =>(64, 28, 3) B = np.repeat(np.expand_dims(B, -2), n_subject_a, axis=-2) # =>(28, 64, 3) B = np.swapaxes(B, -2, -3) # =>(64, 28, 3) dis = A - B # =>(64, 100, 100, 2) dis = np.linalg.norm(dis, axis=-1) return dis """ calculate distance matrix for a position vector array A, support 3d and 2d """ def distance_matrix(A): n_subject = A.shape[-2] # is 2 A = np.repeat(np.expand_dims(A, -2), n_subject, axis=-2) # =>(64, 100, 100, 2) At = np.swapaxes(A, -2, -3) # =>(64, 100, 100, 2) dis = At - A # =>(64, 100, 100, 2) dis = np.linalg.norm(dis, axis=-1) return dis """ calculate delta matrix for a position vector array A """ def delta_matrix(A): n_subject = A.shape[-2] # is 2 A = np.repeat(np.expand_dims(A, -2), n_subject, axis=-2) # =>(64, 100, 100, 2) At = np.swapaxes(A, -2, -3) # =>(64, 100, 100, 2) delta = At - A # =>(64, 100, 100, 2) return delta def np_normalize_last_dim(mat): return mat / np.expand_dims(np.linalg.norm(mat, axis=-1) + 1e-16, axis=-1) def dir2rad_old(delta_pos): result = np.empty(delta_pos.shape[:-1], dtype=complex) result.real = delta_pos[..., 0] result.imag = delta_pos[..., 1] rad_angle = np.angle(result) # assert (dir2rad_new(delta_pos)==rad_angle).all() return rad_angle """ arctan2, but support any batch """ def dir2rad(delta_pos): return np.arctan2(delta_pos[..., 1], delta_pos[..., 0]) def dir3d_rad(delta_pos): assert delta_pos.shape[-1]==3 xy = delta_pos[..., :2] r1 = dir2rad(xy) xy_norm = np.linalg.norm(xy, axis=-1) r2 = dir2rad(np.stack((xy_norm, delta_pos[..., 2]),-1)) return np.stack((r1,r2), axis=-1) def reg_deg(deg): return (deg + 180) % 360 - 180 # 
make angles comparable def reg_deg_at(rad, ref): return reg_deg(rad-ref) + ref def reg_rad(rad): # it's OK to show "RuntimeWarning: invalid value encountered in remainder" return (rad + np.pi) % (2 * np.pi) - np.pi # make angles comparable def reg_rad_at(rad, ref): return reg_rad(rad-ref) + ref # the average of two angles (in rad) def avg_rad(rad1, rad2): return reg_rad_at(rad1, rad2)/2 + rad2/2 def zeros_like_except_dim(array, except_dim, n): shape_ = list(array.shape) shape_[except_dim] = n return torch.zeros(size=shape_, device=array.device, dtype=array.dtype) def pad_at_dim(array, dim, n): extra_n = n-array.shape[dim] padding = zeros_like_except_dim(array, except_dim=dim, n=extra_n) return torch.cat((array, padding), axis=dim) def stack_vec_with_padding(arr_list): _len = [arr.len() for arr in arr_list] max_len = max(_len) n_subject = arr_list.len() dtype = arr_list[0].dtype arr_np = np.zeros(shape=(n_subject, max_len), dtype=dtype) for i, arr in enumerate(arr_list): arr_np[i,:_len[i]] = arr return arr_np def objdump(obj): import pickle with open('objdump.tmp', 'wb+') as f: pickle.dump(obj, f) return def objload(): import pickle, os if not os.path.exists('objdump.tmp'): return with open('objdump.tmp', 'rb') as f: return pickle.load(f) def stack_padding(l, padding=np.nan): max_len = max([t.shape[0] for t in l]) shape_desired = (len(l), max_len, *(l[0].shape[1:])) target = np.zeros(shape=shape_desired, dtype=float) + padding for i in range(len(l)): target[i, :len(l[i])] = l[i] return target def n_item(tensor): n = 1 for d in tensor.shape: n = n*d return n def cat_last_dim(tensor, cat): assert tensor.shape[-1] >= cat.shape[-1] for i, s in enumerate(tensor.shape[:-1]): if s!=cat.shape[i]: cat = repeat_at(cat, i, s) cat = tensor[..., :cat.shape[-1]] * 0 + cat return torch.cat((tensor, cat), -1) """ input: [25, 25] output: [ range(0,25), range(25,50) ] """ # @lru_cache(10) def arrange_id(N_AGENT_EACH_TEAM): AGENT_ID_EACH_TEAM_cv = [] begin = 0 for _, n in 
enumerate(N_AGENT_EACH_TEAM): b = begin s = begin + n AGENT_ID_EACH_TEAM_cv.append(range(b, s)) begin = s return AGENT_ID_EACH_TEAM_cv """ convert digit to binary >> get_binary(3, 8) np.array([ 1,1,0,0, 0,0,0,0 ]) """ @lru_cache(500) def get_binary(n:int, n_bits:int, dtype=np.float32): arr = np.zeros(n_bits, dtype=dtype) pointer = 0 while True: arr[pointer] = int(n%2==1) n = n >> 1 pointer += 1 if n == 0: break return arr """ >> get_binary_n_rows( 3, 8) array([[0., 0., 0., 0., 0., 0., 0., 0.], [1., 0., 0., 0., 0., 0., 0., 0.], [0., 1., 0., 0., 0., 0., 0., 0.]], dtype=float32) """ @lru_cache(10) def get_binary_n_rows(n_row, n_bit=8, dtype=np.float32): n_int = np.arange(n_row) arr = np.zeros((n_row, n_bit), dtype=dtype) for i in range(n_bit): arr[:, i] = (n_int%2==1).astype(int) n_int = n_int / 2 n_int = n_int.astype(np.int8) return arr ================================================ FILE: PythonExample/hmp_minimal_modules/UTIL/tensor_ops_c.pyx ================================================ import numpy as np cimport numpy as np cimport cython from cython.parallel import prange from libc.math cimport cos, atan2, abs np.import_array() ctypedef np.float64_t DTYPE_F64_t ctypedef np.float32_t DTYPE_t ctypedef fused DTYPE_int64_t: np.int64_t np.int32_t # to compat Windows ctypedef np.uint8_t DTYPE_bool_t PI = np.pi @cython.boundscheck(False) @cython.wraparound(False) @cython.nonecheck(False) def reg_rad_arr(DTYPE_F64_t[:] rad): cdef Py_ssize_t dim = rad.shape[0] cdef Py_ssize_t x, y result = np.zeros(dim, dtype=np.double) cdef DTYPE_F64_t[:] result_view = result cdef DTYPE_F64_t PI = np.pi for x in prange(dim, nogil=True): result_view[x] = (rad[x] + PI) % (2*PI) - PI return result # @cython.boundscheck(False) # @cython.wraparound(False) # @cython.nonecheck(False) # def roll_hisory( DTYPE_t[:,:,:,:] obs_feed_new, # DTYPE_t[:,:,:,:] prev_obs_feed, # DTYPE_bool_t[:,:,:] valid_mask, # DTYPE_int64_t[:,:] N_valid, # DTYPE_t[:,:,:,:] next_his_pool): # cdef Py_ssize_t vmax = 
N_valid.shape[0] # cdef Py_ssize_t wmax = N_valid.shape[1] # cdef Py_ssize_t max_obs_entity = obs_feed_new.shape[2] # cdef int n_v, th, a, t, k, pointer # for th in prange(vmax, nogil=True): # for a in range(wmax): # pointer = 0 # for k in range(max_obs_entity): # if valid_mask[th,a,k]: # next_his_pool[th, a, pointer] = obs_feed_new[th,a,k] # pointer = pointer + 1 # n_v = N_valid[th,a] # for k in range(n_v, max_obs_entity): # next_his_pool[th,a,k] = prev_obs_feed[th,a,k-n_v] # return np.asarray(next_his_pool) # https://cython.readthedocs.io/en/latest/src/userguide/source_files_and_compilation.html?highlight=wraparound#compiler-directives ''' binding (True): Python函数的内省, 查看函数内部的细节['__class__', '__delatrr__', ...., 'co_code', 'co_filename', 'co_argcount', 'co_varnames',...]等等 boundscheck (True): 数组的边界检查 wraparound (True) : 是否支持索引倒数,如a[-1] initializedcheck (True / False): ? nonecheck (False) always_allow_keywords (True / False) profile (False): Write hooks for Python profilers into the compiled C code. Default is False. infer_types (True / False): Infer types of untyped variables in function bodies. Default is None, indicating that only safe (semantically-unchanging) inferences are allowed. In particular, inferring integral types for variables used in arithmetic expressions is considered unsafe (due to possible overflow) and must be explicitly requested. ''' ================================================ FILE: PythonExample/hmp_minimal_modules/UTIL/win_pool.py ================================================ """ Author: Fu Qingxu, CASIA Description: Efficient parallel execting tool, Less efficient than the shm_pool (Linux only), but this one supports Windows as well as Linux. 
""" import numpy as np import time, psutil, platform, copy, multiprocessing from multiprocessing import Pipe from config import GlobalConfig from .hmp_daemon import kill_process_and_its_children from sys import stdout def print_red(*kw,**kargs): print("\033[1;31m",*kw,"\033[0m",**kargs) def print_green(*kw,**kargs): print("\033[1;32m",*kw,"\033[0m",**kargs) if not stdout.isatty(): print_green = print_red = print def child_process_load_config(machine_info): # This function is only needed in Windows: # Load json config or cmdline config to child process, from UTIL.config_args import prepare_args prepare_args(vb=False) # there is a 'machine_info' in GlobalConfig that must agree with main process GlobalConfig.machine_info = machine_info pass class SuperProc(multiprocessing.Process): def __init__(self, pipe, pipeHelp, index, base_seed, machine_info): super(SuperProc, self).__init__() self.p = pipe self.pH = pipeHelp self.local_seed = index + base_seed self.index = index self.machine_info = machine_info def automatic_generation(self, name, gen_fn, *arg): setattr(self, name, gen_fn(*arg)) def automatic_execution(self, name, dowhat, *arg): return getattr(getattr(self, name), dowhat)(*arg) def add_targets(self, new_target_args): for new_target_arg in new_target_args: name, gen_fn, arg = new_target_arg if arg is None: self.automatic_generation(name, gen_fn) elif isinstance(arg, tuple): self.automatic_generation(name, gen_fn, *arg) else: self.automatic_generation(name, gen_fn, arg) def execute_target(self, recv_args): res_list = [None] * len(recv_args) for i, recv_arg in enumerate(recv_args): name, dowhat, arg = recv_arg if arg is None: res = self.automatic_execution(name, dowhat) elif isinstance(arg, tuple): res = self.automatic_execution(name, dowhat, *arg) else: res = self.automatic_execution(name, dowhat, arg) res_list[i] = res return res_list def run(self): import numpy numpy.random.seed(self.local_seed) # linux uses fork, but windows does not, reload config for windows 
if not platform.system()=="Linux": child_process_load_config(self.machine_info) print('[win_pool]: process worker %d started'%self.index) try: while True: recv_args = self.p.recv() if not isinstance(recv_args, list): # not list object, switch to helper channel if recv_args == 0: self.add_targets(self.pH.recv()) elif recv_args == -1: print('Parallel worker exit') break # terminate else: assert False continue result = self.execute_target(recv_args) self.p.send(result) except KeyboardInterrupt: self.__del__() self.__del__() def __del__(self): self.p.close() self.pH.close() kill_process_and_its_children(psutil.Process()) class SmartPool(object): def __init__(self, proc_num, fold, base_seed=None): self.proc_num = proc_num self.task_fold = fold self.thisSide, self.thatSide = zip(*[Pipe() for _ in range(proc_num)]) self.thisSideHelp, self.thatSideHelp = zip(*[Pipe() for _ in range(proc_num)]) self.base_seed = int(np.random.rand()*1e5) if base_seed is None else base_seed print('[win_pool]: SmartPool base rand seed', self.base_seed) self.proc_pool = [SuperProc(pipe=p, pipeHelp=pH, index=cnt, base_seed=self.base_seed, machine_info=GlobalConfig.machine_info) for p, pH, cnt in zip(self.thatSide, self.thatSideHelp, range(proc_num))] for proc in self.proc_pool: proc.daemon = False proc.start() time.sleep(0.001) # shut down for i in range(proc_num): self.thatSide[i].close() self.thatSideHelp[i].close() # add an object of some class, initialize it proc_num=64 times, assigning them to proc_num/fold_num=16 python # processes def add_target(self, name, lam, args_list=None): lam_list = None if isinstance(lam, list): lam_list = lam for j in range(self.proc_num): tuple_list_to_be_send = [] for i in range(self.task_fold): name_fold = name + str(i) args = None if args_list is None else args_list[i + j*self.task_fold] if lam_list is not None: lam = lam_list[i + j*self.task_fold] tuple_list_to_be_send.append((name_fold, lam, args)) self.thisSide[j].send(0) # switch to helper channel 
self.thisSideHelp[j].send(tuple_list_to_be_send) # if there is index, execute one, otherwise execute all def exec_target(self, name, dowhat, args_list = None, index_list = None): if index_list is None: for j in range(self.proc_num): tuple_list_to_be_send = [] for i in range(self.task_fold): name_fold = name + str(i) args = None if args_list is None else args_list[i + j*self.task_fold] tuple_list_to_be_send.append((name_fold, dowhat, args)) self.thisSide[j].send(tuple_list_to_be_send) res_sort = [] for j in range(self.proc_num): res_sort.extend(self.thisSide[j].recv()) return res_sort else: tuple_List_List = [[None for _ in range(self.task_fold)] for _ in range(self.proc_num)] do_task_flag = [False for _ in range(self.proc_num)] do_task_fold = [[] for _ in range(self.proc_num)] result_recv_List_List = [[None for _ in range(self.task_fold)] for _ in range(self.proc_num)] # sort args for i, index in enumerate(index_list): which_proc = index // self.task_fold which_fold = index % self.task_fold name_fold = name + str(which_fold) args = None if args_list is None else args_list[i] tuple_List_List[which_proc][which_fold] = (name_fold, dowhat, args) do_task_flag[which_proc] = True # send args for which_proc in range(self.proc_num): tuple_send_buffer = [] for which_fold, item in enumerate(tuple_List_List[which_proc]): if item is None: continue tuple_send_buffer.append(item) do_task_fold[which_proc].append(which_fold) if do_task_flag[which_proc]: assert len(tuple_send_buffer) > 0 self.thisSide[which_proc].send(tuple_send_buffer) # receive returns for which_proc in range(self.proc_num): if not do_task_flag[which_proc]: continue recv_tmp = self.thisSide[which_proc].recv() for index, recv_item in enumerate(recv_tmp): which_fold = do_task_fold[which_proc][index] result_recv_List_List[which_proc][which_fold] = recv_item # sort returns res_sort = [None] * len(index_list) for i, index in enumerate(index_list): which_proc = index // self.task_fold which_fold = index % self.task_fold 
res_sort[i] = result_recv_List_List[which_proc][which_fold] return res_sort def party_over(self): self.__del__() def __del__(self): print('[win_pool]: executing superpool del') if hasattr(self, 'terminated'): print_red('[shm_pool]: already terminated, skipping ~') return print('[win_pool]: Sending exit command to workers ...') try: for i in range(self.proc_num): self.thisSide[i].send(-1) # switch to helper channel self.terminated = True except: pass print('[win_pool]: Closing pipe ...') for i in range(self.proc_num): try: self.thisSide[i].close() self.thisSideHelp[i].close() except: pass N_SEC_WAIT = 2 for i in range(N_SEC_WAIT): print_red('[win_pool]: terminate in %d'%(N_SEC_WAIT-i));time.sleep(1) # 杀死shm_pool创建的所有子进程,以及子进程的孙进程 print_red('[win_pool]: kill_process_and_its_children(proc)') for proc in self.proc_pool: try: kill_process_and_its_children(proc) except Exception as e: print_red('[win_pool]: error occur when kill_process_and_its_children:\n', e) print_green('[shm_pool]: __del__ finish') self.terminated = True ================================================ FILE: PythonExample/hmp_minimal_modules/VISUALIZE/README.md ================================================ # Visual Hybrid Multi-Agent Playground (VHMAP 使用说明书) ## 面向场景和特点 面向场景: - 科研,尤其是多智能体强化学习领域 - 3D演示 - 娱乐 应用特点: - Python接口简化到极致 - 渲染在客户端,自动插帧,纵享丝滑帧率 - 服务端依赖少 - 占用服务端资源极少 - 基于ThreeJs,支持拖动,支持手机触屏 - 支持透视和投影两种视图的切换 - 支持回放 - 使用zlib压缩数据流,网络带宽需求小 ## 安装 ```shell pip install vhmap ``` ## 20行代码-展示VHMAP的简单、丝滑 实现下图,仅需要20行python代码(含初始化)
界面功能、操作介绍: - 鼠标右键平移,左键旋转,滚轮缩放 - 支持触屏,如果你笔记本或手机有触控屏幕 - 左上角显示渲染刷新率 - play fps:每秒播放多少关键帧(小于渲染刷新率,则插帧;大于渲染刷新率,则超出部分无效) - pause:暂停 - next frame:暂停并切换下一帧 - previous frame:暂停并切换上一帧 - loop to start:播放完所有数据,回到第一帧 - ppt step:以极慢极慢的速度播放一帧,方便录屏,按下后会卡顿几秒 - use orthcam:切换透视视图(物体近大远小)/投影视图(工程制图学过没), - P.S. 第一次切换到投影视图时,需要用鼠标滚轮放大画面 ```python from VISUALIZE.mcom import mcom import numpy as np class TestVhmap(): def render(self, t): if not hasattr(self, '可视化桥'): self.可视化桥 = mcom(path='TEMP/v2d_logger/', draw_mode='Threejs') self.可视化桥.初始化3D() self.可视化桥.设置样式('gray') self.可视化桥.其他几何体之旋转缩放和平移('box', 'BoxGeometry(1,1,1)', 0,0,0, 1,1,1, 0,0,0) x = np.cos(t); y=np.sin(t); z= np.cos(t)*np.sin(t) # 此帧的x,y,z坐标 self.可视化桥.发送几何体( 'box|2233|Red|0.1', # 填入 ‘形状|几何体之ID标识|颜色|大小’即可 x, y, z, ro_x=0, ro_y=0, ro_z=np.sin(t), # 三维位置+欧拉旋转变换,六自由度 track_n_frame=20) # 显示历史20帧留下的轨迹 self.可视化桥.结束关键帧() if __name__ == '__main__': x = TestVhmap() for step in range(1000): x.render(t=step/np.pi) import time; time.sleep(1000) # 启动后打开输出的url地址即可 # 这是第21行,已经写完了 :joy: ``` ## 50行代码-演示3维N体运动(低精度定步长) - 代码1详情请见:VISUALIZE/examples/nb.py 运行方法: ``` pip install vhmap python -m VISUALIZE.examples.nb ```
## 90行代码-使用dop853求解常微分方程演示三体、N体运动 - 代码2详情请见:VISUALIZE/examples/nb_3body_specials.py - 代码3详情请见:VISUALIZE/examples/nb_nbody_specials.py ``` pip install vhmap python -m VISUALIZE.examples.nb_3body_specials ```
感谢 Xiaoming LI and Shijun LIAO, Shanghai Jiaotong University, China 的三体初始值: https://numericaltank.sjtu.edu.cn/three-body/three-body-movies.htm ## 如何回放 VHMAP在接收到数据后,会自动地在 TEMP/v2d_logger/ 路径下生成backup.dp数据文件,该文件可以用于回放。 警告:数据文件会在下一次运行时被新的日志文件覆盖,必要时请手动备份! ``` python -m VISUALIZE.threejs_replay -f TEMP/v2d_logger/backup.dp.gz -p 8085 ``` 其中-f后面的是回放文件的路径,-p接端口号例如8085,之后打开 http://localhost:8085 即可。 ## API-中文 引入 ```python from VISUALIZE.mcom import mcom ``` ### 初始化 ```python 可视化桥 = mcom(path='TEMP/v2d_logger/', draw_mode='Threejs') 可视化桥.初始化3D() ``` ### 设置样式 ```python 可视化桥.设置样式('star') # 布置星空 可视化桥.设置样式('grid') # 布置2维网格 可视化桥.设置样式('grid3d') # 布置3维网格 可视化桥.设置样式('earth') # 在场景中放一个地球 可视化桥.设置样式('background', color='White') # 注意不可以省略参数键值'color=' !可用颜色(JS颜色,支持Hex颜色)参考 https://www.w3schools.com/colors/colors_names.asp # 如果label要使用中文字符,需要设置字体,否则字符会变成问号'?' # 如果label要使用中文字符,而且需要换行,则还需要额外设置行距 fontLineHeight 可视化桥.设置样式('font', fontPath='/examples/fonts/ttf/simhei.ttf', fontLineHeight=1500) 可视化桥.设置样式('skybox', path='/wget/shabby.jpg') # 设置天空盒子,注意不可以省略参数键值'path=' 可视化桥.设置样式('skybox6side', # 设置天空盒子,注意不可以省略参数键值 !! 
posx='/wget/mars_textures/mars_posx.jpg', negx='/wget/mars_textures/mars_negx.jpg', posy='/wget/mars_textures/mars_posy.jpg', negy='/wget/mars_textures/mars_negy.jpg', posz='/wget/mars_textures/mars_posz.jpg', negz='/wget/mars_textures/mars_negz.jpg', ) ``` ### 声明几何体 ```python # declare geo 'oct1', init with OctahedronGeometry, then (1)rotate & (2)scale & (3)translate 可视化桥.其他几何体之旋转缩放和平移('oct1', 'OctahedronGeometry(1,0)', 0,0,0, 1,1,1, 0,0,0) # 八面体 # 需要换成其他几何体,请把'OctahedronGeometry(1,0)'替换,参考网址 https://threejs.org/docs/index.html?q=Geometry 可视化桥.其他几何体之旋转缩放和平移('any_name_you_want', 'TorusGeometry(10,3,16,100)', 0,0,0, 1,1,1, 0,0,0) # 甜甜圈 # declare geo 'ball' 可视化桥.其他几何体之旋转缩放和平移('ball', 'SphereGeometry(1)', 0,0,0, 1,1,1, 0,0,0) # 球体 # declare geo 'box' 可视化桥.其他几何体之旋转缩放和平移('box', 'BoxGeometry(1,1,1)', 0,0,0, 1,1,1, 0,0,0) # 长方体 # declare geo 'Plane', 使用fbx模型,路径为/VISUALIZE/threejsmod/examples/files/plane.fbx 可视化桥.其他几何体之旋转缩放和平移('Plane', 'fbx=/examples/files/plane.fbx', -np.pi/2, 0, np.pi/2, 1,1,1, 0,0,0) # 八面体 ``` ### 发送几何体,可用颜色(JS颜色,支持Hex颜色)参考 https://www.w3schools.com/colors/colors_names.asp ```python # 注意不可以省略参数键值 x=1; y=2; z=3 可视化桥.发送几何体( 'ball|8848|MidnightBlue|0.5', # 填入核心参量: “已声明的形状|几何体的唯一ID标识|颜色|整体大小” x, y, z, # 三维位置,3/6dof ro_x=0, ro_y=0, ro_z=0, # 欧拉旋转变换,3/6dof # ro_order='XYZ', # (测试中,勿使用)欧拉旋转顺序,详情见 https://threejs.org/docs/index.html?q=object#api/en/math/Euler opacity=1, # 透明度,1为不透明 renderOrder=0, # 渲染顺序。合理使用,能解决透明物体异常遮蔽的情况 label='', # 显示标签,空白不显示,用'\n'换行 label_color='White', # 标签颜色 # label_offset=np.array([0,2,2]), # 标签与物体之间的相对位置,实验选项,测试中,勿使用 # label_size=0.5, # 测试中,勿使用 track_n_frame=3, # 是否显示轨迹(0代表否),轨迹由最新的track_n_frame次位置连接而成 track_tension=0.1, # 轨迹曲线的平滑度,0为不平滑,推荐设置0不平滑 track_color='Green', # 轨迹的颜色显示,输入js颜色名或者hex值均可 ) ``` ```python # 高级文本颜色,一句话添加不同颜色文字,支持换行,支持半透明背景色(测试中) n_blue=1 n_red=2 who_is_winning = 'Blue(MARL AI) is leading' if n_blue>n_red else 'Red(Script AI) is leading' 可视化桥.发送几何体('tower2|1004|Gray|0.2', 0, 0, 1, ro_x=0, ro_y=0, ro_z=0, # 文本的背景色 
label_bgcolor='GhostWhite', # 带不同颜色的文本区 label='Blue(MARL AI)Agents Remain: %d\nRed(Script AI)Agents Remain: %d \n%s'%(n_blue, n_red, who_is_winning), # 默认颜色 label_color='DarkGreen', # 透明度 opacity=0 ) ``` 其中的“renderOrder”选项比较难以理解,如果没有显示异常,则设置为0,或者干脆删除该键值(默认0)。 用它解决的问题是简单的: ``` If: 一个(透明)物体A 被 一个透明物体B遮挡,但A部分或全部不可见 Then: 增加B的renderOrder,或者减小A的renderOrder(取值范围0~127) 此外,label标签的渲染顺序renderOrder是128,任意全透明物体的渲染顺序renderOrder是256 ``` ### 发送曲线 ```python # 画一条(0,0,0) -> (1,1,0) -> (2,2,0) -> (3,3,0) 的线 # 注意不可以省略参数键值!! 可视化桥.发送线条( 'simple|3999|MidnightBlue|0.004', # 填入核心参量: “simple|线条的唯一ID标识|颜色|整体大小” x_arr=np.array([0, 1, 2, 3]), # 曲线的x坐标列表 y_arr=np.array([0, 1, 2, 3]), # 曲线的y坐标列表 z_arr=np.array([0, 0, 0, 0]), # 曲线的z坐标列表 tension=0, # 曲线的平滑度,0为不平滑,推荐不平滑 opacity=1, # 透明度,1为不透明,不稳定仍然在测试中 ) # fat 型线条,支持调节宽度、虚线、透明度等,但是不稳定仍然在测试中 # 注意不可以省略参数键值!! 可视化桥.发送线条( 'fat|3999|MidnightBlue|0.004', # 填入核心参量: “fat|线条的唯一ID标识|颜色|整体大小” x_arr=np.array([0, 1, 2, 3]), # 曲线的x坐标列表 y_arr=np.array([0, 1, 2, 3]), # 曲线的y坐标列表 z_arr=np.array([0, 0, 0, 0]), # 曲线的z坐标列表 dashScale=20, # 此数越大,单位长度上的虚线切割越多 dashSize=1, # 虚线切割之实线 gapSize=1, # 虚线切割之实线间隔 tension=0, # 曲线的平滑度,0为不平滑,推荐不平滑 opacity=1, # 透明度,1为不透明 ) ``` ### 发射光束(从几何体src到几何体dst) ```python # 注意不可以省略参数键值!! 可视化桥.发射光束( 'beam', # 有 beam 和 lightning 两种选择 src=index_ID, # 发射者的几何体的唯一ID标识 dst=index_ID2, # 接收者的几何体的唯一ID标识 dur=0.5, # 光束持续时间,单位秒,绝对时间,不受播放fps的影响 size=0.03, # 光束粗细 color='DeepSkyBlue' # 光束颜色 ) ``` 终结这一帧(并开始下一帧) ```python self.可视化桥.结束关键帧() ``` ### 测试中-添加贴图 ```python 可视化桥.advanced_geometry_material('ball', map='/examples/planets/images/earthmap1k.jpg', bumpMap='/examples/planets/images/earthmap1k.jpg', bumpScale = 0.05, specularMap='/examples/images/earthmap1k.jpg', specular='Gray' ) ``` ## API-Eng In fact, this project is developed in Eng API, but I do not have time to write document. 
The api alignment can be found in mcom.py: ``` 别名对齐 = [ ('初始化3D', 'v2d_init'), ('设置样式', 'set_style'), ('形状之旋转缩放和平移','geometry_rotate_scale_translate'), ('其他几何体之旋转缩放和平移','advanced_geometry_rotate_scale_translate'), ('发送几何体','v2dx'), ('结束关键帧','v2d_show'), ('发送线条','line3d'), ('发射光束','flash'), ] ``` ================================================ FILE: PythonExample/hmp_minimal_modules/VISUALIZE/__init__.py ================================================ ================================================ FILE: PythonExample/hmp_minimal_modules/VISUALIZE/color.html ================================================
颜色 英文代码 形像颜色 HEX格式 RGB格式
  #330000
  #000033
  LightPink 浅粉红 #FFB6C1 255,182,193
  Pink 粉红 #FFC0CB 255,192,203
  Crimson 猩红 #DC143C 220,20,60
  LavenderBlush 脸红的淡紫色 #FFF0F5 255,240,245
  PaleVioletRed 苍白的紫罗兰红色 #DB7093 219,112,147
  HotPink 热情的粉红 #FF69B4 255,105,180
  DeepPink 深粉色 #FF1493 255,20,147
  MediumVioletRed 适中的紫罗兰红色 #C71585 199,21,133
  Orchid 兰花的紫色 #DA70D6 218,112,214
  Thistle #D8BFD8 216,191,216
  plum 李子 #DDA0DD 221,160,221
  Violet 紫罗兰 #EE82EE 238,130,238
  Magenta 洋红 #FF00FF 255,0,255
  Fuchsia 灯笼海棠(紫红色) #FF00FF 255,0,255
  DarkMagenta 深洋红色 #8B008B 139,0,139
  Purple 紫色 #800080 128,0,128
  MediumOrchid 适中的兰花紫 #BA55D3 186,85,211
  DarkVoilet 深紫罗兰色 #9400D3 148,0,211
  DarkOrchid 深兰花紫 #9932CC 153,50,204
  Indigo 靛青 #4B0082 75,0,130
  BlueViolet 深紫罗兰的蓝色 #8A2BE2 138,43,226
  MediumPurple 适中的紫色 #9370DB 147,112,219
  MediumSlateBlue 适中的板岩暗蓝灰色 #7B68EE 123,104,238
  SlateBlue 板岩暗蓝灰色 #6A5ACD 106,90,205
  DarkSlateBlue 深岩暗蓝灰色 #483D8B 72,61,139
  Lavender 熏衣草花的淡紫色 #E6E6FA 230,230,250
  GhostWhite 幽灵的白色 #F8F8FF 248,248,255
  Blue 纯蓝 #0000FF 0,0,255
  MediumBlue 适中的蓝色 #0000CD 0,0,205
  MidnightBlue 午夜的蓝色 #191970 25,25,112
  DarkBlue 深蓝色 #00008B 0,0,139
  Navy 海军蓝 #000080 0,0,128
  RoyalBlue 皇军蓝 #4169E1 65,105,225
  CornflowerBlue 矢车菊的蓝色 #6495ED 100,149,237
  LightSteelBlue 淡钢蓝 #B0C4DE 176,196,222
  LightSlateGray 浅石板灰 #778899 119,136,153
  SlateGray 石板灰 #708090 112,128,144
  DoderBlue 道奇蓝 #1E90FF 30,144,255
  AliceBlue 爱丽丝蓝 #F0F8FF 240,248,255
  SteelBlue 钢蓝 #4682B4 70,130,180
  LightSkyBlue 淡蓝色 #87CEFA 135,206,250
  SkyBlue 天蓝色 #87CEEB 135,206,235
  DeepSkyBlue 深天蓝 #00BFFF 0,191,255
  LightBLue 淡蓝 #ADD8E6 173,216,230
  PowDerBlue 火药蓝 #B0E0E6 176,224,230
  CadetBlue 军校蓝 #5F9EA0 95,158,160
  Azure 蔚蓝色 #F0FFFF 240,255,255
  LightCyan 淡青色 #E1FFFF 225,255,255
  PaleTurquoise 苍白的绿宝石 #AFEEEE 175,238,238
  Cyan 青色 #00FFFF 0,255,255
  Aqua 水绿色 #00FFFF 0,255,255
  DarkTurquoise 深绿宝石 #00CED1 0,206,209
  DarkSlateGray 深石板灰 #2F4F4F 47,79,79
  DarkCyan 深青色 #008B8B 0,139,139
  Teal 水鸭色 #008080 0,128,128
  MediumTurquoise 适中的绿宝石 #48D1CC 72,209,204
  LightSeaGreen 浅海洋绿 #20B2AA 32,178,170
  Turquoise 绿宝石 #40E0D0 64,224,208
  Auqamarin 绿玉\碧绿色 #7FFFAA 127,255,170
  MediumAquamarine 适中的碧绿色 #00FA9A 0,250,154
  MediumSpringGreen 适中的春天的绿色 #F5FFFA 245,255,250
  MintCream 薄荷奶油 #00FF7F 0,255,127
  SpringGreen 春天的绿色 #3CB371 60,179,113
  SeaGreen 海洋绿 #2E8B57 46,139,87
  Honeydew 蜂蜜 #F0FFF0 240,255,240
  LightGreen 淡绿色 #90EE90 144,238,144
  PaleGreen 苍白的绿色 #98FB98 152,251,152
  DarkSeaGreen 深海洋绿 #8FBC8F 143,188,143
  LimeGreen 酸橙绿 #32CD32 50,205,50
  Lime 酸橙色 #00FF00 0,255,0
  ForestGreen 森林绿 #228B22 34,139,34
  Green 纯绿 #008000 0,128,0
  DarkGreen 深绿色 #006400 0,100,0
  Chartreuse 查特酒绿 #7FFF00 127,255,0
  LawnGreen 草坪绿 #7CFC00 124,252,0
  GreenYellow 绿黄色 #ADFF2F 173,255,47
  OliveDrab 橄榄土褐色 #556B2F 85,107,47
  Beige 米色(浅褐色) #6B8E23 107,142,35
  LightGoldenrodYellow 浅秋麒麟黄 #FAFAD2 250,250,210
  Ivory 象牙 #FFFFF0 255,255,240
  LightYellow 浅黄色 #FFFFE0 255,255,224
  Yellow 纯黄 #FFFF00 255,255,0
  Olive 橄榄 #808000 128,128,0
  DarkKhaki 深卡其布 #BDB76B 189,183,107
  LemonChiffon 柠檬薄纱 #FFFACD 255,250,205
  PaleGodenrod 灰秋麒麟 #EEE8AA 238,232,170
  Khaki 卡其布 #F0E68C 240,230,140
  Gold #FFD700 255,215,0
  Cornislk 玉米色 #FFF8DC 255,248,220
  GoldEnrod 秋麒麟 #DAA520 218,165,32
  FloralWhite 花的白色 #FFFAF0 255,250,240
  OldLace 老饰带 #FDF5E6 253,245,230
  Wheat 小麦色 #F5DEB3 245,222,179
  Moccasin 鹿皮鞋 #FFE4B5 255,228,181
  Orange 橙色 #FFA500 255,165,0
  PapayaWhip 番木瓜 #FFEFD5 255,239,213
  BlanchedAlmond 漂白的杏仁 #FFEBCD 255,235,205
  NavajoWhite Navajo白 #FFDEAD 255,222,173
  AntiqueWhite 古代的白色 #FAEBD7 250,235,215
  Tan 晒黑 #D2B48C 210,180,140
  BrulyWood 结实的树 #DEB887 222,184,135
  Bisque (浓汤)乳脂,番茄等 #FFE4C4 255,228,196
  DarkOrange 深橙色 #FF8C00 255,140,0
  Linen 亚麻布 #FAF0E6 250,240,230
  Peru 秘鲁 #CD853F 205,133,63
  PeachPuff 桃色 #FFDAB9 255,218,185
  SandyBrown 沙棕色 #F4A460 244,164,96
  Chocolate 巧克力 #D2691E 210,105,30
  SaddleBrown 马鞍棕色 #8B4513 139,69,19
  SeaShell 海贝壳 #FFF5EE 255,245,238
  Sienna 黄土赭色 #A0522D 160,82,45
  LightSalmon 浅鲜肉(鲑鱼)色 #FFA07A 255,160,122
  Coral 珊瑚 #FF7F50 255,127,80
  OrangeRed 橙红色 #FF4500 255,69,0
  DarkSalmon 深鲜肉(鲑鱼)色 #E9967A 233,150,122
  Tomato 番茄 #FF6347 255,99,71
  MistyRose 薄雾玫瑰 #FFE4E1 255,228,225
  Salmon 鲜肉(鲑鱼)色 #FA8072 250,128,114
  Snow #FFFAFA 255,250,250
  LightCoral 淡珊瑚色 #F08080 240,128,128
  RosyBrown 玫瑰棕色 #BC8F8F 188,143,143
  IndianRed 印度红 #CD5C5C 205,92,92
  Red 纯红 #FF0000 255,0,0
  Brown 棕色 #A52A2A 165,42,42
  FireBrick 耐火砖 #B22222 178,34,34
  DarkRed 深红色 #8B0000 139,0,0
  Maroon 栗色 #800000 128,0,0
  White 纯白 #FFFFFF 255,255,255
  WhiteSmoke 白烟 #F5F5F5 245,245,245
  Gainsboro Gainsboro #DCDCDC 220,220,220
  LightGrey 浅灰色 #D3D3D3 211,211,211
  Silver 银白色 #C0C0C0 192,192,192
  DarkGray 深灰色 #A9A9A9 169,169,169
  Gray 灰色 #808080 128,128,128
  DimGray 暗淡的灰色 #696969 105,105,105
  Black 纯黑 #000000 0,0,0
================================================ FILE: PythonExample/hmp_minimal_modules/VISUALIZE/mcom.py ================================================ import os, copy, atexit, time, gzip, threading, setproctitle import numpy as np from multiprocessing import Process from UTIL.colorful import * from UTIL.network import get_host_ip, find_free_port from .mcom_def import fn_names, align_names, find_where_to_log class mcom(): """ 2D/3D visualizer interface The Design Principle: Under No Circumstance should this program interrupt the main program! args: draw_mode: ('Web', 'Native', 'Img', 'Threejs') rapid_flush: flush data instantly. set 'False' if you'd like your SSD to survive longer digit: the precision of float number. Choose from -1 (auto), 4, 8, 16 tag: give a name for debugging when multiple mcom object is used resume_mod: resume previous session resume_file: resume from which file image_path: if draw_mode=='Img', where to save image figsize: if draw_mode=='Img', determine the size of the figure, default is (12, 6) rec_exclude: if draw_mode=='Img', blacklist some vars """ def __init__(self, path=None, digit=-1, rapid_flush=True, draw_mode="Img", tag='default', resume_mod=False, **kargs): self.draw_mode = draw_mode self.rapid_flush = rapid_flush self.path = path self.digit = digit self.tag = tag self.resume_mod = resume_mod self.kargs = kargs if self.kargs is None: self.kargs = {} self.flow_cnt = 0 if draw_mode in ['Web', 'Native', 'Img', 'Threejs']: self.draw_process = True self.init_draw_subprocess() if draw_mode in ['Web', 'Native', 'Img']: self.init_2d_kernel() else: print亮红('[mcom.py]: Draw process off! 
No plot will be done') self.draw_process = False atexit.register(lambda: self.__del__()) def init_draw_subprocess(self): port = find_free_port() print红('[mcom.py]: draw process active!') self.draw_tcp_port = ('localhost', port) self.kargs.update({ 'draw_mode': self.draw_mode, 'draw_udp_port': self.draw_tcp_port, 'port': self.draw_tcp_port, 'backup_file': self.path + '/backup.dp.gz' }) DP = DrawProcess if self.draw_mode != 'Threejs' else DrawProcessThreejs self.draw_proc = DP(**self.kargs) self.draw_proc.start() from UTIL.network import QueueOnTcpClient self.draw_tcp_client = QueueOnTcpClient('localhost:%d'%port) def init_2d_kernel(self): if self.resume_mod: if "resume_file" in self.kargs: # if resume_file is specified, use it self.starting_file = self.kargs["resume_file"] else: # otherwise find previous log path _, _, self.current_buffer_index = find_where_to_log(self.path) self.starting_file = self.path + '/mcom_buffer_%d____starting_session.txt' % (self.current_buffer_index-1) # open the previous file, transfer previous data self.file_handle = open(self.starting_file, 'r', encoding = "utf-8") for line in self.file_handle.readlines(): self.draw_tcp_client.send_str(line) self.file_handle.close() print蓝('previous data transfered') # open this file again with append mode self.file_handle = open(self.starting_file, 'a+', encoding = "utf-8") else: _, _, self.current_buffer_index = find_where_to_log(self.path) self.starting_file = self.path + '/mcom_buffer_%d____starting_session.txt' % (self.current_buffer_index) print蓝('[mcom.py]: log file at:' + self.starting_file) self.file_handle = open(self.starting_file, 'w+', encoding = "utf-8") # on the end of the program def __del__(self): if hasattr(self,'_deleted_'): return # avoid exit twice else: self._deleted_ = True # avoid exit twice # print红('[mcom.py]: mcom exiting! 
tag: %s'%self.tag) if hasattr(self, 'file_handle') and self.file_handle is not None: end_file_flag = ('>500: self.file_handle.flush() self.flow_cnt = 0 else: self.flow_cnt += 1 return def rec_init(self, color='k'): str_tmp = '>>rec_init(\'%s\')\n' % color self.send(str_tmp) def rec_show(self): self.send('>>rec_show\n') def rec_end(self): self.send('>>rec_end\n') def rec_save(self): self.send('>>rec_save\n') def rec_end_hold(self): self.send('>>rec_end_hold\n') def rec_clear(self, name): str_tmp = '>>rec_clear("%s")\n' % (name) self.send(str_tmp) def rec(self, value, name): value = float(value) if self.digit == -1: str_tmp = '>>rec(%.16g,"%s")\n' % (value, name) elif self.digit == 16: str_tmp = '>>rec(%.16e,"%s")\n' % (value, name) elif self.digit == 8: str_tmp = '>>rec(%.8e,"%s")\n' % (value, name) elif self.digit == 4: str_tmp = '>>rec(%.4e,"%s")\n' % (value, name) self.send(str_tmp) def other_cmd(self, func_name, *args, **kargs): strlist = ['>>', func_name, '('] for _i_ in range(len(args)): if isinstance(args[_i_], np.ndarray): strlist = self._process_ndarray(args[_i_], strlist) else: strlist = self._process_scalar(args[_i_], strlist) if len(kargs)>0: for _key_ in kargs: if isinstance(kargs[_key_], np.ndarray): strlist = self._process_ndarray(kargs[_key_], strlist, _key_) else: strlist = self._process_scalar(kargs[_key_], strlist, _key_) if strlist[len(strlist) - 1] == "(": strlist.append(")\n") else: strlist[len(strlist) - 1] = ")\n" # 把逗号换成后括号 self.send(''.join(strlist)) def _process_scalar(self, arg, strlist,key=None): if key is not None: strlist += '%s='%key if isinstance(arg, int): strlist.append("%d" % arg) strlist.append(",") elif isinstance(arg, float): if self.digit == -1: strlist.append("%.16g" % arg) elif self.digit == 16: strlist.append("%.16e" % arg) elif self.digit == 8: strlist.append("%.8e" % arg) elif self.digit == 4: strlist.append("%.4e" % arg) strlist.append(",") elif isinstance(arg, str): assert '$' not in arg strlist.extend(["\'", 
# NOTE(review): extraction residue — each physical line below is many source lines with
# newlines stripped; inline '#' comments have swallowed the following statements, so the
# text is kept byte-identical and only standalone review comments are added.
# This line: tail of mcom._process_scalar (its 'def' line is cut off before this view),
# then _process_ndarray (serializes 1-D arrays as "[a,b,c]", rejects ndim>1), then the
# exec loops that generate forwarder methods from fn_names and Chinese aliases from
# align_names, then DrawProcessThreejs.__init__ (TCP queue server + optional gzip backup;
# the bare `except: pass` around os.remove is deliberate best-effort cleanup) and the
# start of flush_backup.
arg.replace('\n', '$'), "\'", ","]) elif isinstance(arg, list): strlist.append(str(arg)) strlist.append(",") elif hasattr(arg, 'dtype') and np.issubdtype(arg.dtype, np.integer): strlist.append("%d" % arg) strlist.append(",") elif hasattr(arg, 'dtype') and np.issubdtype(arg.dtype, np.floating): if self.digit == -1: strlist.append("%.16g" % arg) elif self.digit == 16: strlist.append("%.16e" % arg) elif self.digit == 8: strlist.append("%.8e" % arg) elif self.digit == 4: strlist.append("%.4e" % arg) strlist.append(",") else: print('unknown input type | 输入的参数类型不能处理', arg.__class__) return strlist def _process_ndarray(self, args, strlist, key=None): if args.ndim == 1: if key is not None: strlist += '%s='%key d = len(args) sub_list = ["["] + ["%.3e,"%t if (i+1)!=d else "%.3e"%t for i, t in enumerate(args)] + ["]"] strlist += sub_list strlist.append(",") else: print红('[mcom]: input dimension > 1, unable to process | 输入数组的维度大于2维') return strlist for fn_name in fn_names: build_exec_cmd = 'def %s(self,*args,**kargs):\n self.other_cmd("%s", *args,**kargs)\n'%(fn_name, fn_name) exec(build_exec_cmd) for align, fn_name in align_names: build_exec_cmd = '%s = %s\n'%(align, fn_name) exec(build_exec_cmd) class DrawProcessThreejs(Process): def __init__(self, draw_udp_port, draw_mode, **kargs): super(DrawProcessThreejs, self).__init__() from UTIL.network import QueueOnTcpServer self.draw_mode = draw_mode self.draw_udp_port = draw_udp_port self.tcp_connection = QueueOnTcpServer(self.draw_udp_port) self.buffer_list = [] self.backup_file = kargs['backup_file'] self.allow_backup = False if self.backup_file is None else True if self.allow_backup: if os.path.exists(self.backup_file): print亮红('[mcom.py]: warning, purge previous 3D visual data!') try: os.remove(self.backup_file) except: pass self.tflush_buffer = [] self.client_tokens = {}  def flush_backup(self): while True: time.sleep(20) if not os.path.exists(os.path.dirname(self.backup_file)): os.makedirs(os.path.dirname(self.backup_file)) #
# This line: rest of flush_backup (appends tflush_buffer to a gzip file every 20 s),
# init_threejs (daemon Flask thread on a free port + optional backup thread), run
# (Process entry: drains the TCP queue; queue.get(timeout=600) appears to have no
# Empty handler — presumably 10 min of silence would raise and end the worker, TODO
# confirm), run_handler (FIFO-bounded buffer), and the Flask "/up" POST handler head.
print('Flush backup') with gzip.open(self.backup_file, 'at') as f: f.writelines(self.tflush_buffer) self.tflush_buffer = [] # print('Flush backup done') def init_threejs(self): t = threading.Thread(target=self.run_flask, args=(find_free_port(),)) t.daemon = True t.start() if self.allow_backup: self.tflush = threading.Thread(target=self.flush_backup) self.tflush.daemon = True self.tflush.start() def run(self): setproctitle.setproctitle('ThreejsVisualWorker') self.init_threejs() try: from queue import Empty queue = self.tcp_connection.get_queue() self.tcp_connection.wait_connection() # after this, the queue begin to work while True: buff_list = [] buff_list.extend(queue.get(timeout=600)) for _ in range(queue.qsize()): buff_list.extend(queue.get(timeout=600)) self.run_handler(buff_list) except KeyboardInterrupt: self.__del__() self.__del__() def __del__(self): return def run_handler(self, new_buff_list): self.buffer_list.extend(new_buff_list) self.tflush_buffer.extend(new_buff_list) # too many, delete with fifo if len(self.buffer_list) > 1e9: # 当存储的指令超过十亿后,开始删除旧的 del self.buffer_list[:len(new_buff_list)] def run_flask(self, port): from flask import Flask, request, send_from_directory from waitress import serve from mimetypes import add_type add_type('application/javascript', '.js') add_type('text/css', '.css') app = Flask(__name__) dirname = os.path.dirname(__file__) + '/threejsmod' import zlib self.init_cmd_captured = False init_cmd_list = [] def init_cmd_capture_fn(tosend): for strx in tosend: if '>>v2d_show()\n'==strx: self.init_cmd_captured = True init_cmd_list.append(strx) if self.init_cmd_captured: break return @app.route("/up", methods=["POST"]) def up(): # 本次正常情况下,需要发送的数据 # dont send too much in one POST, might overload the network traffic if len(self.buffer_list)>35000: tosend = self.buffer_list[:30000] self.buffer_list = self.buffer_list[30000:] else: tosend = self.buffer_list self.buffer_list = [] # 处理断线重连的情况,断线重连时,会出现新的token token =
# This line: rest of the "/up" handler (client-token handshake: a new token replays the
# captured init commands; payload is zlib-compressed), two more routes — NOTE(review):
# both show @app.route("/"); the angle-bracket path converter (e.g. "/<path:path>") was
# almost certainly stripped by the extractor, TODO confirm against upstream — then the
# waitress serve call, DrawProcess.__init__, and the head of init_matplot_lib (Agg
# backend for headless Web/Img modes).
request.data.decode('utf8') if token not in self.client_tokens: print('[mcom.py] Establishing new connection, token:', token) self.client_tokens[token] = 'connected' if (len(self.client_tokens)==0) or (not self.init_cmd_captured): # 尚未捕获初始化命令,或者第一次client buf = "".join(tosend) else: print('[mcom.py] If there are other tabs, please close them now.') buf = "".join(init_cmd_list + tosend) else: # 正常连接 buf = "".join(tosend) # 尝试捕获并保存初始化部分的命令 if not self.init_cmd_captured: init_cmd_capture_fn(tosend) # use zlib to compress output command, worked out like magic buf = bytes(buf, encoding='utf8') zlib_compress = zlib.compressobj() buf = zlib_compress.compress(buf) + zlib_compress.flush(zlib.Z_FINISH) return buf @app.route("/") def static_dirx(path): if path=='favicon.ico': return send_from_directory("%s/"%dirname, 'files/HMP.ico') return send_from_directory("%s/"%dirname, path) @app.route("/") def main_app(): with open('%s/examples/abc.html'%dirname, 'r', encoding = "utf-8") as f: buf = f.read() return buf print('\n--------------------------------') print('JS visualizer online: http://%s:%d'%(get_host_ip(), port)) print('JS visualizer online (localhost): http://localhost:%d'%(port)) print('--------------------------------') # app.run(host='0.0.0.0', port=port) serve(app, threads=8, ipv4=True, ipv6=True, listen='*:%d'%port) class DrawProcess(Process): def __init__(self, draw_udp_port, draw_mode, **kargs): from UTIL.network import QueueOnTcpServer super(DrawProcess, self).__init__() self.draw_mode = draw_mode self.draw_udp_port = draw_udp_port self.tcp_connection = QueueOnTcpServer(self.draw_udp_port) self.kwargs = kargs return def init_matplot_lib(self): if self.draw_mode in ['Web', 'Img']: import matplotlib matplotlib.use('Agg') # set the backend before importing pyplot import matplotlib.pyplot as plt self.gui_reflesh = lambda: time.sleep(1) # plt.pause(0.1) elif self.draw_mode == 'Native': import matplotlib # matplotlib.use('Agg') # set the backend before importing pyplot
# This line: rest of init_matplot_lib (Qt5Agg for Native; 'Threejs' asserts False because
# this class is never used for that mode), the libs_family table mapping command names to
# the 'rec'/'v2d' handler families with lazy init fns, DrawProcess.run (0.1 s queue poll,
# gui_reflesh on Empty), run_handler (collapses consecutive '>>rec_show' commands), and
# process_cmd — NOTE(review): process_cmd eval()s strings received over TCP; safe only
# because the peer is the local mcom client, worth flagging.
matplotlib.use('Qt5Agg') import matplotlib.pyplot as plt self.gui_reflesh = lambda: plt.pause(0.2) elif self.draw_mode == 'Threejs': assert False else: assert False from config import GlobalConfig logdir = GlobalConfig.logdir if not os.path.exists(logdir): os.makedirs(logdir) if self.draw_mode == 'Web': self.avail_port = find_free_port() my_http = MyHttp('%s/html.html'%logdir, self.avail_port) my_http.daemon = True my_http.start() self.libs_family = { "rec_disable_percentile_clamp": 'rec', "rec_enable_percentile_clamp": 'rec', 'rec_init': 'rec', 'rec': 'rec', 'rec_show': 'rec', 'v2d_init': 'v2d', 'v2dx':'v2d', 'v2d_show': 'v2d', 'v2d_pop':'v2d', 'v2d_line_object':'v2d', 'v2d_clear':'v2d', 'v2d_add_terrain': 'v2d', } self.libs_init_fns = { 'rec': self.rec_init_fn, 'v2d': self.v2d_init_fn, } def run(self): setproctitle.setproctitle('HmapPlotProcess') self.init_matplot_lib() try: # self.tcp_connection.set_handler(self.run_handler) from queue import Empty queue = self.tcp_connection.get_queue() # self.tcp_connection.set_handler(self.run_handler) self.tcp_connection.wait_connection() # after this, the queue begin to work while True: try: buff_list = [] buff_list.extend(queue.get(timeout=0.1)) for _ in range(queue.qsize()): buff_list.extend(queue.get(timeout=0.1)) self.run_handler(buff_list) except Empty: self.gui_reflesh() except KeyboardInterrupt: self.__del__() self.__del__() def run_handler(self, buff_list): while True: if len(buff_list) == 0: break buff = buff_list.pop(0) if (buff=='>>rec_show\n') and ('>>rec_show\n' in buff_list): continue # skip self.process_cmd(buff) # # print('成功处理指令:', buff) def __del__(self): self.tcp_connection.close() def process_cmd(self, cmd_str): if '>>' in cmd_str: cmd_str_ = cmd_str[2:].strip('\n') if ')' not in cmd_str_: cmd_str_ = cmd_str_+'()' prefix = self.get_cmd_lib(cmd_str_) if prefix is not None: eval('%s.%s'%(prefix, cmd_str_)) def get_cmd_lib(self, cmd): cmd_key = None func_name = cmd.split('(')[0] if func_name not in
# This line: rest of get_cmd_lib (one-shot lazy construction of the handler family, then
# the init fn is nulled), rec_init_fn / v2d_init_fn, the MyHttp helper Process (serves a
# single html file; bare except returns a placeholder page), the FILE separator, and the
# whole of mcom_def.py: fn_names (commands forwarded verbatim), align_names (Chinese
# aliases), and the head of find_where_to_log.
self.libs_family: print蓝('绘图函数不能处理:', cmd) return None family_name = self.libs_family[func_name] if self.libs_init_fns[family_name] is not None: self.libs_init_fns[family_name]() self.libs_init_fns[family_name] = None return 'self.%s'%family_name def rec_init_fn(self): from VISUALIZE.mcom_rec import rec_family self.rec = rec_family('r', self.draw_mode, **self.kwargs ) def v2d_init_fn(self): from VISUALIZE.mcom_v2d import v2d_family self.v2d = v2d_family(self.draw_mode) class MyHttp(Process): def __init__(self, path_to_html, avail_port): super(MyHttp, self).__init__() self.path_to_html = path_to_html self.avail_port = avail_port def run(self): from flask import Flask app = Flask(__name__) @app.route("/") def hello(): try: with open(self.path_to_html,'r') as f: html = f.read() except: html = "no plot yet please wait" return html app.run(port=self.avail_port) ================================================ FILE: PythonExample/hmp_minimal_modules/VISUALIZE/mcom_def.py ================================================ import os fn_names = [ "v2dx", "flash", "plot", "figure", "hold", "box", "pause", "clf", "xlim", "ylim", "xlabel", "ylabel", "drawnow", "v2d", "v2d_init", "v3d_init", "v2L", "title", "plot3", "grid", "v3dx", "v2d_show", "v2d_pop", "v2d_line_object", "v2d_clear", "v2d_add_terrain", "set_style", "set_env", "use_geometry", "rec_disable_percentile_clamp", "rec_enable_percentile_clamp", "geometry_rotate_scale_translate", "test_function_terrain", 'line3d', 'advanced_geometry_rotate_scale_translate', "advanced_geometry_material", "skip" ] align_names = [ ('初始化3D', 'v2d_init'), ('设置样式', 'set_style'), ('形状之旋转缩放和平移','geometry_rotate_scale_translate'), ('其他几何体之旋转缩放和平移','advanced_geometry_rotate_scale_translate'), ('其他几何体之材质','advanced_geometry_material'), ('发送几何体','v2dx'), ('结束关键帧','v2d_show'), ('发送线条','line3d'), ('发射光束','flash'), ('空指令','skip'), ] def find_where_to_log(path): if not os.path.exists(path): os.makedirs(path) def find_previous_start_end(): start = None;
# This line: rest of find_where_to_log (scans mcom_buffer_%d files to find the previous
# session's start/end indices and the next free index), the FILE separator, and the head
# of mcom_rec.py: module imports, figure-id constants, and rec_family.__init__ —
# NOTE(review): rec_exclude=[] is a mutable default argument, shared across instances
# that do not pass their own list; harmless only if never mutated, TODO confirm.
end = None; t = 0 while True: is_body = os.path.exists(path + '/mcom_buffer_%d.txt' % t) is_head = os.path.exists(path + '/mcom_buffer_%d____starting_session.txt' % t) if is_head: start = t if is_head or is_body: end = t; t += 1 else: new = t return (start, end, new) prev_start, prev_end, new = find_previous_start_end() return prev_start, prev_end, new ================================================ FILE: PythonExample/hmp_minimal_modules/VISUALIZE/mcom_rec.py ================================================ import os, fnmatch, matplotlib import numpy as np from functools import lru_cache from config import GlobalConfig # 设置matplotlib正常显示中文和负号 # matplotlib.rcParams['font.sans-serif']=['SimHei'] # 用黑体显示中文 # matplotlib.rcParams['axes.unicode_minus']=False # 正常显示负号 StandardPlotFig = 1 ComparePlotFig = 2 class rec_family(object): def __init__(self, colorC=None, draw_mode='Native', image_path=None, figsize=None, smooth_level=None, rec_exclude=[], **kwargs): # the list of vars' name (with order), string self.name_list = [] # the list of vars' value sequence (with order), float self.line_list = [] # the list of vars' time sequence (with order), float self.time_list = [] # the list of line plotting handles self.line_plot_handle = [] self.line_plot_handle2 = [] # subplot list self.subplots = {} self.subplots2 = {} # working figure handle self.working_figure_handle = None self.working_figure_handle2 = None # recent time self.current_time = None self.time_index = None self.smooth_level = smooth_level self.figsize_given = figsize self.colorC = 'k' if colorC is None else colorC self.Working_path = 'Testing-beta' self.image_num = -1 self.draw_mode = draw_mode self.rec_exclude = rec_exclude self.vis_95percent = True self.enable_percentile_clamp = True logdir = GlobalConfig.logdir self.plt = None if not os.path.exists(logdir): os.makedirs(logdir) if self.draw_mode == 'Web': import matplotlib.pyplot as plt, mpld3 self.html_to_write = '%s/html.html'%logdir self.plt = plt;
# This line: rest of __init__ (per-draw-mode pyplot setup; unknown mode asserts False),
# the percentile-clamp toggles, rec_init, match_exclude / get_index — NOTE(review):
# @lru_cache(500) on instance methods caches (self, name) pairs and so keeps self alive
# for the process lifetime; acceptable for a singleton recorder, flagging anyway — and
# rec(), which appends (value, time) per named series and backfills missing timestamps
# the first time a 'time' sample arrives.
self.mpld3 = mpld3 elif self.draw_mode =='Native': import matplotlib.pyplot as plt plt.ion() self.plt = plt elif self.draw_mode =='Img': matplotlib.use('Agg') import matplotlib.pyplot as plt self.plt = plt self.img_to_write = '%s/rec.jpg'%logdir self.img_to_write2 = '%s/rec.jpeg'%logdir if image_path is not None: self.img_to_write = image_path self.img_to_write2 = image_path+'.jpg' else: assert False def rec_disable_percentile_clamp(self): self.enable_percentile_clamp = False def rec_enable_percentile_clamp(self): self.enable_percentile_clamp = True def rec_init(self, colorC=None): if colorC is not None: self.colorC = colorC return @lru_cache(500) def match_exclude(self, name): for n in self.rec_exclude: if fnmatch.fnmatch(name, n): return True return False @lru_cache(500) def get_index(self, name): return self.name_list.index(name) def rec(self, var, name): if self.match_exclude(name): # if var is backlisted return if name in self.name_list: # if var is already known, skip pass else: # if var is new, prepare lists self.name_list.append(name) self.line_list.append([]) #新建一个列表 self.time_list.append([]) self.line_plot_handle.append(None) self.line_plot_handle2.append(None) # get the index of the var index = self.get_index(name) if name=='time': # special var: time self.current_time = var if self.time_index is None: self.time_index = index self.handle_all_missing_time() else: assert self.time_index == index else: # normal vars: if time is available, add it if self.time_index is not None: if len(self.line_list[index]) != len(self.time_list[index]): self.handle_missing_time(self.line_list[index], self.time_list[index]) self.time_list[index].append(self.current_time) # finally, add var value self.line_list[index].append(var) def handle_all_missing_time(self): for name in self.name_list: if name=='time': continue index = self.get_index(name) if len(self.line_list[index]) != len(self.time_list[index]): self.handle_missing_time(self.line_list[index], self.time_list[index])
# NOTE(review): extraction residue — newlines collapsed; kept byte-identical, standalone
# review comments added only.
# This line: handle_missing_time (backfills synthetic timestamps current_time-1, -2, ...
# for samples recorded before the first 'time' sample), get_figure_size (grows the
# canvas once more than 10 subplots are shown), and the first half of rec_show: lazy
# figure creation, detection of 'of='-grouped series (triggers the "advanced" overlay
# figure), row/column layout, and full relayout when the number of series changes.
def handle_missing_time(self, line_arr, time_arr): assert len(line_arr) > len(time_arr) for i in range(len(line_arr) - len(time_arr)): time_arr.append(self.current_time - i - 1) def get_figure_size(self, image_num): if self.figsize_given is None: expand_ratio = max((image_num - 10)/4, 1) return (12*expand_ratio, 6*expand_ratio) else: return self.figsize_given def rec_show(self): # the number of total subplots | 一共有多少条曲线 image_num = len(self.line_list) # 是否启动高级曲线绘制 draw_advance_fig = False for name in self.name_list: if 'of=' in name: draw_advance_fig = True if self.working_figure_handle is None: self.working_figure_handle = self.plt.figure(StandardPlotFig, figsize=self.get_figure_size(image_num), dpi=100) if self.draw_mode == 'Native': self.working_figure_handle.canvas.set_window_title(self.Working_path) self.plt.show() # default row=1 rows = 1 # check whether the time var exists 检查是否有时间轴,若有,做出修改 time_var_met = [False] # time_var_met is list because we need it to be mutable | 有时间轴 time_explicit = ('time' in self.name_list) if time_explicit: assert self.time_index == self.get_index('time') image_num_to_show = image_num - 1 else: image_num_to_show = image_num if image_num_to_show >= 3: rows = 2 #大与3张图,则放2行 if image_num_to_show > 8: rows = 3 #大与8张图,则放3行 if image_num_to_show > 12: rows = 4 #大与12张图,则放4行 cols = int(np.ceil(image_num/rows)) #根据行数求列数 if self.image_num!=image_num: # 需要刷新布局,所有已经绘制的图作废 self.subplots = {} self.working_figure_handle.clf() for q, handle in enumerate(self.line_plot_handle): self.line_plot_handle[q] = None # 需要刷新布局,所有已经绘制的图作废 if draw_advance_fig: self.subplots2 = {} if self.working_figure_handle2 is not None: self.working_figure_handle2.clf() for q, handle in enumerate(self.line_plot_handle2): self.line_plot_handle2[q] = None self.image_num = image_num self.plot_classic(image_num, rows, time_explicit, time_var_met, self.time_index, cols) # draw advanced figure, current disabled if draw_advance_fig: self.plot_advanced() # now end, output images if
# This line: rec_show output stage (Web -> mpld3 html, Native -> plt.pause, Img ->
# savefig of both figures), smooth (moving-average via np.convolve 'same' when sm_lv>1),
# and the first half of plot_advanced: series whose names contain 'of=' are grouped by
# the prefix before 'of=' and overlaid in one subplot per group on figure 2.
self.draw_mode == 'Web': content = self.mpld3.fig_to_html(self.working_figure_handle) with open(self.html_to_write, 'w+') as f: f.write(content) return elif self.draw_mode == 'Native': self.plt.pause(0.01) return elif self.draw_mode == 'Img': if self.working_figure_handle is not None: self.working_figure_handle.tight_layout() self.working_figure_handle.savefig(self.img_to_write) if self.working_figure_handle2 is not None: self.working_figure_handle2.tight_layout() self.working_figure_handle2.savefig(self.img_to_write2) def smooth(self, data, sm_lv=1): if sm_lv > 1: y = np.ones(sm_lv)*1.0/sm_lv d = np.convolve(y, data, 'same')#"same") else: d = data return np.array(d) def plot_advanced(self): #画重叠曲线,如果有的话 group_name = [] group_member = [] time_explicit = ('time' in self.name_list) image_num = len(self.line_list) for index in range(image_num): if 'of=' not in self.name_list[index]: #没有的直接跳过 continue # 找出组别 g_name_ = self.name_list[index].split('of=')[0] if g_name_ in group_name: i = group_name.index(g_name_) group_member[i].append(index) else: group_name.append(g_name_) group_member.append([index]) num_group = len(group_name) image_num_multi = num_group rows = 1 if image_num_multi >= 3: rows = 2 #大与3张图,则放2行 if image_num_multi > 8: rows = 3 #大与8张图,则放3行 if image_num_multi > 12: rows = 4 #大与12张图,则放4行 cols = int(np.ceil(image_num_multi/rows))#根据行数求列数 if self.working_figure_handle2 is None: self.working_figure_handle2 = self.plt.figure(ComparePlotFig, figsize=self.get_figure_size(num_group), dpi=100) if self.draw_mode == 'Native': self.working_figure_handle2.canvas.set_window_title('Working-Comp') self.plt.show() for i in range(num_group): subplot_index = i+1 subplot_name = '%d,%d,%d'%(rows,cols,subplot_index) if subplot_name in self.subplots2: target_subplot = self.subplots2[subplot_name] else: target_subplot = self.working_figure_handle2.add_subplot(rows,cols,subplot_index) self.subplots2[subplot_name] = target_subplot tar_true_name=group_name[i] num_member =
# This line: rest of plot_advanced (tracks global x/y min/max across group members,
# optional smoothing, plot-once-then-set_data update pattern, shared title/labels/legend
# per group) and the head of plot_classic (one subplot per series, skipping the 'time'
# series; time_var_met is a 1-element list so the flag mutation is visible to rec_show).
len(group_member[i]) _xdata_min_ = np.inf _xdata_max_ = -np.inf _ydata_min_ = np.inf _ydata_max_ = -np.inf for j in range(num_member): index = group_member[i][j] _ydata_ = np.array(self.line_list[index], dtype=np.double) if self.smooth_level is not None: _ydata_ = self.smooth(_ydata_, sm_lv=self.smooth_level) # 如果有时间数据,把x轴绑定时间 if time_explicit: _xdata_ = np.array(self.time_list[index], dtype=np.double) else: _xdata_ = np.arange(len(self.line_list[index]), dtype=np.double) limx1 = _xdata_.min() if limx1 < _xdata_min_: _xdata_min_ = limx1 limx2 = _xdata_.max() if limx2 > _xdata_max_: _xdata_max_ = limx2 limy1 = _ydata_.min() if limy1 < _ydata_min_: _ydata_min_ = limy1 limy2 = _ydata_.max() if limy2 > _ydata_max_: _ydata_max_ = limy2 name_tmp = self.name_list[index] name_tmp = name_tmp.replace('=',' ') if (self.line_plot_handle2[index] is None): # 第一次绘制 if time_explicit: self.line_plot_handle2[index], = target_subplot.plot(_xdata_, _ydata_, lw=1,label=name_tmp) else: self.line_plot_handle2[index], = target_subplot.plot(_ydata_, lw=1, label=name_tmp) else: # 非第一次,则只需要更新数据即可 self.line_plot_handle2[index].set_data((_xdata_, _ydata_)) #标题 target_subplot.set_title(tar_true_name) target_subplot.set_xlabel('time') target_subplot.set_ylabel(tar_true_name) self.change_target_figure_lim(target_subplot, _xdata_min_, _xdata_max_, _ydata_min_, _ydata_max_) target_subplot.grid(visible=True) target_subplot.legend(loc='best') def plot_classic(self, image_num, rows, time_explicit, time_var_met, time_index, cols): for index in range(image_num): if time_explicit: if time_index == index: time_var_met[0] = True # skip time var continue # 有时间轴时,因为不绘制时间,所以少算一个subplot subplot_index = index if time_var_met[0] else index+1 subplot_name = '%d,%d,%d'%(rows,cols,subplot_index) if subplot_name in self.subplots: target_subplot = self.subplots[subplot_name] else: target_subplot = self.working_figure_handle.add_subplot(rows,cols,subplot_index) self.subplots[subplot_name] = target_subplot _ydata_ =
# This line: rest of plot_classic and change_target_figure_lim (pads y-range by 20% and
# x-range by 10% around the midpoint). NOTE(review): as extracted, the 3rd/97th
# percentile values limy1/limy2 are computed under the enable_percentile_clamp branch
# but the change_target_figure_lim call still passes _ydata_min_/_ydata_max_ — either
# the extractor dropped an else-branch that used limy1/limy2, or the clamp is dead code;
# TODO confirm against upstream. Also: np.percentile's interpolation= kwarg is
# deprecated since NumPy 1.22 (renamed method=).
np.array(self.line_list[index], dtype=np.double) # 如果有时间数据,把x轴绑定时间 if time_explicit: _xdata_ = np.array(self.time_list[index], dtype=np.double) else: _xdata_ = np.arange(len(self.line_list[index]), dtype=np.double) if (self.line_plot_handle[index] is None): # 第一次绘制 if time_explicit: self.line_plot_handle[index], = target_subplot.plot(_xdata_, _ydata_, lw=1,c=self.colorC) else: self.line_plot_handle[index], = target_subplot.plot(_ydata_, lw=1, c=self.colorC) else: # 后续绘制,更新数据 self.line_plot_handle[index].set_data((_xdata_, _ydata_)) if 'of=' in self.name_list[index]: #把等号替换成空格 name_tmp = self.name_list[index] name_tmp = name_tmp.replace('=',' ') target_subplot.set_title(name_tmp) target_subplot.set_xlabel('time') target_subplot.set_ylabel(name_tmp) target_subplot.grid(visible=True) else: target_subplot.set_title(self.name_list[index]) target_subplot.set_xlabel('time') target_subplot.set_ylabel(self.name_list[index]) target_subplot.grid(visible=True) _xdata_min_ = _xdata_.min() #target_subplot.dataLim.xmin _xdata_max_ = _xdata_.max() #target_subplot.dataLim.xmax _ydata_min_ = _ydata_.min() #min(self.line_list[index]) _ydata_max_ = _ydata_.max() #max(self.line_list[index]) if self.enable_percentile_clamp and len(_ydata_)>220 and self.vis_95percent: limy1 = np.percentile(_ydata_, 3, interpolation='midpoint') # 3% limy2 = np.percentile(_ydata_, 97, interpolation='midpoint') # 97% self.change_target_figure_lim(target_subplot, _xdata_min_, _xdata_max_, _ydata_min_, _ydata_max_) def change_target_figure_lim(self, target_subplot, limx1, limx2, limy1, limy2): if limy1!=limy2: meany = limy1/2 + limy2/2 limy1 = (limy1 - meany)*1.2+meany limy2 = (limy2 - meany)*1.2+meany target_subplot.set_ylim(limy1,limy2) if limx1 != limx2: meanx = limx1/2 + limx2/2 limx1 = (limx1 - meanx)*1.1+meanx limx2 = (limx2 - meanx)*1.1+meanx target_subplot.set_xlim(limx1,limx2) ================================================ FILE: PythonExample/hmp_minimal_modules/VISUALIZE/mcom_replay.py
# NOTE(review): extraction residue — newlines collapsed; kept byte-identical, standalone
# review comments added only.
# This line: mcom_replay.py module head (imports; wildcard import of VISUALIZE.mcom
# supplies Process/time/get_host_ip) and most of RecallProcessThreejs, which replays a
# gzip-compressed 3D-command backup through a local web server. The bare `except:` around
# the gzip read is intended to tolerate a truncated archive (EOFError) but will also
# silence any other error — worth narrowing to EOFError/OSError. The large commented
# block is a disabled OOM debug loop.
================================================ # import os # print(os.getcwd()) import os, sys, gzip import argparse from VISUALIZE.mcom import * # DEBUG_OOM = True class RecallProcessThreejs(Process): def __init__(self, file_path, port): super(RecallProcessThreejs, self).__init__() self.buffer_list = [] self.file_path = file_path self.port = port self.client_send_pointer = {} def init_threejs(self): import threading t = threading.Thread(target=self.run_flask, args=(self.port,)) t.daemon = True t.start() def __del__(self): pass def run(self): self.init_threejs() try: new_buff_list = [] with gzip.open(self.file_path, 'rt') as zip: try: for line in zip: new_buff_list.append(line) if len(new_buff_list) > 1e2: self.run_handler(new_buff_list) new_buff_list = [] except: print('File has bad ending! EOFError: Compressed file ended before the end-of-stream marker was reached!') print('存档的末尾是破碎的, 少量数据可能丢失了. 完整的部分已经读取完成.') self.run_handler(new_buff_list) new_buff_list = [] # if DEBUG_OOM: # for i in range(100): # print(i) # with gzip.open(self.file_path, 'rt') as zip: # try: # for line in zip: # if 'v2d_init' in line: continue # new_buff_list.append(line) # if len(new_buff_list) > 1e2: # self.run_handler(new_buff_list) # new_buff_list = [] # except: # print('File has bad ending! EOFError: Compressed file ended before the end-of-stream marker was reached!') # print('存档的末尾是破碎的, 少量数据可能丢失了.
# This line: rest of run (sleeps forever once the file is loaded), run_handler (FIFO
# bound at 1e9 buffered commands), and run_flask: the "/up" POST handler keeps a
# per-client-token send pointer so reconnecting browsers resume where they left off,
# and responses are zlib-compressed. NOTE(review): as in mcom.py, the two trailing
# routes both read @app.route("/") — the "/<path:path>" converter was likely stripped
# by the extractor; TODO confirm upstream.
完整的部分已经读取完成.') # self.run_handler(new_buff_list) # new_buff_list = [] while True: time.sleep(1000) except KeyboardInterrupt: self.__del__() self.__del__() def run_handler(self, new_buff_list): self.buffer_list.extend(new_buff_list) # too many, delete with fifo if len(self.buffer_list) > 1e9: # 当存储的指令超过十亿后,开始删除旧的 del self.buffer_list[:len(new_buff_list)] def run_flask(self, port): from flask import Flask, url_for, jsonify, request, send_from_directory, redirect from waitress import serve from mimetypes import add_type add_type('application/javascript', '.js') add_type('text/css', '.css') app = Flask(__name__) dirname = os.path.dirname(__file__) + '/threejsmod' import zlib @app.route("/up", methods=["POST"]) def upvote(): # dont send too much in one POST, might overload the network traffic token = request.data.decode('utf8') if token not in self.client_send_pointer: print('[mcom_replay.py] Establishing new connection, token:', token) current_pointer = 0 else: current_pointer = self.client_send_pointer[token] if len(self.buffer_list)-current_pointer>35000: tosend = self.buffer_list[current_pointer:current_pointer+30000] current_pointer = current_pointer+30000 else: tosend = self.buffer_list[current_pointer:] current_pointer = len(self.buffer_list) self.client_send_pointer[token] = current_pointer # use zlib to compress output command, worked out like magic buf = "".join(tosend) buf = bytes(buf, encoding='utf8') zlib_compress = zlib.compressobj() buf = zlib_compress.compress(buf) + zlib_compress.flush(zlib.Z_FINISH) return buf @app.route("/") def static_dirx(path): if path=='favicon.ico': return app.send_static_file('%s/files/favicon.ico'%dirname) return send_from_directory("%s/"%dirname, path) @app.route("/") def main_app(): with open('%s/examples/abc.html'%dirname, 'r', encoding = "utf-8") as f: buf = f.read() return buf print('\n--------------------------------') print('JS visualizer online: http://%s:%d'%(get_host_ip(), port)) print('JS visualizer online
# This line: end of run_flask, then the CLI entry point. NOTE(review), concrete defects
# visible here: (1) RecallProcessThreejs(path) omits the required second argument —
# __init__ is (self, file_path, port) — so this script raises TypeError at startup;
# (2) hasattr(args, 'path') is always True for a declared argparse option (it is None
# when absent), so the assert fallback is unreachable; (3) args.cfg is never declared,
# so load_via_json is always False. Then the FILE separator and the mcom_rt.py header:
# imports, mcom_fn_list_define (forwarded command names), and 别名对齐 (Chinese aliases).
(localhost): http://localhost:%d'%(port)) print('--------------------------------') # app.run(host='0.0.0.0', port=port) serve(app, threads=8, ipv4=True, ipv6=True, listen='*:%d'%port) if __name__ == '__main__': parser = argparse.ArgumentParser(description='HMP') parser.add_argument('-p', '--path', help='directory of chosen file') args, unknown = parser.parse_known_args() if hasattr(args, 'path'): path = args.path else: assert False, (r"parser.add_argument('-p', '--path', help='The node name is?')") load_via_json = (hasattr(args, 'cfg') and args.cfg is not None) rp = RecallProcessThreejs(path) rp.daemon = True rp.start() rp.join() ================================================ FILE: PythonExample/hmp_minimal_modules/VISUALIZE/mcom_rt.py ================================================ import os, copy, atexit, time, gzip, threading, zlib, asyncio import numpy as np from colorama import init from multiprocessing import Process from UTIL.colorful import * from UTIL.network import get_host_ip, find_free_port mcom_fn_list_define = [ "v2dx", "flash", "plot", "figure", "hold", "box", "pause", "clf", "xlim", "ylim", "xlabel", "ylabel", "drawnow", "v2d", "v2d_init", "v3d_init", "v2L", "title", "plot3", "grid", "v3dx", "v2d_show", "v2d_pop", "v2d_line_object", "v2d_clear", "v2d_add_terrain", "set_style", "set_env", "use_geometry", "geometry_rotate_scale_translate", "test_function_terrain", 'line3d', 'advanced_geometry_rotate_scale_translate', "advanced_geometry_material", "skip" ] 别名对齐 = [ ('初始化3D', 'v2d_init'), ('设置样式', 'set_style'), ('形状之旋转缩放和平移','geometry_rotate_scale_translate'), ('其他几何体之旋转缩放和平移','advanced_geometry_rotate_scale_translate'), ('其他几何体之材质','advanced_geometry_material'), ('发送几何体','v2dx'), ('结束关键帧','v2d_show'), ('发送线条','line3d'), ('发射光束','flash'), ('空指令','skip'), ] # The Design Principle: Under No Circumstance should this program Interrupt the main program!
# NOTE(review): extraction residue — newlines collapsed; kept byte-identical, standalone
# review comments added only. This mcom class is the real-time (mcom_rt.py) variant of
# the mcom.py client seen earlier in the file; the two copies should be kept in sync.
# This line: mcom.__init__ — spawns DrawProcess/DrawProcessThreejs and connects a TCP
# client for plot modes, otherwise logs commands to numbered buffer files (digit picks
# the float precision of serialized values: 4/8/16). NOTE(review), corruption: near the
# end, "end_file_flag = ('>500:" is not valid Python — the extractor dropped a span of
# __del__/flush logic here (likely the end-of-file marker write and the rapid_flush
# bookkeeping); recover it from upstream before editing this region.
class mcom(): def __init__(self, path=None, digit=8, rapid_flush=True, draw_mode=False, tag='default', **kargs): # digit 默认8,可选4,16,越小程序负担越轻 (All data is float, you do not need anything else) # rapid_flush 当数据流不大时,及时倾倒文件缓存内容 (set 'False' if you'd like your SSD to survive longer) self.draw_mode = draw_mode self.path = path self.digit = digit self.tag = tag if kargs is None: kargs = {} if draw_mode in ['Web', 'Native', 'Img', 'Threejs']: self.draw_process = True port = find_free_port() print红('[mcom.py]: draw process active!') self.draw_tcp_port = ('localhost', port) kargs.update({ 'draw_mode': draw_mode, 'draw_udp_port': self.draw_tcp_port, 'port': self.draw_tcp_port, 'backup_file': self.path + '/backup.dp.gz' }) DP = DrawProcess if draw_mode != 'Threejs' else DrawProcessThreejs self.draw_proc = DP(**kargs) self.draw_proc.start() from UTIL.network import QueueOnTcpClient self.draw_tcp_client = QueueOnTcpClient('localhost:%d'%port) else: print亮红('[mcom.py]: Draw process off! No plot will be done') self.draw_process = False if not self.draw_mode=='Threejs': _, _, self.current_buffer_index = find_where_to_log(self.path) self.starting_file = self.path + '/mcom_buffer_%d____starting_session.txt' % (self.current_buffer_index) self.file_lines_cnt = 0 self.file_max_lines = 5e8 # limit file lines to avoid a very large file self.rapid_flush = rapid_flush self.flow_cnt = 0 print蓝('[mcom.py]: log file at:' + self.starting_file) self.current_file_handle = open(self.starting_file, 'w+', encoding = "utf-8") atexit.register(lambda: self.__del__()) # on the end of the program def __del__(self): if hasattr(self,'_deleted_'): return # avoid exit twice else: self._deleted_ = True # avoid exit twice # print红('[mcom.py]: mcom exiting! tag: %s'%self.tag) if hasattr(self, 'current_file_handle') and self.current_file_handle is not None: end_file_flag = ('>500: self.current_file_handle.flush() self.flow_cnt = 0 # step 3: check whether the file is too large, if so, move on to next file.
# This line: file-rotation tail (also touched by the corruption above), the rec_* command
# emitters, and the UE4 position-stream formatters. NOTE(review): rec() leaves str_tmp
# unbound when self.digit is not 16/8/4 (a NameError) — presumably digit is validated by
# convention only; TODO confirm. other_cmd serializes positional/keyword args via
# _process_scalar/_process_ndarray and rewrites the trailing comma into ")\n".
if self.file_lines_cnt > self.file_max_lines: end_file_flag = ('>>rec_show\n') def rec_end(self): self.send('>>rec_end\n') def rec_save(self): self.send('>>rec_save\n') def rec_end_hold(self): self.send('>>rec_end_hold\n') def rec_clear(self, name): str_tmp = '>>rec_clear("%s")\n' % (name) self.send(str_tmp) def rec(self, value, name): value = float(value) if self.digit == 16: str_tmp = '>>rec(%.16e,"%s")\n' % (value, name) elif self.digit == 8: str_tmp = '>>rec(%.8e,"%s")\n' % (value, name) elif self.digit == 4: str_tmp = '>>rec(%.4e,"%s")\n' % (value, name) self.send(str_tmp) def 发送虚幻4数据流(self, x, y, z, pitch, yaw, roll): x = float(x) y = float(y) z = float(z) pitch = float(pitch) yaw = float(yaw) roll = float(roll) str_tmp = 'UE4>>(\"agent#1\",%.6e,%.6e,%.6e,%.6e,%.6e,%.6e)\n' % (x, y, z, pitch, yaw, roll) self.send(str_tmp) def 发送虚幻4数据流_多智能体(self, x_, y_, z_, pitch_, yaw_, roll_): str_list = ['UE4>>'] for x, y, z, pitch, yaw, roll in zip(x_, y_, z_, pitch_, yaw_, roll_): x = float(x) y = float(y) z = float(z) pitch = float(pitch) yaw = float(yaw) roll = float(roll) str_tmp = '(\"agent#1\",%.5e,%.5e,%.5e,%.5e,%.5e,%.5e)' % (x, y, z, pitch, yaw, roll) str_list.append(str_tmp) str_list.append(';') str_list.append('\n') cmd = ''.join(str_list) self.send(cmd) def other_cmd(self, func_name, *args, **kargs): # func_name = traceback.extract_stack()[-2][2] strlist = ['>>', func_name, '('] for _i_ in range(len(args)): if isinstance(args[_i_], np.ndarray): strlist = self._process_ndarray(args[_i_], strlist) else: strlist = self._process_scalar(args[_i_], strlist) if len(kargs)>0: for _key_ in kargs: if isinstance(kargs[_key_], np.ndarray): strlist = self._process_ndarray(kargs[_key_], strlist, _key_) else: strlist = self._process_scalar(kargs[_key_], strlist, _key_) if strlist[len(strlist) - 1] == "(": strlist.append(")\n") else: strlist[len(strlist) - 1] = ")\n" # 把逗号换成后括号 self.send(''.join(strlist)) def _process_scalar(self, arg, strlist,key=None): if key is not None:
# This line: rest of _process_scalar (int/float/str/list/np-scalar serialization; '$'
# substitutes for newline in strings, hence the assert), _process_ndarray (1-D only),
# the exec loops generating the forwarder methods and Chinese aliases, and a duplicate
# of mcom_def.py's find_where_to_log (scan for the next free mcom_buffer index) —
# consolidating the duplicate into one shared module would be a worthwhile follow-up.
strlist += '%s='%key if isinstance(arg, int): strlist.append("%d" % arg) strlist.append(",") elif isinstance(arg, float): if self.digit == 16: strlist.append("%.16e" % arg) elif self.digit == 8: strlist.append("%.8e" % arg) elif self.digit == 4: strlist.append("%.4e" % arg) strlist.append(",") elif isinstance(arg, str): assert '$' not in arg strlist.extend(["\'", arg.replace('\n', '$'), "\'", ","]) elif isinstance(arg, list): strlist.append(str(arg)) strlist.append(",") elif hasattr(arg, 'dtype') and np.issubdtype(arg.dtype, np.integer): strlist.append("%d" % arg) strlist.append(",") elif hasattr(arg, 'dtype') and np.issubdtype(arg.dtype, np.floating): if self.digit == 16: strlist.append("%.16e" % arg) elif self.digit == 8: strlist.append("%.8e" % arg) elif self.digit == 4: strlist.append("%.4e" % arg) strlist.append(",") else: print('输入的参数类型不能处理',arg.__class__) return strlist def _process_ndarray(self, args, strlist, key=None): if args.ndim == 1: if key is not None: strlist += '%s='%key d = len(args) sub_list = ["["] + ["%.3e,"%t if (i+1)!=d else "%.3e"%t for i, t in enumerate(args)] + ["]"] strlist += sub_list strlist.append(",") elif args.ndim == 2: print红('[mcom]: 输入数组的维度大于1维, 目前处理不了。') else: print红('[mcom]: 输入数组的维度大于2维, 目前处理不了。') return strlist for fn_name in mcom_fn_list_define: build_exec_cmd = 'def %s(self,*args,**kargs):\n self.other_cmd("%s", *args,**kargs)\n'%(fn_name, fn_name) exec(build_exec_cmd) for 别名, fn_name in 别名对齐: build_exec_cmd = '%s = %s\n'%(别名, fn_name) exec(build_exec_cmd) def find_where_to_log(path): if not os.path.exists(path): os.makedirs(path) def find_previous_start_end(): start = None; end = None; t = 0 while True: is_body = os.path.exists(path + '/mcom_buffer_%d.txt' % t) is_head = os.path.exists(path + '/mcom_buffer_%d____starting_session.txt' % t) if is_head: start = t if is_head or is_body: end = t; t += 1 else: new = t return (start, end, new) prev_start, prev_end, new = find_previous_start_end() return prev_start, prev_end, new
class DrawProcessThreejs(Process):
    # Child process that serves the Three.js 3D visualizer: it receives drawing
    # commands over a TCP queue, relays them (zlib-compressed) to a browser via
    # websocket, serves the static viewer app via flask, and optionally keeps a
    # gzip backup of every command received.
    # NOTE(review): this chunk was recovered from a whitespace-collapsed dump;
    # statement nesting was reconstructed and should be confirmed against the repo.
    def __init__(self, draw_udp_port, draw_mode, **kargs):
        # draw_udp_port: port for the incoming command stream (TCP despite the name).
        # draw_mode: rendering mode tag (unused here beyond storage).
        # kargs['backup_file']: optional gzip file for command replay; None disables backup.
        super(DrawProcessThreejs, self).__init__()
        from UTIL.network import QueueOnTcpServer
        self.draw_mode = draw_mode
        self.draw_udp_port = draw_udp_port
        self.tcp_connection = QueueOnTcpServer(self.draw_udp_port)
        self.buffer_list = []  # commands not yet delivered to the browser
        self.backup_file = kargs['backup_file']
        self.allow_backup = False if self.backup_file is None else True
        if self.allow_backup:
            if os.path.exists(self.backup_file):
                # a stale backup from a previous run is discarded (best effort)
                print亮红('[mcom.py]: warning, purge previous 3D visual data!')
                try: os.remove(self.backup_file)
                except: pass
        self.tflush_buffer = []  # commands pending gzip backup flush
        self.client_tokens = {}

    def flush_backup(self):
        # Daemon-thread loop: every 20 s append buffered commands to the gzip
        # backup file, creating its directory on first use.
        while True:
            time.sleep(20)
            if not os.path.exists(os.path.dirname(self.backup_file)):
                os.makedirs(os.path.dirname(self.backup_file))
            # print('Flush backup')
            with gzip.open(self.backup_file, 'at') as f:
                f.writelines(self.tflush_buffer)
            # reset after write; presumably commands arriving mid-write are only
            # appended by run_handler on the main thread — TODO confirm thread safety
            self.tflush_buffer = []
            # print('Flush backup done')

    def init_threejs(self):
        # Start the flask HTTP server (random free port), the websocket server
        # (fixed port 8765) and, if enabled, the backup-flush thread.
        http_port = find_free_port()
        ws_port = 8765 # http_port+1
        t = threading.Thread(target=self.run_flask, args=(http_port,))
        t.daemon = True
        t.start()
        t2 = threading.Thread(target=self.run_ws, args=(ws_port,))
        t2.daemon = True
        t2.start()
        time.sleep(2)
        if self.allow_backup:
            self.tflush = threading.Thread(target=self.flush_backup)
            self.tflush.daemon = True
            self.tflush.start()

    def run_ws(self, port):
        # Websocket relay: keeps at most one active browser connection
        # (self.connected_ws) and pushes queued commands to it, compressed.
        import asyncio
        import websockets
        self.connected_ws = None
        self.new_ws_connection_flag = False
        async def echo(websocket):
            # Connection handler: remember the newest client and hold the socket
            # open; nothing meaningful is expected from the browser.
            self.connected_ws = websocket
            self.new_ws_connection_flag = True
            while True:
                try:
                    # not supposed to receive anything, just to maintain connection
                    await self.connected_ws.recv()
                except websockets.ConnectionClosed:
                    print(f"Previous Websocket Terminated")
                    self.connected_ws = None
                    break
        async def run_ws():
            async with websockets.serve(echo, "localhost", port):
                await asyncio.Future() # run forever
        self.init_cmd_captured = False
        init_cmd_list = []  # scene-initialization commands, replayed to reconnecting tabs
        def init_cmd_capture_fn(tosend):
            # Record commands up to (and including) the first '>>v2d_show()' —
            # treated as the end of scene initialization.
            # NOTE(review): loop nesting reconstructed from collapsed source — confirm.
            for strx in tosend:
                if '>>v2d_show()\n'==strx:
                    self.init_cmd_captured = True
                init_cmd_list.append(strx)
                if self.init_cmd_captured: break
            return
        async def run_ws_main():
            # Pump loop: poll the command buffer and forward it to the browser.
            while True:
                await asyncio.sleep(0.01)
                if self.connected_ws is not None:
                    # data that needs to be sent in the normal case
                    # dont send too much in one POST, might overload the network traffic
                    if len(self.buffer_list)>35000:
                        tosend = self.buffer_list[:30000]
                        self.buffer_list = self.buffer_list[30000:]
                    else:
                        tosend = self.buffer_list
                        self.buffer_list = []
                    # handle disconnect/reconnect: a new token appears when a client reconnects
                    if self.new_ws_connection_flag:
                        self.new_ws_connection_flag = False
                        if (not self.init_cmd_captured):
                            # init commands not captured yet, or this is the very first client
                            buf = "".join(tosend)
                        else:
                            # replay the captured init commands so the new tab can rebuild the scene
                            print('[mcom.py] If there are other tabs, please close them now.')
                            tosend = [""]
                            buf = "".join(init_cmd_list + tosend)
                    else:
                        # normal, already-established connection
                        buf = "".join(tosend)
                    # try to capture and save the initialization commands
                    if not self.init_cmd_captured:
                        init_cmd_capture_fn(tosend)
                    # use zlib to compress output command, worked out like magic
                    buf = bytes(buf, encoding='utf8')
                    zlib_compress = zlib.compressobj()
                    buf = zlib_compress.compress(buf) + zlib_compress.flush(zlib.Z_FINISH)
                    print('await start')
                    if not self.connected_ws.open: continue
                    await self.connected_ws.send(buf)
                    print('await done')
        async def main():
            task1 = asyncio.create_task(run_ws())
            task2 = asyncio.create_task(run_ws_main())
            await task1
            await task2
        asyncio.run(main())

    def run_flask(self, port):
        # Serve the static Three.js viewer (./threejsmod) plus the main HTML page.
        import json
        from flask import Flask, request, send_from_directory
        from waitress import serve
        from mimetypes import add_type
        # some systems mis-register these types, breaking module-script loading
        add_type('application/javascript', '.js')
        add_type('text/css', '.css')
        app = Flask(__name__)
        dirname = os.path.dirname(__file__) + '/threejsmod'
        import zlib
        self.init_cmd_captured = False
        init_cmd_list = []
        # NOTE(review): the extraction stripped angle-bracket text; this route was
        # almost certainly "/<path:path>" in the original (it takes a `path` arg
        # and duplicates the "/" route below) — confirm against the repository.
        @app.route("/")
        def static_dirx(path):
            if path=='favicon.ico':
                return send_from_directory("%s/"%dirname, 'files/HMP.ico')
            return send_from_directory("%s/"%dirname, path)
        @app.route("/")
        def main_app():
            with open('%s/examples/abc_rt.html'%dirname, 'r', encoding = "utf-8") as f:
                buf = f.read()
            return buf
        print('\n--------------------------------')
        print('JS visualizer online: http://%s:%d'%(get_host_ip(), port))
        print('JS visualizer online (localhost): http://localhost:%d'%(port))
        print('--------------------------------')
        # app.run(host='0.0.0.0', port=port)
        serve(app, threads=8, ipv4=True, ipv6=True, listen='*:%d'%port)

    def run(self):
        # Process entry point: start the servers, then drain the TCP command
        # queue forever, batching everything currently available per iteration.
        self.init_threejs()
        try:
            from queue import Empty
            queue = self.tcp_connection.get_queue()
            self.tcp_connection.wait_connection() # after this, the queue begin to work
            while True:
                buff_list = []
                buff_list.extend(queue.get(timeout=600))
                for _ in range(queue.qsize()):
                    buff_list.extend(queue.get(timeout=600))
                self.run_handler(buff_list)
        except KeyboardInterrupt:
            self.__del__()
        self.__del__()

    def __del__(self):
        return

    def run_handler(self, new_buff_list):
        # Append new commands to both the outgoing buffer and the backup buffer.
        self.buffer_list.extend(new_buff_list)
        self.tflush_buffer.extend(new_buff_list)
        # too many, delete with fifo
        if len(self.buffer_list) > 1e9:
            # once stored commands exceed 1e9, start dropping the oldest
            del self.buffer_list[:len(new_buff_list)]

class DrawProcess(Process):
    # Child process for matplotlib-based visualization ('Web', 'Img' or
    # 'Native' mode): receives text commands over TCP and dispatches them to
    # the 'rec' (curve recorder) or 'v2d' (2D scene) plotting families.
    def __init__(self, draw_udp_port, draw_mode, **kargs):
        from UTIL.network import QueueOnTcpServer
        super(DrawProcess, self).__init__()
        self.draw_mode = draw_mode
        self.draw_udp_port = draw_udp_port
        self.tcp_connection = QueueOnTcpServer(self.draw_udp_port)
        self.image_path = kargs['image_path'] if 'image_path' in kargs else None
        return

    def init_matplot_lib(self):
        # Choose the matplotlib backend per mode and set up the GUI-refresh
        # callback; 'Web' mode additionally starts a small HTTP server that
        # serves the rendered image page.
        if self.draw_mode in ['Web', 'Img']:
            import matplotlib
            matplotlib.use('Agg') # set the backend before importing pyplot
            import matplotlib.pyplot as plt
            self.gui_reflesh = lambda: time.sleep(1) # plt.pause(0.1)
        elif self.draw_mode == 'Native':
            import matplotlib
            # matplotlib.use('Agg') # set the backend before importing pyplot
            matplotlib.use('Qt5Agg')
            import matplotlib.pyplot as plt
            self.gui_reflesh = lambda: plt.pause(0.2)
        elif self.draw_mode == 'Threejs':
            # Threejs is handled by DrawProcessThreejs, never by this class
            assert False
        else:
            assert False
        from config import GlobalConfig
        logdir = GlobalConfig.logdir
        if not os.path.exists(logdir):
            os.makedirs(logdir)
        if self.draw_mode == 'Web':
            self.avail_port = find_free_port()
            my_http = MyHttp('%s/html.html'%logdir, self.avail_port)
            my_http.daemon = True
            my_http.start()
        # map: command name -> plotting family that implements it
        self.libs_family = {
            'rec_init': 'rec', 'rec': 'rec', 'rec_show': 'rec',
            'v2d_init': 'v2d', 'v2dx':'v2d', 'v2d_show': 'v2d', 'v2d_pop':'v2d',
            'v2d_line_object':'v2d', 'v2d_clear':'v2d', 'v2d_add_terrain': 'v2d',
        }
        # lazy one-shot initializers per family (set to None after first use)
        self.libs_init_fns = {
            'rec': self.rec_init_fn,
            'v2d': self.v2d_init_fn,
        }

    def run(self):
        # Process entry point: drain the queue with a short timeout so the GUI
        # can be refreshed whenever no commands are pending.
        self.init_matplot_lib()
        try:
            # self.tcp_connection.set_handler(self.run_handler)
            from queue import Empty
            queue = self.tcp_connection.get_queue()
            # self.tcp_connection.set_handler(self.run_handler)
            self.tcp_connection.wait_connection() # after this, the queue begin to work
            while True:
                try:
                    buff_list = []
                    buff_list.extend(queue.get(timeout=0.1))
                    for _ in range(queue.qsize()):
                        buff_list.extend(queue.get(timeout=0.1))
                    self.run_handler(buff_list)
                except Empty:
                    self.gui_reflesh()
        except KeyboardInterrupt:
            self.__del__()
        self.__del__()

    def run_handler(self, buff_list):
        # Execute queued commands in order; redundant back-to-back
        # '>>rec_show' refreshes are collapsed to the last one.
        while True:
            if len(buff_list) == 0: break
            buff = buff_list.pop(0)
            if (buff=='>>rec_show\n') and ('>>rec_show\n' in buff_list): continue # skip
            self.process_cmd(buff)
            # # print('command handled:', buff)

    def __del__(self):
        self.tcp_connection.close()

    def process_cmd(self, cmd_str):
        # Parse a '>>func(args)' command string and evaluate it against the
        # owning family object (self.rec / self.v2d).
        if '>>' in cmd_str:
            cmd_str_ = cmd_str[2:].strip('\n')
            if ')' not in cmd_str_:
                cmd_str_ = cmd_str_+'()'
            prefix = self.get_cmd_lib(cmd_str_)
            if prefix is not None:
                # commands originate from the local mcom sender, hence eval is tolerated here
                eval('%s.%s'%(prefix, cmd_str_))

    def get_cmd_lib(self, cmd):
        # Resolve which family attribute handles `cmd`, lazily initializing the
        # family on first use. Returns e.g. 'self.rec', or None if unknown.
        cmd_key = None
        func_name = cmd.split('(')[0]
        if func_name not in self.libs_family:
            print蓝('绘图函数不能处理:', cmd)
            return None
        family_name = self.libs_family[func_name]
        if self.libs_init_fns[family_name] is not None:
            self.libs_init_fns[family_name]()
            self.libs_init_fns[family_name] = None
        return 'self.%s'%family_name

    def rec_init_fn(self):
        # One-shot initializer of the curve-recorder family.
        from VISUALIZE.mcom_rec import rec_family
        self.rec = rec_family('r', self.draw_mode, self.image_path)

    def v2d_init_fn(self):
        # One-shot initializer of the 2D-scene family.
        from VISUALIZE.mcom_v2d import v2d_family
        self.v2d = v2d_family(self.draw_mode)

class MyHttp(Process):
    # Minimal flask process that serves a single, repeatedly-rewritten HTML
    # file (used by DrawProcess 'Web' mode).
    def __init__(self, path_to_html, avail_port):
        super(MyHttp, self).__init__()
        self.path_to_html = path_to_html
        self.avail_port = avail_port

    def run(self):
        from flask import Flask
        app = Flask(__name__)
        @app.route("/")
        def hello():
            try:
                with open(self.path_to_html,'r') as f:
                    html = f.read()
            except:
                # the plot file may not exist yet on early requests
                html = "no plot yet please wait"
            return html
        app.run(port=self.avail_port)

================================================
FILE: PythonExample/hmp_minimal_modules/VISUALIZE/mcom_test.py
================================================
def validate_path():
    # Make the package root the working directory and put it on sys.path so
    # this script can be run from anywhere.
    import os, sys
    dir_name = os.path.dirname(__file__)
    root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..')
    os.chdir(root_dir_assume)
    sys.path.append(root_dir_assume)
validate_path() # validate path so you can run from base directory

from VISUALIZE.mcom import mcom
mcv = mcom(
    path='./TEMP',        # path to generate log
    draw_mode='Img',      # draw mode
    resume_mod=True,      # resume from previous session
    # figsize=(48,12),    # manual fig size
    resume_file='ZHECKPOINT/RVE-drone2-ppoma-run1/logger/mcom_buffer_0____starting_session.txt', # pick up from a specific session txt
    image_path='./temp2.jpg',   # target image directory
    smooth_level=40,      # smooth line level
    # rec_exclude=["r*", "n*",
    #              "*0.00*",
    #              "*0.01*",
    #              "*0.04*",
    #              "*0.06*",
    #              "*0.11*",
    #              "*0.18*",
    #              "*0.25*",
    #              ],
)
input('wait complete')

================================================
FILE: PythonExample/hmp_minimal_modules/VISUALIZE/mcom_v2d.py
================================================
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import time
plt.ion()
V2dPlotFig = 3  # fixed matplotlib figure number used by the 2D visualizer
# import matplotlib.rcsetup as rcsetup; print(rcsetup.all_backends)
'''
['GTK3Agg', 'GTK3Cairo', 'MacOSX', 'nbAgg', 'Qt4Agg', 'Qt4Cairo', 'Qt5Agg', 'Qt5Cairo',
'TkAgg', 'TkCairo', 'WebAgg', 'WX', 'WXAgg', 'WXCairo', 'agg', 'cairo', 'pdf', 'pgf',
'ps', 'svg', 'template']
'''
class v2d_family():
    # Interactive 2D scene renderer. Entities ("characters") are declared with
    # v2dx('shape|index|color|radius', x, y, ...); all entities sharing a color
    # are drawn through one Line2D handle (segments separated by NaN).
    def __init__(self, draw_mode) -> None:
        self.v_name_list = {}       # char_index -> {'color','pos','shape'}
        self.style_list = {}        # color -> {'style_name_list','plot_handle'}
        self.trival_line_list = {}  # 'line:A->B' -> Line2D handle
        self.trival_line_pair = []  # [indexA, indexB] pairs to refresh each draw
        self.v2d_fig_handle = None
        # self.v_name_list = { 'char_index':{ 'color': ?,'pos':(x,y),'shape':(xxxx,yyyy) } }
        # self.style_list = {'red': {'style_name_list':[], 'plot_handle':handle} }
        self.draw_mode = draw_mode
        assert draw_mode=='Native', ('only support native')
        pass

    def v2d_init(self):
        # No-op; present so the command dispatcher can call it uniformly.
        pass

    def v2dx(self, name, xpos, ypos, dir=0, **kargs):
        # Register/update one entity. `name` is 'shape|index|color|radius'
        # (e.g. 'cir|3|b|0.04'); shape is 'cir', 'rec' or 'tank'; `dir` is the
        # heading used by 'tank'. Extra kargs: 'vel_dir', 'attack_range'.
        str = ' '
        split_res = name.split('|')
        char_shape = 'cir'
        char_color = 'k'
        radius = 4
        if len(split_res) >= 1: char_shape = split_res[0]
        if len(split_res) >= 2: char_index = split_res[1]
        if len(split_res) >= 3: char_color = split_res[2]
        if len(split_res) >= 4: radius = float(split_res[3])
        if char_index in self.v_name_list.keys():
            # this index is already registered
            previous_color = self.v_name_list[char_index]['color']
            # the style (color) has changed
            if previous_color != char_color:
                # unregister the old style
                assert char_index in self.style_list[previous_color]['style_name_list']
                self.style_list[previous_color]['style_name_list'].remove(char_index)
                # register the new style
                self.add_to_style(char_index, char_color)
                self.v_name_list[char_index]['color'] = char_color
        else:
            # first appearance of this index
            self.v_name_list[char_index] = {'color':char_color}
            if self.v2d_fig_handle is None: self.init_fig()
            self.add_to_style(char_index, char_color)
        #
        # shape outline is generated around the origin; translation happens at draw time
        if char_shape == 'cir':
            xc, yc = self.circle_data(0,0,radius,0,360)
        elif char_shape == 'rec':
            xc, yc = self.rec_data(0,0,radius,0)
        elif char_shape == 'tank':
            xc, yc = self.tank_data(0,0,radius,dir, **kargs)
        self.v_name_list[char_index]['pos'] = (xpos, ypos)
        self.v_name_list[char_index]['shape'] = (xc, yc)

    def init_fig(self):
        # Lazily create the figure on first entity registration.
        self.v2d_fig_handle = plt.figure(V2dPlotFig, figsize=(8, 8), dpi=100)
        # self.v2d_fig_handle.canvas.set_window_title('V2dPlotFig')
        self.v2d_fig_handle.show()
        plt.show(block=False)

    def add_to_style(self, char_index, char_color):
        # Attach `char_index` to the per-color style record, creating the
        # shared Line2D handle on the color's first use.
        if char_color not in self.style_list.keys():
            self.style_list[char_color] = {'plot_handle':None, 'style_name_list':[]}
            self.style_list[char_color]['plot_handle'] = self.v2d_fig_handle.gca().plot(0,0, lw=1, c=char_color)[0]
        # the style is already registered
        self.style_list[char_color]['style_name_list'].append(char_index)

    def rec_data(self, x,y,r,dir):
        # Diamond (rotated square) outline of half-diagonal r, rotated by -dir.
        # NOTE(review): parameters x,y are unused here — translation is applied later.
        tmp = np.array([[r, 0, -r, 0, r], [0, r, 0, -r, 0]])
        dir = -dir
        tmp = np.array([[np.cos(dir), np.sin(dir)],[-np.sin(dir),np.cos(dir)]]).dot(tmp)
        xp = tmp[0,:]
        yp = tmp[1,:]
        return xp,yp

    def tank_data(self, x,y,r,dir,**kargs):
        # Tank glyph: hull/track outline (x_,y_), gun barrel (tx_,ty_) and an
        # optional attack-range fan. Gun points along `dir`; hull along
        # kargs['vel_dir'] when given, else along `dir`. NaNs break the polyline.
        x_ = np.array([-0.74, -0.74, -0.55, -0.55, -0.05, -0.05, -0.55, -0.55, -0.55, -0.05, -0.05,  0.07,  0.07,  0.07,  0.07,  0.57,  0.57,  0.07,  0.57,  0.57,  0.75,  0.75, np.nan,
                       -0.74, -0.74, -0.55, -0.55, -0.05, -0.05, -0.55, -0.55, -0.55, -0.05, -0.05,  0.07,  0.07,  0.07,  0.07,  0.57,  0.57,  0.07,  0.57,  0.57,  0.75,  0.75, np.nan])*r
        y_ = np.array([ 0.45, -0.53, -0.53, -0.62, -0.62, -0.42, -0.42, -0.53, -0.42, -0.42, -0.53, -0.53, -0.42, -0.53, -0.62, -0.62, -0.42, -0.42, -0.42, -0.53, -0.53,  0.45, np.nan,
                       -0.45,  0.53,  0.53,  0.62,  0.62,  0.42,  0.42,  0.53,  0.42,  0.42,  0.53,  0.53,  0.42,  0.53,  0.62,  0.62,  0.42,  0.42,  0.42,  0.53,  0.53, -0.45, np.nan])*r
        tx_ = np.array([-0.15, -0.15, 0.45, 0.45, -0.15, 0.45, 0.45, 1.45, 1.45, 0.45, np.nan])*r
        ty_ = np.array([-0.14, 0.14, 0.14, -0.14, -0.14, -0.14, -0.04, -0.04, 0.04, 0.04, np.nan])*r
        if 'vel_dir' in kargs:
            theta_gun = dir
            theta_ve = kargs['vel_dir']
        else:
            theta_gun = dir
            theta_ve = dir
        # rotate barrel and hull independently
        tx = tx_*np.cos(theta_gun) - ty_*np.sin(theta_gun)
        ty = tx_*np.sin(theta_gun) + ty_*np.cos(theta_gun)
        x = x_*np.cos(theta_ve) - y_*np.sin(theta_ve)
        y = x_*np.sin(theta_ve) + y_*np.cos(theta_ve)
        xp = np.concatenate((x,tx))
        yp = np.concatenate((y,ty))
        # fan
        if len(kargs)>0:
            # NOTE(review): `kargs is None` can never be true here; any kargs entry
            # (e.g. only 'vel_dir') triggers a lookup of 'attack_range' — confirm intent.
            fan_r = 1 if kargs is None else kargs['attack_range']
            xfan, yfan = self.fan_data(0,0,fan_r, dir, np.pi/4)
            xp = np.concatenate((xp,xfan))
            yp = np.concatenate((yp,yfan))
        return xp,yp

    def circle_data(self, x,y,r,dir,rad,step=15):
        # Arc of radius r centered at (x,y); `rad` is the arc span and `step`
        # the angular step — both apparently in degrees here (cf. the /180).
        dir = -dir
        rads = dir - rad/2
        rade = dir + rad/2
        ang = np.arange(start=rads, stop=rade+1e-5, step=step) # rads:45:rade
        xp=r*np.cos(ang*np.pi/180)+x
        yp=r*np.sin(ang*np.pi/180)+y
        return xp, yp

    @staticmethod
    def dotify_vec(p_arr):
        # Turn a polyline into a dotted line by inserting a NaN point after
        # every two points.
        lxp = len(p_arr)
        new_arr_len = lxp + lxp//2 if lxp%2 !=0 else lxp + lxp//2 - 1
        dot_arr = np.arange(new_arr_len)%3
        dot_index=dot_arr + 2*(np.arange(new_arr_len)//3)
        # dot_index[==2] = 0
        p_arr = p_arr[dot_index]; p_arr[dot_arr==2] = np.nan
        return p_arr

    @staticmethod
    def line(p1,p2, sep):
        # Sample the segment p1->p2 at interpolation factors 0.7..1.0 (i.e.
        # only the 30% of the segment nearest p1) and append separator `sep`.
        from UTIL.tensor_ops import repeat_at
        lam = np.arange(start=0.7, stop=1. + 1e-5, step=0.05)
        p1s = repeat_at(p1, insert_dim=0, n_times=len(lam))
        p2s = repeat_at(p2, insert_dim=0, n_times=len(lam))
        lam = repeat_at(lam, insert_dim=-1, n_times=2)
        p_arr_line = p1s*lam + p2s*(1-lam)
        p_arr_line = np.concatenate((p_arr_line, sep),0)
        return p_arr_line

    def fan_data(self, x,y,r,dir,rad):
        # Dotted fan (sector) of radius r, span `rad` radians, centered on
        # heading `dir`: dotted arc plus two dotted radial edges.
        #to do: dotted line
        rads = dir - rad/2
        rade = dir + rad/2
        ang = np.arange(start=rads, stop=rade+1e-5, step=np.pi/45) # rads:45:rade
        xp=r*np.cos(ang)+x
        yp=r*np.sin(ang)+y
        sep = np.array([[np.nan,np.nan]])
        p_arr = np.stack((xp,yp)).transpose()
        p_arr = self.dotify_vec(p_arr)
        orin = np.array([x,y])
        L1_arr = self.dotify_vec(self.line(p_arr[0], orin, sep))
        L2_arr = self.dotify_vec(self.line(p_arr[-1], orin, sep))
        p_arr = np.concatenate((p_arr, sep),0)
        arr = np.concatenate((L1_arr, p_arr, L2_arr),0)
        return arr[:,0], arr[:,1]

    @staticmethod
    def get_terrain(arr, theta):
        # Analytic terrain height for points arr[:, (x,y)], rotated by theta:
        # a paraboloid bowl plus two cosine ripples (sign-flipped on return).
        A = 0.05; B=0.2; X=arr[:,0]; Y=arr[:,1]
        X_ = X*np.cos(theta) + Y*np.sin(theta)
        Y_ = -X*np.sin(theta) + Y*np.cos(theta)
        Z = -1 +B*( (0.1*X_) ** 2 + (0.1*Y_) ** 2 )- A * np.cos(2 * np.pi * (0.3*X_)) - A * np.cos(2 * np.pi * (0.5*Y_))
        return -Z

    def v2d_add_terrain(self, theta):
        # Store the terrain rotation; the contour is rendered in v2d_draw.
        self.theta = theta
        return

    def v2d_draw(self):
        # Redraw the whole scene: terrain contour, then one set_data per color
        # style (all member shapes concatenated with NaN separators), then the
        # registered inter-entity lines.
        # self.v_name_list = { 'char_index':{ 'color': ?,'pos':(x,y),'shape':(xxxx,yyyy) } }
        # self.style_list = {'red': {'style_name_list':[], 'plot_handle':handle} }
        # from UTIL.tensor_ops import my_view
        # X = np.arange(-6, 6, 0.1)
        # Y = np.arange(-6, 6, 0.1)
        # X, Y = np.meshgrid(X, Y) # 100
        # X = my_view(X, [-1,1])
        # Y = my_view(Y, [-1,1])
        # arr = np.concatenate((X,Y), -1)
        # Z = self.get_terrain(arr, self.theta)
        # d = int(np.sqrt(X.shape[0]))
        # X = X.reshape(d,d)
        # Y = Y.reshape(d,d)
        # Z = Z.reshape(d,d)
        # plt.contourf(X, Y, Z)
        from UTIL.tensor_ops import my_view
        X = np.arange(-6, 6, 0.1)
        Y = np.arange(-6, 6, 0.1)
        X, Y = np.meshgrid(X, Y) # 100
        X = my_view(X, [-1,1])
        Y = my_view(Y, [-1,1])
        arr = np.concatenate((X,Y), -1)
        Z = self.get_terrain(arr, self.theta)
        d = int(np.sqrt(X.shape[0]))
        X = X.reshape(d,d)
        Y = Y.reshape(d,d)
        Z = Z.reshape(d,d)
        from matplotlib.colors import LinearSegmentedColormap
        cmap_name = 'my_list'
        colors = [(0.4,0.4,0.4),(0.7,0.7,0.7)]  # gray-scale terrain shading
        cmap = LinearSegmentedColormap.from_list(cmap_name, colors, N=10)
        plt.contourf(X, Y, Z, levels= 10,cmap=cmap)
        for style in self.style_list.keys():
            style_name_list = self.style_list[style]['style_name_list']
            line_handle = self.style_list[style]['plot_handle']
            x_data_concat = (np.nan,)
            y_data_concat = (np.nan,)
            for char_name in style_name_list:
                xpos, ypos = self.v_name_list[char_name]['pos']
                xc, yc = self.v_name_list[char_name]['shape']
                # translate the origin-centered outline to the entity position
                xc_ = xc + xpos
                yc_ = yc + ypos
                x_data_concat = np.concatenate((x_data_concat, (np.nan,), xc_))
                y_data_concat = np.concatenate((y_data_concat, (np.nan,), yc_))
                # [y_data_concat, np.nan, yc_]
            line_handle.set_data((x_data_concat,y_data_concat))
        axes_handle = self.v2d_fig_handle.gca()
        axes_handle.relim()
        axes_handle.axis('equal')
        axes_handle.autoscale_view(True,True,True)
        # self.v2d_fig_handle.gca().set_xlim(-2,2)
        # self.v2d_fig_handle.gca().set_ylim(-2,2)
        for AB in self.trival_line_pair:
            indexA, indexB = AB
            self.v2d_line_object(indexA, indexB)

    def v2d_line_object(self, indexA, indexB):
        # Draw (or update) a straight black line between two registered
        # entities; first call per pair creates the handle and remembers the
        # pair for automatic refresh in v2d_draw.
        indexA = str(int(indexA))
        indexB = str(int(indexB))
        line_name = 'line:%s->%s'%(indexA,indexB)
        x1,y1=self.v_name_list[indexA]['pos']
        x2,y2=self.v_name_list[indexB]['pos']
        if line_name not in self.trival_line_list:
            self.trival_line_pair.append([indexA,indexB])
            self.trival_line_list[line_name] = self.v2d_fig_handle.gca().plot([x1,x2],[y1,y2], lw=1, c='k')[0]
        else:
            self.trival_line_list[line_name].set_data(([x1,x2],[y1,y2]))

    def v2d_clear(self):
        # Forget every entity, style and line and wipe the figure.
        self.v_name_list = {}
        self.style_list = {}
        self.trival_line_list = {}
        self.trival_line_pair = []
        if self.v2d_fig_handle is not None:
            self.v2d_fig_handle.clf()

    def v2d_show(self):
        # Redraw and let the GUI event loop run briefly.
        self.v2d_draw()
        plt.draw()
        plt.pause(0.02)
        # print('v2d_show')

if __name__ == '__main__':
    # Demo: plot two trails of small circles (blue then green) from recorded
    # trajectory data.
    v2d = v2d_family('Native')
    # v2d.v2dx('cir|0|r|0.42', xpos=1, ypos=0)
    # v2d.v2dx('cir|1|r|0.45', xpos=1, ypos=1)
    # v2d.v2dx('rec|2|b|0.1',0,0)
    # v2d.v2dx('rec|0|b|0.1',1,0)
    # plt.pause(15)
    v2d.v2d_init()
    v2d.v2dx('cir|0|b|0.04',1.6974286259771310e+00,5.1136334271362971e+00)
    v2d.v2dx('cir|1|b|0.04',1.7323874630544438e+00,4.8441353012872579e+00)
    v2d.v2dx('cir|2|b|0.04',1.7466729589216059e+00,4.5011681684285119e+00)
    v2d.v2dx('cir|3|b|0.04',1.9834755845632670e+00,4.5303563729102958e+00)
    v2d.v2dx('cir|4|b|0.04',1.7282481584048974e+00,4.2215967719027470e+00)
    v2d.v2dx('cir|5|b|0.04',1.6961323762900244e+00,4.2162281616241906e+00)
    v2d.v2dx('cir|6|b|0.04',1.9538386636567124e+00,3.8049981311503873e+00)
    v2d.v2dx('cir|7|b|0.04',1.8802200761399963e+00,3.6045336461487691e+00)
    v2d.v2dx('cir|8|b|0.04',2.0157571619860644e+00,3.4048652151235701e+00)
    v2d.v2dx('cir|9|b|0.04',1.7335519021352239e+00,3.2864204601228306e+00)
    v2d.v2dx('cir|10|b|0.04',1.7570958563011358e+00,2.9889104070926833e+00)
    v2d.v2dx('cir|11|b|0.04',1.8973304349256952e+00,2.8428589222323897e+00)
    v2d.v2dx('cir|12|b|0.04',1.9387993015459224e+00,2.8276797742219721e+00)
    v2d.v2dx('cir|13|b|0.04',1.9015358683680221e+00,2.3111528346056227e+00)
    v2d.v2dx('cir|14|b|0.04',1.8004736376121440e+00,2.2936583436192697e+00)
    v2d.v2dx('cir|15|b|0.04',2.0107659893617642e+00,2.1508119100878571e+00)
    v2d.v2dx('cir|16|b|0.04',1.8444440354811558e+00,1.9332926354355557e+00)
    v2d.v2dx('cir|17|b|0.04',1.8371937746969322e+00,1.7330254976821911e+00)
    v2d.v2dx('cir|18|b|0.04',1.9129340340533809e+00,1.4154660738691236e+00)
    # continuation of the demo trajectory data: rest of the blue trail
    # (cir|19..49), one refresh, then the green trail (cir|100.. / cir|10xx).
    v2d.v2dx('cir|19|b|0.04',2.0030211676034568e+00,1.4298957885890118e+00)
    v2d.v2dx('cir|20|b|0.04',1.8856683487925392e+00,1.0541625663320517e+00)
    v2d.v2dx('cir|21|b|0.04',2.1527191655945512e+00,1.0831699476943015e+00)
    v2d.v2dx('cir|22|b|0.04',1.8649326792157024e+00,6.2217390250533311e-01)
    v2d.v2dx('cir|23|b|0.04',2.0898862160995813e+00,6.2584505914386446e-01)
    v2d.v2dx('cir|24|b|0.04',1.8549554422225740e+00,2.9299012314167772e-01)
    v2d.v2dx('cir|25|b|0.04',1.9339051071960001e+00,1.7110109106481508e-01)
    v2d.v2dx('cir|26|b|0.04',2.1857357920928222e+00,-2.2904476331429088e-01)
    v2d.v2dx('cir|27|b|0.04',2.2898372430290381e+00,-2.7931351208642319e-01)
    v2d.v2dx('cir|28|b|0.04',2.1052665060702345e+00,-5.2811184015975321e-01)
    v2d.v2dx('cir|29|b|0.04',2.0113388563475842e+00,-5.6067217211506271e-01)
    v2d.v2dx('cir|30|b|0.04',1.9572075843799210e+00,-1.0074934509032205e+00)
    v2d.v2dx('cir|31|b|0.04',2.1019847044222217e+00,-1.2161640572227850e+00)
    v2d.v2dx('cir|32|b|0.04',2.0810334514385351e+00,-1.3997226595437748e+00)
    v2d.v2dx('cir|33|b|0.04',2.1243209345552660e+00,-1.4827073696986688e+00)
    v2d.v2dx('cir|34|b|0.04',2.2812126964925952e+00,-1.6582712682744103e+00)
    v2d.v2dx('cir|35|b|0.04',2.2908731504091189e+00,-2.0073663392281826e+00)
    v2d.v2dx('cir|36|b|0.04',2.1953372997254261e+00,-2.1534518843321200e+00)
    v2d.v2dx('cir|37|b|0.04',2.2671821392880260e+00,-2.2919289063843635e+00)
    v2d.v2dx('cir|38|b|0.04',2.2425046928689309e+00,-2.5855765908809776e+00)
    v2d.v2dx('cir|39|b|0.04',2.1873276051357129e+00,-2.8948300838305192e+00)
    v2d.v2dx('cir|40|b|0.04',2.3101330238715105e+00,-2.7157436369318897e+00)
    v2d.v2dx('cir|41|b|0.04',2.2267736125783628e+00,-3.2590231662938782e+00)
    v2d.v2dx('cir|42|b|0.04',2.1349942847250238e+00,-3.2192762220613687e+00)
    v2d.v2dx('cir|43|b|0.04',2.3555422145111429e+00,-3.4157163022751194e+00)
    v2d.v2dx('cir|44|b|0.04',2.1962188506393736e+00,-3.7758337123121120e+00)
    v2d.v2dx('cir|45|b|0.04',2.3299247859251206e+00,-3.8747477804098480e+00)
    v2d.v2dx('cir|46|b|0.04',2.3401721136660556e+00,-4.0948149979730406e+00)
    v2d.v2dx('cir|47|b|0.04',2.2628131518739241e+00,-4.3771607601792715e+00)
    v2d.v2dx('cir|48|b|0.04',2.2953110060505013e+00,-4.4678125321108535e+00)
    v2d.v2dx('cir|49|b|0.04',2.1851814684351956e+00,-4.6915756064926688e+00)
    v2d.v2d_show()
    v2d.v2dx('cir|100|g|0.04',-2.3340955768552440e+00,4.9801948853894231e+00)
    v2d.v2dx('cir|101|g|0.04',-2.1605749241742314e+00,4.5709127191456567e+00)
    v2d.v2dx('cir|102|g|0.04',-2.0823477095029426e+00,4.4102702646117429e+00)
    v2d.v2dx('cir|103|g|0.04',-2.3605530527535703e+00,4.1945309409909948e+00)
    v2d.v2dx('cir|104|g|0.04',-2.0230254518634014e+00,4.0238393739258154e+00)
    v2d.v2dx('cir|105|g|0.04',-2.3192264590733878e+00,3.8879694623878889e+00)
    v2d.v2dx('cir|106|g|0.04',-2.1216222051303726e+00,3.7463705645806860e+00)
    v2d.v2dx('cir|107|g|0.04',-1.9886601275560660e+00,3.6043159982101840e+00)
    v2d.v2dx('cir|108|g|0.04',-2.1645619676070860e+00,3.1803008917826565e+00)
    v2d.v2dx('cir|109|g|0.04',-2.1347591528027725e+00,3.0688599530093263e+00)
    v2d.v2dx('cir|1010|g|0.04',-2.0320894589688505e+00,2.7911223806467045e+00)
    v2d.v2dx('cir|1011|g|0.04',-2.1154684593180639e+00,2.5239221640697265e+00)
    v2d.v2dx('cir|1012|g|0.04',-2.0346350044844881e+00,2.5034762252365295e+00)
    v2d.v2dx('cir|1013|g|0.04',-2.0529558583644394e+00,2.1800419006883898e+00)
    v2d.v2dx('cir|1014|g|0.04',-2.0592568875840254e+00,1.8981848761033615e+00)
    v2d.v2dx('cir|1015|g|0.04',-2.1020681290870282e+00,1.6573285174084387e+00)
    v2d.v2dx('cir|1016|g|0.04',-2.0784374145153635e+00,1.5212721940167344e+00)
    v2d.v2dx('cir|1017|g|0.04',-2.0408300876939038e+00,1.2753795759495554e+00)
    v2d.v2dx('cir|1018|g|0.04',-2.0044318724067858e+00,1.1335091565257751e+00)
    v2d.v2dx('cir|1019|g|0.04',-1.9962920458871403e+00,1.0817376787031874e+00)
    v2d.v2dx('cir|1020|g|0.04',-1.9654666619042238e+00,8.6786883386340830e-01)
    v2d.v2dx('cir|1021|g|0.04',-1.9629752482380298e+00,7.2412441693175056e-01)
    v2d.v2dx('cir|1022|g|0.04',-1.9827771806940886e+00,4.1249121462499094e-01)
    v2d.v2dx('cir|1023|g|0.04',-1.8148178315400298e+00,3.0620976611163397e-01)
    v2d.v2dx('cir|1024|g|0.04',-1.9913627283935029e+00,-6.7511623558157152e-04)
    v2d.v2dx('cir|1025|g|0.04',-1.9085102331050483e+00,-1.2091692519257527e-01)
    v2d.v2dx('cir|1026|g|0.04',-1.7567025512845775e+00,-4.0655182374325061e-01)
    v2d.v2dx('cir|1027|g|0.04',-1.7573126091984348e+00,-4.7886369068066464e-01)
    v2d.v2dx('cir|1028|g|0.04',-1.7946528244114961e+00,-7.4632155601468431e-01)
    v2d.v2dx('cir|1029|g|0.04',-2.0154230209100197e+00,-9.3702290308563940e-01)
    v2d.v2dx('cir|1030|g|0.04',-1.8071068705401963e+00,-1.2335533997391062e+00)
    v2d.v2dx('cir|1031|g|0.04',-1.7306796885227855e+00,-1.4972874453342571e+00)
    v2d.v2dx('cir|1032|g|0.04',-1.9800566158586790e+00,-1.5443765055388390e+00)
    v2d.v2dx('cir|1033|g|0.04',-1.7202354172897882e+00,-1.7671372514658545e+00)
    v2d.v2dx('cir|1034|g|0.04',-1.8391528999004707e+00,-2.0335429832622496e+00)
    v2d.v2dx('cir|1035|g|0.04',-1.8519982797134462e+00,-2.1369212711162078e+00)
    v2d.v2dx('cir|1036|g|0.04',-1.6969571202242850e+00,-2.3581547587302154e+00)
    v2d.v2dx('cir|1037|g|0.04',-1.6711310745287675e+00,-2.5002987709398945e+00)
    v2d.v2dx('cir|1038|g|0.04',-1.8271627532433052e+00,-2.7791709457111011e+00)
    v2d.v2dx('cir|1039|g|0.04',-1.7163698381455172e+00,-3.0549299320603716e+00)
    v2d.v2dx('cir|1040|g|0.04',-1.8300183826082943e+00,-3.0255021281117158e+00)
    v2d.v2dx('cir|1041|g|0.04',-1.7901509034098166e+00,-3.1658675039835322e+00)
    v2d.v2dx('cir|1042|g|0.04',-1.7876326339702986e+00,-3.5312100308804002e+00)
    v2d.v2dx('cir|1043|g|0.04',-1.6117960847909396e+00,-3.8080053094967141e+00)
    v2d.v2dx('cir|1044|g|0.04',-1.7178237689728504e+00,-4.0787409605324969e+00)
    v2d.v2dx('cir|1045|g|0.04',-1.6671618187791717e+00,-4.2900765366717764e+00)
    v2d.v2dx('cir|1046|g|0.04',-1.6222484609708783e+00,-4.3662799716158425e+00)
    v2d.v2dx('cir|1047|g|0.04',-1.7738583714976750e+00,-4.4039107431128137e+00)
v2d.v2dx('cir|1048|g|0.04',-1.7155332780077144e+00,-4.8044841550151869e+00) v2d.v2dx('cir|1049|g|0.04',-1.7551213302728401e+00,-5.0045607725649397e+00) v2d.v2d_show() time.sleep(5) input("enter omega: ") ================================================ FILE: PythonExample/hmp_minimal_modules/VISUALIZE/read_group_replay.ipynb ================================================ { "cells": [ { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [], "source": [ "\n", "import os\n", "import socket\n", "import time\n", "import traceback\n", "import numpy as np\n", "from colorama import init\n", "from multiprocessing import Process, Pipe\n", "init()\n", "def get_files_to_read(base_path):\n", " starting_file_index = -1\n", " ending_file_index = -1\n", " pointer = 0\n", " while True:\n", " es = os.path.exists(base_path+'mcom_buffer_%d____starting_session.txt'%pointer)\n", " ee = os.path.exists(base_path+'mcom_buffer_%d.txt'%pointer)\n", " if (not es) and (not ee): break\n", " assert not (ee and es), ('?')\n", " if es: starting_file_index = pointer; ending_file_index = pointer\n", " if ee: ending_file_index = pointer\n", " pointer += 1\n", " assert pointer < 1e3\n", " assert starting_file_index>=0 and ending_file_index>=0, ('查找日志失败:', base_path)\n", "\n", " file_path = []\n", " for i in range(starting_file_index, ending_file_index+1):\n", " if i==starting_file_index: file_path.append(base_path+'mcom_buffer_%d____starting_session.txt'%i)\n", " else: file_path.append(base_path+'mcom_buffer_%d.txt'%i)\n", " assert os.path.exists(file_path[0]), ('?')\n", " return file_path\n", "\n", "def read_experiment(base_path):\n", " files_to_read = get_files_to_read(base_path)\n", " cmd_lines = []\n", " for file in files_to_read:\n", " f = open(file, 'r')\n", " lines = f.readlines()\n", " cmd_lines.extend(lines)\n", " dictionary = {}\n", "\n", " def rec(value,name): \n", " if name not in dictionary:\n", " dictionary[name] = []\n", " dictionary[name].append(value)\n", " 
return\n", "\n", " for cmd_str in cmd_lines:\n", " if '>>' in cmd_str:\n", " cmd_str_ = cmd_str[2:].strip('\\n')\n", " if not cmd_str_.startswith('rec('): continue\n", " eval('%s'%cmd_str_)\n", " return dictionary\n", "\n", "def stack_cutlong(arr_list, min_len=None):\n", " if min_len is None:\n", " min_len = min([len(item) for item in arr_list])\n", " print([len(item) for item in arr_list],'\\tselect:', min_len)\n", " return np.stack([item[:min_len] for item in arr_list])\n", "\n", "\n", "def smooth(data, sm=1):\n", " if sm > 1:\n", " y = np.ones(sm)*1.0/sm\n", " d = np.convolve(y, data, 'valid')#\"same\")\n", " else:\n", " d = data\n", " return np.array(d)\n", "\n", "\n", "def tsplot(ax, data, label, resize_x, smooth_sm=None, **kw):\n", " if smooth_sm is not None:\n", " print('警告 smooth_sm=',smooth_sm)\n", " data = smooth(data, smooth_sm)\n", "\n", " print('警告 resize_x=',resize_x)\n", " x = np.arange(data.shape[1])\n", " x = resize_x*x\n", " est = np.mean(data, axis=0)\n", " sd = np.std(data, axis=0)\n", " cis = (est - sd, est + sd)\n", " ax.fill_between(x,cis[0],cis[1],alpha=0.4, **kw)\n", " ax.plot(x,est, linewidth=1.5, label=label, **kw)\n", " ax.margins(x=0)\n" ] }, { "cell_type": "code", "execution_count": 10, "metadata": {}, "outputs": [], "source": [ "\n", "\n", "party = [\n", " # {\n", " # \"Method\": \"AddBnAddAe\",\n", " # \"path\": [\n", " # \"ZHECKPOINT/pymarl-starcraft-original-addbn-addae-r1\",\n", " # \"ZHECKPOINT/pymarl-starcraft-original-addbn-addae-r2\",\n", " # \"ZHECKPOINT/pymarl-starcraft-original-addbn-addae-r3\",\n", " # ]\n", " # },\n", "\n", "# ZHECKPOINT/pymarl-starcraft-sim-AddBn-r1\n", " # {\n", " # \"Method\": \"AddBn\",\n", " # \"path\": [\n", " # \"ZHECKPOINT/pymarl-starcraft-sim-AddBn-r1\",\n", " # \"ZHECKPOINT/pymarl-starcraft-sim-AddBn-r2\",\n", " # \"ZHECKPOINT/pymarl-starcraft-sim-AddBn-r3\",\n", " # ]\n", " # },\n", "\n", " # {\n", " # \"Method\": \"Original\",\n", " # \"path\": [\n", " # 
\"ZHECKPOINT/pymarl-starcraft-sim-original-r1\",\n", " # \"ZHECKPOINT/pymarl-starcraft-sim-original-r2\",\n", " # \"ZHECKPOINT/pymarl-starcraft-sim-original-r3\",\n", " # ]\n", " # },\n", "\n", "\n", "\n", "\n", " {\n", " \"Method\": \"AddBn\",\n", " \"path\": [\n", " # \"ZHECKPOINT/pymarl-starcraft-5m-sim-AddBn-r1\",\n", " \"ZHECKPOINT/pymarl-starcraft-5m-sim-AddBn-r2\",\n", " \"ZHECKPOINT/pymarl-starcraft-5m-sim-AddBn-r3\",\n", " ]\n", " },\n", " {\n", " \"Method\": \"Original\",\n", " \"path\": [\n", " \"ZHECKPOINT/pymarl-starcraft-5m-sim-original-r1\",\n", " \"ZHECKPOINT/pymarl-starcraft-5m-sim-original-r2\",\n", " \"ZHECKPOINT/pymarl-starcraft-5m-sim-original-r3\",\n", " ]\n", " },\n", "\n", "]\n", "for ex in party:\n", " for i, path in enumerate(ex['path']):\n", " ex['path'][i] = '../' + ex['path'][i] + '/logger/'" ] }, { "cell_type": "code", "execution_count": 11, "metadata": {}, "outputs": [], "source": [ "# smooth_sm = 4\n", "# main_key = 'reward'\n", "# main_key_name_on_graph = 'Mean Episode Rewards'\n", "# drop_data = 50 #None # 5\n", "\n", "\n", "smooth_sm = 1\n", "main_key = 'test-win-rate'\n", "main_key_name_on_graph = 'Test Win Rate'\n", "drop_data = 1 #None # 5\n", "\n", "max_raw_x = {\n", "}\n", "# drop_data = 50 #None # 5\n", "# drop_data = 500 #None # 5" ] }, { "cell_type": "code", "execution_count": 12, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "readings of ../ZHECKPOINT/pymarl-starcraft-5m-sim-AddBn-r2/logger/\n", "readings of ../ZHECKPOINT/pymarl-starcraft-5m-sim-AddBn-r3/logger/\n", "readings of ../ZHECKPOINT/pymarl-starcraft-5m-sim-original-r1/logger/\n", "readings of ../ZHECKPOINT/pymarl-starcraft-5m-sim-original-r2/logger/\n", "readings of ../ZHECKPOINT/pymarl-starcraft-5m-sim-original-r3/logger/\n", "警告 平滑系数smooth_sm= 1\n", "警告 平滑系数smooth_sm= 1\n", "警告 平滑系数smooth_sm= 1\n", "警告 平滑系数smooth_sm= 1\n", "警告 平滑系数smooth_sm= 1\n" ] } ], "source": [ "samples = []\n", "\n", "for ex in party:\n", " pathes = 
ex['path']\n", " for path in pathes:\n", " ex['readings of %s'%path] = read_experiment(path)\n", " print('readings of %s'%path)\n", "\n", "def shift_x(x):\n", " return x* 8\n", "\n", "for ex in party:\n", " # ex_ydata_batch = []\n", " for path in ex['path']:\n", " ydata = ex['readings of %s'%path][main_key]\n", " ydata = np.array(ydata)\n", " if smooth_sm is not None:\n", " ydata = smooth(ydata, smooth_sm); print('警告 平滑系数smooth_sm=',smooth_sm)\n", " for x, y in enumerate(ydata):\n", " if (drop_data is not None) and (not x%drop_data==0): continue\n", " if (ex['Method'] in max_raw_x) and (x > max_raw_x[ex['Method']]): continue\n", " samples.append({\n", " 'Training Episodes':shift_x(x),\n", " main_key_name_on_graph: y,\n", " # 'color':party[exp_name]['color'],\n", " 'Method':ex['Method'],\n", " }) " ] }, { "cell_type": "code", "execution_count": 13, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ ":25: UserWarning: FixedFormatter should only be used together with FixedLocator\n", " ax.set_xticklabels(xlabels)\n" ] }, { "data": { "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAjIAAAFFCAYAAAD2Awe9AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8rg+JYAAAACXBIWXMAAAsTAAALEwEAmpwYAADcB0lEQVR4nOx9Z4AcxbX1qeqeuDkqJ4QQCCUUQAJJZITA8MDGAQQODz6MsR+2weY5vYeNMfCwCTYYbIwJJhkQGQQSWSAJFBCKoBxX2hwnT3fX96M6d09a7Uq7oo8tdqarurq6Z6br9L3n3ksYYwwePHjw4MGDBw/9EPRwT8CDBw8ePHjw4KG78IiMBw8ePHjw4KHfwiMyHjx48ODBg4d+C4/IePDgwYMHDx76LTwi48GDBw8ePHjot/CIjAcPHjx48OCh30I83BPor2hpiUBRDi5yvaIijLa2WA/NyMOhgPeZ9T94n1n/xJH0udXUlBzuKRzR8CwyhxGiKBzuKXgoEN5n1v/gfWb9E97n5iFfeETGgwcPHjx48NBv4REZDx48ePDgwUO/hUdkPHjw4MGDBw/9Fh6R8eDBgwcPHjz0W3hExoMHDx48ePDQb+ERGQ8ePHjw4MFDv4VHZDx48ODBgwcP/RYekfHgwYMHDx489Ft4RMaDBw8ePHjw0G/hERkPHjx46GXsru+CJCuHexoePByR8IiMBw8ePPQyGBjSkkdkPHjoDXhExoMHDx56Gezg6st68OAhCzwi48GDBw8ePHjot/CIjAcPHg47FIXhQEv0cE+j1+AZZDx46D14RMaDBw+HHWlJQTItH+5pePDgoR/CIzIePHjoE/B0JB48eOgOPCLjwYOHPoEjmscc0SfnwcPhhUdkPHjw0CtojyQhK3mGHBMclsU+mkgjkZIytnfGUkhLhbm8kikZ0UTasb09kiz4+B4KA1MUKNG2wz0ND4cYHpHx4MFDr6A9kkQ82bd1L03tcTS3JzK2t3Ym0BFNFTRmc2cCTe1x21bmqgFqbIujuSPz8T0UCCkJFus43LPwcIjhERkPHjz0CgrXvBx6k0yuOXZXt+O2n6cBOjRg3oX+0sEjMh48eOgVFLqcHEnrj/1UMp3bEXTKHjwcNnhExoMHD0hLClI9GP5s133EEpLlSdn+XoMkK2jrSvb6U3UiJUGSlZxEIld7zEULY2Yt8aQERX3vOpbHZHoB3kX9skE83BPIhHg8jgceeAALFy5EQ0MDKisrcfrpp+O6665DZWVlQWO99NJLePbZZ7F582Ykk0lUVlZi+vTpuPrqqzF27NheOgMPHvoP6ltjkBUFIweW9th45vWksT2G2vIwwkFRf19THkJR0GfZr6UjgVhSgt9HHW09ifrWGMJB30GteYwxNLbHMXKgyzyZcZyq0mC2Ubo/AQ8ePADooxaZeDyO+fPn4+9//zsA4Mwzz0RJSQmefvppXHLJJWhpacl7rP/5n//Bf//3f2P9+vWYNGkSzjzzTBQXF+O1117DJZdcguXLl/fWaXjw0G/AGIPSg2sqN0Iw3bJiN7Bk05Aw8CCm3oTCMkzCjm5raLTzZlmtS0eSO61vwLugX0b0SSJz//33Y+PGjTjnnHPw5ptv4p577sFrr72GK664AnV1dbj11lvzGmfdunV49tlnUVVVhYULF+Lhhx/Gn//8ZyxcuBA33HADUqkUbrnlll4+Gw8e+gl6ksjk2GrnEcS9W++BOV50e5hcREUniJm6eWuvBw8HhT5HZFKpFJ566in4fD7cdNNNEEXD+3XjjTeisrISCxcuRGNjY86xVq5cCQD4j//4DwwbNszSdtVVVyEUCmHbtm2Ix+2hkh48fBlR+IqaSsv6P8W8oDMrUXEbOZWWIck8z8w1d36AJ9/a3O15aJAVRR8zO5jLKyt4uDSzvNdCqJNpGcy2c1oyrgFjfC6MMfzj1Y34w+OrLf2Sqcx6JIUxpKU88+9o+ygsz/PuGUiyAqUnTXg9Cm4FY1LusHm
myGCyl8env6PPEZnVq1cjEolg2rRpqK6utrT5/X6cfvrpUBQFS5YsyTmW3+/P2ScYDCIYzObD9uDhyEd3xLWxhIT9LVH9X0ekgHwrjCeb29sY0Tftboiocyl4KjrqW+PY1xTJ2U93Y2U5lrmIZVpScKAlqm/b3xyFbCMOdc1RtHUmTfvHoAD4Yk+7dlS934HWKOJJCYwBzEalWjsTqGvOfQ5mNLXnd949hX1NETQ6cuX0HbBkBErb/pz9lM4GKK37DsGMPPQm+hyR2byZP5WNGzfOtf34448HAGzZsiXnWDNnzgSlFK+88gr27t1raXvooYcQj8dx0UUXgZDe9sh78ND3USiBYGB8Idb/MUurrbOOVFrGLf9ahRWfN7oSqIMhMorCekx3wuxWJWYnQDwdMbO0GxYZRWGu5h7zNXM9hyxtmSD34HnnA/38+iQYoFrDciLffh76NPpc1NKBAwcAAAMHDnRtHzBggKVfNhx99NH42c9+hj/+8Y8477zzMH36dJSWlmLr1q3YvXs3vvGNb+DXv/51z03eg4d+ioO5lbuJc/XF2qV/Z4xbbpatP4Cpx9S4zKP7s2HMbt/I1K8bY6t/FZZZxOzsbXJhuVqBXETW7NCSkiMX+V5E72L3d/Q5IhOLxQAAoVDItT0cDgMAotGoa7sdV155JSoqKnDTTTdh6dKl+vaRI0dixowZebmf3FBVVdyt/eyoqSnpkXE8HDociZ9Ze0KGLCs5z02WFRBCwAD4YymkGNGJTEVJEFVlQTAALdE0AIby8jCqykJojaVRVV2C4pAPaZX2UIGioiJsOWZpWQiCX0R1dQmKQjysmTEGRWEQBMOArEVZCdRKoToSXKdSU1Oi78cYQ2VVsaVvcySF8pIgBH8Sfh/V5yDLCgSB4r7nPsPm3W344SWTUFkaREVpEF1p7kqqqipGazSNyqoidKVkVFUVgwBoi0soKw4gkZLgj6cRCoiIxo08M9q5tsb4turqYsQkBQGfYLkGEqEQo0nHZ8EY18H4RAGywkAAUPWcEgrPWZPz87PtZ39vhqJwUqhdN+3aQD3XUEC0HE+z0LiN9eSbX+CDNfvw4C/Pyjo/fp6Gyy7b+TBFBqGCdc4JihTCEMuKIYlxBHNcjxQrgpIWc/bz0LfR54hMT0JRFPz2t7/FggULcM011+CrX/0qysvLsWHDBvzhD3/A9ddfj/r6elx55ZUFj93SEjlo02pNTQmamroOagwPhxZH6mfW3haFrDA0NWXO3cIYw+6GLoT8IpKquFcPYyYELC2hvrETiZQEWeYEoqsrAWVgKdrbYwhSIB70YcPWJj6ewtDWFkNT2DhmQ2MX0pKCIpEipuac6Ygk0RZJWnLcNLXHEU2kLdsSKQlt7TEoCtAUEtHWlURHNInhQyqwp67N0retPQYlLaErloZPpGjyC5BkBfuaIhg5sBSLPt7Nr0t7DEySIKfSaG+LAiDYkkyjLZJEJJKALDOs6eRaEYUBcjKNZFpGNCkh7hMQTxpCUu1c29rUhzWBoL09Bp8ooCRgLMitbTF0xdKW6wIAjW0xRBMSRg0qxe76LoQCAmor+INda2sUibSMplD2W/qu+k6Egz7UlvMHRfs49uPFkzJGDCxBWlJQ1xzRr2Fbewwxn4CgSZywt7ELokAxqKrIMda/VSF3Pr8duXUfQCgGHjM2Y38mS1Ba90GoGWndnopBbouBSl1QIhGI/uzHk9siYFI6Z7+DxZH48NOX0Oc0MprFJVMkkWaxKSpy/ljseP755/HMM89g/vz5uO666zB06FAUFxdjxowZ+Pvf/45wOIx7770Xra2tPXcCHjz0Q2h6l5z9GBe+ygrD44s2Y8WmBr6fuq8kKWAMWLW5EU+8tcXVnZLtAUDLP2OGJDvnllaPY4aiaKHOvIFHDfEoIUceG4CTL5P7yy2XzgMvb9BFvZoGRpIVPSrLondR921qj+PeBevQGUvlruWU5zb7ObtGNuX5+Umm/bJFSKUlRVc
AMbu7y+VYaYlBknvATSNLgOySMdkMloe2Je+peK6l/o4+R2QGDRoEAKivr3dtb2hosPTLhldffRUAMHfuXEfb4MGDMXHiRMTjcWzcuLG70/Xg4YhAvpoMZvq380AXFq/ca2oxXi1asRd7GtyjaLSehDiP61gwHaNn3kaIc6NTqWLaYDuQmYxoaO1MosvkHnILKWe2LR9vakA0IeHzXW2uM3cKffMXPOvaHPt29+4Hjcwh9G5zZj2SyPDQi289ItPf0eeIjFYyYNOmTa7tGuk45phjco6lkZ6SEneznra9s7Oz4Hl68NCf0BOLg2bsYIxh3bZmffuuA1145t1tRh4Z06E+3dKEp9+2RhgaU3HRZWQSuroRFNeIINvK6zKnDBuc+zvazJYrpv6f4e2Ve7Hyi0ZLP7ejGNutjElhbsc1LCFm6EJj5nYGuT9j63zyE0YD7iTRMTZjOLQBoBnZnvWvaxcGJRlF4v2HoHR5Fvn+jj5HZKZOnYri4mKsWrXKUYoglUrhvffeA6UUc+bMyTlWTQ2PiNiwYYOjTZZlnSwNHjy4B2buwUPh2FXfibQkO7btqu9Ec0ccexqsvvu0JGNXfaelr7bY7a7vQnskqW/XikAyxrBjf6fFhdDYFrPkSbE/52tzMP+LJSTwKBuGV5bu0vs+v2Q7tu7rQCIpW0YDgNeX78Zbq3ieDi3vCFP4PAjhRMB8Pkw1izgXRIb9zVF9DH7N3K0CjPH5RxJpG3kA9jVGUNcU4du1g6h99rfkCiBgeiZfPeEduAXmzU/2gDHwY7pEKmnYVd8JoWOvnoSNgbvaduzn11gL2WZq3x37Oy2J7jSdEsBdXA2t/HO0f4e0Y3VEko7tGuHcsb8TQsd+kFjmhTyTFUZz2ZnnZnZQ7arvRCSehpLogty0K+P4+RxVibXrY8hNu6DErQ+ecvNuKNE23rfDmiiVSSnX4yuRFqTWL4bSvBvSF+8XOD8PfQ19jsj4/X5cdtllSKfT+N3vfgdJMsRyd9xxB1pbW3HeeeehtrZW337nnXfi3HPPxRNPPGEZ64wzzgAA/OUvf8HOnTv17bIs46677kJdXR0GDx6MCRMm9PJZefDgDr4gOl0cjAGJlOxoUxT+z9JXa2PMRF6g6xUYtDwjxljxpIy4ObusbcHVcpkoivqX8fwvjAFf7G7LeU5u27Tt2trX1pXEuu3N+HDtAWu/DGMm0jISqnjW7RicGBn97dcJANKygrTM9RVdsRReXbZLJ3iaBsZxbHUcpnbSr7fCsK/RSjS168UnZB1n54EO7G+OYfHGCJgsWciarHB9jrlStkaW7ONruXIUhSGRkpFIya65ZxQFeiZi42SgXySZMRBFApGcZMeOt1fvw+e2z93x3WXWtrSkAFLq4K2BUtoU7s4AW8ZepihA2u0cGLiWxkUDJKeND9ZDv0efjFq69tprsXTpUixatAjz5s3D+PHjsW3bNmzZsgVDhgzBr371K0v/pqYm7Ny5E21t1h/a/PnzsXjxYqxZswYXXnghTjjhBJSXl2PTpk3Yu3cvAoEAbrnlFksZBA8eDiWcegnTeuB2/yfWxdbiKgB0K4Ob24HZ32U6rvqGaceyiVmefW97hv0YMvsWGLSV3VzK4PXle6y9XK6HpdHEDjISJmhEwHBTmT1MmpfkjY/3YP2OFhw1qBRHDy2zOXyyTME05hOLt9raTdfVNtjT72xDwEeRTCuYOUlx2c9kF9POw3Y9FMagKPwyKwogUKO/c65ON53d9ZYvyXhO/czPnzkya7+DpCx5wPwpuXzLzR84/0FkGUb7DLyEqP0dfc4iA/AcMk888QSuvvpqKIqCt956C+3t7fjWt76FBQsWoKqqKq9xAoEA/vWvf+HnP/85xowZg/Xr1+Odd95BOp3GhRdeiAULFuCUU07p5bPx4CEb7NYYZrlX22/VBNoC5bZy8T+b97RhtUmzAea0Nph3b+tK4p3Vey1PvbFEGotX7IEkyfr+mZCtbpB5ato
QcpbIlrqmCD7d0pR5nAxiV4C71lZ83gCNBehaFtse7V1JLFl7QKd2THXTvLViL6Jxl7o7Jg+U9vF8trUZK79osHR7/7P9UBRFJ2runE4lmi5rsfYZLV6xB+1dCQfR0C1B6iuFMTAwbK/rwFqTZskYj1nrXzl7dCvypzOawtur9jmshZl3M949/fZWROJpvLN6H7bt68h+bNcxzAQ8E6Gxb3e3vMh71vIXHo/p9+izpohwOIwbbrgBN9xwQ86+t99+O26//XbXNr/fj6uuugpXXXVVT0/Rg4eDAte3AA1tMVSVBlESdk/OuKu+EwMqwmhsi2NQVRgKA3Ye6MSIgSUOMwoB8H9PrQEATD+uFqGAqNb0YYglJNS3xkCg+WD4zo+88Tk27GjFqMFlqCkPo6zYj/fW1GHN1mZUl4cwZUy1bhnIte7Rtj1gIWdEoXa4XfWdULKY9J98m1s4Zk3kY9SWh9EVT6nuLW7x2VXfqROVHfs7cNTgMuyq79TP+6RxA1Qhq5UEdESSAAMWvL8d+1tiGFRhXO+125rxyecN6IwlMazWmuySaQs246RHad+PRSud9XmWrj+ACaMrTcckTiKqkyKG+laeSkIjMIwBexujeP+z/fh8dxv+3wXH6+Hgmo5IYXw+jBKdhDytXrOJo6vQ2pmArDDUlIegwMXaxwBJLaxpJXouUCdr1lLJkVY8vrgOq7c0YeTgEgxXr1W6cSeglABM1K9VRzSJMlve0LdW7UUyLWGJ6k78x5VHgZYNAPFbE6BmJljudjOWTqjaK6a/B1OgdBzgn5uUhNJ2ALRiMIjoB8BMNZYImCKB0D67HHrIgT5pkfHg4csAMzmw6FVsJhlNL6OYHuN59WGtgzGeffxYQtKtCImUrBMC8yKvWUgYY4gm0gAzcr0wTY+h75PPU7izTzSWxtL1ByDLDAdaYjmvjZYTJpGS+OINhvauJD7eUI/d9V34Yncrvw6q9sdNqmGOymGMi3L3NXYZuU5M+pMv9rQB4LqiSNyZw4QxYNu+duw60AU5lcg47w3bs0fAaPPZuKsDO/Z3Yun6A1i7vRn1rTGucVJFu5qmSbcZaSSSMZ2k2a0tjAGReNqYv82iA9NYOw904oPP9uew2HAkTN9NlozqAl9m0SAxy/XWjuMGs0WOV6nOrdFxNVFarFkMFjGTesGYKh5imlZHz09j+8Ioua2KHvouPArqwcNhguEkIZZt1gWAQcvOwWC5R+elbzAv5PaFXYOWUV4nLxaaY2pjcGx3HtDd2vLqsl3YVd+FYbXFehRT1mFU9sU0CwQDXv5oJ/abSNCvLp9inIhtgdOuEzGN98y72wBAz2pr3uWd1XVqP7icPceCD3YAAH5zfmbX9kfrD2Dy0dUA1GPbhtLm885n9QCsubL++7IT9OR7miyJKca3RD8vGBYcO+zfHWubQUUfX7QZ+5qimDagHCFnUl/LGJbsMAymIrtWQgIXK5jbPKyunNzfYdtJOBm7Ngaxb9VsYjoLdD+s51rq9/AsMh48HEZks3Ew+wsGbNjRgqb2uGohsd7TzQu3vpupE1M7KbKClV80QtLDoFURrmKMY+YHmkVmx4FO7G1yT3JngHBdjQ2aG0WW84sUkZlmaVH/KoqFxGhz21bXgX1NERup41eVmRavfEWtCmOgNnGLfc/P9+eyIGSzTWWGrDBs2ct1I5QQNLRFsX0/dynVN8ewfT9vYwzYuLMNrZ1JfLKpwTFOBqMFtu/vxK4DXQADWjoTanseM8xw7ewSGTNXyHa5P9lkDpHO9wplOivtfab2TOzbRvKSMaQ2vZf398RD34JnkfHg4XDB9KAYS6TR1kWRMqXT1566CWFo60oCYPjrizwn0g3fmqzfdOtbY6pLwm1lMRET9e/a7S1YvHIv4kkJflEwwo/V/TujKRBiXQwUBvzt5ewZsJNpGWEAH3/uFOtq7ol8XBkAt0Qoqg5EATK6oxa8vwML3t+BX10+Vd+mKAxEIFAYQBgvpmh
ddPkb4rLqcg2JdY7RRBp1zQaBe35NdjKnDVdocjjGuOWK70vwz9e/AACcMKYaD73Oc1795OuToDCm97ONoP9hjIEmO0GYD3sPpIBoK25dwCPEfvPtqcZ3TAHkdAr1e/agprYKBzoZBhdL4JRYdLorbd8x4xz5FywtKUa+IsavXaLT6oqTLGQ2T+LAACaloBETpW0/wJiRI4YxsGTMNJ7pL1NUSyGD0tkEWh12HDe57ElIO1dBqBoGYcDR+c3JQ5+BZ5Hx4OGwwXrTbYsk9aRzZksCY4AkW+sFMYVhX2MEB5pjSKr5XRhgMcms296iR5bobgUGPddMPClBUQytheZaWr+zBa2d3OrQGU2hM5bG9n3tOc+GRy8xbNqdua+dJGSCrgdR36RcrDzm62HOtyLJPL/K5r3tUBhDNJ7GVsv8+Y4HOjRyZbTsqu/ER+v2W47D87vkNW0AwMZdXCezfkcLNu20JvXMxm3MJE+zYGnno6G5nRdyzARDVQPQeAdIog2bdzWho9OU/NBscVNJTzraCSnaibSsQIm0gsXaLf3dXpu5gJbDhzGYPiuGzmgK8ZRLJJjLGNaNzgaWipvIucnqpp1LvMPl5MwDaP0VSDs/NTUQsCS/PiydWf/koe/Cs8h48HCYwHTfDzEt3NZIF4UBAuEkQ6DW7bc+wW/Gv/n2NOiBSCa8snQXOqMpnDx+oK6rUGDKrQLNHWUKCWbAY29s1sdYtqEem3a1oj2SQi5ox2/uyLwYKPl5lrhOhxHdiuSuBzE2mkOBl22oR0tnAlv3daC69Fi89NEubN3bnnPefEzg/TVWImPWquQDjXjsaYg46k3Fs4SqKxncbmZX3RNvbcXxIyvymocmwH3qgzqUmMpUmwmTJdGc0QMgon7tFXe2YbXOaJ+TqqjJ20PD+Pc/b+gDayTFRFrM2SKz5DNSYh1IrX7RupnoQrH85+Khz8CzyHjwcBjBmP2f8UTN27WoIaCuKWrdUcXexi7E4jzaiNgWhV31XdhW14HWzqTVyqEehIGBqHcBBcw1NDofEgMAW/e2Y2dz9r5d0XwiVICd+3loemcshf3NhlbEDE0zAlgtJpF4Wi/V0NKZyEpiAPsi7t6eqQBmT2JLhrwqsm1+9a3xnGMlUhJ2tqR07XVXwlzmwOi3rUl112ht6n+0LSlJwi7btc/mMuP78ZSDluR0GZCfqzETk7WnbmbGNsuxba8Vl8raVFD7yJAbd0AxWaQ89H14FhkPHnIgLcmIxCWUFvnQ2BYrOMghEk+DEiAc9AHg+UyCAav+wHgyNpMYfqMXVI3Ko29+YfQ3jf/Ym5tRUx7Elecf5zj2rvou7KrnafRv+u50xwOwWSDMFOCzbS2OMfLFq8t25+yzOI+IJYBHCH37nDF47oMdGV0pC97fob9WLBYTw+pkrgmlobHdSqZyraWKAjy+eHP2Tj2ANz7Z47rdbqlp6cxi8VKZyD9e3YS127rw07OduYnMxO3tLxIYUhnAyCofEtEuUElCYzKKmkoKkmjBwmVN2LTHIFjRWAJMC1VORVHX0A4IPpTGVHIgJdFSHwFNJSGHcycufWdTF+bOHAAl0gISLgfRCIU213RC1b4ATK2nxN0/2jkoOkEx8sg4JO9g0Xb9Peu0f8cZt0ABYIqC+Es3gwSKUfyd+3LO30PfgGeR8eAhByJxCe2RJBIpGZ15Wic0MMawZkuTxd3S0pVEZzRl6mPqrz0Omx4iE2kZ+5utBQ0b2qzi16b2hCoMzjIXqHoYU5i1+cZf1xxBJObytHqYsGN/Z1Y9iBkHTNdnT0Mkq3urUMSSh/eaZEmEbIGZ7mji6IjL9auzfZeiSQXJtIyuWBL76pogyQpSaRkkGUFLh7VvVzSl1zoiqTiQiEBhQFc8hbqWGGgyAhZrx4HGdkPbk2X+rRFVpxTrBMz6FG3XaBu0WklMFewa0Wg21i9Lth+TST+jmzwBJW61fLFYO0DVpZCptcqSvW+B89Bz8IiMBw85YMm
/kne4KMf7n+3HI298YS20yIybsVV0qT8/wvzqpQ934t/vWGv6/PudbS7zzOdcrPd+M/n5dEtzHuHVhw4fbXCGFmfCM+8Z16OzYDKW/cI9sXhLgeP1LPLVm8iSop+KoCYHSkvOnZ9YvMVSTFITzC7fFsfDS9uxXXc3MQgOZmwmCgpPSKgwrN6VwNPv7sTm/TE0dKbxz4868MFndcb4GUBMvyxkyk8DzdJiWF8sX2LT3Ky6HdXVxBiknSuhtB8AGHPoY5TWfSBEtQR5ifH6JTwi48FDPshbvWiFlt69tctwZ2gVkq2kRbtBG1lcmzviUPLMhEsJtxx0xTJbjNq6kmjpTCCtZWYFXyjMa1VHgRanIwHd+2QPHfIlz9wCwvtqRCaVjzlHdUdxAgO0x3j0mSQzdCbsC7slPR5aOlNISzI+r+f7tkUlRNR9GkwlGDJhc4P2fbO7hEyT007LVXBt/e1YD2bYqNJrFyL54SN8j85G2KF0qSkDPLFvv4SnkfHgIQfsEZ2aQDGelFCk6l4yQRPfKowhnpR4qDQYYknJQmHMxwKA1s4E/vHa5zhp3AAE/QLiySwhrAAoJbjvhQ1Z+9z7/HrHNoVZBcLKl/BG3k2OesiQpTSVBcs2HMBZ04YBAAQ1xC2Vzr2zon7Bt6ikQlEY2joTeG51FyIJ2/7MCHWPJBUseGsfAEP3RJU0GOPHJkyCLCtZidje1jRa2rpQ4XPvwxSrFYZHRSnGj9LuT9UsncmYzYVqbne5Bs271BcGcVOibaBF+UWIeTi8OCiLjKIoaG1txf79+3N39uChH8McshxLSIjG02hqzx09Yr7H1rfGsL85Csa4daS9K4l4UkJbhFtKtKillKToOob9zdGcJAbgRKZQdERSSKQkyxyj8SPfIhP09a+c9Il0fu6OLXvbIUkyoglJzxWUDweKJo1iiwAnMolkGpvrXb4LzEh41xxxmRdTdE0LZbIeKZcN8Ugss1mMmebGLKZMdZtmsbGTFZulxjJefmCJvuNm9ZAd3bLIfPjhh3jkkUfw6aefIplMghCCTZs26e0PPPAAtm3bhl//+teorKzMMpIHD30fzOx7V8N88rg/W8ew3E8ZHnptk6UYHwCcP3MExo2swPrtLXh7NX/KlRXm6OcGp5YhN3Ye6MR9L2zAhKOM3yhf1I5sJNLWD66PG2Twj1c35e4EoC2Swj0L1iFpssLIebiWFn8ex/GDjKVAK8Lp5skxf+dX7XaG0hNm9c5o7stskBUFYBm0OOa6GbYwai3rktLRAJaKQqgeCblpJ0i4HIh1gIRKQHxBy6Sl3Z9lnYvFetO/+O6XGgVbZO666y5cffXVWLZsGSRJgiiKji9qdXU1Fi5ciLfffrvHJurBw+FEJu98Lmj8gjEGSZaRkmT+xOtCTvY0dCGVVrC7oUvflsrzaZx0wyLjQUVfZzIFIGlzJeVZ2spC7tIyT8LnngMw+8UyV0jnRTNzX1xZIyn6j8U8oCFgZooCJhmVvbWng+SSfyL18b8BMKRXvYDUkoeRWvU8ksufVvuazm39mznnY8D7TfUXFERk3n77bTz44IOorq7GAw88gLVr12LChAmOfmeffbbe34OHfg+T+aW7egqFMdz+5Br8ZcF63PH0Z6591u9oxZ8XrMNWU2K0fMOIhYMgMqQb1hwP/QP51rZ64EPDjfLO1jQWfBbPmU3ZDQSGJYdX8FawpSF7FBlP/8IARQaTtb6Mvza5ltKblyC5+M9gUlLfllUILSXV9u5aGb3fRX9BQUTmySefBCEEd999N04//XQIguDar7y8HIMHD8YXX3zh2u7BQ3+C1Y3ErA05oIkt8zHxHwzsFZsLwZf9dq0XOTwCIXXze7er1f2aEJcv/Qn+nTgtwN1fvLSEUTSSpOLY1uym8WL4evhjDBFaeDZpxgs6yi2GcDjVuEfXxTA5DXkfF7OnVj7Pk/LZ/LvS7jUuh9ES5XUDX/YfRj9CQURm48aNGDBgAKZNm5azb1V
VFdra2ro9MQ8e+gq0mjX8taUl577vreG5NORCqg52Ax3R7ot0127vfjbfIwHaZ3QkQs6yiFMoOFqsL2g8xfRb0PDd4g9xcdEq/oYxPPspF8Fva0pDT/BoQxFJYlZwC64teVt1f2mdtPmqhEiPWJKh1dJQmnaqIdRWIW96/eKCzsUN1hIfHpPpLyiIyCQSCZSXl+fVN5lMwufLHprqwUN/gKwYlaO7S0d6m8h48OCGbBqZuaF1+K/SxThKzD/xoKVWlwvsrizGsrt/CLSK1GYhr7az+h81G7XFBZqv2p7p/8kL5rl6Ltf+g4KITG1tLfbs2ZMz10QsFsPOnTsxZMiQg5qcBw99AVodH1kxMvHq99csBEUyrSKfbCpgsfDgoYfw/tbMRToHCu0AgFKSO42Aho37U+5h2SrsVcIXrOrAmn1O15I5rR7/XWnuKHveGrVNMSwyHAoSb96F9JaPss438cafIK3L31KT/vwD/XX02V9C2v953vt6OHwoiMjMnDkT8Xgc//73v7P2e/jhh5FKpTBnzpyDmpwHD30B2lNmW5dTeLu/JerYpmFvY1fGNg+HDyf5t2Kw0HrYjj9UaME0//aM7aOr3bWHvQVWgAvlk905hLs2K8kXGUiP1ksXB+sPCEytmQTweuzMcC85Et8B8vZPcs5Zrs+/4KfStMPyPv3Fkrz39XD4UBCRueqqq+Dz+XDbbbfhkUceQVeX9Ubd2tqKu+++G/fffz+Kiorw7W9/u0cn68HD4YKRSJRZtklZbPd9PWPskYpcy/Jlxcvx32WvHZK5uOHnZa/jiuKlGdvzJTJ+pFFSgDUFAMpIDAK6X0+oriPz972YxPHB9tzJGwHzZ8S4VZMppsKOagkNSeYWmVgbF/cSr6KOB3cU9M0YMWIE/vjHP4JSijvuuAMzZszA+vU87fmsWbMwa9YsPPjgg/D7/bjrrrswYMCAXpm0Bw+HFMzyx9KQjax4spjMOPekYfjaqUf1zuD9UNpw/GC/8XpAfnlKf1z6Jm6peA5njC/Pqz8Bw80VC/Dt4g/V9z2LP1Q8l3dfLfqJQHPPGmYZvdq1IoElY0h+8DCkLUstFhlWQIZeD0c+Cqa4c+fOxYIFC3D22WdDFEWk02kwxtDc3AxBEHD66afjmWeewamnntob8/Xg4bDCoUWEEdVkj+aw6wWOJJxzbOCg9h8xoARHDSzqodlYQcAjcn5W+hqO9WWPSBpcdmjdOOVB9+2zRgdRSzvwP2UvQJRzFwkFgKEijwqdODScV3+NPEz07c2r/6EAIUwl/DbxmepiYmnuzlVa9lgtMoesJtiR+xs+ktCtEgVjxozBX/7yF6TTaezevRudnZ0Ih8MYOXIkgsEMv1QPHvorTBlHTbdb/XVdUxSSemMdObCUtzOG7PEd/RsHm0hYkOPwd7b3yFzsIADGVSYxDK24tGg5bmq/JGNfXwYeM3dcGIs25UcoCsG3JgBQ5TEXTQzipXV8oRaJhNODG1EtRNDasRPASEz178S61HCk1dv0sb46NMjlaFOsBJDK+SVNzIRD8S2d7N+Fz1ODkQS3PFHTUY8bFAQU9VqrJEbpbEQqkgSLcQ0ai3cAZi+a0n33WEE4cn/CRxQOyuno8/lw9NFHY8qUKTj22GM9EuPhiEM8KeGJxZvREU3pPnzdwq3e5NIZdDJHciFp8SDlCgIBr2JcIPg+ObLLEuD4IaG8xvNnIDJVeRg5iFaJ2YSTR/kz9OYoCxj9jx9oHFw0uUoIAcaI9fh28Ue4MLxa3/6DkndwY9lrjmtAcmbbVUDAIOS43lRtp93OhOu+7yChDd8rXoJLi5ab5mS4lsJ+AqgZeLVMvMn3/4HWd/+F9KrnXY/D5ENV3NRjMv0BBd2OjjvuOFx++eV59b3iiiswbty4bk3Kg4e+gjVbm7B1XweWrjsAwH5bc+bMMLfkSufe13DRpPxcFJQcXCZhACgJCnmrof9zZjFmH8VzUt1T+QS+X/xuzn1mjMrt+jp
arIdPyHQeuef2veIPcE/lEzhxKDB9BD9erstivuGauwoUGFlptIYIjw4qp1arUJgkcU/lEzgv9JkxDsk+13sqn8A9lY/jT5VPWbYbWXoJBgrtuLvyCXw1vAJ3Vz6BkWJT9hPJgLsrn3BsCxAuAK4QnBF+BKq4V6upZK2umhHp1S91a34ejkwURGTcdAC5+nvw0J/hE/lTs6QoTg2MTQRs/7bvb84cmt0XMXaAD7XFuW8JE/x7MSy28aCORQjNmHBssn8XTvJv1d8PrxAslo5x/uy6F0IMl5F2BB8kXFq0DGFiuGGO8R2AaLLImPUrohTDN8PLskb4TPLvAQCce7SCkJ9YjpcJZosFUSR8M7wc5TQKSoCqMDWNkf3eOTe0PseRMsM+8pUl7+OMIP88Tw3ysjJuGX9DJIlLi5YiSFIYLLThgtBq13n6YQ3RdlsGzBYZiy4m30R3hwp9aS4eMqJbGpl8EI/HIYq9NrwHDweFlo4EJFnBgMrMVgi5aRdEwl0Uksx0K0tTu+Gst9/m6poiSKRkCALBk29vRV+BQHNXQiYAvj6lCI99EkUkmbnzfxa9B7QDwGhH28BSivrOzPtOHF2FscPKueWCACceXYYd9RE0RwzC8L1inrvjk9Yx6hYGUaQQc4QNl9MoKBi6UGxagPjfGYGtmBHYhiSz3pN8Jt5m/ixHti5HVXAbtkkDsTqVPbqKEqofjxJg8mCKz/ZnugbGUcRoI04ObkWN0AmB/ofOgqjJwqLleHGrcaQf/yDCqTWcFLDmtVFcKNmswBbMCGxHu1KEUwOfI0TTWBx3Fg0OkjRSLHNW96+PTWLlds3SpGWWlKG01QHBkqzn6sGDG3olMH/Hjh3YunWrF37toc8imkgjlsye84IxBpGqibdkZsolw0OrmW4K13dANCFBVpQ+9yCX0YNiBiGoKRFx3oT8o4mOqbUSgwvGW4lhccB6i7nwlFEYUlMMQrjgc96EYkweml1XAvDq3t80aSzsqCyi+F3587ip/AW+BNuuv0g4sTg5sMWy3SxaNn9mYgFqZl7l2dh52rDMt1ViHlfgi30pjYOatheHnCTg9GBmC1hNcbZSMM4vopDDFcX3cp6/RgKLSNLkznL2oznGn9X0DH5a+oa+NwMDk9JIfvgo0p++0rdkKV6Yd79AVpPJY489hn/961+WbRs2bMCZZ56ZcZ9kMomWFl6ELls/Dx76PpiuBdGK5emRSOprxjSNAlEXQgZF6Vv3YgB8ocynEjLJ7h4ZUm5VxzoizAnDrFEikoqAlbtd0uMTdS4gIOqzdz6iX0qA40ItyNRVMLmp/lj2L7DWcwAAIR/BsQN9ENr5jj5iDMBAINrK92goDhC9jxnH+/bi6pL37Kdkem3kFqqgEfy2/AXc03kuhlWH8LXUi2iLzzUdUNaPYac+hktMxp8r/4WsYJktMtmsG9naLgqvxsfJMYgzv6P/7KCRKfeOyqcd+15atAwlJIE7Oi+wbB8pNuPi8ErLNpEokJt2g/o5eVZadiO1/s2M8zrk6GtPJB5ckZXIdHV1oa7O8EcTQpBMJi3b3FBUVIRzzz0XP/nJT3pkkh489Aa6dYtinNQsW1+PzlgKZ00dCp8awsPJDtAVS2HH9s4enasZxwwtw5Z9HQXtI+Rje1VNGdkEq5dOKwE+zTIEAc44xo+WpN+VyBDYQ7cZaJ5PvWE/gAyRxvY5y3vWAOCWqO/MLMEXi91LEgRMd0DzmiWoljj7d+S84k0uozAE5ShmB7aDkAn6PseIXCA+M7AVR1dUAw1AsM1IgU80IsMIxGQ7/E3OVPq59EDaOMf56pBiIrZL3AouQMYZwY1YkjjOdZ9LwrlT+x8jHkAJjeOj5LF8TJLf53Ssj593KYnhzNBG7JGq9LbTgs7aRfLuNUCN4b5TGrbldZxDA4/I9AdkJTLf+c53cPHFFwPgptOzzjoLEyZMwD333OPanxCCYDCIysrKHp+oBw89DZLogtKVBC2pydjHXDu
GqZV/9zdH8d4avsCUhf2YeXwthM46xIKDQQnBix/uRH1rz+cg0XDy0aHCiQzRQlwzg4JXGLb3Ouu4Irz9ORcuh2xeIOcDK1FzhBgNU4YH8OkeTmrEjj0IphXQ0ioIlEBhcs55cZsFcX06ri2h8AsEPskmrJZ4eC4/bYYpgd0uowIDSwgCApCU+fvJQ31cN+MmUCVAeYg4yZScwlmpdxEsakJd2UCIxTUoCzDLGIRySxaVDSEsVePzS2gcRZ8/bxPAFoB0HNeUvAMA+HErLwtzfugznBna6Ih60jA7uBn7pfKsw/5nCS+g+EnyaKQhoowWVg7hxMB2nBb8HPVyWY6eCtIbFhU09sGA1oyE0rQrr77sSM6hcAQhK5EpKSlBSUmJ/v7iiy/GqFGjvKrWHo4I0FQUSjyVhcgwncjsa4pAS0Aqm1w0aVnRq8GnJBmtncleJTEAMLxEwhWnVOPxpc1575OPRUagFAMrQtjfZbWkzDgqpBMZgVoHsq/3qtfIUkvnPyYV6UTGLxAoMsHQ6mIoYidS8OdcuAkYBlaEkHDRrUwe4sMpRTtRvvNta0PcIHoZLUyMwC8A3z8lhL8siQMM+H+n1qCpqQ1kj5b0kCDsA2JpICAAQdE5WNn6ZyAHeCLEITtfQfSo03H9KUOw0FSYmQicyAQ6dhnbVEtUMU1aXWYFGgHEjx91bDszxDU1s4JbHG0a8rWwaATLzZqSDReEuVVsoJCDdB9io4d41Ayk8iQynkWmf6Agse9tt92Gq6++urfm4sHDIYNii2pxh1rQDpnTWxCi1opRjRCrvmjs6alaUFbkB4VNNJoHchGZk0YFQQkBASxh0VVF9h2dIeinjTUEvtxy4hR8nn5sGCUBroohhIFQAkIAwiScFnkdx/rqcEPp65hYZl30jh/kUz1emRPh+bv2Zz+5rGAI+zgBO/sYqoZjM52Jfa94CYYJPKdKCYlBjBxwHUVIGq7Eoh3v6RYhgEcEVez70GUvd20Lbd6alyBXg92GVkXzq7o+QMjP/UkAfD38cd7zKRiHXIdiOp4vRxJXT+zbL+CVE/Xw5UWu7GXMKmbdvr9DFacaONASQ0tnAgBDJJ7Grvr8FhE3/GpeBU6fPDBrn+9feDwAxTUHS215CBNHVzl3gjNqaaTYpC94c8eFcfrYsHo5jMIKxw7w4QezikHSMZwZ3IBa2uFYNBkYLj2pDAHRmkfFOj+G2UeH8KNTi6zCVMaAaCtC8Qb8oOQdDBdbcEX1Wr35pnNCuHhiEL7mrWByOsuCl9s1pQRKHFsn+XcDigKfQPCL03w4sfgAwBgnW6ZjXRhYAQCY6XNqWDLB11mX29KUYZFkbftw+fGFuXE0TPDtwZlZIpy6g5FiU1bLzsFCadqRu1MvIXj2j7J38MS+/QLdSvSyf/9+vP766/jiiy/Q3t6OdDrt2o8Qgscee6xbE4vH43jggQewcOFCNDQ0oLKyEqeffjquu+66bmlwNm7ciIcffhgrVqxAW1sbysrKMGbMGHz961/H+eef3605euibkGQFbV1J1JTnSlOf+yZldiO9uGQnRg+rBDOtUFv3dWDrvg789sIaPPvediRS3c/pIagWkWwQo9ziQ2WnkHbS0VVoaHNfAM0GnBNH+DG/i4e//rj12xheqYbvqnoS7ebN9SUyyjc8gwvDbbgw/Ck68F+WcZlqvDAITCZLF1HT5HOiwCKtrv2sEUAKgi2bEd7xNmRELH0HlQAHNM6YK0U/AZg/DCStJHOw2A7Svga0diYqNv0bQjoKjJngKAEQ9vPnPZqnKwYAiJxCrttrJiIDAMLulRnbsuGqkvexDz2b9uLa0rdzd+pPMH9dcj7MeESmP6BgIvP444/j//7v/yBJkv7UZc6hYN6WKXNnLsTjccyfPx8bN27EsGHDcOaZZ2Lbtm14+umnsWTJEjz33HOoqnJ/8nTDU089hVtuuQWEEEyaNAnTp09HU1MTNmzYgNLSUo/IHGGIJyVEE2nUIDO
RYbr3IPuNyl5HSVYYdtc7TfKMcYvMwYDmYR+lUgKggJvn4dgRFRmJjPZTvOpEEUOrQ4DJU1BdIoAxhqqSIJicsv6emQQx0WYayelaYvEu/F/pY3gqMhMgU2APWq4pC6GhPQHCGMJ+AUVFfjAmgSW6IH/wYMZzJUwGTUUAANI2aw6ZURUEV9HnEW4GSEUemr0MC5JfiqC6LAglrYqFFQmVNAq50UgQF1Zz4eQbXQUA4bqVmBwYlL3P9nfyHq8QDEVDr4x7xMBvlK/I+ejguZb6BQoiMsuWLcMf/vAHVFdX4yc/+Qkee+wxbNu2DY8++ig6Ojqwdu1avPjii4jH4/jZz36GY445pluTuv/++7Fx40acc845uPvuu/UMwbfccgsef/xx3HrrrbjzzjvznvPNN9+MESNG4IEHHsBRRxlhfslkEjt37uzWHD30beR8kGKAlo4r2yCKbaD3P92HNZudOpj1dS45U7JgaE0R9jU5SxgMry3OviNRJajULdGZ9aY8spJiV6tWCJAjUxAGgVoIUmFgihE94yhI6PJWifC8UScGdoBhiioxMToKZkuPIvNkc7IMucspVva17bLMKdDm7nb4WuxpiEIKUICEi3XKmCCzVFp2a/eLVA9Ekht2gCSsRDUYa8CNpa+iWXG6p7LhOJ+7nsbD4YMwcgpomclildMi4xGZ/oCCNDL/+te/QAjB3XffjUsuuUSPaJoxYwbmzp2LG2+8EYsWLcK4cePw5z//GYMGZX8icUMqlcJTTz0Fn8+Hm266yVLm4MYbb0RlZSUWLlyIxsb8RJW///3vQSnFfffdZyExABAIBHDssccWPEcPRxByEB77wt/S4W7xaOzMniXY77P+1CaP4ZFSR1dbc+QPrgrhZxcMzzKSqkVxm7fppvyVCWF8fVLA0jSAtoMqKcfOvAgk+E2bMTD15k1yJMcD1JB0tb+iE0NmfdJlzOSuUf8pcl5sU3Ol2SEqhpg22L4r6yhEnYN7mwJzyFB6zStgiYij3xCxDaPE3hVye+h9iGNm2awwnmvpSEBBRGbdunWorq7G9OnTM/YpLS3FnXfeiVgshvvuu6/gCa1evRqRSATTpk1DdXW1pc3v9+P000+HoihYsmRJzrFWrVqFHTt24MQTT8SYMWNy9vdw5CD3Emn0kGQFHRHjqZ4xBdG2FqQlWY9a0pDptpeUsh9RsEUZVZfxaImx1dafoCDFs4bFEi1hnctEArFGaGcuUmIJlRYg41flr2BE/buuWWSJyWqiuZZcXW9uhTMVrgsy1+cJqs8fEwcJABSDzDBOYJiSW0uU1ZJSAKhbzQIVvPqydZu8c1Xmvh4OPUjPxaQQQQAIAa111glz7R8q7bFje+g9FORa6uzstFgw/H6eHSsSiaC42DCJDxo0CGPGjMHy5Zlro2TC5s08MmDcuHGu7ccffzyef/55bNmSW0X/ySc8e+UJJ5yAeDyO1157DZs2bYLP58OkSZNwzjnnwOfLVqfEw5EKvUQSY4glJDS2x1FWrFowpBRaGw5AhAKZWcMzU2n3BTiX5YISJ5G58Zvj4W/egte/MLYLyQ4IWUoJVJQEEYnGdeNLaZCgM8H7UyWJoG75MeKramgnfkpfAgCUJA4g4bOWGTDAIDdux4TPFyKAb8Ht9iB07NVfzy/6CO9jjh6JpDACH+FiXr8I3HzxQMjRVmhWmJrSkBpWzHRCkw2hbT0jMhXeuzdjW0VsN1KfvpzXOCW0MPehh54BrT2q57L9EgqAwD/jUiAVzUmSfGNn98xxPfQqCiIylZWViMfjlvcAsHfvXhx3nDUVdiqVQltbGwrFgQPcrzxwoHsYqlaIUuuXDdu3c8Geoii46KKLsGvXLkv70UcfjQcffNBL8HckIq+HZ0MjY+/OFACUOSwyB1rck93lcrUfM6wca7YamhBKgKCPQmHA6Boftjel9Ygfs/FmRKWI3a2G2yogUsSIUZsnYOIkBEzXyTBDzYyJ/j2WPj6TeDeo3wE
4AUlv+wQEDNVCJ0ZUFoMwq4A5sH+N/vrEwA7skUeBdcT0EcAIaKwZVErAP3wC4mBgiSj8DZsQHHsCmJRGeudqCLWjoTRn16eJHfuytucDKuUOY1aaPJ1cX4bv+LOR7Ckio1opCRUA0Z/7h9uD1iAPvYeCiMyQIUN0cgBw68jChQvxyiuvWIjMpk2bsGvXrm5Vv47F+E0xFHKPOAmHefKtaNQplLSjo4Mn13rooYdQXV2NBx98EFOnTkVdXR1uu+02LF++HP/1X/+F559/vuAIq6qqHKLMPFFTU5iA0ENu+CNJpAnNem1bomkQMYTyMoZQeTESivFZKCkR7fUB+KmMUCp3ZWYAkFj278/XzjoGMyYMxgMvrAMAVFYWI0BkJDv8uOasckTiEkpCBCCCJXfND86uwS+e4aT9v+dPQnlZDBJTUJzk1gGeGI/vUFocQDDI5xEMiCguEgHEIDHrzVhcYRQh/M0F1fAH/GCKhNLSANoEAgnANScHEBpYAiStvzORpSzvLxHegaSuMTIoSor8KP34RT6H40+EkPJBWfcaipr3onjcOChSDK0b3wb2fga5M//MxB6+vKioqUTspAvQ+cmrxkYq6C7NQlBeXgRCBBCfHyxNQQQfslHdsrIQwt49us+jICJz8skn47PPPsOWLVtwzDHH4IILLsB9992HRx99FI2NjZgyZQqamprw9NNPgzGGc889t7fmnRc0X78kSbj33nsxceJEAMDYsWPx97//HWeffTY2btyIpUuXYtasWQWN3dIScTytF4qamhI0NXU/gZoHd3TFUmhvi6FRSIP4OSFOpmRQCvh46la0t8cgRBNokyXE5Qja26M4UC9CURh8SKOrKwEfkdHVkd8T2epd2Z/8I10JBExZ6TqbmyH6/EA0ARFJgDF0dSkApWCqvWVUlYBEzKTdibSjHRK6uuJgqotraBlFY5eiHiOGRJyfXyKZRjTK+8hZpHBSMolUSgEUCR2CBFkNNy/d+ALaS64FkkmUm/orsoLMjimCaMy4Dp2dUSQjcYhxLp7t7IgCSd4ueSTmS4Ek/AgglbtjFrR3xIGSEfDP/i5SHz56UGN1dCQAQkFEGSydAITsKRM6OmKI9sA92ntg7V0UZDc799xzMWPGDN1FU1tbi9///vcQRRGvv/46brnlFvz9739HR0cHJk+ejOuuu67gCWkWF7MLywzNYlNUVJT3WEcffbROYjQEAgF85StfAQCsXNm95FMe+i6IlIDSYeTT2NsYQaMpx4omz4gm0vr75o4E9jWrFggtEifpjGApFNdePB4EgF80fm6+OE8IR8BQVepHoH4dSvYtA1Els788pxiXTi1GwBTtJKS69Iij0iDBD+cUY96xflx/egg/P6eMe5JMqfE1I6OUkXqAi3AJT87G2vaDqaHUAFBR7EchtWYm+fegvGG1/r44KKIkbC0vLe1Ykfd4/QFysAx04NjDPY0+i070gOWaEOPfwUJ1J9GSamNsIYtO0tN39wsUZJEZM2YMHnnkEcu2r3zlK5g8eTIWLlyIffv2IRgMYtq0aTjrrLNA88nwZYMWsl1fX+/a3tDQYOmXDYMHDwaAjBqYoUOHAgBaW1td2z30X/AAHPNdiEG2vXekSGFGDhVNYiL3QBqJiuIAFJUwGAfj4ciEEPhEiqLtXNgaGD0HiTRQU0TRmQSKgrabLNP+w1BTTMFkBRWpFjAahiSUWMYnoLwWVFa3F58Xk2WkVz9raRHadoEotoUoh0C3osGI+BEIEPQJ0GxKrKMeSouzCnV/hlQ0AIGyAVDq8y9fcKiQGDgRwfp1xgbiXkG8N5ESiwDpYO+vKomxWMCN7zSpGAxaNgjyrtXOXe0jhUrBYu2AP6gTIxIqMWWa9tAf0a0SBXYMHTq0x4pJjh3Ln242bdrk2r5xI68jkk+yPU2309npXhytvb0dQGY9jocjF5rE135fl/UU/bzhYN2HZx0X5gUSZfdM145N2oSYAkAAGPDDU8sQIkk
1/NcoAaDlwyjdsAAA0HritUZ5AcbH/r+vhNG6KwDUuc+PgkFhQOiAM+RYWv0SglW231nBCcKME0xveKvAffs+mBhA7pi1w4NUzbEWIiOMmg75EFvEqosIkKP4dTbQquFGfgDTd088+iRIW5YCAIgYhO+40/IiMiDU9KPrm5+bh8LRa5LsxsZG3HrrrQXvN3XqVBQXF2PVqlVoaWmxtKVSKbz33nuglGLOnDk5x5ozZw4EQcDmzZt10mLGihX8R50p1NvD4YMkK0hLCuRMqWjzAs+JwuQ0X/7ltG6lYQB+/+IevLSmE5Ks6NxFt8goABjDq58dxF0YQElQrSidTkBLvMa3MQS3v4fy1Q+DSYaf3iArBmGpKRFQEmA2IkMc+WQIzNeKlwkoC1GUBbLcsBkDUSTQtLsrV0haz1+IF/DkyhRkTHpzhEAJlgGyMxmiVFRzGGZjh03kPWxihn49i6YTrgIAKL4QCv3sA3P+E6SMR6xWnHYZ/DO+abiVRMM66TtmFnxTLgIAMLlQDU7+likSCOfu5OGwo8eJTF1dHW666SacddZZePzxxwve3+/347LLLkM6ncbvfvc7SJJxk7jjjjvQ2tqK8847D7W1tfr2O++8E+eeey6eeOIJy1hVVVW46KKLEIvFcMsttyCVMr7wCxYswPLly1FRUYGzzz67G2fqoTexrymCuuYI6jOEO+cLloxAaeXmCKHrAJimeVHvYx9uS6AjmoJmnTHubwxSN6IiHMdX0/UL0SZU0Bi+d1otrj6F3+DD29/jnUwp9ouDPpSG/aoZ3XSz1dL+M4bSkB8BkUDoakB45wd6F+uSobqyGOCzl742gYKHYDPB3ThLlewZi7OBgUHavQYseuSa7dNVY8AUp2A0yyXvNki4rKD+mrBd31/0QRxzck9OyRWECOg6+lx0HnNB4TtTwfgREgLtfwCBUGyur0dAfEbeJzfC5DvhAtCaUda5UQoaLObHEUS+H+HXSRg+ifcpM1J/CAOOLvwcPBxy5OVa2rNnD9566y1dA3Pcccdh3rx5lmRy+/fvx7333otXX30VsiyDMdbt9P/XXnstli5dikWLFmHevHkYP348tm3bhi1btmDIkCH41a9+Zenf1NSEnTt3uuat+cUvfoG1a9fi1VdfxerVqzF+/HjU1dVh48aNCAQC+OMf/5iXcNjDoQVTXSNyN107OiFRFEMrozBTzQFj3HufX4ejBpfiq7NH69sVMKyvO7gikNohh9WWYF8rQ0VYxKQRJehojIBBMe69pqdDn0jgE0REImn4WveAFR1rlJdWywH4k20IlFYi8dFzjuOVhfigAZGAMhkAQQhxZKIjNeVBMCmFrgwWGZpo7+6pg8kypO2fdHv/vgRlyCTQurXOBkpBXLSAJNLU85PwBVGIn6a8LARLCj9CQUp7tjI2C5SA2KqKgwByyQB0yxJn8rUS3Q2k/rPldCElNaC1o+Eba404FcefDRZphTDwGMh1XKJASmshDBoLgICWqg/BhJqshoA4+DjIe9aClg2A3OGu0fTQN5GTyDzyyCO48847IcvWp9O//vWveOyxxzBw4EA8+OCDuO+++5BOc9P95MmTcc011+C0007r1qRCoRCeeOIJPPDAA1i4cCHeeustVFRU4Fvf+hZ+/OMf64n48kFpaSmeeeYZ3H///Vi0aBHee+89lJSUYO7cubj22mu9Wkt9FLqmtdtPtjaLhr3V1NQeSeHTLc24ePZRlg4fbD34TK5+Lc+Lrn1hGicxHcvpPqOfvYjiWCsQPwAMPxWEMX7f3bYcye3L4J/1bdfjXTCxGAE5huNqCYg6rrR1WeYJMgWpVc/D35mhjtDBWKWkgyeCfQXKiGlQ2uogxuxh4wS96KG3HarA41D77b2HIn8sQ7qMR4y2QvNzARRC7VGQOhtAQyXQCQxxITKCiMCJlwCApeSFb8QJ3N1ECGjtKCiN2+EbdxZoxSAbUSJeKaUjBFmJzKpVq3DHHXeAMYZQKIS
RI0cikUhg79692LNnD2666SaMHTsWDz74IACeZ+aaa67BiSeeeNATC4fDuOGGG3DDDTfk7Hv77bfj9ttvz9heXFyMG2+8ETfeeONBz8tD96CoC3jeN7YsJIYpiutTsKIwUFNaXKZbc3hlJUVhUGQZkJ01lMyH1V70xD2uKEBUTqUVSlQMrYum17G4b/h2ElPdMe37gGH8JQHAOnhyvNRH/4IdNB2D9O6/MG3UmVDIQIT2LENqT/b8NqnVL4FlIjEHCXnfutyd+gsIRXTs+Shb85hju1m7kQv+GZci9fHT3Z5DYf1tYfc9QGJSRQPgjxppDdznxAkM66ZFRjjqRAgjT4AQLgXiMDQy9t880f+TYQ4U4vATIA4apwuGSbZrKPLklyTYM8lOPRw6ZCUyTz75JBhjOOecc3Drrbfq9ZR27NiBH/3oR1iyZAmWLl2Kqqoq3H777Zg926tL4cEdexq6UBLyo6osmLszgExMhkkpKG37IdSMdB6jsQsjBpSoexPUt8YwuMSgI4wxHKjbj6C/EURxFoNLqknmpLpNoJEmAPnO1R1BERhRqS0mDExOwcfSEClFwE/1dYVJxtMk04mOAYFqZ5TdyuSLNACpGEq2vonoMeci0LgJuaTSSuveHD26D+mLD3J36i8g1MXCwTMrC4OOhdK6FywRyUkK7ToXEiyB7/gzkVr9Uu4pkMLKaBJBgH/615Fa+Zw2WcdCrgh+YOSJCJRWIL3mVZdRrGCCNdO1ebz4mLORIn6LRabghwFKeZSfv4Q/9KgEhliijaC+pkaGX0IQOPVKsGi7Yb3RLELBIrBUDISKTjJkgjhkHDDzUvjGzoE0cgpoSV8QbHvIB1kp/tq1axEIBHDLLbdYikIeddRR+MUvfgHGGGRZxl//+lePxHjICanApCyuN8EstmDFZkXR3VP6rgxMAdJpWc9gax+aMSDx1r2o2vTvgzY7Tx0qIBz0AcQgIT4BqCiiKLMkijO5bxTFpOPhieoGVIRRW547AqSsnJM4oqRR/EXuRenLCmHkVJCi/N3THMT18ocCfhBfEP4pFyJw8vw8Dm613tDKYaDluXNi8SmYbte+IGjtaNCBY3RLgrM/AS0bYOznapEhYEMmQKgdDVI+CEKOyCb7CJpllA0aj9IRYyGVDjYMJRksQFmPYT5H7bVOIM3jWZPkEVX7Igw5ztSXWPsKYuZrpfbzT5gL4g/BN3IqhKrhWfp66EvISmSam5sxfPhwlJY6n15POOEEADyHzOTJk3tlch6OHBRKClgW4wPLNJhpH05iFNWpZO3PGEOWAtO2AQ8GplBqI74bybf/itQKU/I5sw6FyZB2rzFPVh8j1/MtO4gIo74OWj2yx8YSx8xEYPZ3C9wrQxg5IWqVT+reboctOowUV+XvMjIRg+DpV8M/9SL4T/gPBE79f5n7m7UldquG0REgQGDa1+AbmyOthW13ZyQVVbtYrwdRrRvi0TPhO/4MkHC545z4FAVYliWTdSfjZIjpr8aiCKBVutbmxV/ar/XhSw1wxhlnYOzYsdi37+CLo/YErrjiCowdOxaffNL/BPpZf0GpVMqVxABASQl/+qup8cxvHnoL+RMJxtScMaZ9LOHUaqZfhSl4blUHGtpyV0U+WBD9wKa/6oSU5t2myZstMgzp9YtMbQypz16H0lGPnNfDJZ/JkQBaNQK+8Wf13IDdqWicSV+iLZZ56k/MrhgSruAhvzRLCQnHsfQ3AIirVswAdWpL3M49q8XGCrPuxTfta6DVI4zpAGBEdZmaLCVu8/JPvwT+2d91zoeYXlgS19mIpL7JTjD565/cfDtO+/plOO0bl+Nnv79NPY6TyBFCsLkxgnn/2oA555+PWbOm4dNPnckhC8Wnn67CP//59x4Zy0NuHLTcvnBVuocvI7pj23A3vGSPROqIpPRuTAH2t0QhywrEzn0AA1qjMtbtS+K1TzKkunU5UrY8LNlAAMgyM+lQGJgb2bBYZKwuLyYlIe9dh9S
nr+Seb+QIKsToD+suBWHQWBAhvyrkAEBCZRDHnZG5nWayTGSGz+8HddnH59NykeRJZkx9hAGj+VwykBFaNRzWxdv2mhKAipnvwbqlwtBpuffT/lJkMlDQEVMgDRiH6AjDYkPLjXwrVCd0FCQV5wRGt5A450V8QVAx4EKs7Jl3rVFGwbP/C+LYUwFowl3N+kIApva3kZtPN2xES0cHCKGWa0WCxSCBMN6PGTnJegpr1qzGI4/8A2vW5JFt2MNBI2f4dVdXV9aiirnap0+f3r2ZefCQEc4bMgOPjIqnZIhqcUbNAMIYw+b9cSTTMoZX8yRabu6pj9YfwJDqIi1ICC1R3mdAuR/7WroTis3zvnDNi2qRMbt/NNJiTm3gKACl9tGVwZmPJm37uBtz7JsQR02HvOczsHiHmnMnf+IhHncahNrRkDa962wUfMjbDWRCMOhHQGRIqO9p9QgozbsRCohgsgRCBQtJpYOOhXLgC+P94HFQ9m+yLNy0apjV/UFFy/eDFFUCZUOBHTx8Xhh0LJTG7Wqjau2gFMRF90HKBurWGHHkCTwE3+fPcN6atUIBGEDC5bwekXk8SpEeeTKYIpo36udDBROJEEUIogDXpNxagigqAEyCMHIq5O0fW8fUvuQ6EQM/z6JyCOGyDDoXYuxn0jMNGzwIe/cfwDsffYzLrv6J9ayLqyBJEt776CMUF5cgEPA7ssl76B/ISWS2bt2Kb3/bPWcFISRne6aaSR489DQUy52Tu5mISgyeW82Tdv3odL/a6sQHn+0HAJxk04HSwtY8HScNU2/uigyiFWc0W1y0StWmbQ6CpbVJKVR8cv+Xpxgvk0GrhkPet95K9DQEioBk1Ln5tP8HEsyW4DKLpQCAOO4MiIOPQ+Ltv9p2sz7l+6d9VR+Lu3dUV0+oDCzeAf/Ec8EmzkVy0Z95/0nzgONOtVgEaPkQXdMRuvBXkOu3WbVTALdMARDHnMKFvWtdJm37zghDx0Mcf44a5UQhjj4J4ojJIEIAxG+NxFNC5WpEEAUvoUEQOO0qpFa/DKVhq/PqWaxCzmSAjFBuASIUhLhHHhJCwagAKBS+Y06B75hZ/Hqn4yofIfr4gGx6rZEVmzZGzzVjvNVw9uxT8Mizz2Pxh0tx2dU/dcxl+fKP0N7ejgsuuBgrVx45DwJfNuQkMhmFlXngYPb1cIShG18F969PhhWIATKzGaOZSzIYIy9dThwj7scWaXDB7tMRQhMGCh0o8h2jzkE2BDtuB7a4k2yPsVKhdWSOECiyIYxVJAAE/lOuQGqpWvYk2wdoz59iadP/A//s7yH14SO29gzWGofbxeRO0iwMsgT/jG9Aad3PCY7Fc++m8yDQvfuEgpbVwjdxHtLr3jD6DD4eEcWPqmEjbS4oLUkcwGRb4kEt7FgjKEzRj0vLB3OS4+eFcttpLUo0Yse4VYZk0uwQOM9JtzAZLh2mEhlGMkQpEs2dZlzrwIxvQUl0qeRLcZJN7TprPIbYrqdWj8N2jWuqqzH5+HFYs2Ejtu/YhqOPthZBXbRoIQDg3HPPy0pk9u7dg6eeehyrVq1AS0sT/P4Axo49Dl/72jcwZ85plr6zZk3TXz/yyD/wyCP/0N9ffPHFrjnP1q1bh/vvvx+ffvopkskkxowZg+9///sZy+dIkoRnn30WL7/8MrZt24Z0Oo2hQ4fi7LPPxpVXXplR29rQ0IB77rkHS5YsQVdXF4YMGYKLLroIV155ZcZz7w/ISmS++OKLbM0ePPQKdtVr1coZFJcFKxpPI9kWg6wwlIT9KAoKkJt3Q1FKIQimsotqxE9bZ8IxRj5E5oelb+PHrd8u2CJzfRlfiFoxBgQKwBRQKNqsHP2VaFthE/sSgCkyxBGTobQfgDCYF3UVKobo7b7xZyP96cuWfUjZQDUaJkc2Q3UhpEUVIMESsISRYj9jwjQqwElCwBdkzV1CABIogVA5FEgn1ZIC5mPa56FZQphOsIQBo2GhJZQiXXssCE3zPCh
uJ0MFoGoE4iUjEG79XK0ZZFiLeGJIrilhBPCPORlKtJWTjU4FWqI4RgzXjDh2FtKxdrAuXmpBHDoBSJvOG+CmSmadC+dmAqgogKQk/aSFwcdB3g8II6cAgggCBkapbmQhoVIIRRUm15JVd0SIACL4LBl8NcLGtB9oBt50zqmzsWbDRixa9IaFyHR1dWHZso8waNAQTJw42X1nAO+//w5uvvl/kEqlEAqFMGzYCHR2dmD16hVYvXoF5s//Dn7wg//S+0+YMAkNDfVobGxAbe0ADBgwED4fJ4cjR450jP/BBx/gtttuU8cehrq6Oqxfvx4/+tGPcNddd+H888+39E8mk/jBD36ApUuX6mOGw2Fs3boVf/vb3/Daa6/hsccew9ChQy377d69G5dddhmam5vh8/lwzDHHoLOzE3fddRfWrl3brw0Phyi3tgcPhUQgZd8jlpQQTUiIp2TEEmkeZs14BeugesPQtDEKgLQpZ4wW1VTIT9ZN5JkPCACqcGtMbYmPa2VcbhbS5+8Zb/rxzaRQBM/5MXyTTDdpU0ZVWjoAJFiMwMmXgwSKLIsaAAiVQ0FC1qfOwPSvgQhiVteRtqBr0TSOq03tlhd1L0GERZiqzocWVWnDmvYnRiRSwOzmsg1sDhd2iagx2ohLm8mPQgmCUy5ExbgTEZz9PdBQmWG1MOlM9ErOgmCdjt7PYFtCSQ0CJ33DOFywGICtCCUVQMq4UJZWDTOilQhB0C9iQGXYMJgEihA45QrQUCmEmqNgseboh1bJF0x/tctaPUK3IpmvG60ewccjotrkvFannnwSAn4/3n57kcX9/O67byGVSmHu3HkZra47dmzDzTf/LxRFwU9/+nO88cZ7eOyxp/Hiiwtx330PoqqqGk8++RiWL1+q7/PAA//E+edfCAA4//wL8cAD/8TTTz+Np59+Gtdcc43jGLfddht+8IMfYNmyZXjhhRewfPlyfO973wPACyUrNrHRn//8Zz0R7TPPPINFixbhxRdfxDvvvIPJkydj3759+NnPfmbZhzGGn//852hubsb06dPx/vvv44UXXsDbb7+Nf/7zn1i+fDk+++wz12vQH+ARGQ99DjmypfCHOFMos74XM4Kv9Vc2I4hWhLIQvpCLyNTQTvy+/FmUEadmQ6tYzVJxxN+6D0pn9mKCSc110g/ROfFSpI8+Nf8dCIFQO8p4r34oXeO/BmHAaMAckeJYoAggBlzHJMiRij5buHHG/YjF5aJnmjUTANWyYc7XEjh5PgKzv+c+H72/8R4gEI+ewd8z9TxhbrfOyTmW2fRDMv/Tw5GpaZv2ktjGUbdR+zYKoWo4gmdeC3HYRNNhqXt5Ao2oaC4yt2y95utqCuG2RBvp7IgTF3PWX1pWq/fSEA6FMOvEaWhqasTq1UZgyptvvg4AOOecec65qnj44QeRSiVx9dXX4mtf+yZE0bCKTZ48BT//+S8BAP/+95MZx8iFk08+GT/84Q/1IsyUUlx//fWorq5GfX09Nm/erPeNRCJ4+mle4uI3v/mNJYfbgAEDcPfdd0MURaxZs8aSD2bFihVYu3YtfD4f7rzzTlRXV+tts2bNwo9+9COk0/23NppHZDz0PeSwyGjOIwePgcmaw9RMv1rEkArt4SYp5c9kCAFOHpU5/PeU4GaU0gROCOy2bPdF6kGirVDaD0DasQJIRpCyuUMcSMXynldfgf/k+fCdcCGYPwxWVJ17Bw0EVuKg1Z7yl8Jgq1pH89+sA6ovs/TNFiqdSSOjtvlnfxe+yV8xWQxsC6xtISaCH8QXMIiBTSxrFayq/3whfZNhUXEhFnonG6nSrUPEOo6NB9rPzUG2HH1s2hkq8HMMl1lIR+Zrbzt3aljGoJFPTTjtNmfHWHYSRED8YZfzJDjnNB42rmli6ur2Yf36tRg3bjyGDx/heoR0Oo1ly5aCUoqvfOUi1z4zZpwCURSxfv1aSFL38jh94xvfcGzz+/16QeM9e/bo21evXo1YLIba2lr
MnTvXsd/gwYNx1lk859KHH36ob1+yZAkA4JxzzsGAAc4K6N/85jd1ItUfkVPs68FDT8DNAtIeSUIUKIpDuX9ASmcTGuICqgISUpKiu45iSQnROEFbawzwl6t+Xq2qrdP6kqFWZFZQJqleewYKBQSAZDKz6/dN29jF23hiO4tc1y7M7OcQR5/ENSEASH0zaLwtY19aPRJK8y5jAxEAMIhjToa061OIY2dDWr8IzBcEQAEpDvgC+mJkgRsZyWRNcExEADKUy6CqDkcYOh5KZxNYZ4OxD6GgpTUg5YO5gNay2KrHo1Rf4PX9mKEVcZkwdK0HtMgh82kSEMLcz1dtZ9r10DdqBMlsHTITKRP5MQZyuW4E4lEnQarbCABgDjJkFexqVhNCuFXBesr2fU3WK+1a8gum/yWZSKXL52utQ2VnMgTTJk1GVVUVPvjgPfzsZ7/E4sVcx3buuVb9iRl79+5BKpWEKIr47//+acZ+hBCkUkl0dnagsrIqY79MGDHCnUhVVfGxYjHj4Wbnzp0AgFGjRkEQ3EXZY8aMwZtvvoldu3Y59hs9erTrPsXFxRgwYECfyTJcKDwi4+GwwY3IaAJde4kCJRFBOiYgLaV4YDVj3OLC+DjqXro4WKurBADJtMki0w0NiqDeg79X/AEm+fnT0e/bL0KzWnhSKx3QrWq/fQi0ajiUlj2WbSRcznOy7HJP7EUHHQvVfILq8hCUhIhMz6XCgNEWIkMEEYzJ8B0zi4cHB4ohDp8Iv6J++LpFwfSErh/YJr7lHW1/DQROuxrJ9x8ErRgEQkU154vRL3D2dfxzFAOAnIJ/8lfAFAmJhX/iIwZLwLREb4C6ALuEhYOoifyYMU9FUMmNbW66wYSAhEpAUjEenm+ytgQDImqDApCKGDsEwro7hZbWQEl0wUgGR/UxtWtHoApi7cTFfL0s1hz1NaXwjTsdvnGng6XisOR4AUAoAQ0WQYm16/sxwY/qMgofhZro0WQVsn82hECoHApJ+06oCeuYWTvjRmAzWd3cjqG+Fv1+nHXWXDzzzFP44IN3sWjRQoiiiDPPdI8KAoBIhIvAJUnC+vVrM/bTkEx2J9cUEAqFXLdTrY6V6Z6lkRqza8gOjQBFo4arW9tPa3NDdXW1R2Q8eCgUGUsmmawppq2mcgNcxKsoqhLGJaxZYcYN4I2Nxg9azklknO38fsl0EgMAtUInOpQwLi1ahqmBXfyYIJhV24GMK3lfh4uOwz/1YkAQQWtGIv35B87swXpIMAEVRR6JkvkALsczWQwIAWGAIFB1QTcWZQdJdLXIqNtsCy4AIBCC/5QrDIGwm9uCUE6eZDivhdnFoy2wWr4Ti7VD7WOL5slkRTAKIwoZzolCoDwfDAhB4LT/x4mSdp46oTMRBjdLC3ER+Jrfu8GsJSIAY8Q2Pe1aW61BAjUntTO2W1xX2vUQ/c5rY3GbOSbl+j3NeTJUxNy55+GZZ57Cgw/ej4aGesyefSrKysoz7hIKcXF0RUUlXn11ceaxDyHCYT6n5ubMWby1pH5FRYbQXNsvW8K/bGP2dXgaGQ+HDnkYLFiOd7oGRlGtL+p7RVawrzECRWHY1xRBPCGhMyGhuTOFtphhrt/dkt21Q12IjCPKFECaCZga2KmTGACYfZQPlyivZx2/b0P9gNQkbABA/AEQQkErhsB3gmGG98/6DoRhk0CDZaZFUXAsMuKxJvGvyyJtzY1iLPiEmCwuukUG8J9yBcQxpwCEwj/1YggjpkAYNR2BWd+xjmE6VnjMdBAxAKF8EIgmEHYTD1NTpIyNEDj0I2ZrEQBH1IuZ1GiRTOp5iGNnwz/jW6ZtZsuI24Jv/KWlNWr0kZk4mc/HTKpMZM1OMO3n70YeiI1cERdCaXSGlmOGWOZitNuHdzt3+5iOm4abFsjR18FQATAcc8yxOOqo0WhoqAeQ3a0EAMOGDYfP50N7exva2lqz9rWjt0r3jBo1CgCwY8cOyG6JIsGT2ALWUG9tv+3bt7v
uE4lE0NDQ0IMzPbTwiIyHwwpJVnCgJYpk2vhRuuWx4w1AKiVxr0OsTbfckHQMX9Sn8cKSnVi+sR5/evozPPvRXvxtaQL/fHsfRFMimPe3ZDf/ulWYJgR6hmB93hAcFiV/f7dvaouypUKzYTHRQ40B0JJq+I6do7pXiLFg2xYRcfgk0/jOxTSjDkKzNJgXZxAIVcPhGzsLIAS0uBL+CefAd+wc0PJB4ATIEIySokqAUBRPOJVHHNkWTnN4L99MM7YbnbRtqtDVvlibrEvWbYb1xTd2DmjlUKPNbPmg1LYvjPM3j02pWm4BsERQmSxNRDuuTgyNMYl2fm6kQyVGRDBbewgodbHImK8L0SK7nJ8nsbu2tP3cyJSFQFoJjnauxDZnYvruEWL7VPTvGXDppVdg6tQTMWPGyZg5c5ZjnmYEg0GcdNJMMMbw7LNPZ+1rRyDACXN33U2ZMHXqVITDYTQ1NWHRokWO9gMHDuCdd94BAMyePVvfrr1evHgxGhsbHfs9++yzXtSSBw+54JrQVv2bSMlojyRNG5nF+mI0MESTaQAMyWgH2rsSPBdZvA1tMe7POdDCfcGtXYbEVizgW+5qkXHppzCCy4qXW7b5+vuvyc0tZFkgibO/qc0cBmsawDqWpcnkWrE/TRNiqwptWsTNVotMVgVCEDjzGgS/8t98kRPMxMg4L83VRATRRMScC6rZSgUQ0GCx+8KsH15zGWnXxkRKTAsrCECDxTxXDkzbtX0tFiKoBMMHUAFC+SDj0IIPBvFRyUtxpfu1Ycb5Oz8fUz+VKGnkqLwkgJrysJp0UIPp9yL4IIfKQUtq9GECoolAmf+aCJqFUBaV6+cJACRUAlpuirIJFPHT9AUtcyeltRCquOicVgwy6jERgFYOASnlYdnz5n0Ff/7z/fjTn/4Cvz93IdKrrvoBAoEAnnzyMTz88IMW4S0AdHS047XXXsKjjz5k2T54MBeNb9iwrtvRTG4oLi7GZZddBgD4wx/+gLVr1+ptDQ0N+OlPf4p0Oo0TTjgBJ510kt520kknYcKECUin07jhhhssLqZly5bhvvvu86KWPHg4GDjVMO69GAjPycKAR5Z2oC3Whl9dPhWAEY2USDrNrUIBBMNHnPsTwnBUarNjmx2B9h0gSn8VyMCFVKivtVwnjoVbjcixmPqNPsLQ8QAB/CdfDialAFMGXR2U8g+cmiNY1DE0ImNxb6jz1CJ5zBWPrY/h3NVBARAGQkQws3hJ7eufdB6U9gMgwRJe60c/Bh/Xf9I3QYoq1IrZpmvgCxiDuFgStNpFWr0ji7XK7lLxBY1rzGzXWLcyaPszY5EX/aoI10ZA9PZAZhKqjaW9VkssmK+fHVQQQRmD76L/hbR1qaOduwpFQNQKc6ofqX482/eKGudnXEOrLgdUsGQ01i0xvoAlVQG3BAmm8zZZcYTuL9BHHz0GN998O37721/j4YcfxBNPPIphw0bA7/ejvb0N9fUHwBhziIZPPHEGSkpKsW7dZ/ja176CESOGQxRFzJ49G1dffXW35wMA1113HTZt2oRly5bhG9/4BkaNGoVQKIStW7fqpQr+9Kc/WfYhhOCOO+7A5ZdfjhUrVuC0007DmDFjEIlEsHv3bpxxxhk5C0D3ZXSLyCxbtgwffPAB9uzZg1gsljG1MSEEjz322EFN0MORggwmGe1+monNmP5qXzPCmEn3wtQ23mh2UQFACYlDVIIopzG0K5mLCVbQCNqUIviJk4hQAkxPWG/cx4oHHP18Xc5tfQXimJPBEhHIB77IWL/JPWkboFVHtrbbrBuAzS0CkBDXz9CichB/GEqkFaSoAkxKwXfMbOgkyVxbx/w90PKMWPQb6gLIzDoS/T/mDZzsgBc2JIEwmJQ0nYNqtfCHIAwaq/anVjMgIRBqjzKN7XLOxOW42rWQtX1Mlh6Lq8wFZtJhm4tOFpl9f2IhX8552oiOaT/LLCxjuJyWOiYtqoBQO9po0K4/se3gai0zwx6i7Ti
gcy6u/XoXp5wyG08++Ryee+7f+Pjjpdi/fx/S6TTKyspx4okzMXv2HMyZc7pln6KiYtx993146KG/YdOmjfjss8+gKAqGDBmS4Sj5IxAI4B//+AeeeeYZvPzyy9i6dStkWcawYcP0WktlZWWO/Y466ii88MILeq2lrVu3YujQobj++utx5ZVX6tmE+yMKIjKJRALXXXednmgnV22G3hI8eeibkBUFkbiEsiLDZCvJChIpd1GaAf49amyLIRz0uRYRSCQlAD7N6WTbk0cwAcDmve2W/W6peI4vKOXA/7Z9DR3MSWaGC824oWwhnonOwLa0M1kUdSniMi+81rGtp0FCpWDxzh4Zyzd2DpjEa07Je9dlOKB5YYJqnidWF5K5O6VgMoFR+JBZ1xYtPFlLnOYLInj2D3k1a52VquTBYVUAfxLXCh5ajm2yyFjmrlkviIkDaEQCtnFcFlkqGCHDBLxopSI7I5Ds+5nf68OrizSlnHjYrTBajSb7ou0gElqOGdOxXe+rpmNrLikzwSAEYMx5T9bdgjYhr+W7oM2JwqhGbc62q+7vCJs2jwfnNbATLOI87+7ivvsehNS4I8O1cseCBa9mbKutHYAf/vDH+OEPf5z3eMceOw5/+tNfAAA1NSWO9nfffTfr/rfffrtrgUkAEEUR8+fPx/z58/OeDwAMHDgw45iPP95/s4oXRGTuvfdeLFmyBOFwGF//+tcxadIkVFZW6vHuHr7cSKYU1DVFUBQsh6j6c5ra4zmJDGO8HlJaUhBNSLr1xUyU2yIJMH9Idy1pUBQGCAEoSi6yBJTQBGpIFy4Mr8afO8/F90vewfrUMFxSxM2po8RG7JGseRaOEhtwTmxNvpegZ5EtzX4h0MJ1QeAbfzbEo05E8oOH3PvxAyNwxtWwZIx16Cmgb6fFFSBUhNLVDKuiSLXkiCLfQXc58ARoQmkNWDLK86DoZMh8DKuriZZUQelqBg2XqYU2VcJDKC8oCALiC3BtiKJAbt8PXSxqsv6R4koQOa2Xi7DobUxaFkIE8ErMJheM2VqiWzBU0hlrNfqYF3kQkGAxSCKiNmeL/oERWSX4IJQPhNLRAJ0suD082sgZsVhHjHmTogoQfwgs3slJB1NQURJEQDQ6624YbRPT3pgJldolXKa6owho+UBO+polENEP4g9BiZvFo87vEA0VG2TFfjkYg69iIEiHm6uW64QEvRyBhy87CiIyb7zxBiileOihhzBlypTempOHfgx7SheNkGTSvQBET36nKNoTo9MmYx3XaJFkBYyKeVdunV+0FJVCFOU0hrG+eoz11ZtmQyDYrC+XFS1DjeKi7TgUOIgn0sRRpyG4430AgH/GpdCEtUQQgbDT7AzAokkh/mLVGsLdM3aLjFFTCCD+MF+8EhFLH2HY8db9iElwy8ALEfrDXDvjdq5m9wghIIFioKsZtLgKSqzN2k+LShJEgwiAW4FA4tYFmQq8QGVXszE/StVsv8SYpyACimT9IhpfT3083XpjnbyFBBo6D7tLzPaa8MrTSqQZ4tjZ1kKJPj+QSrqQSps1w+5OIqbzDhRxK59K1oJ+wTaOi/jaIuI2aU9Ev5pYECC+IAhjIES1IAbCQKIzw0mq7wLFxnFdfr40WATSleG3R4gqkPbgocCopaamJgwfPtwjMR4ygjHmIBW5OQaz7Guul2TtouphzERG4sTDXnpAq8dkP4om5nULs2YgEIiVyBQLhzEk8SAsMtKAcRBHTQPALRmwhL8SiMec4tzJvBhrlgltIXbTK1gS2vFmWj4QAOCbehGIoLkY+WJIKIWeY8SyvmmEwOYSocY2Yt5u7Gj6x+CYn3WyLmTJzdJk7mf/jhBYb5lux3MJ2842Jxd3DqEifGPngIimqBpirUvkHMc8nnYOLq4kx7Htn6nLMXKeTrZj2MY2W7dcj5vXAT14sKAgi0xNTY0eH+/BgwZFYWpVaV42ICUpEEUKqpqiM5UF0AmL+l5WGEQlxckK5SZuhTE
kU6qmQpZhpTFAOpWEHFCwdJeVgNxT+Tg2pIbajkjgU8W8/1P+kut8vl30oWVbCImc599ryIPIkOIqsIhbtk4CccQUiCOnqouEWScC+MacAlo9Eqllpqq9guFa4hYKm0vF/tTv0M0Q0FApgmddC/jCgJSEmeQYESk2Ua3ZFWJZw9y0LbB21OaguXqcl8E+qLODRYxLrblQLBYY+1j2SVvbsuah0UiLnTho5NGek8fUn0uK7J+F/Tj2abpcA2KMZ1y/3GQpu3bF2kZLqqAcABAssozvOkYv8BdPp/nlQEGPfHPnzsX27dv7dQZADz2Ppo446poj/A1jaO6Io73LSASVMcGd1q7+VRiDEG2EL1IPmEoSHGiNAmAgqajWUd+XRBqN7TaM91vrhgwU2uFDZi3NzOA2VAh9qPq0mE/YqPuNurTYlMEWBNQfNi2QMBZ/80iWMFdTAjnXhdBMAPSd+Hvq0xdkPbcM9UEvumjW1OkLtDP0mxZXwlg4+Vg0VKL303OzOMZwuUT6P9NCrZ0XISCiHzRcCqrmlaHFFYA5uzCgthfr49BwKa/NZDkQh1BaY9pkbCeiz3CJEIAUlXMXm30Manz2vLo0QIoqQYoqQMOlICG7eDQLsQHhVjnLJvWCCCKEslrQ0lr9WgAwXWfT8ILPOC/T3Bxjm44hHjMH/pnzIVSPhKXAZMh67eykrxCy5MFDQUTmRz/6EUaOHInrr7/eIzMedCgKc9HFGG9YFiaj6WOY/pqBMUV9z4yxGMArA1tdRpLEdBsNgYIRQlPGeX67+CMILvlfDjWEYRPz6kfE7ls//Zr2QbWEkKJyaO4hLScJrRgMYegEHiYNWPO2GLMw6WGcFgLHmkIpT/VvyzbLE8gBJFDEF02LRcJp2QERQMPlDusCLTEVyxNE7QRh1XeYxrG8ps5txNiXFlcZielCZRaiAwC0uIone9PmUlxlyShrvjZWcqLZEdXtPkPDQ8PlNoGtel1MlY25ToYLmWm4jM+j2EQe7O4x/VyN9xpBc8wzXAYSKNKPoZ+b+TprZM8XcJwXEf362K52KUGAUDMS9mtEiytdCW3PwiM8XxYU5Fp65JFHcMopp+DJJ5/E3LlzMXv2bAwfPjxj9U5CCH74wx/2yEQ99A/YaYJGRDLdp1xkvfp+muFF68O5DB9Qs/hLskGS5obWYV5oHe7qmNcDZ9J7IC6LijjmFEeSMWHQsTxiRU38RWtGgcU6wKKmui85H1wz5eoACBXhn3Ihkh/9CyzeAYfQUycrmTL2EuciqllpbJE7lr+ursbMrowMszf+6mTI7bnMTpLszWark/G++y4JFwuVS5PDUuMgI6Z2S5h2tmMWMGftB5Szk/Yq0+eT3+bsB+3G/D14MKEgInPfffdBCwGUJAlvvfWWaz+tj0dkjmxIsqFLYfo/ZqMlhgVFlhkkWYEoUItuxl6KgFnaGIia7ZQpTH/Q1IJM0grD2r1cxzJE4JEsZTTeQ2fYS1AtH6S0FkjFwRJdFjcSrRgK3/Sv8qfd6pFQmncive5NAID/pG8g+e7fch5CrxituXf0BpP1gqmfX4Cn3zcKKhr9CIipMjOs48C+wGlkQluUTaQGNjZrWe9N0mu7y8tMpjKcqUWYbJmiNketzXYOgHptMiT6y4jM7fYQZet4bpYT+7jG+VvJVKZjGiRH66/ZJ7MTFQoQe34k7RjG8ZmuP+oGydB4i+4aJO4cFsjy+bp0zXMu7uTQw5GIgojMj370o96ah4d+hnhSQkObSU+S6QalWlYIA1KSjH1NEYwcWIr9TVF+fzS5oQxvlBG2pFtjVP+SJCkAY6CEp+aSFYZ3N8fth+zTIKIfvglzIQwYg+TypwAAtGKY3u6bcA6oLwjGFC48NYcTW+oP8W05jsYXEsFnXVAJ0RcZ33GnQa4YClox2DSmraigY80lxkKnbQoWA+YQbPvfDPMm4TKwqJThYZ3YuwMAzxYs+KDEO8DSSRBfCEyyFugjoRLuMpGSIGI
AxB8AELTVEOLnoYcCC4Lx2jnVbsL9/B06FADUH+TuLdGnJucDIPpAfBncjKJf1wqRQBFYIsrPyed3CQlXjxwqBVJxx7yI6AOCJTrBJYHwwSdk1Dks/w5Sm2tKR4a5ug7pC9i0SRn6FVV0j4B56HfwiIyHbkE26WIs9wpb1LNiEu0qpjI6kqJAAQNV73R2HQ1jxr5alWsoCiDFQKQkREKRBoUkM3TEnZl3DzdozVFQ2vfzhGG2+ksMBOKgsXzB0i6IqUK3fdEijuKJpuNUDoPc5aILIlyroleCdhAgrR8BEYMQBx8HptWusVs4cgkvtW7+EF+QVFExYYLVQpPBwkCLKqDEOsDrNmltzNrN9sROiyoAAEKwmGdw1YommvtoOhKJqEUWB8MJlcyo+VoIFSFUDLa253SrGASRBMJg6YR1O4Fh1TBdA6sORe1aWgNCRct3wDknWNtqRurv5XQCUCQIFeZU+DbCoudfsW4XKq1RfqSoAoh3IlcCP8s+jnfq5yj4IFRmTs9PS6rVhIrOURx9TcUys/bLlC/JwxEHLyWvh27DWZ3aXSMjK5ruhTkbTa4opm7TE+gxYx+NH9V89giqNzyF+aElAADZNKZbbpjDBRIoQnDO9xziSN6oumCyCYfMET/ZjuMLghRVZj6GPeu2YziTtSWTDibbwuJw52g1mYiaUTjrwZ2HcnSltveZdu5+cw4B10Ei0/XLd1tPHL+Q7W4o7EJYQ8o9i4iH3odHZDzoyJYd173NREKYOcza2lcnIypB0d4rimk/ZrQTdYMCgCky/E1foGTza6hvN5LTTfDtAQC8u/kw5nnJhQxEhOi1i6jh5jDf8LX6QoBq3dCsKS6fgaZfcRyEQCsRYNoIi2XEHDHkWHPsrMJ2jIzkB4blQTDrZUw7ZRWvEsOCYUEv3apyaDPy0VnoWpxMhEjXmXRnggXC5dK6zatg/UjG75h1TGe3wk/ay/3ioVBkdC299NJLAIDi4mKcddZZlm2F4KKLLurOvDwcYkQTaTS1xzFyII+o2dcUQdAnoLqcm9x3N3ShqjSIkjDPNppRA+hCeBQ1jFoBQBnD/uaovt3cR+yqB6hhfRGSXRCi7Shb9Q8QRcL7rVGcYFt3WqNGFewJat6YPmOXIQS+ifOgNO8CAsUgog9K+wEIQ47XL55/2tcg71gBYg6nZYy7aOQ0nC4ZA8JR0yGMmgJh8HGQ9q4DYQqkHSu0g6uh0G4hyeqYTKufQ7n1hGkEx5y4TgH1B6Gk7ALqTE/d6kqqWmYYkUADIRB/CERWiajoB6FZiIz5nS/Iw5MzVOwGACIIqiumQOuD6j4jQadWBeBWNZbluEZHAiIG+PdO8NmabAUssw5DchKr/GAchwSL1XpI7sezZA/ONFqwRL0ObhbVDPscROoAEizuoevg4cuCjETmF7/4BQghGDVqlE5ktG2FwCMy/QNpSXG8tyxPzNlHE+va78+KuZvuHmIA4zf1eFKGqBaqU1QrjCwzvtBRvhPfRbYMJma5t433Gcnv+sQtkAAgFLSoHLTsRAAUkFIQBowBoSKYohbbKyoDGX2i9cZNCHgytjRACaiq7RCGTrAQE9+xp/LuxSH4xs4CoaKJyPBJuLqn7AJatUIzURObiSOnGH0YQMoGAk07+Q5UgDDoWD52sAgs2u5y7qqFIlgEFusALeNlCwTVzUZEv5ogzX7BnK8zaUPMEKqGG28yRfdmFBwznnDOBTRcBiaIkDvsObNs2pLqEfxvzUi1AKbWjYAUlauZlzWCl/n+ada6dB/WH6SmJer2sQjXGinRtryeELQHIYNYFvJYweftph3y4CEbMhKZiy66CIQQ1NTUOLZ5OPJgt+YzPWwyG/hN0/5gprjkhdFe874aiVGrVzNAVhS13ZQynQGMKfrEfIRlvC+aNx9qrUzkqDNRvOMdlxbNWqG5kgBdCAvB2seyjmuWEr4vCRUjdOGvuYjUJhw2Im/sWhjteC60jhL1gmkuEQoGBcRfhPA
lv+eRL9oYMLshGEIX/sY0hwwLsznjb7duF6Sw9e9gkJeLpcCTsGhEMu3fz+6jBOjeh0L63an2JFas+BjXX8+DZG699VZ87WtfK3iMM844A3V1ddi8eXPe+1xxxRVYsWIF3nnnHQwdaoi4x44d6+jr8/lQW1uLmTNn4pprrsGwYcMcffo6MhKZ22+/Pa9tvYV4PI4HHngACxcuRENDAyorK3H66afjuuuuQ2Wli7gxT3z66ae47LLLwBjD/Pnz8b//+789OOsjCSzLO/M2BmYmM0zN92Luo/IPxo0yKtEgRuZecEEw1Y6iMCTTMpZtbMOZR5ktMgz2CgMECi4Kr8YeyXDNkF7O3kuOOwvs87f1965H0/1upru4niGXgjva1DZK4cj1okHLHJshn4pezDGT5YVS2wQ1YmXW4VCV87hpYQwyo+WGYto4jgXbZf4FrWL8+Nqxeh+qRSYbYXeZR+EPc0TXBfH8PjmOeTDImTyvuzDlHSoAWXPHGJ30vkcaFi1aqL9+5ZVXukVkegMXX3yx/rqrqwsbN27EggUL8Oabb+Lpp5/GMccccxhnVzgKCr8+VIjH45g/fz42btyIYcOG4cwzz8S2bdvw9NNPY8mSJXjuuedQVZWhvkcWpNNpj7hkgcV2woC0JCMST6M4ZPj9D7REkUzL8AnUQXUa22I6KemKp+D3UUedJcYAsXMfUFINX0c9EC6HP94OxSQUVsDwyecNWLs7gmq/D8PVFntlagAYJrTitODnlm29YZERx86GtJkXlCS+oNUCZLoBy4ESCMkungOECvyvTmjUxZ9QQBSNPCzqP9/EeWCpqIm0EDUKyEhu53Cc6TleKCxXWhfyaqRJG49vZ+ZFiQpqF2OORAyCySnozFHw8SKQ5nMWMtSCIsSkgclvcdKcENq1pAH3bOHdwsEskILPXSxLs906rdYoQgiINo4ggjDWC99Q9Vj+EIiSuZ5YN0bk/xV8eeVuMYN/hgQs2ZWzr360TN+pfohEIoElS96H3+9HOBzGihUr0NDQgAEDBhzuqTmMEul0Gr/5zW/w0ksv4a677sLf/pY76WZfQlY5wWmnnYaf//zneOaZZ7B9+/ZDNSfcf//92LhxI8455xy8+eabuOeee/Daa6/hiiuuQF1dHW699dZujfvQQw9h27ZtuOSSS3p4xkcmFAZeeRrQ18hESuYkR1bUUGmjTVJzy0iSomphuAvCHJkEMBA5xYWsDKCpKBhjUBRFfwqPpxg+3cJzSqzYk9afiu3UCQASzHnjc+t3sBBGTgOpUE20DncNdbxmiswXO3PuEO01FSHUjOZ5OrTtIPAdeyrEoeON8VQrCy9pYBAeUlxp2g8wzPd2S4iTTNCSahB/UB9P14eYCBUA0Mohqj6Fv7fkACEEQs1IVZRpv1LcIsA1MIUTCKFmJISakbqupjB0g7Dk2IX4Ag49iVAzUs87k3tw41oJNSO5eNkf6iE9jBO0tDbvPCuFQKgaZtXb5EEOadlA0LIByP25qN+xvK9r/8CSJe8jHo9hxoxTMG/ePCiKgldfffVwT8sVPp9PzxO3cuXKwzybwpGVyNTX1+O1117Db3/7W3zlK1/BySefjOuuuw7/+te/8Pnnn2fbtdtIpVJ46qmn4PP5cNNNN0EUjSefG2+8EZWVlVi4cCEaGxsLGnfXrl144IEH8NWvfhVTpkzp6WkfGbBoZBiYwkz3KyOwWvciMduummcI1gazo8lsoVGgqBFNTNXKMBCmYNlOI1KkK2kcZ5Ky0TLd2YHP8evylx2ncUn4k3zOtjBQw+JhTy5n9aSoPymzJcYU3kxU0sGf8rUrYXYN2YiP6T0BT2wXmHMlgufdaOyrWT/s64VjsXHRbujkxTyGqdYQcds/17ZuoBfdCrr7zaWllw7IQUmvnpeHvo3Fi7lb6ZxzzsUFF1wAABmJjCRJ+Pvf/45zzjkHEyZMwJlnnol77rkHqVT2qLlnnnkGF1xwASZMmIDZs2fj97//Pbq
68reAmaF5ORTFafkeO3YszjjjDMiyjAcffBBz587F+PHjceqpp+KPf/xjznn2NrK6lv74xz9i1apVWL16NbZv347W1lYsXrxYr7FUUlKCKVOmYNq0aZg+fTrGjx8PQciQQTRPrF69GpFIBDNnzkR1tVW97vf7cfrpp+P555/HkiVLCrKs3HTTTQiFQvj5z3+O995776Dm+GWARkwcZnXT2mveaLiFYCMvpjwy/I2mEtAtOgQMCmNZ0m3w/ScrGyxbLylyf3II07Rj24Lo9Iz98wEVRINR2YgMM1toqI3IEEMXoYt+zeDiFMPlBJiIjEsNICJwtw0VVNcP+Liq5iYw+7tg6aQ2sHU8x4HNbzMt9tmQgUA5LEMFjtdNZM7j0nvHzA6b9qlfwiNi3UFbWytWrvwExcXFOPnk2Rg8uBJDhgzBF198ga1bt2LMmDGW/tdffz0WLVqEoqIizJkzB7Is49FHH8Xnn3+eUS/2f//3f3j44Yfh9/sxc+ZMBINBvPrqq/j000/h9+cOq7djwwZ+fx01alTGPjfccAM++OADnHTSSRg1ahRWrVqFhx56CA0NDfjTn/5U8DF7ClmJzAUXXKAzyfb2dp3UrFq1Cp9//jk6Ozvx/vvv44MPPgAABINBTJ48WSc2kydPLviCasrscePGubYff/zxeP7557Fly5a8x3zxxRfx8ccf4w9/+AMqKipy7/ClhVWky8DQEU0ikZLAGNAZM+eSIJAVBlFd03WBr8LABAKmMMSSEiRFQbBrHwRSDjlYoSfEY6oricoSiMIgywyCuhD5BevNM2PKkQKwOnXUQREZHg5tEJlU5Wj4W1V3q2kBjY6cjdLNr0Mcc7LaBlgtK3bCYGrT8qyYq1Br7iVNJyNQQGY6QdKEwloKeVo2kOswCOFpYbJwCkKo6v6yNeoGGxO5AngtHlmyjWMdlAg+g/Cp8zikKKRmDyFgrGfnR3Qxt/o6U2mIfo8evG5HIFd6++3FkGUZc+acDr+f18O64IIL8Le//Q0vv/wyfvazn+l9X3vtNSxatAgjRozAE088gdraWgDA3r17cfnll6O+vt4x/qeffoqHH34YFRUVePLJJzF69GgAQFtbG77zne/gs88+y3uuXV1dWLduHW6++WYAwPe+9z3XfnV1dQgGg1i8eLEezbx371589atfxauvvorrrrsOw4cPd923t5H3r768vBxnnXWWnlMmHo/js88+w8qVK7Fq1SqsW7cO8Xgcy5cvx8cffwyAW1DWrl1b0IQOHDgAABg40N1HrgmltH650Nraittvvx1TpkzpM4rxfgE9Gy8QV3Uyhs6Fg9db4poISeFERHM9STIveKhl7CXJCBCsgCRz0qLlpFGYAoFXVQJj/KZPeyHqKO6ipcmFdiWMcmrUH6JlgyG3HwACYUTHnAv/J38FADAxyPsMnQCldBBC5/2c7+IPmqwjyGwZ0QiHlNLdULRyCFgqDqWjngtFKwZBbtwJw/qijkcF0KphkBt3WMYnReVAKmocQ/1rlpnSquE8y3CnOqbq+tJIEREDlnwvtGIwkE5Cbq/LeM1Iaa1xtFAZSLA0Y9+eBq0anp/1SetfPhBy2/6enYQ/rOtfaNWwzDWuviRYuv4APlxdDxAKItrz8XCwVBwQ2x0Cap9fQDrVk8LlzJg1cRBOmdBz2iLDrTRP33bhhRfib3/7G1577TXccMMNOsl/+umnAQA/+clPdBIDAMOGDcO1117rGqDy73//GwBw5ZVX6iQGACoqKnDjjTfiyiuvzDo/tzDsmpoa3HfffTj77LMz7veb3/zGkpJl2LBhuPDCC/HEE09g1apVfZ/I2BEKhTBz5kzMnDkTAPfxbdiwAa+++ioWLFiAZDLZLb9ZLBbTx3dDOBwGAESjUdd2O26//XZEIhH87ne/69Gnw6qqDFVcC0RNjXtW0UMNMeCDTKk+n/qOJAgB/D5BD5MmlECRFRBCIAoUVEwiFBRBCUE8KYFSAkWgCPoEFIV
88IkUaUlBZ6cfRUE/WFkIaYkhIPshFwcgyQFQSkAUASTNIAoExQGKQCAFwD0baXewI10DBoo/tP8HvjumCQNLKYTtH+Xcb11qGOYEuYWwoqII6ZPOxf6aY1A1eAgSrUam23BlDarmXQPJVwQ5JqO8gn9HaSAM2ZcGEX1gap4cIvhAA2EEakogRSRIvgSUJAX1B0CoAEksBqEUodoyKAkfUrQL/uoSUF8AiVQYxOcHkyVOEqU0iOhHcEA54qkQCKVgsgwiiPCVlyLJ2kHVxGTBmhLEEyH4q4qhJAjkBBAcwIW+iXSYC68DIYAQyHEFoQzfSyUZRwrFCKrtCSmMQE3pQf+2kkoRmCzp4/YkEnX1qK4uBvUHLduVtB8ptPfKMY8EMCmNpBJ2vT7xeBj+6hIIwSKXPQ2UlrZBFAVu/fO7kzqFURBRAHGRJfgy7NPTKC0N9ti9eOfOnfj8802ora3F3Lmng6ru5tGjR2PcuHHYtGkTVq5ciRNPPBHpdBpr166FIAg455xzHGOdf/75rkRm9erVAIB58+Y52mbNmoXy8nK0t7dnnKM5/DqZTGLPnj3YsGEDbrvtNgwcOBATJkxw7OPz+XDSSSc5to8cORIA0NTkUrz2EOGgw6937dqFlStXYvXq1Vi5ciX279+v+/R8vsMbSrd8+XK8/PLLuPLKK3s8Lr6lJcIFqgeBmpoSNDV1T5jV0+iIJNHWkUBTmH9mbe1REBD4fYIq/iKglEcmURAIAkFXLI1EXAAlBImUBEIJIrE0EiJBKiFCFChkhSHWmYCcViALcUiyglBXEpKcBIskAEIgMglRiUCgBEgBUjo/EiMzAiEP683DkdMAABGhFIHB5YiIAsqQm8gIYPDPvAxK8y60tcfAUimguAodHXFEIiloTspILIWishqkoil0RdNob48DYCA+BiWRACFpIweHIIH4AYF2QYlFoETigJQARBmEClBivHZUpCkClopC6YhDQBfgS0NujwGizBPSUQrIMiBIEP1dkNrj3AcnK4BAQeUIlI447w+i96GIAMkYlGQMoo9/96Q2Pl/q59dSScQQCbh/L1kqDrk9BtFv7CuIXQdNZOS2KJgs63PqSZQAaG6JgIhW7RSTUpDb4r1yzCMBTE5Dbou5Xh+pPQaBdoEEsueWmTCiAseFarl1L0P1a6lhG2hJNWi43LL9UN8fe+pY//73AgDA6aefhZYW/sCtkaQLLrgAmzZtwiuvvIITTzwR7e3tSKfTGDRokCWwRUNxcTFKS0vR2dlp2d7Y2AhKacZQ7kGDBmUlMm454ZYvX46rrroK//mf/4m33noL5eXllvbq6mpXDWxRESezh1PwWzCR+eKLL3R30urVq9HS0gKAuw9KSkowa9YsTJ06FVOnTsWkSZMKnpBmcYnH7bVdODSLjXbxMiGZTOKmm27CoEGD9LAyD7nREUki6BfVfG7cUaRAzVKiZbUj0MW6WritwgBqcjUxAIqchthxAIBiFQAzxhdvMChMl/7qa32+a6IMCsGeIc+G3VIVuphh3SOa3oRQkBwJvigU0PJBoMVVJmmLpmkxaUcoBQEBVRPLmVXL1kR3NrGvliNGz4RLYM4HQ8wRTKb5OxPS6a02vYvqIrILTim1EA8iiKa8N0r+HwCyCGwLBRVyfh69gSMxCdshQU9etiPsI1i8+A0AwPLlS7FpE4+09Pk4AdA8CYsWLeqRnGY9+f2dOXMmzjrrLLz55pt46aWX8N3vftfSTh112/oOshIZSZKwfv163eLy6aefIhKJ6BaXAQMGYN68eTpxGTt27EFf2EGDuJ/STeAEAA0NDZZ+mbBjxw7s3r0bgwYNwve//31Lm2YCe+utt7B161YMHz4cf/jDHw5q3v0e6ufWFkkiHFR0rQtjqpCXQi8fwAh0VsLfq0/yely2uo6nIgAU+EQCBWreGZXRUCkBSQ9RYigN+5FWGAApb42MzChAshOZmGIk8SLg1Xkry4Jom/afiKdk1K59JOO
+gpb91izWpYIjwKeylLtkRL+AqrIQb2OakFQjPcwhRCXBErB4J8BM50CpSibA87R0mtIMEAJaWsN1Oup7a6STm5gYoJVDTX0AUlwFEjZE77RisGrl8YF1FpbWgFYM7pGbKc8b00tp4jIFM4l+fu4esiDTZ9vDTOaQZHLufaxfvxb793MN2Z49uwHsdu3X2dmJ9957D2eccQZ8Ph+ampogSZLDKhOJRBzWGIDrWerq6lBfX28pQaAhXw2pHUOGcKvZ7t3u8+6ryEpkpk2bhmTSyOY5evRonbhMmzZNP+mehCZC2rRpk2v7xo2c4ebrKjpw4EDGD7WxsRGNjY2uX5QvK3SrC8yJ7DQrizUPjE50AMuNyMgzoyayU9OUG7lm+BvCFCiEgjAGQgE/FcDSEoQ875FyHuUhY8weNUchCiLAkharSttJP0DFJw9YelaFNSGuQRCMHHTGJAVRgEZ0fKbKloQATHuK0StLA7rFRc34aq6wzCtGu93U1f1zZT61WXwA1eJi6UJ59JPeTdCja5jpUPkgn+rJeY1zmESxPTV/DweDI8cks2gRt8Z873v/D1deaTxAm/U3L7zwAn75y1/i1Vdfxdy5czFx4kSsXr0ab7/9Ns4991zLeG+88YbrcaZOnYq6ujosWrTIIexdtmxZVrdSNuzbx4vvZtKo9lVkXQkSCe6vHzZsGG666SY88sgjuPnmm/Ef//EfvUJiAP4BFRcXY9WqVbrbSkMqlcJ7770HSinmzJmTdZzjjjsOmzdvdv132223AQDmz5+PzZs34+WXX+6Vc+lvsJQTML1g4HEyWki2RkaYbmFR+5jJjIUEQc8ZY+Ix3JekdozEZcRTaki27b4mMfevaS4iE0UIr8dPwOwR6gYCriOhxHHvlMucTzVDy1QrjCk5HLGHKgMGYbC5dDhb02olZZqrOSme+Xj2sUyvif21HeYoqe4sErn26W8LT3+bb99B5tw8PWxFOQIsMpIk4b33eI61M890Cnc1nH322fD7/fjggw/Q2dmJb37zmwCAu+++2yKYraurw1//+lfXMbR9HnroIezYsUPf3t7ejjvuuKNb81+2bBneeYcXv821vvY1ZLXITJ8+HevXr8eePXvwu9/9Dr/73e8wdOhQTJs2TbfKaIrlnoLf78dll12GBx98EL/73e9w11136ea2O+64A62trfjKV75iCVO788478dZbb+Hyyy/H5Zdf3qPzORLRGUuhNJzhSZQBsaRkIRyae8m4d6lVqmElLJrCgbuVoiBSghMZpoBppEORuGUHDIRp1IjhnvcioAT47zNCsNdK6lBCqBKcUWpKjhwgr4nnokUpQXGA+8LG1Wpkg6qpWFxIggnpsmF6ODJRCQmBoGpb7MnqtPe2G7Kl1pHbcUyVq1VXkbnopWWOxLyPW0UpToKIrtGxEy7HDk7k0ae/6UpItwndlx09dc2yj9P/kwZyfPzxUnR0dGD06KMxcuSojP1KSkowe/ZsvPPOO3jjjTfwjW98A2+99RbeeustnHvuuZg5cyZkWcbHH3+M6dOngxCC/futaQKmTZuG73znO3jsscdw0UUXYebMmQgEAvj4448xePBgTJ48OWsumV/84hf662Qyib1792L9+vUAOEmaMWPGwV2MQ4ysRObxxx/Xw6pXrVqFlStXYs2aNXjxxRfx0ksvAQAqKyt1jcy0adNw3HHHHbQo6Nprr8XSpUuxaNEizJs3D+PHj8e2bduwZcsWDBkyBL/61a8s/ZuamrBz5060tbUd1HG/DGCMoaUjkYHIqFYTRnQri+Em4n8lSYFgdkPAZMkxRXEJ8RbLjrwGkwxfvFUvWMjAAIVBSiUACLy2U1pW6zsRnBbYhEn+3VAyWF4ybTfOht9A/QJw/elFKCIJ3eLBGEDMUXXmxTlcAXnKN5FM+wyrhugH0glUloeQTEioKAkZthRKVV0MYL5p07KBPBdMV6Npu028W1LFi/wpMiD4IDftBC0bqJMFWjYA8AW
NfdX5kEAYLNHl8iRLQMtqcXDIsvD4gmr9nP4DX+UgkM5Dk4/kSAIRxMzfJWKI9A8WtHwgIPR/F5/mVjrjjMx5WDScf/75eOedd/DKK6/gm9/8Ju6++27885//xIIFC/D++++jtrYWl19+Of7rv/7L4W7S8Mtf/hKjRo3Ck08+iaVLl6K8vBzz5s3DDTfcgB/+8IdZj//iiy/qrymlKCsrw6xZs3DJJZe4hnT3deSMWhJFEZMnT8bkyZNx1VVXgTGGLVu2YNWqVXrk0uLFi7F48WIQQhAOhzF58mSd3LjFnedCKBTCE088gQceeAALFy7EW2+9hYqKCnzrW9/Cj3/8Y1RWVnbrZD1wcEsKc32yVhgDMVliNOEuCIPC7HWUjNIEjPF2jdRwrqJw4S5jYETlOWptJaYoXNQqAPetMAjJnz9KQVb4vC4uWgUAaJDdk6pVCZGs59lJS6EleisKCKCSoWVhhOqZhAFua4kefQ6KtvHvMQJFfH6alcMXBEt0IRgQkUzKCAYEaHF1XN+hWmWYug/AE9mFfFC6mpBJyEqoCKiJwDQtEgkaOYosRfTM5nxfEEh0wY10EH8YLBVzWmR6QFRJCAH6WWE/GggD8EKsuwPiD2dq6TF3EPEFc3fqB/j9750hzZlw/vnn4/zzz9ff+3w+XHPNNbjmmmscfd99913XMQghuPTSS3HppZc62h5//HHXfbTM+YUi235f/epX8dWvfrVb4/YUCg6/JoRg7NixGDt2LObPnw+ApynWiM2qVauwdOlSLFu2DISQjKLdXAiHw7jhhhtwww035Ox7++23u8bFZ0JfuPCHC4bg1r2NJ78juuiX6foXoxOzchndpaSYxgFUokKtBySMcQuEWl8JDEjJxoIr92AErqFZoQAlYJSAEqKnkbeGRgNyuNp1HIslxK3MgDms2vXaEkB3F2V+hs3psiGa2cfFukNsL5jWL7v7LMOB8uznwUNu9DdXpIf+hx5xTg4YMADDhg3DsGHDMHToUAQCAXVB7P8CriMOmsYlQ7OiaASD8cIBproEhhiYGSlkTG2QJUAxko6pI2gHBUlHeZivef88Qm4LMWD/uPUK441Z8woTESEUjBC9vqMcKOVtdpWxoop1LbyBOtd5LSrILAy2wBggf7LiBpr1KVjLkWM+buGLSHf28fDlQw+LfT14OAh0K7NvNBrFmjVr9Pwy69ev17P6aeTF7/dj4sSJPTdTDz0GUx47BxQGUJNrSVH/EcBinZFkmbuSYPQNxBoRDhC0sYFQwEAURXW38GP54i1WgbAeD2XFWcH1aFaMcMXC7pfGSflEAYAMnouYJ60rL+F5ZXhBPwrfad9HW0cKpUUB+DTdkNnCQnhfEigCupp1wS4tNVlvdEGvOcLJmIdQWgO5k+c/omW14AUoc07fAVpcAYh+0JIqYz5m0kMISLhMPfkQaLAIJFhY2nVSVMHH9uAhC/h3MJPbyda3tEZ3n3rw0BvI69vV1tamlyBYtWoVNm/eDFnWCgnyG2koFMIJJ5ygV76eNGlSt0qJe+hdGLlgnEzGcCFpcUWmNs0ao37emqvJXEhSYQoCok8fjPdXeDg146SFEUHVkRgB3WYIkHFBeI1tXt2zEAyp8GPN3hSKg1S3lgT8Ik86p8UvldaARVtRVhyEUlEL6ajpoMMmQtJyxmikRs9zwsehwRKEzvs50ts/UbcbFasJobqgGYCa2I4TmcyaA713RoOMlsKdhAzNkGE9Ueel9aEUpLRw0S8R/VzY7MFDFtCQu27NDR4x9tDbyEpk/vd//xerV6+2xKlrxKWsrAxTpkzRicvxxx/vWofBQ19EZhOHotg1MsZfPSUMTEE6KsMxvETqe8bDq/l6zkkRlRmYaBL76jsauCS8wjEnnmE3f3QqQRSHfDh/YgnKhQRGVYvcbWTWtxACormJtE2Uwjf+bDApBZI2LDI2H5Uxr9qjVIJj1644+5quWA7k2880L8frfPp78ODBw5GBrETm2We
f1V9XV1frpGXatGmuZcA99H1o4lzGgHhSQsAnQGEMaUm2al70/iYNjD6IVdir2PgIY4op2kmrs8QASIDCfev/XB7HoBLgvOOMPf1I4+TgVsecQ6SwYmT/0/51/GE6RZFAcXSNAFkV94IIfC2nKnOhgKEJ0SwvmvVGs7BQwCYKtvIHsyWGgJktJA7kSTZ6SXvgaV88ePBwJCIrkbnoooswdepUTJ8+vccT33k4jGBAWlJQ1xRFeYkfkXjalNTOsMIA0EOuqSlqKZmS4PMJljBuWVIQUtPzB2K8Xg+3vBjuI5qMoaJpFQQ5gQkYjGXNx+ACOYkwSWFO8Au0yO56jnyITNfos7ke5EPAbEFhQoD/DZSApJN6YjtukTEJW3VSIwJI6UPQQBGYbD++NVqIhkrAEhH9mABAi7uXIoAGwnm7diw6nTxAAkXWcG4PHjx4OAKQlcgUEtLsoT+BqUJeBlmxuo0sKX2hWmRMXhPNAiMqGtFhoIwhHBRQ7vMDsgSSjoOyBCTq56JfcFlMWctalLRvAQB8s2gPNqaGgMoCvh5egSmBXRln68vDtaQEyyFUDQPQzOepEgqmCmuVQAnEaIvuXtL0L7RsEFDfqhMQ4guoOVj4T0NL/sYUWRf+2l06tKQaKOGkQmrczgW5qlalUBSSbI4WKOSlpTWFTseDBw8e+jyOjNzQHgqCUWaA6YoMpupgzBJcTlq02CLDWqO9B3iqGAbASNamYMi2BRi68VGAqZFL+oGthOTmiudRuX8ZAiSNgwa1uYBgEA6iZfPl76Bu5Bl5zbHV5irXcIukJlbNjCsytPW6W8cLh/XgwcOXEx6ROYKhMGb5yxhTxbzQs/DyBk0LwyzCXs3dpAt5TeukLgIGIOsqYP43kOBWkaKOHajZ9jInNGBwK5wYju7D8f66gz5XRnyu2hUtb4xGZgxxLmDWtziEs1rZAQu0fn2PMHj6Fw8ePHxZ4RGZIxQd0RT2NPC07HsaupBMydjbGMH+ligAhuaOuCnAmumWGPsarajKYLOVRsv4y0kRZ0ViskMbSkf1vvfgT7SiOFYHQSTdDqPOhGTpMOMNFWD+OodDRh0lQqmRFE9NGhcKiAgHtbwxsP419jS9zCbiPfwgoRIjh4wHDx48fIngEZkjFGnJKJLHGCArhh5GsVlkzDWV7C4lfQyYrDPgBMfvo3obUTP2Ws023JVUtn0RyotEhIKmIo09gPiACVC0RFtUNBVXBIpCGkkhkMqGoTjo1zP3CjUjUVESRDgcNIl8AYBySkfNlhptGGrfVBB622JCi6u6rcvx4MGDh/4Mj8gcwbDkfSE2mqE16gusKczaljPGDQrUxdniorISGfPSXb70LwjsX9Ptc1mRPMq5kfpBNLcZFawWH71cgC0HjNmwonc3vWFwdYFp/Yi9zTVfTCb0TWuOBw8ePPRneETmywSTroXZLDLWQpBGsUg9aolZw7KhmITCpkMQlon6HOzUnSSA+UxhytSejFGzoBCDz1gijsxlCMzFIE1aHtshSR7uJU+r4sGDBw+HFh6ROQKRTMs8N4y2gQENbTH+kmmuJgWMMcQSaZipiBZerbgQEvMmhTH4fRSBrr0gUsK096EjMoSKaB5zAdKDJwPUB38whHkzhuOSE6v0KCbmL4LVKqNGJIkBEH9Yr7nEmwnfJmguMOsxtdoyNFuNGdHvmquF+ItAg16qdg8evixobW3B3/52H77znW9hypQpmDx5MubOnYubb74Zu3fv7taYn3zyCcaOHYtf/OIXBz2/F154AWPHjsW999570GN1B2eccUaPJdYtiMiceeaZ+OlPf5pX3+uvvx5nnXVWtybl4eCQlhRbJC7TQ64BXoZAkk2WGb2XIeaF+tfcrsDUEUA46OMGDD1hHOstHuMKJoYhFdVCGnUKQAnC5RW4aNYoHDeiEoSKoKESKEVVOh9RgoYYVqgcAhouBxF8kCtGghACoWYkaFGFmo/GSZxoaS0Cg0Zbc73YugkVg0G
LKlz2rQHtRu0jDx489D8sW/YRvvWtr+KJJx5Fe3sbZs6ciTlz5kCWZTz55JM4//zz8dxzzx3uaR4xKKgkaV1dHQYOHJhX36amJtTVHXxYrYfuwRyBxBx/GRS1npLJc2RxPenjMF57iQBGGWzbWIc6Grl5wuVgRIAoiCAyVI+QYW1hBNYij45XHjx48NA7WL9+LX75yxvAGMMPf/gTfPObl2HAAOMh6tVXX8Wvf/1r/OY3v0FxcTHmzZuX99gTJ07EwoULUVJSWDJMN5x99tmYNGkSKiqcD179Db1WWz2VSnlFJPsUDDGvzBiIHrbEdNZjaHZNriaT6FdhDFRPLGcb2c6Wemn+AEAEH5iqcSGEwUhUp/fg9ZSY/s7aZkMmWYund/HgwUMhUBQFt976O8iyjO9//0e49NLLHX0uuOACUEpx/fXX47e//S1mz56N4uLivMYPhUIYPXp0j8y1pKSkRwhRX0CvaGRaW1uxbds2VFcXVgvGQw/CZJIxJ7gD1PpH5rpKNtONosgIRurUYQwCoajWl2BkHwKRfUY7YxAEYjlmTyPsM1lWtGR1lIARqkdlUUErkCQCEABB1BP1+kT+3k5OMpEV6g+4CIjtUEmUBw8ePIC7lPbu3YPa2gGuJEbD+eefj8mTJ6O9vR0vvfSSvn3s2LE444wzkEqlcO+992Lu3LkYP368ronJppFpbm7Gr3/9a5x88smYNGkSLr74Yrz66qvYt28fxo4diyuuuMLSP5NG5oorrsDYsWOxb98+vP322/jGN76ByZMn48QTT8T111+P+vp6x7EbGhrw4IMPYv78+Zg1axbGjx+PWbNm4YYbbsD27dsLuYTdQlaLzIsvvogXX3zRsm3Lli349re/nXGfZDKJbdu2IRaLYe7cuT0zSw/dgJVQMBuxUVRLDLPpYMAAosj6+8rSALpiEsCAsmI/ojEJABAO8K9ORbEfLRIQCohAovfEvqOrBaBVnaLgA5EkAJRnDAbBwMoiiD6Ba398IQjVNSBUADo7IVCKsiI/SsID4bQnAcMHOJ9KimsGIVydnxvVgwcPHgDgk0+WAwBOP/1MiGJ2h8d5552Hzz77DB9++CEuv9wgPYqi4Nprr8Xq1atx0kknYezYsTndPy0tLfjWt76FvXv3YvDgwTjppJPQ3NyMG2+8EfPnz+/WuTz11FN49NFHMXXqVMyZMwfr1q3D66+/jo0bN+Lll19GMGjk7Xr33Xdx5513YvTo0Rg3bhyCwSB27NiB1157De+++y6eeuopHHfccd2aRz7IeqXr6uqwYsUK/T0hBF1dXZZtmTBjxgz87Gc/O/gZeugWGDO7TKxRSVpkkrnFnAxPD7NmhsWCAbpbiSkMlspFioySlY8iPnzm/2/vvuOjqtIGjv/uvTOTDiEhFGmGktCl2bEAKwgurmUVhUUsqy6iIupa0BXbi4gi8O4KsqtiQRQb7yJkQZGmiCIsG1YQAgoktFACqZNMuff9487czGQmyQQCJOT57geZ3HpmbpY8Oec5z0H1OE/J+1EqFKczVP9yAx7ftsD9mEGMr33WAteBdWUCqGF6ZRRFQYtoaEl6ZIQQpp07zUVx09Or/6HdrVs3ALZt2xa0/cCBA0RFRbF06VKaN49sEdlXX32VnJwcrr76al555RUcDrM0xbp167j77rtr8hYs8+fP54MPPqB3794AOJ1O7rjjDjZt2sTixYv5/e9/bx3bp08flixZQseOHYOu8X//9388/vjjTJ48mffff/+E2hGJKgOZ66+/ngsuuAAwf3MfM2YMaWlpPP3002GPVxSFqKgoWrduTVJSUu23VtRIYH6LfxDE6jPxJe6G1pMxfMcGBz/+g6w1mQL220uPYivYR+wvX6O4T00gE1TrTlXBW77cQPCaSUKIusKdtRb39jUndO5+uw2321PLLQrPnn459rRLT/o6BQXmUi2NGydWe6y/l+XYsWMh+yZMmBBxEFNUVMTixYux2+1MnDjRCmIALr7
4Yq655pqg4atIjRkzxgpiwMzP8QcyGzZsCApkKptGfd1117FgwQJ+/PFHCgsLT1lOTpWBTKtWrWjVqpX19fnnn096eroV3Ij6wd+Lglo+5UhHQSUwMCmnA1rgViOg9yWAWnwEndPDnJikobbu6Vs7yf/HlwRTMdnX/0oBmyblkoQQdUtl+XmKojBw4MCIr7N161ZcLhcXXHBB2ODn6quvPqFApn///iHbzj33XMCclVxRWVkZq1at4r///S/Hjx/H4/FYxxqGQXZ2ttULVdtqNGvpVHYNidoVNLRkgEc3sAf8H0cnTKZ3FTOPjDB5rYqrJCgG0pyhv1mcrA+KLmFU/HeAQW6fe2jdrDGJXp28Yy4axUdx9FjVv7G1aRYfNCwlhDh97GmXnnBPR0pKAocPF9Zyi06tRo3Madb5+cerPTYvz0z6q5j/kpycHNSrUh1/UNGyZcuw+yvbXp1wpVbi4syini6XK2j7tm3bGDt2LPv376/0esXFxSfUjkjU2q+qhYWFLFq0iDfffJN169bV1mXFCTDjlXBJt+XTrf2TjMqXIvAdETTUFDgNO3RKdsUhplMheP0kQFWw2TQUVUOzRwWtqWT9hhNwiqaqqOqpDGQkSBJCmDp2TANg+/afqz1269atAHTu3Dloe1RUVO037AREWn7CMAweeugh9u/fz6hRo/jnP//Jv//9b7Zt28b27dv57W9/ax13qtQokFm8eDHDhw/n448/Dtq+c+dOrrnmGh5//HGmTZvGnXfeyeOPP16rDRU1Y+XrBtWKCU7qpcLf5Ym/OkEBikGFTprA+dwGRi0HMzvczdlQlkpJ2+Df5BT/XGrVlx+jaqGLQwohxBly4YUXA7Bq1Qq8Xm+Vx2ZkZABw2WWXndQ9U1JSADNJOJxw06Vr06+//squXbvo0aMHzzzzDJ07dyYuLs4KhHJyck7p/aGGgcyyZcvYuXNnSI7M5MmTOXToEK1bt2bQoEHExMSwaNEiFi9eXKuNFZFxlnms0OJgXklw9V7M4EYhoLKvb6f1v8C4xNCxuQrQXPm+cwnphLG5a7fL8O2iK3m/+DI8Sans85pJ467G7XxrVCsoimbOWLISfc9UDowS8W8tQoiz3yWX9Kd167bk5h5kwYIPKj0uIyODTZs2kZiYyHXXXXdS9+zatSsOh4NNmzZx6NChkP3Lli07qetXJz/f/NkQbigqJyfH6nk6lWr0E2Dbtm0kJiZaCT9gFsJZt24dLVq0YNGiRfztb39j1qxZGIYRUoNGnB6FJW7fKwNnmQe3x0zJ9XjLg5RGcXZcHrNejH/dJQyIi7HTKN6Bzb+QoteFVpZPU0dZmDvpaJpCQkztFoguMcyu1ZgoG/u9TXgs71bKktOsFaodjZNp2jgGUNDjQtcvOl2hheKIQU2UWjNCCJOqqkyc+AyapvHGG3/j44/no+vBUyKWLFnCk08+CcCzzz4bcVXfysTHx/Pb3/4Wt9vNSy+9hNvttvZ9//33fPHFFyd1/eq0a9cORVH4/vvvyc7OtrYXFRXx1FNPBbXnVKnRT6C8vDzatWsXtO2HH37AMAyuueYaYmLMVX8vuugiWrZseVoiMVEJX3KuRzfQDVAxMBRzArZhgN2m4rBr6L4qv/7KMFE2lRjNRmFANGAYBnZNxZ/epQQMRCmGju0UdYiYS1zolOFbjVpRzJGl2EQchUcoU1SwR4PnzPWKKPbo6g8SQjQYPXv2YvLkV3nuuaf53/99jQ8+eI8+fXqjaRpbt24lOzsbu93OCy+8UKN1lqry6KOPsn79ejIyMsjMzKRXr14cOXKEH3/8kZEjRzJv3jzsdnut3Kui5ORkbrjhBj777DOuvfZaLrroIux2O+vXr6dRo0YMGjSIr7/++pTc269GP4LKysqsKVV+GzduRFGUkOGmlJQUCgvrV8b52cII+K/uNdAN3aoFUzGbxdCNSvYZoXkxITcKSZ6pXYFF7vxfWgm9AXVkrCOEEOLMu/TSy/jww88
YNWoMjRs3Zu3ataxatQpVVRk5ciRLlizh5ptvrrX7JScns2DBAm688UacTifLly8nPz+fl156iWHDhgGQmJhYa/er6Pnnn+fRRx+lZcuWrF27lszMTIYMGcKCBQto1KjRKbuvX416ZJo1a0ZOTg5Op9Pqffnmm29QVZW+ffsGHet0Ok/LGxDldN3A5TETzAwDDMVcH0nXDTQ1IAG4wvpJircUXYvCCgZCYoLA4niByb0GGFUntFXlmDeWJlqJ2Q7VjqqXd0E6tDDNUJSAKrxqUBBTsbKvEEKcScnJTRk79gHGjn2AlJTICsFt3769yv0XXnhhpcc0bdqUyZMnh2z/+9//DoTOjrrhhhu44YYbQo6vqsxK69atw97fZrNx9913h60iPGXKFKZMmRKyfcWKFZXep6Zq1CPTr18/SktLeeGFF8jKyuKvf/0r+/fvp0+fPkHjfC6Xi927d9OsWWj+gjh1jheVcTCvpHxSEb51lfTgjpNGceU1CnTDwF5yBMVdElQKODAWMHxDvFF2jcR4BwlxDuw2Dd2r4/V48XhrVhbvjcJBPH7slgpby+9474Uq95xPQMVe/xEKSrRZx0Br3IyoxCQUBfSE5igx5j8UNk0hqZEM9wghGpYtW7aEbPvxxx+ZM2cONput1oax6qIa9cjce++9fPXVV0GLSaqqytixY4OO+/bbb3G73UHljcXpUR6LGPgXhay4L9pevqqzGeQYGIaBrocfoFF8kZGiKMRG29FKVCYtyqOtdoRCI5ooxcOTjSNv48/uViHbjIBulIQoG4bhCe5a8Q0jqdFmwKxExWGtpmSPRo2P8x2m0Cg28mJSQghxNrjpppto2bIlHTp0ICYmhuzsbCtP9ZFHHqFNmzZnuIWnTo0CmQ4dOvDee+/x+uuvs2fPHlq2bMntt9/OJZdcEnTcF198QUJCwknPjxc1V14LpkLNGF+pXwOCohXdd6C1ArZVkqXCEFIYjzQ26yBMyR8eUdsyXW1Jt5dXfgwaAgr8QlVQvMELRVqjXjJuJIQQIe69917WrFlDZmYmRUVFxMfH079/f0aNGlWjJQ/qoxrPm+3RowdvvPFGlcdMnz79hBskTpJh/ScomCnfEFyw36ohE9h7UyFusQKa4CW1Ld3seyNq2ttFV1a6zxOdjFa0z/xCVUFXQgY+wwYxEtcIIQTjx49n/PjxZ7oZZ4SspnfWCQhiAgIQf2xS6vIElezXdf8SBWYw4w9zYqNtNI5zYNdUom3mtG2lrCDwFparYn46oZbaVfNC84ouJS91cPk7UOy+dqjcf0UC91zmSxqXoEUIIUQFJ1zJ7N///jfr168nNzeXsrKyoGzpvXv34na7SU1NPeGGOZ1OZs+eTUZGBrm5uSQlJTFgwAAefPBBkpKSIrrGkSNHWLFiBStXruSnn34iLy+PuLg4unbtyq233sqQIUNOuH11lb/TxKrka/gnY5uBilc3yns2jPKgx8qn8e2yaxpqjB08XhJi7WY1YN1rBT2BopUTK3jk0AAv7PC0YKCtfH0RRVWs+dZNE2x4DQVD9xJlD/12ldhGCCEathoHMjk5OTz66KNs3rwZ8JW7V5SgQObNN99kwYIFvP/++/Tr16/GjXI6nYwaNYotW7bQpk0bBg0axM6dO/nwww9Zs2YNn3zyCcnJydVeZ8qUKXzxxRfYbDa6d+9Ov379yM3N5YcffmDdunWMHDmSSZMm1bh9dVXwwo8Bw0C+QMU/FTsc3XdyaGCgmIm+eK0eHm8EtWO8tlh+dp9Dd2Vntcfe0rt86rczvrU5W0lRyxeD5FQvTSmEEKK+qtHQUl5eHqNHjyYzM5Nu3bpx//33h1T6Bbj++usxDIOvvvrqhBo1a9YstmzZwuDBg1m6dCkzZsxg8eLFjB49mn379oWdKx9OYmIiEyZM4JtvvmHBggVMnz6d+fPns2DBAuLj45k/fz6rV68+oTbWdRWTfQ1Cp2FDeUrv+6v2snX3MQD2Hy7kfzLyKCo
uBQwy97mZ+lUButcDho4ewWxrzVNCiRHZ7KHmCWbQciDtZo60GeBbT8kX2vj/U0nXi+T+CiFEw1ajQObvf/87Bw8eZMSIEXzyySeMGzcubM/IeeedR2xsLN9//32NG+RyuZg/fz52u51JkyZhs5V3Gj322GMkJSWRkZERdnGsip5++mn+9Kc/hQxF9ezZk3vuuQeApUuX1riNdZlhYObA+PJizCEl87UOxEaXT72Oi7ET7dAAg6JSL9/+9wA2w83uTT+QwlH2794DwOKfnBSVGbidTrT8vdiPZnFFVPXLT2SUnMduT0ql+/0xiKGoGKodbHZQNV/soqFrZkltxQpmQqOW+Bg7CTLdWgghGqwaBTIrVqwgJiaGiRMnVjsNtm3btuzdG9lslkAbN26kqKiIfv360bRp06B9DoeDAQMGoOs6a9asqfG1A/mrHEYSENUvBrFRZvBnrqPk32x20TSOL89FiYmy0TjegREw3KQpBsM9S3ms8RI++rGQmZ9vp8y3KsVLGUeY9H+HaLJzKTfEbai2JcdcdqYXDGW/J5ElJb1C9h9N6Yuh2tFtsRiqzazu6+uKUVQNA63aHpemjWOIjzk1a4gIIYSo+2oUyBw8eJB27doRFRVV7bFRUVGUlYVbMblq/vLHXbt2Dbu/W7duAGRlZdX42oFycnIAIk4crg8CK7+YG4zypN9KKChB+40KkUN+yYkl8m4sO9d6/XLBtXxZ2jPkmOJG7cm/+D7QbGbBO1Uzp14Diqr61r1UUFRfnowkygghhKigRoFMdHQ0BQUFER178ODBE1qk6sCBAwC0aNEi7P7mzZsHHXci3G438+fPB2DAgAEnfJ26yD+c5H8dtKMSekCEUOYOXjspzbafEbHratwObwTfWtY0cP+okaqhKgqq4g9kAtZSIvBrIYQQwlSjQCYtLY2DBw+ye/fuKo/bvHkzubm5dO/evcYNKikxFxH0L0pZUWxsLADFxcU1vrbfzJkz+eWXX+jWrRtXX331CV+nLrNqwxCmpyaAVnwoaDr11j3Bgeq4Rsu5JHpHhStXT48okIGj+WXkF3tAtaGggqqZi0OqKnabhuHwLUlgrRV5whUDhBBCnIVq9FPhd7/7HRs2bOAvf/kLs2fPDloo0i8vL4+nnnoKRVHCrqx5pi1ZsoQ333yThIQEXn31VVT1xGoCJieHvvcTEemqqJEwNA1dNRd2dHrB0M3aMbExOqoKXh2aJMYFnaO7FYpjowAzgPzi+338porRtkj7RLxG8OfaopHGwYLg3p64+GhcigMUldgmjVC0UnNkyR6FpkDLFo3QjeboR3PQ3aVEt06nWYszv6J6bT4zcXrIM6uf5LmJSFQZyAwaNIiePXtaSw7ceOONfPHFF6xfv57hw4czZMgQjhw5AsBHH33Ezp07+eKLL8jPz+fKK69k8ODBVV0+LH+Pi9PpDLvf32MTFxcXdn9V1q1bx+OPP47D4WDWrFm0b9++xtfwO3q0qNKaLJFKSUng8OHCk7pGoKP5To4XlOItc5OfX4LuNQvceby+dZZ0g5gKT1wtLKG4KPJcJiXCHhlvhZAn3GdVUuLChbkYpNdeis3pRsVAtyloChw77uSYXoJS4ARPGSV5xcRqZ3Z4qbafmTj15JnVT2fTc5OA7NSqMpDZt29fUK6KqqrMnj2bSZMmsXjxYt555x1r33PPPWcNUQwfPpwXXnjhhBrUsmVLwMyxCSc3NzfouEht3ryZ++67D13XmT59OhdccMEJta9OMwISYg3QDbBWI/Dt2J59DFBIb5sIQGGpzrrdrogun6A4GREXWb5MSI5M2PhDCXqlYObGmEV9rdUrzUrFUhVPCCFEGDVOOIiLi+PVV1/l3nvv5csvvyQrK4vCwkJiY2Pp2LEjgwcPrnTGUSTS09MBrOXHK9qyZQtg5utEaufOndx9992UlJTw4osvctVVV51w++oH8ye+Dqi+Ar9mMTyDBSt+AeDZ37fDsEfz+b8L2XXEE9E
1b437jm6OfRG1oOLQUqBhXaPYcchl5sPooCgqKKAqii+QMXtpFEUlOkoDm4ZuqNgcWqXXFEII0TCdcOZkp06d6NSpU222BYC+ffsSHx/Phg0bOHr0aFDBPZfLxcqVK1FVlcsvvzyi6+3du5c777yT48eP8+ijj3LTTTfVepvrGn8l38DVqiuuj+RyFrN+23HKPJF1cygY2BRv9Qf6VDVrqWMzO52bGqD4i98FFvBVMBwxeONTUJs2prmi4DViMdwqtsTwCeBCCCEarjq3+rXD4WDkyJG43W6ee+45PJ7y3oKpU6eSl5fHsGHDaNasmbV92rRpXH311cybNy/oWkePHuWuu+4iNzeXu+++m7vvvvu0vY8zJagmTIV6eIGW/VTE8p/y2X88kt4YM9DQq+hlqSiwR6ZRFAzpGkPXc5twUWpUeTFF/zoEAV8r/j+qUm3RRSGEEKJOzmW97777WLt2LcuWLWPo0KF0796dnTt3kpWVRatWrZg4cWLQ8YcPH2bXrl0cO3YsaPszzzzD7t27iYmJ4ciRIzzxxBMh92rfvr21XMHZwShfY8kwyCtwUVDi4pzkODPp12dTdmmNrupQPOhVzFny2GKwecoTtAN7ZP48IBrd7iC1SwdcB3bidAVHVUrAC8VfU0aCGCGEEBGoNpD597//TZcuXU7o4oqiVJrrUpWYmBjmzZvH7NmzycjI4KuvvqJJkybccsstjB8/PuJqvP7ifU6nk4ULF4Y95oILLjhrApmiUjcGUOJbU8Aw4MOvzRowf/xtFzJ/OWIdG8mEKwflVX1/F7OxykDGHdMUW2GO9XVgIOM1FAqKXcQnY/a64A1I5jUCAhcVVMVaNFIIIYSoTrWBTMXcitMlNjaWRx55hEceeaTaY6dMmcKUKVNCtr///vunoml1lj84MQzYc6CApo3Lc0qaNooiK/t4ja4XONU6US3GXcW3izeqMQQEMoe95nTDLi0cuA0NtydgyWxfEGOoKniN8pWuUaxcGbtW50Y9hRBC1EHVBjJpaWk8/fTTp6Mt4iQYvkUhDQOKnG6++G4PrZqW19pZlXmAQmfN1k1SlfJApp3tCHFq+Gnar+UPZWSPZhgtuvD6DxDlLWS3x8xhMmMWFQU9+KTAQEVRwdcLo2kaHkCTQEYIIUQEqg1kEhISzs6aK2eZwMUh9x8xl284FlDobtueY2HOCtVSKz+uve2w9bqyIAZgjzcFVbNh2BoxdpCdZzICZhcp/oUoy9dNMnteNCsPxj+ypKBIrRghhBA1Ir/2niXcbt0aBvx8za8h+48XRVb07onGX1iv70lYUe3xLsOs7aKoqjVkFEhRFFACFnz0RS2G6gtoFAVV8a147Ts97GwlyZkRQggRRp2ctSRq7nhxGV698sUhT4VPii9gbZlZwBBNxdB8CbsB1PLuFgDccc1x2cGGC0W1Yy/ajxoVbR6iSrQihBCiZqRH5izi1Q2Mk1z/qSb+Y6RZPS2aL88FRSE1OTD/xddj4y/MpzpQNQ3DHoths5vVfDUb5YsUQPjuFwlyhBBChJIembOEWbDu9KaYTLgiBpeuYWg2NA1fJWG47fxoNuyDJf8tCZhGHRCI+OvgoaCqKqqqmGspETo0JYQQQlSlykBm27Ztp6sdohboXj1oeKakNLKqvScqSndij2qEoWkYGCi6jqGoaKrKOYlmr0zb5CjK1yAwqUp51V5NBRQFFf/kJUViGSGEEBGTHpmzRHSUDa9hoLsjXw+popAp0tWeoPr++GYbBfSqtG5i58Hr0mmcEE1ZmRu3txDADHgUpXwQSVGwaSpuRyN0I5rmTRPYm1cW7mYn/L6EEEKcvSRH5ixiAL/uLzzh8201DWQCZypZM5bKXycmJqBodgzNgR7VyDqtYkgSH2MHRcUeE48jJhZFlVWuhRBCREYCmbOI7tXZd6So2uOaJzq4rIMjZLum1CyQMRQbhj/JN2h6NVWulRQ4/CVLEQghhDgZEsjUU2UVhpC8Hp1Z/7e
Fzb/kVXtu68YqHZJDH71W4x6Z8qJ2KP7r+SIZNfj6/njF7dF9tWUUa7VrCA5oJLgRQggRKQlk6qkDR4spdZUn87q8kQch17XcR1LBzwB0sOUyM+k9WmrHahzImL0x5vCS4YizKvgaiooe2zTsOWUuHdUXx6hWTk3g35DcKJom8VE1aosQQoiGSQKZesq3tFL51zWoH9MoawntDn0DGDzYaBkA4xK+IlEtqWEryoMPPS4gcFEUDHtM8FEVellURUFVA1a5DtifEOsgylExT0a6aYQQQoSSQKaeqhi26BEGMrGO8oDgQscv1msVnUcaZ9SwFRWDC3+ib/C3VWDL/D0xigKaL1fGV35GCCGEqDEJZOo5j29IyWtEFsikNy9P8h0Z/531uqpFISsVrjslaDt4PLrVc6TrhlVDRlXApqkR58OEXX9JCCFEgyeBTH3lCw72Hi7Cq+t4vJEFMsN6xFd7jKfROTVri80e3DRHrPX60HEnx32rcB/MKynvtEFBC1ibKTE+isbxoTOpANTYRJTYxjVrkxBCiAZBApl6ywxc/LkykQwtDegch8NW/XGeRq0ia4Kvk8Sb0LJ8g+ZAj0uxDlGtWU0Kim+1a3O2EkEJvU0SooiLDg6IrNtEx6PGNYmsTUIIIRoUCWTqOf+wjVevfsaRYRDRYkx6VCN0R0IEd1dC50pX+DpwSMi/+kDFb7rTu2a3EEKIs4kEMvWIYRjW3+UpMeYLj6f6QMajGyiuqiv/uhPb4klKpfC8WylrcZ61vbTdJaHtCQlisFbDtjYF1MkzZyqZX1WWXiOEEELUhAQy9cie3EKKS93syS0PRgwD9mfnUHwop9LzYuxmtODVDaIO/lTlPbyxTc3idvYoylr1Kd8eFa6HJsyspZBDgle99g8r+bc7omKIjqs+b0cIIYQIRwKZesQwzMq4gROUdMNA97iYvbLyir4Du8RZ56NXs6ikagNFNYvdqRruxHa+HQplLXpU08LQlasD6/2iKNh8tWP8h7Vo34H4xonVXFcIIYQITwKZesQfv/gTfM0/BsdLqg5OereLoV9bO1ekxaAYVR9rqKo5ZOT7o0ebs4UMewxl7S4h/5L7yw+uJj8G/NV7fa/LD7QCGyGEEOJk2M50A8SJKM/a1XUDTQ0fj3a276OJWoxda8a13R0YNiLokdEoT8lVKGtzAZ4m5+JNOAfQfcsSBKpmeKniGkpG+fRrIYQQ4mRJIFOfGHC8qCwo0Vc3oLJllsYmfA3Acf0SFEM3k4V1T/iDfTyNWpk5Mqrq65Wx4WnSztf9Y66tVJx2NVrxUfMEW0DtF9WGYSufUl1Q7AqatWTXVHQMf2ePEEIIcdJkaKleKZ+tFDjM5K5uxpLhAcNA0b3VDi2hRZmzj2zRGIqCoVDeC6OayS3ulHTK2l4IgDe+mXWqt3ErjOjywnVOl8es5OubpeSwq6gotE6R5F4hhBC1Q3pk6hHDwAwyDMMaXDIMo9IeGT9F18HQidv6T2z5lc9uMg82V7A2qYBu5cuUr3btv2H13Sr+CUoKmItE+pN9Q/OChRBCiBqTHpn6xoCDx0rIyjkO4BtaqiaS0d04Dvy3+iAGAMVXH8Ywh5h8+TKBCcBmMBNZcxV/tq+Ctc6SmeorYYwQQoiTJz0y9Y7B20u2AdA9NQlHwV4MlwcNL160sGdE7/meqIOZVV61tFVfbIUHyntkDHyvNfNvQwHFlwhcg2p2qqJg08x8G1VVUai+cJ8QQggRKemRqUf8U679DhwtJudYGef9/Ffuil9lbY+tsGRRdUEMQGnqZTg7DDK/8AUqhqJiqCreRudgdqloeBu3wpvQnIh6VHwzlBx2zUyvUcrjJOmQEUIIURukR6Ze8SXJ+Pzji58BuCgJujn2AdDP8Qupjjw+cfer4bWDh4uUwHtpdl/wofqmZ9fwyqoCemAVGYljhBBC1A4JZOqJvIJS9h4uxlahZoxaYahmdPxaAD6jb81u4O8q8efAYJjRhhGY+Bv
unMr5O49Uc6Gl8nWXAhdgEkIIIU6CBDL1xJN//z7sNGs75XVhzrUdCrs9EoYBRaVe4mJt5gwlVILq1imqL/nXd3wEeTKGXh7rWEsVKOV/q1JMRgghxEmSHJl6orJaMQ6lvC5MnOKyXk9N+qhm149OxOVVrGDFcMT69vgWnGzcCm9cSsAZEUy9tjpeFNSA1wCtmsYRFy1xtBBCiJMjgUw9Z1fKe16MKo4LpyRtSPm5tijfFGsVNH+AEZDNomph8mOqDmbMqdb+IsGKb5uv3TYtqOqvEEIIcSIkkKmHktVCHLgBsFPeIxOrlFV6jiu5U9DXuj3WXI7AoqKgYFhrLUHwVOtKVLHfv6aSoijVXkYIIYQ4ERLI1EGGYaA7C4K+DvRM4kLuTVgBgD1gaMmf6BuOf0kBP3dS+6CcF6+OLw9Gw+1VKHK6cHt0PF6d0jIPxaVudN2gpNQMoJxlXkrKPBQUuzhy3ImzzEOx043bYx6fX+iyAhg1YHElCWiEEELUpjqbpOB0Opk9ezYZGRnk5uaSlJTEgAEDePDBB0lKSqrRtfbu3cvMmTP57rvvKCwspHXr1lx33XXceeed2Gx18CPwutELj6LGNALA4y0PZK6P/RGAjvZcZia9R4EeHdk1A4IW3RFPWbuLMdTyBR/zS9zEqWBodvKJB1cZrmI3RmJLdKcHt0dHjzUoKvEQmxRFSakHb0IyLqcHRYHjhWZ+jqZ58HoNYqI1tICgxbq7RDJCCCFqUZ3skXE6nYwaNYo5c+YAMGjQIBISEvjwww/5/e9/z9GjRyO+1i+//MINN9zAokWLOOeccxg4cCAFBQVMmzaN+++/H7268v5nTPiMlyujfw76upFaGtnVVBu6albKK0kbiqGas5OK04eSk36L7ygFPSoBXbVbxesM1YGq+NZJQkHTzB2KqmLYYlBVBVU1tyu+daBUVUFVVDTN/PZSAtZvUmXOtRBCiFpUJwOZWbNmsWXLFgYPHszSpUuZMWMGixcvZvTo0ezbt4/JkydHfK2nnnqK/Px8nnzyST755BNmzJjBsmXL6N27NytXruSTTz45he/kJBjlAZbLXc2K1dVwRTUB1WatYq1HxfmWGzBXsvbENCWwhowZxBi+oSElKFXGP2VaURVURfEFLYq1jhKGGdBoqoKmqcHlaTATf4UQQojaUud+rLhcLubPn4/dbmfSpElBQz+PPfYYSUlJZGRkcOjQoSquYvrPf/7Dpk2b6Ny5M7fffru1PS4ujr/85S8AvPPOO7X9Fk6K26NT5vKCYVBSVETh8eM8MPMbILT4XXWKm5/H7vQ/cKDTjRS7DErjWpr3QMNjmHkxxaU6Cgoe3UBBpcwLbo/ZG6QoUObyovgCleJSN6oKBUVlKKigqWiqGZyYM5PAoPxcmwrlE67xbZceGSGEELWnzgUyGzdupKioiH79+tG0adOgfQ6HgwEDBqDrOmvWrKn2WqtXrwZgyJAhIfu6detGmzZt+PXXX8nJiWRV6NPj4NFijv6wCFf2Zg7s2sXB7N201Y5wS+x3JChVDyOVRSdzpPWV7O9wPUdaXMSx+I4YWhSO6GhKPZDXdhD7z72W46Uqx4s8HC92U+rW0WMTUVBQNAVnmRdFVTDscXijk2iSEIWmKKgqaKpKbJQNRdVQ45NQMbcpikJslB3NP8ykKjhsGqpqLnuQkhhjrrlk02jaOMKcHiGEECICdS7Tdfv27QB07do17P5u3brx2WefkZWVVe21tm3bZp0TTteuXcnJyWH79u20adPmBFtcOwxd58CPy5m0UmNm0r/wAsuLLuGgtzH3JqwgXi0jRSuo9Pyi5C4cT+qBrtmx21SKbR2xKTpodjRVRdU0UGx445uhoKKoKhgGRlQCelQjNLeOpmqoqoJNBVWz41Hi0TQFVVdRDANNw5cPo0JsY1SPWbpXBTRNwW7TwOPFrqnmEJIOoBAfY+fQMTPASYh1VPoehBBCiJqqc4HMgQMHAGjRokXY/c2bNw8
6rioHDx4MOqci/z0iuVZt83q8lDmduErNXhbvthUkZH5MX0d/65iR8d8BcMxrVtntaA8/nPZW4RUM7t0NpbQQQ9ex2Ry4PG4Um4KqaSiaimazoQJeDPAq5liQoWO3aXj8uS6aOT5kU1VsmoKum0NKNhW8Vr6MguYrcKfr5vLWum7mzjhsCrqhYLOZ1wmYdS3rKgkhhDgl6lwgU1JSAkBMTEzY/bGx5g/14uLik76Wf3sk16ptv859ghbG4ZDtt8V/G7KtiVbCYW8CKVphyL6D3sbstbXFbUCM4cbQQbPZUFQdVTFQNBWbpuLRbGYOi6KYvSsqeHUVu13FC9g0BVVVfUNMqjlMpChmUq+mongM8xgA1UziVdXyxF9FNXtkPF4Dh03FXPC6PHqROEYIIcSpUOcCmfoiOTn+pM7f3Xs4Wb9uC9qWVLqXbKMlzW0FxJYewmVo5BtxFDqaUZTQjoSyQ7Qt+ZmE9t1wNGuPvn8LBfZO3NeyNcnxKh5vojljCIU4rxnIoGho0XHEGwa4nWCAV7WjahqGakeLSbCmZaslHpLjmuBFs3pcEhOiKCxxWb0vKY2jKSwxC995A6aux8c6cJZ68Oo6mqqS0iSGvIJS4qLtpCTFojpsxETZiI22n9TnVhekpCSc6SaIGpJnVj/JcxORqHOBjL/Hxel0ht3v72WJi4s76Wv5t0dyrYqOHi0yf7ifoHP7Xc75Q6/h8OHgXpbUmlykW3/6nnALwvDNagqi6yQGLO7odXmItSlg04DAtZcMHNHl21xOF/F2Fbxe6z0WuzwUF0ZW96auSklJCHlmom6TZ1Y/nU3PTQKyU6vOzVpq2dL8YerPb6koNzc36Liq+HNg/OdU5L9HJNcSQgghRN1T5wKZ9PR0ALZu3Rp2/5YtWwBIS0ur9lqdO3cOOqci/z389xRCCCFE/VLnApm+ffsSHx/Phg0bQpYicLlcrFy5ElVVufzyy6u91hVXXAHAsmXLQvZt3bqVnJwc2rdvf8anXgshhBDixNS5QMbhcDBy5EjcbjfPPfccHo/H2jd16lTy8vIYNmwYzZo1s7ZPmzaNq6++mnnz5gVdq1evXvTq1Ytt27YFVfAtKSnh+eefBwiq+CuEEEKI+qXOJfsC3Hfffaxdu5Zly5YxdOhQunfvzs6dO8nKyqJVq1ZMnDgx6PjDhw+za9cujh07FnKtyZMnc+utt/LSSy+RkZHBOeecw4YNGzh8+DBXXnklN9100+l6W0IIIYSoZXWuRwbM+i7z5s3jnnvuQdd1vvrqK44fP84tt9zCp59+SnJycsTX6tChA59//jnDhw9n7969fP311yQkJPDII4/w+uuvm2X0hRBCCFEvKYZhnPgc4gbsZKdfw9k1vbChkGdW/8gzq5/Opucm069PLemOEEIIIUS9VSdzZOoDVa2dovu1dR1x+sgzq3/kmdVP8txEJGRoSQghhBD1lgwtCSGEEKLekkBGCCGEEPWWBDJCCCGEqLckkBFCCCFEvSWBjBBCCCHqLQlkhBBCCFFvSSAjhBBCiHpLAhkhhBBC1FsSyAghhBCi3pIlCk4Rp9PJ7NmzycjIIDc3l6SkJAYMGMCDDz5IUlJS0LHp6em0atWKFStWnKHWNgw//fQT3333HZs3byYzM5NDhw4RGxvLpk2bKj3H4/Ewd+5cFi5cyN69e0lISOCSSy5h/PjxtG7dOuT4gQMHsm/fPrZv334q30qD4XQ6+fbbb1mxYgUbN25k//792O12UlNTueaaaxg9ejQOhyPkPHluQjQcEsicAk6nk1GjRrFlyxbatGnDoEGD2LlzJx9++CFr1qzhk08+ITk5+Uw3s8GZNWsWX3/9dcTH67rO/fffz8qVK0lJSbF+2C1atIjVq1fz0Ucf0b59+1PYYrF48WKefvppADp06MCgQYMoKipi06ZNTJ06lWXLljF37lzi4uKsc+S5CdGwSCBzCsyaNYstW7YwePBgpk+fjs1mfswvvvgi77//PpM
nT2batGlnuJUNT69evejcuTM9evSgR48eXHrppVUe//HHH7Ny5Up69+7NW2+9Zf2wnDt3LlOmTGHixIl89NFHp6PpDZbNZmPEiBGMGTOGDh06WNsPHTrEvffeS2ZmJrNmzeLPf/6ztU+emxANiywaWctcLhcXX3wxZWVlrFq1iqZNmwbtu+KKKzh+/DirV6+mWbNmgAwtnSnp6elVDi1dffXV7Nq1i4ULF9K1a9egfddeey3bt2/n448/5rzzzrO2yxDF6bNp0yZuueWWkP/vyHMTomGRZN9atnHjRoqKiujXr19QEAPgcDgYMGAAuq6zZs2aaq/13//+lwsvvJBevXrx7bffnqomizBycnLYtWsXbdu2DflhCDBkyBAAVq1aFfH1Bg0aRNeuXVm4cGFtNrXB6ty5M2D2zvjJcxOi4ZFAppb5f6ML948oQLdu3QDIysqq8jo//PADY8aMwev18tZbb9G/f//abaio0rZt24CTf47+Y2699VZyc3OZMWMG119/fe01tAHLyckBCMo3k+cmRMMjOTK17MCBAwC0aNEi7P7mzZsHHRfOihUreOihh4iPj+ett96iS5cutd9QUaXqnqN/e1XPESAzM5N77rkHl8vFnDlzqs3LEZF77733ABgwYIC1TZ6bEA2P9MjUspKSEgBiYmLC7o+NjQWguLg47P5FixbxwAMPkJSUxLx58ySIOUP8zzE6Ojrsfv/zrew5Aqxbt47bb78dXdd566235IdhLVq9ejWffvop8fHx3HvvvdZ2eW5CNDwSyNQh8+fP57HHHqN169bMnz9fpojWY8uXL+eee+4hJiaG9957jz59+pzpJp01fvnlF/785z9jGAbPP/88LVu2rLVry3MTov6RoaVa5u9xcTqdYff7f2MMrHsBcPDgQZ577jnrH1D/EJQ4M/zPsbS0NOx+//Ot+Bz9xo8fj9fr5R//+If0qtWi3Nxc/vjHP5Kfn8+jjz7KNddcE7RfnpsQDY/0yNQy/2+HBw8eDLs/Nzc36Di/pKQkLrroIpxOJ1OnTkXX9VPbUFGl6p6jf3tlvQHDhg3DMAymTJlS6Q9VUTPHjx/nzjvvZP/+/dxxxx3cfffdIcfIcxOi4ZFAppalp6cDsHXr1rD7t2zZAkBaWlrQdofDwRtvvMH555/P4sWLmThxIlLi58zxT+2t6XP0mzJlCsOGDWP9+vXcd999uFyuU9PQBqK4uJi7776bnTt38rvf/Y7HH3887HHy3IRoeCSQqWV9+/YlPj6eDRs2cPTo0aB9LpeLlStXoqoql19+eci5MTExzJkzh969e7Nw4UKeeeYZCWbOkDZt2pCamkp2djY///xzyP5ly5YBcOWVV4Y9X9M0XnnlFQYPHszatWt54IEH5IfiCXK5XNx3331s3ryZAQMGMHnyZBRFCXusPDchGh4JZGqZw+Fg5MiRuN1unnvuOTwej7Vv6tSp5OXlMWzYMKuqb0VxcXG8+eab9OzZk48//pgXX3zxdDVdVHD77bcD8Nxzz1m5TWCWut++fTu9e/cOqg5bkc1m47XXXmPgwIGsWrWKCRMmBH0/iOp5vV4efvhhvv/+ey644AJmzpxpLflRGXluQjQsskTBKRC4aGTbtm3p3r07O3fuJCsri1atWoUsGhluiYKCggJuv/12tmzZwh133METTzxxJt7KWWXVqlXMmjXL+jozMxNVVenRo4e1bdKkSVbRNF3XGTt2LKtWrSIlJYV+/fqxf/9+MjMzady4MR9++GHQ+j8QvtS9y+Vi3LhxrFmzhqFDhzJt2jQ0TTvF7/bs8O677zJ58mQABg8eXGmS7pQpU6zX8tyEaFhk1tIpEBMTw7x585g9ezYZGRl89dVXNGnShFtuuYXx48eTlJRU7TUaNWrE3LlzGTNmDHPnzsXhcPDwww+fhtafvfLy8sjMzAzaput60LaioiLrtaqqvP7667z99tssXLiQr7/+moSEBIYPH85DDz1E69atI7qvw+H
g9ddfZ+zYsfzrX//CZrMxdepUVFU6RKtTUFBgvf7yyy8rPS4wkJHnJkTDIj0yQgghhKi35FcLIYQQQtRbEsgIIYQQot6SQEYIIYQQ9ZYEMkIIIYSotySQEUIIIUS9JYGMEEIIIeotCWSEEEIIUW9JICNELfrhhx9IT09n9OjRtXbNgQMHkp6ezt69e2vtmmeDJ554gvT0dD7//PMz3ZSwRo8eTXp6Oj/88MOZbooQZzWp7CvqPf+K4zVRcUkIcfIifQ4vvfQSN9xwwylujRCioZBARtR7ffr0CdlWVFREVlZWpftTUlJOSVtiYmJITU2lZcuWtXbNNm3a4HA4sNvttXbNUyktLY34+PhK9weuM3YyUlJSSE1NJSEhoVauJ4Son2SJAnFW+uGHH7jtttsAghYCFKeOv0fmvffe48ILLzzDrTnzRo8ezfr16+XzEOIUkxwZIYQQQtRbEsiIBufzzz8nPT2dJ554AqfTyfTp07n66qvp2bMnv/vd76zjMjMzeeWVV7jxxhu59NJL6d69O5dddhnjx49n8+bNYa9dWbLv3r17SU9PZ+DAgQAsWbKEm266id69e9OvXz/uvfdetm3bFvaalSX7BiaT7t69m4cffpiLL76YHj16MHz4cBYsWFDpZ+DxeHj77be55ppr6NmzJ/379+fPf/4zOTk5QZ/P6ZCenm715mRkZDBixAjrc/njH/8YsmK5X1XJvosWLWL06NFccMEFdOvWjYsvvpjhw4fz/PPPs2PHjpDjPR4P8+fPZ8SIEfTt25eePXsybNgwpk+fHrQCd0W5ubk8+eSTXHrppfTs2ZOhQ4cyZ84cPB5Pte973bp1jBs3zvre6t+/PxMmTKj0++D48eO88sorDBs2jJ49e9KzZ0+uvPJKRo8ezZw5c3C5XNXeU4izkeTIiAartLSUUaNGsWXLFlJTU+nYsWNQHsqjjz5KdnY2iYmJpKSk0KxZM/bv38/SpUtZvnw5r732GkOGDKnxfadPn84bb7xBixYtOPfcc9m1axerVq1iw4YNfPrpp6Smptboelu3bmXs2LEYhkFqaiqHDh0iKyuLZ555hvz8fO65556g471eLw888ICV7NyuXTvi4+NZunQpa9asYeTIkTV+T7Xh7bff5uWXXyY5OZn27duTnZ3NN998w7p165g5cya/+c1vIrrO1KlTeeuttwAzj6ZNmzYUFRWRnZ1NVlYWrVq1olOnTtbxZWVljB07lrVr1wJw7rnnEhsby44dO3jjjTdYvHgx7777Lq1btw66z549exg5ciRHjhzBbreTlpZGQUEBr732GpmZmVQ1av/yyy/z9ttvA9CkSRM6derEvn37yMjIYPny5UyfPj3o/RYVFXHzzTezZ88eVFWlXbt2xMXFcejQITZs2MD69eu56aabSEpKiuzDFuJsYghxFvr++++NtLQ0Iy0tLWTfZ599ZqSlpRldunQxfvOb3xjbt2+39jmdTuv1woULjd27dwed6/V6jS+//NLo1auX0a9fP6OoqCjsff/whz8Ebc/JyTHS0tKMrl27Gr169TKWL19u7SsoKDD+8Ic/GGlpacbDDz8c0t4BAwYYaWlpRk5OTtB2/zndunUznnnmGaOkpMTa98477xhpaWlGz549jYKCgqDz/Pv69u1rrFu3ztp+9OhR47bbbjO6detmpKWlGY8//nhIW6ri/7y///77EzqvW7duxptvvml4vV7DMAyjrKzMeOGFF6y2Hjp0KOi8xx9/3EhLSzM+++yzoPfQpUsXo2vXrsZXX30VdLzb7TZWrlwZ0r6XX37ZSEtLMy6++GJj06ZN1vaDBw8aN998s5GWlmaMGDEi6Bxd142bbrrJSEtLM0aNGmUcPnzY2vfNN98YvXr1sj7Hivf7+OOPjbS0NOPyyy83Vq9eHbTvww8/NLp06WL06dMn6P2+/fbbRlpamnHttdcaBw4cCDrn6NGjxrvvvhvyvShEQyFDS6LB8nq9vPbaa6SlpVn
boqOjrdfXXXcd7dq1CzpHVVWuuuoqxowZQ0FBAatWrarRPT0eD+PGjWPQoEHWtoSEBJ566ikAVq9eXeP3kZqayqRJk4iJibG2jRkzhi5dulBaWhpUx0TXdebOnQuYQzMXXXSRtS8pKYmZM2cGfQYn4rbbbrOGi8L9qcxll13GXXfdhaqa/yw5HA6eeuop0tLSKCws5KOPPqr23tnZ2Xi9XtLS0kJ6cGw2G1deeWVQ4m1RUREffvghAE8//TS9evWy9jVv3pzp06djs9nYtGlT0Oe4fv16MjMzsdvtTJs2jaZNm1r7+vfvz/3334/b7Q5pn9vtZubMmSiKwv/+7/9y+eWXB+2/5ZZbGD16NEVFRXzyySfW9l27dgFw44030qJFi6BzkpKSuO2224iLi6v28xHibCSBjGiwOnXqRI8ePao8Zs+ePbz++us8+OCDjB49mltvvZVbb72Vf/3rXwD8/PPPNb7viBEjQrZ17tyZqKgoCgsLOXbsWI2u9/vf/9764R+oZ8+egPnD3e/XX3/lwIEDREdHM3z48JBzEhMTIx7CqUxaWhp9+vSp9E9lRo0aFbJNURRrqOvbb7+t9t7+ae+7d++uNNck0MaNGykpKaFZs2ZhhwnPOecc6/P45ptvrO1r1qwBYPDgwTRv3jzkvBEjRoSdLv+f//yHw4cP07lzZ84777ywbfLfb/369SHva/Xq1TidzmrflxANieTIiAarffv2Ve5/++23mTZtWpWJm8ePH6/RPZs0aVJp3ZOkpCQOHDhASUkJTZo0ifiaFXuN/Pz1WkpKSqxtu3fvBsw8kKioqLDnde7cOeJ7h/P000+f0HTjDh06hN3esWNHoLxXoirNmzdn2LBhZGRkcP3119OnTx8uvPBC+vbtS9++fUN6m/zXTE1NRdO0sNfs1KkTS5cutT67wPMqa3N8fDzNmzcPSdD21zY6ePAgt956a9hzy8rKADOR2O/GG29k7ty5fPvtt1x22WVcdtll9OvXj/PPPz+oR1GIhkgCGdFgxcbGVrpv48aNvPzyy2iaxkMPPcSgQYNo1aoVsbGxKIrCp59+ylNPPRXR7JRI7+nvVTFqWNopcEipuusVFxcDVDkMcaaGKCorlOff7m97dV5++WU6duzIp59+yoYNG9iwYQNgvq9bbrmF8ePHW0GcP8gLHBqK5P7+86oq7te0adOQQMY/A+rYsWPV9rz5AxqAZs2asWDBAmbOnMnKlSvJyMggIyMDMIOpRx55JGi4UoiGRAIZIcL45z//CcAdd9zB2LFjQ/bn5+ef7ibVCn+QUlVQEGnAUNvy8vJC8j8Ajh49CkQeYDkcDsaNG8e4cePYs2cPGzZs4JtvvmH58uW89dZbFBUV8fzzzwPlgeWRI0cqvV64+/vP8+8LJ9w1/ecNHTqUGTNmRPR+/FJTU5kxYwYul4vNmzezYcMGli1bxtatW7n//vv54IMPqhy6E+JsJTkyQoSxb98+APr27Rt2f2W1Teq6c889FzCHmCqrO3KmKiH/8ssvVW73t70m2rVrx4033siMGTN4/fXXAbOOkL8nzT/V/ddff8Xr9Ya9hr/uTOD9/edV1uaioqKgoSE//7TvnTt31vi9+DkcDvr168ef/vQnFi5cyNChQ9F1nU8//fSErylEfSaBjBBh+Icewv1WnZ2dzcqVK093k2pFhw4dOOeccygtLWXx4sUh+/Pz81m+fPkZaBnMnz8/7PYPPvgAMGc1nQx/b4Xb7bZym/r27UtsbCyHDx9m2bJlIeccOHCAr7/+OuT+/tdffvklhw4dCjnv448/DjtrqW/fviQnJ7Njxw6rbs3J6t27N0DYdgjREEggI0QY/fr1A2DOnDlBs3527tzJn/70JxRFOVNNOymKonDHHXcA5irUgTNj8vLymDBhwhmbFbN69WreeecddF0HwOVyMXnyZLKysoiPjw8726uidevWMWXKlJBepbKyMmbNmgWYM4D8uS3x8fHWrKj/+Z//Cep
py83NZcKECbjdbnr37h2UwHzhhRfSo0cP3G43jzzySNAQ03fffcff/va3sLOWoqKimDBhAgATJkxgyZIl1vv1y87OZtasWXz55ZfWttdee40FCxaEJJfv3bvXmqbdrVu3aj8fIc5GkiMjRBg333wzH330EXv27GHYsGGkpqai6zq//PILKSkpjB07tsY5DnXFqFGjWLduHStWrGD06NGce+65xMXFsWPHDmJiYvjjH//I7Nmzw07pjsSLL75Y5erXQ4cOtRb0DPTwww/z0ksv8Y9//IOWLVuSnZ1Nfn4+mqYxefJkmjVrVu29i4uLmTt3LnPnziUxMZFWrVqh6zo5OTkUFRVht9t59tlngwLRBx98kK1bt/Ldd99x8803k5qaSkxMDDt27MDtdtO6dWteffXVoPsoisLUqVP5wx/+wPr167nyyivp1KkTRUVF7Nmzh4EDB1JYWMiPP/4Y0sabbrqJgwcP8vrrr/Pwww/z7LPP0rZtWwzD4ODBg1ZQ9Oyzz1rn7Ny5kzlz5jBp0iRat25NUlISBQUF7NmzB13X6dSpE3fddVe1n48QZyMJZIQIIz4+nvnz5/Paa6+xatUqdu3aRUpKCiNGjOCBBx6w6ojUR5qm8de//pV3332Xzz77jJycHBISEhg8eDAPPfSQ9d6qCkaq4p9iXJnu3buH3X7nnXfSokUL3n33XbKyslAUhf79+zNu3LiIk1j79u3LX/7yF9auXcuOHTvYtWsXbrebZs2aMXjwYO68886g5QnA7CX5xz/+wYIFC/jnP//Jjh078Hq9tGnThquuuoq77rqLxo0bh9yrffv2fP7558yYMYM1a9awY8cOWrduzcMPP8xdd91l9XyF88ADD3DFFVfwwQcf8OOPP1qfWYsWLbj44osZPHhwULG8sWPH0rFjR9avX8/evXvZunUrDoeDLl26MGTIEEaPHl3ljDghzmaKUdO5nkKIs9oLL7zAvHnzmDhxImPGjDnl9/NX+z1TScZCiPpNcmSEEJbi4mIr6bWyGVtCCFGXSCAjRAM0e/bskKnDBw4c4IEHHuDw4cP06NGj0iEgIYSoS2RoSYgG6KqrriI7O5sWLVrQvHlzioqK2LVrF7quk5yczHvvvWctDXCqydCSEOJkSLKvEA3Q2LFjWbp0Kdu3b7cCiHbt2nHFFVdw1113RTRDSAgh6gLpkRFCCCFEvSU5MkIIIYSotySQEUIIIUS9JYGMEEIIIeotCWSEEEIIUW9JICOEEEKIeksCGSGEEELUW/8PM7FJM53U6I0AAAAASUVORK5CYII=", "text/plain": [ "
" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "read finish!\n" ] } ], "source": [ "%matplotlib inline\n", "import pandas as pd\n", "pd_sample = pd.DataFrame(samples)\n", "\n", "import seaborn as sns\n", "import matplotlib\n", "# matplotlib.use('Agg')\n", "# import matplotlib.pyplot as plt; plt.rc('font',family='Times New Roman');\n", "import matplotlib.pyplot as plt\n", "\n", "# plt.ioff()\n", "#设置风格、尺度\n", "\n", "sns.set_style('darkgrid')\n", "sns.set_context('paper', font_scale=2.5)\n", "# plt.rc({\n", "# 'text.latex.unicode': True,\n", "# })\n", "sns.set(font=\"Times New Roman\", font_scale=1.88)\n", "# sns.set(font=\"Verdana\")\n", "res = sns.relplot(data=pd_sample, x='Training Episodes', y=main_key_name_on_graph, hue=\"Method\", kind=\"line\")\n", "# sns.lineplot(data=pd_sample, x='Training Episodes', y='Mean Episode Rewards', hue=\"Method\")\n", "for ax in res.fig.axes:\n", " xlabels = ['{:,.0f}'.format(x) + 'k' for x in ax.get_xticks()/1000]\n", " ax.set_xticklabels(xlabels)\n", "\n", "sns.move_legend(\n", " res, \"center left\",\n", " bbox_to_anchor=(0.79, 0.55)\n", ")\n", "# sns.move_legend(ax, \"upper right\", bbox_to_anchor=(1, 1))\n", "\n", "\n", "\n", "plt.tight_layout(); \n", "# plt.savefig(\n", "# './保存图像/IMG-1-1-new-seaborn_relplot.pdf',\n", "# # bbox_inches='tight'\n", "# )\n", "# plt.tight_layout(); \n", "plt.show()\n", "print('read finish!')" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "interpreter": { "hash": "916dbcbb3f70747c44a77c7bcd40155683ae19c65e1c03b4aa3499c5328201f1" }, "kernelspec": { "display_name": "Python 3.8.10 64-bit", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": 
"python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.10" }, "orig_nbformat": 4 }, "nbformat": 4, "nbformat_minor": 2 } ================================================ FILE: PythonExample/hmp_minimal_modules/VISUALIZE/seaborn_defaults.py ================================================ # %matplotlib inline import os import copy import subprocess import matplotlib import matplotlib.pyplot as plt import pandas as pd import seaborn as sns def setTimesNewRomanFont_MustExecuteAtLast(): plt.rcParams['font.family'] = 'serif' plt.rcParams['font.family'] = 'serif' plt.rcParams['font.serif'] = ['Times New Roman'] # + plt.rcParams['font.serif'] def init(font_scale): sns.set_theme( context='paper', # notebook, paper, talk, poster style='whitegrid', # darkgrid, whitegrid, dark, white, ticks palette='deep', # https://seaborn.pydata.org/generated/seaborn.color_palette.html#seaborn.color_palette font='sans-serif', font_scale=font_scale, color_codes=True, rc=None) setTimesNewRomanFont_MustExecuteAtLast() def roll_color_palette(cp, offset): pre = cp[:offset] post = cp[offset:] return post + pre def lift_color(cp, n): return [ cp[n] ] + [ c for i, c in enumerate(cp) if i!=n ] def find_in_dict_list(dict_list, **kwargs): res = None for d in dict_list: if all([d[k] == v for k, v in kwargs.items()]): res = d break return res def filter_in_dict_list(dict_list, **kwargs): res = [] for d in dict_list: if all([d[k] == v for k, v in kwargs.items()]): res.append(d) return res def filter_out_dict_list(dict_list, **kwargs): res = [] for d in dict_list: if all([d[k] == v for k, v in kwargs.items()]): pass else: res.append(d) return res def rename_key_in_dict_list(dict_list, from_what, to_what): res = [] for d in dict_list: for k, v in d.items(): if v == from_what: d[k] = to_what res.append(d) return res def rename_query_in_dict_list(dict_list, from_what, to_what): res = [] for d in dict_list: new = copy.deepcopy(d) for k, v in d.items(): if k == from_what: 
new[to_what] = new.pop(k) res.append(new) return res # def map_in_dict_list(dict_list, from_what, to_what): # res = [] # for d in dict_list: # for k, v in d.items(): # if v == from_what: d[k] = to_what # res.append(d) # return res def lift_key_in_dict_list(dict_list, key): res = [] for d in dict_list: if any([v == key for k, v in d.items()]): res.append(d) for d in dict_list: if not any([v == key for k, v in d.items()]): res.append(d) return res # 左下角为0点 def legend(handle, 水平位置百分比, 垂直位置百分比, 边框, 字体大小): # https://stackoverflow.com/questions/39803385/what-does-a-4-element-tuple-argument-for-bbox-to-anchor-mean-in-matplotlib/39806180#39806180 sns.move_legend( handle, "center", frameon = 边框, bbox_to_anchor=(水平位置百分比, 垂直位置百分比), fontsize = 字体大小, title_fontsize = 字体大小*1.2, ) def save_and_push(handle, img_path, check_exist=True): if check_exist and os.path.exists(img_path): assert False, "image already exists!" handle.savefig(img_path, bbox_inches='tight') image_basename = os.path.basename(img_path) subprocess.Popen([ 'curl', '-T', img_path, '-u', 'fuqingxu:clara', 'http://cloud.fuqingxu.top:4080/remote.php/dav/files/fuqingxu/research2/heteGrouping/img/%s'%image_basename ]) ''' ratio problem 1 g = sns.pointplot(x="N Agents", y="Average Test Reward", hue="Method", data=data, aspect=2.7 , height=5, capsize=.35, kind="bar", palette =sns.color_palette("pastel") ,legend_out=True) ''' # sns.move_legend( # res, "lower left", # bbox_to_anchor=(0.68, 0.55) # ) # changedNameOfImage = True # nameOfImage = "ADCA-Two-Phase" # path = "./imgsave/" # assert changedNameOfImage # plt.savefig('%s/%s.pdf'%(path,nameOfImage),bbox_inches='tight') # curl -T ./imgsave/ADCA-Two-Phase.pdf -u fuqingxu:clara http://cloud.fuqingxu.top:4080/remote.php/dav/files/fuqingxu/research/paper03_phase3/DoR-LMAS/img/ADCA-Two-Phase.pdf ''' # %load_ext autoreload # %autoreload 3 import os, shutil, subprocess, glob, re import commentjson as json import seaborn as sns import pandas as pd import matplotlib note_list = 
[ "NoHLT-cos-run3", "NoHLT-cos-run4", "NoHLT-cos-run5", "NoHLT-cos-run6", "NoHLT-cos-run7", "prob0d2-cos-run1", "prob0d2-cos-run2", "prob0d2-cos-run3", ] data = [] for note_name in note_list: target_json = 'ZHECKPOINT/%s/experiment_test.jsonc'%note_name target_dir = 'ZHECKPOINT/%s/matrix'%note_name method = note_name.split('-')[0] + '-'+ note_name.split('-')[-2] print(method) search_res = glob.glob(target_dir+'/*') for p in (search_res): base_name = os.path.basename(p) res = base_name.split('_') which_ckp = int(res[1].split('c')[1]) alive_frontier = int(res[2].split('a')[1]) update_cnt = int(res[3].split('m')[1]) # print(p) with open(p,'r') as f: in_line = [line for line in f.readlines() if 'agents of interest: ' in line][0] res = re.findall( re.compile(r"recent reward (.*?), best reward (.*?), win rate (.*?)$"), in_line )[0] reward = float(res[0]) win_rate = float(res[2]) data.append( { 'note_name':note_name, 'method':method, 'which_ckp':which_ckp, 'alive_frontier':alive_frontier, 'update_cnt':update_cnt, 'reward':reward, 'win_rate':win_rate, 'target_dir':target_dir, } ) def find_in_dict(dict_list, **kwargs): res = None detail_debug = [] for d in dict_list: detail_debug.append([d[k] == v for k, v in kwargs.items()]) if all([d[k] == v for k, v in kwargs.items()]): res = d break return res # print(data) frontier_win_rate = res = find_in_dict(data, which_ckp=1, alive_frontier=3, method=method, target_dir=target_dir)['win_rate'] # print('frontier win rate', res['win_rate']) for test_which_cpk in range(1,5): # print('test_which_cpk', test_which_cpk, end='\t') for alive in range(3): res = find_in_dict(data, which_ckp=test_which_cpk, alive_frontier=alive) # print(res['win_rate'], end='\t') # print('') for test_which_cpk in range(1,5): # print('test_which_cpk', test_which_cpk, end='\t') for alive in range(3): base_line = find_in_dict(data, which_ckp=test_which_cpk, alive_frontier=0, method=method, target_dir=target_dir) res = find_in_dict(data, which_ckp=test_which_cpk, 
alive_frontier=alive, method=method, target_dir=target_dir) res['baseline'] = float(base_line['win_rate']) res['inc'] = (float(res['win_rate'])-float(base_line['win_rate']))# /frontier_win_rate # print(res['inc'], end='\t') # print('') # %matplotlib inline # ! rm /home/hmp/.cache/matplotlib -rf from VISUALIZE.seaborn_defaults import * init(font_scale=1.7) data_p = filter_in_dict_list(data, alive_frontier=2) data_p = rename_key_in_dict_list(data_p, from_what='NoHLT-cos', to_what='without HLT') data_p = rename_key_in_dict_list(data_p, from_what='prob0d2-cos', to_what='HLT') data_p = lift_key_in_dict_list(data_p, key='HLT') data_p = pd.DataFrame(data_p) cp = sns.color_palette("husl") cp = roll_color_palette(cp, offset=4) cp = lift_color(cp, n=1) sns.set_palette(cp) g = sns.lmplot( data=data_p, x="baseline", y="inc", hue="method", aspect=1.27 ) g.set_axis_labels("Past Policy Win Rate", "Improvement Score") legend(g, 水平位置百分比=0.323, 垂直位置百分比=0.29, 边框=True) plt.savefig('temp.jpg', bbox_inches='tight') ''' ================================================ FILE: PythonExample/hmp_minimal_modules/VISUALIZE/threejs_replay.py ================================================ import os, sys import argparse from VISUALIZE.mcom import * from VISUALIZE.mcom_replay import RecallProcessThreejs from UTIL.network import find_free_port if __name__ == '__main__': parser = argparse.ArgumentParser(description='HMP') parser.add_argument('-f', '--file', help='Directory of chosen file', default='TEMP/v2d_logger/backup.dp.gz') parser.add_argument('-p', '--port', help='The port for web server') args, unknown = parser.parse_known_args() if hasattr(args, 'file'): path = args.file else: assert False, (r"parser.add_argument('-f', '--file', help='The node name is?')") if hasattr(args, 'port') and args.port is not None: port = int(args.port) else: port = find_free_port() print('no --port arg, auto find:', port) load_via_json = (hasattr(args, 'cfg') and args.cfg is not None) rp = 
RecallProcessThreejs(path, port) rp.start() rp.join() ''' note=RVE-drone1-fixaa-run2 cp -r ./ZHECKPOINT/$note ./ZHECKPOINT/$note-bk cp -r ./ZHECKPOINT/$note/experiment.jsonc ./ZHECKPOINT/$note/experiment-bk.jsonc cp -r ./ZHECKPOINT/$note/experiment.jsonc ./ZHECKPOINT/$note/train.jsonc cp -r ./ZHECKPOINT/$note/experiment.jsonc ./ZHECKPOINT/$note/test.jsonc python << __EOF__ import commentjson as json file = "./ZHECKPOINT/$note/test.jsonc" print(file) with open(file, encoding='utf8') as f: json_data = json.load(f) json_data["config.py->GlobalConfig"]["num_threads"] = 1 json_data["config.py->GlobalConfig"]["fold"] = 1 json_data["config.py->GlobalConfig"]["test_only"] = True json_data["MISSION.uhmap.uhmap_env_wrapper.py->ScenarioConfig"]["TimeDilation"] = 1 json_data["ALGORITHM.conc_4hist_hete.foundation.py->AlgorithmConfig"]["load_checkpoint"] = True with open(file, 'w') as f: json.dump(json_data, f, indent=4) __EOF__ python main.py -c ./ZHECKPOINT/$note/test.jsonc ''' ================================================ FILE: PythonExample/hmp_minimal_modules/ZDOCS/Dockerfile ================================================ # how to build: docker build --network=host -progress=plain -t hmp-from-scrach . 
# 此文件用于构建 HMAP + Starcraft(难易两个版本) + 虚幻引擎组件 # 可能需要翻墙,请自行搭梯子,然后解开下面相关的注释,并进行适当修改(socks5地址和端口) FROM nvidia/cuda:11.5.1-runtime-ubuntu20.04 RUN apt-get update RUN apt-get install -y curl proxychains RUN $useProxyNetwork curl cip.cc ENV TZ=Asia/Shanghai ENV LC_ALL zh_CN.UTF-8 RUN apt-get install -y language-pack-zh-hans \ libmysqlclient-dev \ dialog \ nano \ vim \ joe \ wget \ curl \ jq \ gawk \ psmisc \ python \ python3 \ python-yaml \ python-jinja2 \ python3-urllib3 \ python-tz \ python-nose \ python3-prettytable \ python-netifaces \ python-dev \ python3-pip \ python3-mysqldb \ openjdk-8-jre \ openjdk-8-jdk \ openssh-server \ openssh-client \ git \ sudo \ inotify-tools \ rsync \ net-tools \ cron \ swig \ cmake \ redis-tools \ redis-server\ iproute2 \ pkg-config build-essential libssl-dev libffi-dev --fix-missing RUN locale-gen zh_CN.UTF-8 && localedef -c -f UTF-8 -i zh_CN zh_CN.utf8 RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone # create user and add to sudoers RUN useradd -m hmp && echo "hmp:hmp" | chpasswd && adduser hmp sudo USER hmp CMD /bin/bash # RUN echo hmp|sudo -S apt-get install -y nano WORKDIR /home/hmp # use python3 as the system default python USER root RUN rm /usr/bin/python RUN ln /usr/bin/python3 /usr/bin/python ARG useProxyNetwork='' RUN $useProxyNetwork curl cip.cc # # comment out below if you do not need proxy network | 翻墙 - 从此行向下删除 # RUN sed -i '$ d' /etc/proxychains.conf # RUN sed -i '$ d' /etc/proxychains.conf # RUN echo "socks5 127.0.0.1 10880" >> /etc/proxychains.conf # ARG useProxyNetwork=proxychains # RUN $useProxyNetwork curl cip.cc # # comment out above if you do not need proxy network | 翻墙 - 从此行向上删除 # pip install everything we need USER hmp SHELL ["/bin/bash", "-c"] RUN $useProxyNetwork pip install numpy scipy scikit-learn RUN $useProxyNetwork pip install lz4 gym flask cython waitress colorama func_timeout setproctitle filelock RUN $useProxyNetwork pip install commentjson matplotlib psutil paramiko 
ipykernel onedrivedownloader flock RUN $useProxyNetwork pip install torch --extra-index-url https://download.pytorch.org/whl/cu115 RUN $useProxyNetwork pip install pygame cmake redis numba RUN $useProxyNetwork pip install git+https://github.com/oxwhirl/smac.git # # download and extract UHMAP component # WORKDIR /home/hmp # RUN $useProxyNetwork git clone https://github.com/binary-husky/uhmap-visual-tool.git # WORKDIR /home/hmp/uhmap-visual-tool/ # RUN $useProxyNetwork python linux_deploy.py # RUN $useProxyNetwork python linux_deploy_starcraft_all_versions.py # # add execute mod to starcraft II (two versions) # RUN chmod +x /home/hmp/StarCraftIII/Version2410/StarCraftII/Versions/Base75689/ # RUN chmod +x /home/hmp/StarCraftIII/Versions/Base69232/SC2_x64 # RUN mv /home/hmp/uhmap-visual-tool/UnrealEngine/home/hmp/* /home/hmp # download UHMAP main framwork WORKDIR /home/hmp RUN $useProxyNetwork git clone https://github.com/binary-husky/hmp2g.git WORKDIR /home/hmp/hmp2g # RUN python main.py -c example.jsonc # # Installing Times New Roman font # USER root # # RUN apt-get --reinstall install ttf-mscorefonts-installer # # RUN apt-get install msttcorefonts -qq # # RUN rm /home/hmp/.cache/matplotlib -rf USER root # RUN sed -i 's/22/2233/g' /etc/ssh/sshd_config RUN echo "Port 2233" >> /etc/ssh/sshd_config RUN echo "service ssh start" >> /entrypoint.sh RUN echo "redis-server --daemonize yes" >> /entrypoint.sh # exit USER hmp WORKDIR /home/hmp RUN touch /home/hmp/.sudo_as_admin_successful COPY ./bashrc_suffix /home/hmp/bashrc_suffix RUN cat /home/hmp/bashrc_suffix >> /home/hmp/.bashrc # docker build --network=host --progress=plain -t py38 . 
# docker run -itd --name hmp-fqx --net host --gpus all --shm-size=16G py38:latest && docker exec -it -u 0 hmp-fqx service ssh start # docker exec -it hmp-fqx bash # docker stop hmp-fqx && docker rm hmp-fqx ================================================ FILE: PythonExample/hmp_minimal_modules/ZDOCS/DockerfilePython311 ================================================ # how to build: docker build --network=host --progress=plain -t py311 . # 此文件用于构建 HMAP + Starcraft(难易两个版本) + 虚幻引擎组件 # 可能需要翻墙,请自行搭梯子,然后解开下面相关的注释,并进行适当修改(socks5地址和端口) FROM nvidia/cuda:11.7.1-runtime-ubuntu20.04 RUN apt-get update RUN apt-get install -y curl proxychains software-properties-common RUN add-apt-repository ppa:deadsnakes/ppa -y ENV TZ=Asia/Shanghai ENV LC_ALL zh_CN.UTF-8 RUN apt-get install -y language-pack-zh-hans \ libmysqlclient-dev \ dialog \ nano \ vim \ joe \ wget \ curl \ jq \ gawk \ psmisc \ python-dev \ python3.11-dbg \ python3.11-dev \ python3.11-distutils \ python3.11-examples \ python3.11-full \ python3.11-gdbm-dbg \ python3.11-gdbm \ python3.11-lib2to3 \ python3.11-minimal \ python3.11-tk-dbg \ python3.11-tk \ python3.11-venv \ python3.11 \ openjdk-8-jre \ openjdk-8-jdk \ openssh-server \ openssh-client \ git \ sudo \ htop \ inotify-tools \ rsync \ net-tools \ cron \ swig \ cmake \ redis-tools \ redis-server\ iproute2 \ pkg-config build-essential libssl-dev libffi-dev --fix-missing RUN locale-gen zh_CN.UTF-8 && localedef -c -f UTF-8 -i zh_CN zh_CN.utf8 RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone # create user and add to sudoers RUN useradd -m hmp && echo "hmp:hmp" | chpasswd && adduser hmp sudo USER hmp CMD /bin/bash # RUN echo hmp|sudo -S apt-get install -y nano WORKDIR /home/hmp # use python3 as the system default python USER root RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.11 RUN rm /usr/bin/python /usr/bin/python3 RUN ln /usr/bin/python3.11 /usr/bin/python RUN ln /usr/bin/python3.11 /usr/bin/python3 ARG useProxyNetwork='' RUN 
$useProxyNetwork curl cip.cc # # comment out below if you do not need proxy network | 翻墙 - 从此行向下删除 RUN sed -i '$ d' /etc/proxychains.conf RUN sed -i '$ d' /etc/proxychains.conf RUN echo "socks5 172.18.116.161 10880" >> /etc/proxychains.conf ARG useProxyNetwork=proxychains RUN $useProxyNetwork curl cip.cc # # comment out above if you do not need proxy network | 翻墙 - 从此行向上删除 # pip install everything we need USER hmp SHELL ["/bin/bash", "-c"] RUN $useProxyNetwork pip install numpy scipy scikit-learn RUN $useProxyNetwork pip install lz4 gym flask cython waitress colorama func_timeout setproctitle filelock RUN $useProxyNetwork pip install commentjson matplotlib psutil paramiko ipykernel onedrivedownloader flock RUN $useProxyNetwork pip install cmake redis sacred RUN $useProxyNetwork pip install pygame --pre RUN $useProxyNetwork pip install git+https://github.com/oxwhirl/smac.git RUN $useProxyNetwork pip install torch --extra-index-url https://download.pytorch.org/whl/cu117 # # download and extract UHMAP component # WORKDIR /home/hmp # RUN $useProxyNetwork git clone https://github.com/binary-husky/uhmap-visual-tool.git # WORKDIR /home/hmp/uhmap-visual-tool/ # RUN python linux_deploy.py # RUN python linux_deploy_starcraft_all_versions.py # RUN chmod +x /home/hmp/StarCraftIII/Version2410/StarCraftII/Versions/Base75689/ # RUN chmod +x /home/hmp/StarCraftIII/Versions/Base69232/SC2_x64 # RUN mv /home/hmp/uhmap-visual-tool/UnrealEngine/home/hmp/* /home/hmp # download UHMAP main framwork WORKDIR /home/hmp RUN $useProxyNetwork git clone https://github.com/binary-husky/hmp2g.git WORKDIR /home/hmp/hmp2g # RUN python main.py -c example.jsonc # # Installing Times New Roman font # USER root # # RUN apt-get --reinstall install ttf-mscorefonts-installer # # RUN apt-get install msttcorefonts -qq # # RUN rm /home/hmp/.cache/matplotlib -rf USER root # RUN sed -i 's/22/2233/g' /etc/ssh/sshd_config RUN echo "Port 2233" >> /etc/ssh/sshd_config RUN echo "service ssh start" >> /entrypoint.sh 
RUN echo "redis-server --daemonize yes" >> /entrypoint.sh # CMD ["/bin/bash -c echo clara | sudo -S /entrypoint.sh"] # # install numba, never should have used this # WORKDIR /home/hmp # RUN wget https://github.com/numba/llvmlite/archive/refs/tags/v0.39.1.tar.gz # RUN tar -xf v0.39.1.tar.gz # WORKDIR /home/hmp/llvmlite-0.39.1/ # RUN sed -i 's/3.11/3.12/g' setup.py # RUN pip install . # WORKDIR /home/hmp # RUN wget https://github.com/numba/numba/archive/refs/tags/0.56.3.tar.gz # RUN tar -xf 0.56.3.tar.gz # WORKDIR /home/hmp/numba-0.56.3/ # RUN sed -i 's/3.11/3.12/g' setup.py # RUN sed -i 's/0.40/0.60/g' setup.py # RUN pip install . # exit USER hmp WORKDIR /home/hmp RUN touch /home/hmp/.sudo_as_admin_successful COPY ./bashrc_suffix /home/hmp/bashrc_suffix RUN cat /home/hmp/bashrc_suffix >> /home/hmp/.bashrc # docker build --network=host --progress=plain -t py311 . # docker run -itd --name hmp-fqx --net host --gpus all --shm-size=16G py311:latest && docker exec -it -u 0 hmp-fqx service ssh start # docker exec -it hmp-fqx bash # docker stop hmp-fqx && docker rm hmp-fqx ================================================ FILE: PythonExample/hmp_minimal_modules/ZDOCS/bashrc_suffix ================================================ ulimit -n 4096 alias nv='watch -n 0.35 nvidia-smi' alias xrdp_reset='rm -f /var/run/xrdp/xrdp-sesman.pid' alias fq='proxychains' alias killhmp='kill -9 $(ps -ef | grep Hmap | grep hmp | grep -v grep | awk '"'"'{print $ 2}'"'"')' alias killpy='kill -9 $(ps -ef | grep python | grep hmp | grep -v grep | awk '"'"'{print $ 2}'"'"')' alias killsc='kill -9 $(ps -ef | grep StarCraft | grep hmp | grep -v grep | awk '"'"'{print $ 2}'"'"')' alias killvs='kill -9 $(ps -ef | grep vscode | grep hmp | grep -v grep | awk '"'"'{print $ 2}'"'"')' alias killnb='kill -9 $(ps -ef | grep ipykernel | grep hmp | grep -v grep | awk '"'"'{print $ 2}'"'"')' alias hmp='python main.py --cfg' alias vscode='code --no-sandbox' alias killue='kill -9 $(ps -ef | grep UHMP | grep hmp | 
grep -v grep | awk '"'"'{print $ 2}'"'"')' alias ftop='top -E g -s' ================================================ FILE: PythonExample/hmp_minimal_modules/ZDOCS/examples/uhmap/AirAttack.jsonc ================================================ { "config.py->GlobalConfig": { "note": "xxxx", "env_name": "uhmap", "env_path": "MISSION.uhmap", "draw_mode": "Img", "num_threads": 1, "report_reward_interval": 128, "test_interval": 1280, "test_epoch": 128, "interested_team": 0, "seed": 3562, "device": "cuda:0", "max_n_episode": 5000000, "fold": 1, "backup_files": [ "MISSION/uhmap" ], }, "MISSION.uhmap.uhmap_env_wrapper.py->ScenarioConfig": { "N_AGENT_EACH_TEAM": [105,], "MaxEpisodeStep": 5000, "StepGameTime": 0.5, "StateProvided": false, "render": true, "UElink2editor": true, "HeteAgents": true, "UnrealLevel": "UhmapJustAnIsland", "SubTaskSelection": "UhmapJustAnIsland", "UhmapVersion":"3.5", "UhmapRenderExe": "/home/hmp/UnrealHmapBinary/Version3.5/LinuxNoEditor/UHMP.sh", "UhmapServerExe": "/home/hmp/UnrealHmapBinary/Version3.5/LinuxServer/UHMPServer.sh", "TimeDilation": 5, "TEAM_NAMES": [ "ALGORITHM.script_ai.uhmap_island->DummyAlgorithmIdle" ], "DemoType": "AirAttack" }, "MISSION.uhmap.SubTasks.UhmapJustAnIslandConf.py->SubTaskConfig": { "agent_list": [ { "team":0, "tid":0, "uid":0, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":1, "uid":1, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":2, "uid":2, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":3, "uid":3, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":4, "uid":4, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":5, "uid":5, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":6, "uid":6, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":7, 
"uid":7, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":8, "uid":8, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":9, "uid":9, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":0, "uid":10, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":1, "uid":11, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":2, "uid":12, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":3, "uid":13, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":4, "uid":14, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":5, "uid":15, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":6, "uid":16, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":7, "uid":17, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":8, "uid":18, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":9, "uid":19, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":0, "uid":20, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":1, "uid":21, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":2, "uid":22, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":3, "uid":23, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":4, "uid":24, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":5, "uid":25, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":6, "uid":26, "n_team_agent":105, "type":"PlaneAgent", 
"init_fn_name":"init_air", }, { "team":0, "tid":7, "uid":27, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":8, "uid":28, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":9, "uid":29, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":0, "uid":30, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":1, "uid":31, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":2, "uid":32, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":3, "uid":33, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":4, "uid":34, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":5, "uid":35, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":6, "uid":36, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":7, "uid":37, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":8, "uid":38, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":9, "uid":39, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":0, "uid":40, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":1, "uid":41, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":2, "uid":42, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":3, "uid":43, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":4, "uid":44, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":5, "uid":45, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":6, "uid":46, 
"n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":7, "uid":47, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":8, "uid":48, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":9, "uid":49, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":0, "uid":50, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":1, "uid":51, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":2, "uid":52, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":3, "uid":53, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":4, "uid":54, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":5, "uid":55, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":6, "uid":56, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":7, "uid":57, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":8, "uid":58, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":9, "uid":59, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":0, "uid":60, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":1, "uid":61, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":2, "uid":62, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":3, "uid":63, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":4, "uid":64, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":5, "uid":65, "n_team_agent":105, "type":"PlaneAgent", 
"init_fn_name":"init_air", }, { "team":0, "tid":6, "uid":66, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":7, "uid":67, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":8, "uid":68, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":9, "uid":69, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":0, "uid":70, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":1, "uid":71, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":2, "uid":72, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":3, "uid":73, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":4, "uid":74, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":5, "uid":75, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":6, "uid":76, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":7, "uid":77, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":8, "uid":78, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":9, "uid":79, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":0, "uid":80, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":1, "uid":81, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":2, "uid":82, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":3, "uid":83, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":4, "uid":84, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":5, "uid":85, 
"n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":6, "uid":86, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":7, "uid":87, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":8, "uid":88, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":9, "uid":89, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":0, "uid":90, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":1, "uid":91, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":2, "uid":92, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":3, "uid":93, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":4, "uid":94, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":5, "uid":95, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":6, "uid":96, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":7, "uid":97, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":8, "uid":98, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":9, "uid":99, "n_team_agent":105, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":5, "uid":100, "n_team_agent":105, "type":"Air Defense", "init_fn_name":"init_target", }, // { "team":0, "tid":101, "uid":101, "n_team_agent":105, "type":"Air Defense", "init_fn_name":"init_target", }, // { "team":0, "tid":102, "uid":102, "n_team_agent":105, "type":"Air Defense", "init_fn_name":"init_target", }, // { "team":0, "tid":103, "uid":103, "n_team_agent":105, "type":"Air Defense", "init_fn_name":"init_target", }, // { "team":0, "tid":104, "uid":104, 
"n_team_agent":105, "type":"Air Defense", "init_fn_name":"init_target", }, { "team":0, "tid":5, "uid":100, "n_team_agent":105, "type":"commander", "init_fn_name":"init_target", }, { "team":0, "tid":101, "uid":101, "n_team_agent":105, "type":"commander", "init_fn_name":"init_target", }, { "team":0, "tid":102, "uid":102, "n_team_agent":105, "type":"commander", "init_fn_name":"init_target", }, { "team":0, "tid":103, "uid":103, "n_team_agent":105, "type":"commander", "init_fn_name":"init_target", }, { "team":0, "tid":104, "uid":104, "n_team_agent":105, "type":"commander", "init_fn_name":"init_target", }, // { "team":0, "tid":5, "uid":105, "n_team_agent":105, "type":"Air Defense", "init_fn_name":"init_target", }, // { "team":0, "tid":101, "uid":106, "n_team_agent":105, "type":"Air Defense", "init_fn_name":"init_target", }, // { "team":0, "tid":102, "uid":107, "n_team_agent":105, "type":"Air Defense", "init_fn_name":"init_target", }, // { "team":0, "tid":103, "uid":108, "n_team_agent":105, "type":"Air Defense", "init_fn_name":"init_target", }, // { "team":0, "tid":104, "uid":109, "n_team_agent":105, "type":"Air Defense", "init_fn_name":"init_target", }, ] }, "ALGORITHM.script_ai.uhmap_island.py->DummyAlgConfig": { "reserve": "" }, } ================================================ FILE: PythonExample/hmp_minimal_modules/ZDOCS/examples/uhmap/AirShow.jsonc ================================================ { "config.py->GlobalConfig": { "note": "xxxx", "env_name": "uhmap", "env_path": "MISSION.uhmap", "draw_mode": "Img", "num_threads": 1, "report_reward_interval": 128, "test_interval": 1280, "test_epoch": 128, "interested_team": 0, "seed": 3562, "device": "cuda:0", "max_n_episode": 5000000, "fold": 1, "backup_files": [ "MISSION/uhmap" ], }, "MISSION.uhmap.uhmap_env_wrapper.py->ScenarioConfig": { "N_AGENT_EACH_TEAM": [10,], "MaxEpisodeStep": 5000, "StepGameTime": 0.5, "StateProvided": false, "render": true, "UElink2editor": true, "HeteAgents": true, "UnrealLevel": 
"UhmapJustAnIsland", "SubTaskSelection": "UhmapJustAnIsland", "UhmapVersion":"3.5", "UhmapRenderExe": "/home/hmp/UnrealHmapBinary/Version3.5/LinuxNoEditor/UHMP.sh", "UhmapServerExe": "/home/hmp/UnrealHmapBinary/Version3.5/LinuxServer/UHMPServer.sh", "TimeDilation": 5, "TEAM_NAMES": [ "ALGORITHM.script_ai.uhmap_island->DummyAlgorithmIdle" ], "DemoType": "AirShow" }, "MISSION.uhmap.SubTasks.UhmapJustAnIslandConf.py->SubTaskConfig": { "agent_list": [ { "team":0, "tid":0, "uid":0, "n_team_agent":10, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":1, "uid":1, "n_team_agent":10, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":2, "uid":2, "n_team_agent":10, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":3, "uid":3, "n_team_agent":10, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":4, "uid":4, "n_team_agent":10, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":5, "uid":5, "n_team_agent":10, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":6, "uid":6, "n_team_agent":10, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":7, "uid":7, "n_team_agent":10, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":8, "uid":8, "n_team_agent":10, "type":"PlaneAgent", "init_fn_name":"init_air", }, { "team":0, "tid":9, "uid":9, "n_team_agent":10, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":0, "uid":10, "n_team_agent":10, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":1, "uid":11, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":2, "uid":12, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":3, "uid":13, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":4, "uid":14, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":5, 
"uid":15, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":6, "uid":16, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":7, "uid":17, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":8, "uid":18, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":9, "uid":19, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":0, "uid":20, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":1, "uid":21, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":2, "uid":22, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":3, "uid":23, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":4, "uid":24, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":5, "uid":25, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":6, "uid":26, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":7, "uid":27, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":8, "uid":28, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":9, "uid":29, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":0, "uid":30, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":1, "uid":31, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":2, "uid":32, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":3, "uid":33, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":4, 
"uid":34, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":5, "uid":35, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":6, "uid":36, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":7, "uid":37, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":8, "uid":38, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":9, "uid":39, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":0, "uid":40, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":1, "uid":41, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":2, "uid":42, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":3, "uid":43, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":4, "uid":44, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":5, "uid":45, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":6, "uid":46, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":7, "uid":47, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":8, "uid":48, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":9, "uid":49, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":0, "uid":50, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":1, "uid":51, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":2, "uid":52, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":3, 
"uid":53, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":4, "uid":54, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":5, "uid":55, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":6, "uid":56, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":7, "uid":57, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":8, "uid":58, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":9, "uid":59, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":0, "uid":60, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":1, "uid":61, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":2, "uid":62, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":3, "uid":63, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":4, "uid":64, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":5, "uid":65, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":6, "uid":66, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":7, "uid":67, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":8, "uid":68, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":9, "uid":69, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":0, "uid":70, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":1, "uid":71, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":2, 
"uid":72, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":3, "uid":73, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":4, "uid":74, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":5, "uid":75, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":6, "uid":76, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":7, "uid":77, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":8, "uid":78, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":9, "uid":79, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":0, "uid":80, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":1, "uid":81, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":2, "uid":82, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":3, "uid":83, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":4, "uid":84, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":5, "uid":85, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":6, "uid":86, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":7, "uid":87, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":8, "uid":88, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":9, "uid":89, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":0, "uid":90, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":1, 
"uid":91, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":2, "uid":92, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":3, "uid":93, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":4, "uid":94, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":5, "uid":95, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":6, "uid":96, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":7, "uid":97, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":8, "uid":98, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, // { "team":0, "tid":9, "uid":99, "n_team_agent":100, "type":"PlaneAgent", "init_fn_name":"init_air", }, ] }, "ALGORITHM.script_ai.uhmap_island.py->DummyAlgConfig": { "reserve": "" }, } ================================================ FILE: PythonExample/hmp_minimal_modules/ZDOCS/examples/uhmap/hlt+50vs50.jsonc ================================================ { // --- Part1: config HMP core --- "config.py->GlobalConfig": { "note": "z-hete-50",// http://localhost:59547 "env_name": "uhmap", "env_path": "MISSION.uhmap", // "heartbeat_on": "False", "draw_mode": "Img", "num_threads": 16, // 请预留 num_threads * 1 GB 的内存空间 "report_reward_interval": 128, "test_interval": 1280, "test_epoch": 256, "interested_team": 0, "seed": 10098, "device": "cuda", "max_n_episode": 5000000, "fold": 1, "backup_files": [ "ALGORITHM/hete_league_onenet_fix", "MISSION/uhmap" ] }, // --- Part2: config MISSION --- "MISSION.uhmap.uhmap_env_wrapper.py->ScenarioConfig": { "N_AGENT_EACH_TEAM": [50, 50], // update N_AGENT_EACH_TEAM "MaxEpisodeStep": 150, "StepGameTime": 0.5, "StateProvided": false, "render": false, // note: random seed has different impact on renderer and server "UElink2editor": false, 
"HeteAgents": true, "UnrealLevel": "UhmapLargeScale", "SubTaskSelection": "UhmapHuge", "UhmapVersion":"3.5", "UhmapRenderExe": "/home/hmp/UnrealHmapBinary/Version3.5/LinuxNoEditor/UHMP.sh", "UhmapServerExe": "/home/hmp/UnrealHmapBinary/Version3.5/LinuxServer/UHMPServer.sh", "TimeDilation": 64, // simulation time speed up, larger is faster "TEAM_NAMES": [ "ALGORITHM.hete_league_onenet_fix.foundation->ReinforceAlgorithmFoundation", "ALGORITHM.script_ai.uhmap_ls->DummyAlgorithmLinedAttack", ] }, "MISSION.uhmap.SubTasks.UhmapHugeConf.py->SubTaskConfig":{ "agent_list": [ { "team":0, "tid":0, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, { "team":0, "tid":1, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":2, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":3, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":4, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":5, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":6, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":7, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":8, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":9, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":10, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":11, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":12, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":13, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":14, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":15, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":16, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":17, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":18, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { 
"team":0, "tid":19, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":20, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":21, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":22, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":23, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":24, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":25, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":26, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":27, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":28, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":29, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":30, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":31, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":32, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":33, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":34, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":35, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":36, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":37, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":38, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":39, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":40, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":41, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":42, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":43, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":44, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":45, "type":"RLA_CAR", 
"init_fn_name":"init_ground" }, { "team":0, "tid":46, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, { "team":0, "tid":47, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, { "team":0, "tid":48, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, { "team":0, "tid":49, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, { "team":1, "tid":0, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, { "team":1, "tid":1, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":2, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":3, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":4, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":5, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":6, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":7, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":8, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":9, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":10, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":11, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":12, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":13, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":14, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":15, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":16, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":17, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":18, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":19, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":20, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":21, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, 
"tid":22, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":23, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":24, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":25, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":26, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":27, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":28, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":29, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":30, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":31, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":32, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":33, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":34, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":35, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":36, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":37, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":38, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":39, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":40, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":41, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":42, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":43, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":44, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":45, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":46, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, { "team":1, "tid":47, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, { "team":1, "tid":48, "type":"RLA_UAV_Support", 
"init_fn_name":"init_air" }, { "team":1, "tid":49, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, ] }, // --- Part3: config ALGORITHM 1/2 --- "ALGORITHM.script_ai.uhmap_ls.py->DummyAlgConfig": { "reserve": "" }, // --- Part3: config ALGORITHM 2/2 --- "ALGORITHM.hete_league_onenet_fix.shell_env.py->ShellEnvConfig": { "add_avail_act": true }, "ALGORITHM.hete_league_onenet_fix.foundation.py->AlgorithmConfig": { "train_traj_needed": 64, "hete_n_alive_frontend": 2, "hete_n_net_placeholder":5, "hete_same_prob": 1.0, "load_checkpoint": false, "gamma": 0.99, "gamma_in_reward_forwarding": "True", "gamma_in_reward_forwarding_value": 0.95, "prevent_batchsize_oom": "True", "lr": 0.0001, "ppo_epoch": 24, "hete_lasted_n":3, "policy_resonance": true, "hete_exclude_zero_wr": true, "debug": false, "n_entity_placeholder": 11 }, "ALGORITHM.hete_league_onenet_fix.stage_planner.py->PolicyRsnConfig": { "resonance_start_at_update": 1, "yita_min_prob": 0.05, "yita_max": 0.5, "yita_shift_method": "-sin", "yita_shift_cycle": 1000, "yita_inc_per_update": 0.01, }, } ================================================ FILE: PythonExample/hmp_minimal_modules/ZDOCS/examples/uhmap/ppoma+50vs50.jsonc ================================================ { // --- Part1: config HMP core --- "config.py->GlobalConfig": { "note": "ppoma-uhmap50vs50",// http://localhost:59547 "env_name": "uhmap", "env_path": "MISSION.uhmap", // "heartbeat_on": "False", "draw_mode": "Img", "num_threads": 32, // 请预留 num_threads * 1 GB 的内存空间 "report_reward_interval": 128, "test_interval": 1280, "test_epoch": 512, "interested_team": 0, "seed": 10098, "device": "cuda", "max_n_episode": 5000000, "fold": 1, "backup_files": [ "ALGORITHM/ppo_ma", "MISSION/uhmap" ] }, // --- Part2: config MISSION --- "MISSION.uhmap.uhmap_env_wrapper.py->ScenarioConfig": { "N_AGENT_EACH_TEAM": [50, 50], // update N_AGENT_EACH_TEAM "MaxEpisodeStep": 150, "StepGameTime": 0.5, "StateProvided": false, "render": false, // note: random seed has 
different impact on renderer and server "UElink2editor": false, "HeteAgents": true, "UnrealLevel": "UhmapLargeScale", "SubTaskSelection": "UhmapHuge", "UhmapVersion":"3.5", "UhmapRenderExe": "/home/hmp/UnrealHmapBinary/Version3.5/LinuxNoEditor/UHMP.sh", "UhmapServerExe": "/home/hmp/UnrealHmapBinary/Version3.5/LinuxServer/UHMPServer.sh", "TimeDilation": 64, // simulation time speed up, larger is faster "TEAM_NAMES": [ "ALGORITHM.ppo_ma.foundation->ReinforceAlgorithmFoundation", "ALGORITHM.script_ai.uhmap_ls->DummyAlgorithmLinedAttack", ] }, "MISSION.uhmap.SubTasks.UhmapHugeConf.py->SubTaskConfig":{ "agent_list": [ { "team":0, "tid":0, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, { "team":0, "tid":1, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":2, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":3, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":4, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":5, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":6, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":7, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":8, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":9, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":10, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":11, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":12, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":13, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":14, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":15, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":16, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":17, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":18, 
"type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":19, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":20, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":21, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":22, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":23, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":24, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":25, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":26, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":27, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":28, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":29, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":30, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":31, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":32, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":33, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":34, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":35, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":36, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":37, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":38, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":39, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":40, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":41, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":42, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":43, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":44, "type":"RLA_CAR_Laser", 
"init_fn_name":"init_ground" }, { "team":0, "tid":45, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":46, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, { "team":0, "tid":47, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, { "team":0, "tid":48, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, { "team":0, "tid":49, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, { "team":1, "tid":0, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, { "team":1, "tid":1, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":2, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":3, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":4, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":5, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":6, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":7, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":8, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":9, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":10, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":11, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":12, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":13, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":14, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":15, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":16, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":17, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":18, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":19, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":20, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, 
"tid":21, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":22, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":23, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":24, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":25, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":26, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":27, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":28, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":29, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":30, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":31, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":32, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":33, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":34, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":35, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":36, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":37, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":38, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":39, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":40, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":41, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":42, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":43, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":44, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":45, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":46, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, { "team":1, "tid":47, "type":"RLA_UAV_Support", 
"init_fn_name":"init_air" }, { "team":1, "tid":48, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, { "team":1, "tid":49, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, ] }, // --- Part3: config ALGORITHM 1/2 --- "ALGORITHM.script_ai.uhmap_ls.py->DummyAlgConfig": { "reserve": "" }, // --- Part3: config ALGORITHM 2/2 --- "ALGORITHM.ppo_ma.shell_env.py->ShellEnvConfig": { "add_avail_act": true }, "ALGORITHM.ppo_ma.foundation.py->AlgorithmConfig": { "train_traj_needed": 64, "use_normalization": true, "load_specific_checkpoint": "", "gamma": 0.99, "gamma_in_reward_forwarding": "True", "gamma_in_reward_forwarding_value": 0.95, "prevent_batchsize_oom": "True", "lr": 0.0004, "ppo_epoch": 24, "policy_resonance": false, "debug": true, "n_entity_placeholder": 11 }, "ALGORITHM.ppo_ma.stage_planner.py->PolicyRsnConfig": { "resonance_start_at_update": 1, "yita_min_prob": 0.05, "yita_max": 0.5, "yita_shift_method": "-sin", "yita_shift_cycle": 1000, "yita_inc_per_update": 0.01, }, } ================================================ FILE: PythonExample/hmp_minimal_modules/ZDOCS/examples/uhmap/ppoma+intercept.jsonc ================================================ { // --- Part1: config HMP core --- "config.py->GlobalConfig": { "note": "ppoma-intercept",// http://localhost:59547 "env_name": "uhmap", "env_path": "MISSION.uhmap", // "heartbeat_on": "False", "draw_mode": "Img", "num_threads": 1, // 请预留 num_threads * 1 GB 的内存空间 "report_reward_interval": 128, "test_interval": 1280, "test_epoch": 512, "interested_team": 0, "seed": 10098, "device": "cpu", "max_n_episode": 5000000, "fold": 1, "backup_files": [ "MISSION/uhmap" ] }, // --- Part2: config MISSION --- "MISSION.uhmap.uhmap_env_wrapper.py->ScenarioConfig": { "N_AGENT_EACH_TEAM": [5, 12], "MaxEpisodeStep": 100, "StepGameTime": 0.5, "StateProvided": false, "render": false, // note: random seed has different impact on renderer and server "UElink2editor": false, "HeteAgents": true, "UnrealLevel": "UhmapIntercept", 
"SubTaskSelection": "UhmapIntercept", "UhmapVersion":"3.5", "UhmapRenderExe": "/home/hmp/UnrealHmapBinary/Version3.5/LinuxNoEditor/UHMP.sh", "UhmapServerExe": "/home/hmp/UnrealHmapBinary/Version3.5/LinuxServer/UHMPServer.sh", "TimeDilation": 2, // simulation time speed up, larger is faster "TEAM_NAMES": [ "ALGORITHM.ppo_ma.foundation->ReinforceAlgorithmFoundation", "TEMP.TEAM2.ALGORITHM.ppo_ma.foundation->ReinforceAlgorithmFoundation", ] }, // --- Part3: config ALGORITHM 1/2 --- "ALGORITHM.ppo_ma.shell_env.py->ShellEnvConfig": { "add_avail_act": true }, "ALGORITHM.ppo_ma.foundation.py->AlgorithmConfig": { "train_traj_needed": 64, "use_normalization": true, "load_specific_checkpoint": "", "gamma": 0.99, "gamma_in_reward_forwarding": "True", "gamma_in_reward_forwarding_value": 0.95, "prevent_batchsize_oom": "True", "lr": 0.0004, "ppo_epoch": 24, "policy_resonance": false, "debug": true, "n_entity_placeholder": 11 }, // --- Part3: config ALGORITHM 2/2 --- "TEMP.TEAM2.ALGORITHM.ppo_ma.shell_env.py->ShellEnvConfig": { "add_avail_act": true }, "TEMP.TEAM2.ALGORITHM.ppo_ma.foundation.py->AlgorithmConfig": { "train_traj_needed": 64, "use_normalization": true, "load_specific_checkpoint": "", "gamma": 0.99, "gamma_in_reward_forwarding": "True", "gamma_in_reward_forwarding_value": 0.95, "prevent_batchsize_oom": "True", "lr": 0.0004, "ppo_epoch": 24, "policy_resonance": false, "debug": true, "n_entity_placeholder": 11 }, } ================================================ FILE: PythonExample/hmp_minimal_modules/ZDOCS/examples/uhmap/ppoma+predatorprey.jsonc ================================================ { // --- Part1: config HMP core --- "config.py->GlobalConfig": { "note": "ppoma-predatorprey",// http://localhost:59547 "env_name": "uhmap", "env_path": "MISSION.uhmap", // "heartbeat_on": "False", "draw_mode": "Img", "num_threads": 1, // 请预留 num_threads * 1 GB 的内存空间 "report_reward_interval": 128, "test_interval": 1280, "test_epoch": 512, "interested_team": 0, "seed": 10098, 
"device": "cpu", "max_n_episode": 5000000, "fold": 1, "backup_files": [ "MISSION/uhmap" ] }, // --- Part2: config MISSION --- "MISSION.uhmap.uhmap_env_wrapper.py->ScenarioConfig": { "N_AGENT_EACH_TEAM": [10, 3], "MaxEpisodeStep": 100, "StepGameTime": 0.5, "StateProvided": false, "render": false, // note: random seed has different impact on renderer and server "UElink2editor": false, "HeteAgents": true, "UnrealLevel": "UhmapLargeScale", "SubTaskSelection": "UhmapPreyPredator", "UhmapVersion":"3.5", "UhmapRenderExe": "/home/hmp/UnrealHmapBinary/Version3.5/LinuxNoEditor/UHMP.sh", "UhmapServerExe": "/home/hmp/UnrealHmapBinary/Version3.5/LinuxServer/UHMPServer.sh", "TimeDilation": 2, // simulation time speed up, larger is faster "TEAM_NAMES": [ "ALGORITHM.ppo_ma.foundation->ReinforceAlgorithmFoundation", "TEMP.TEAM2.ALGORITHM.ppo_ma.foundation->ReinforceAlgorithmFoundation", ] }, // --- Part3: config ALGORITHM 1/2 --- "ALGORITHM.ppo_ma.shell_env.py->ShellEnvConfig": { "add_avail_act": true }, "ALGORITHM.ppo_ma.foundation.py->AlgorithmConfig": { "train_traj_needed": 64, "use_normalization": true, "load_specific_checkpoint": "", "gamma": 0.99, "gamma_in_reward_forwarding": "True", "gamma_in_reward_forwarding_value": 0.95, "prevent_batchsize_oom": "True", "lr": 0.0004, "ppo_epoch": 24, "policy_resonance": false, "debug": true, "n_entity_placeholder": 11 }, // --- Part3: config ALGORITHM 2/2 --- "TEMP.TEAM2.ALGORITHM.ppo_ma.shell_env.py->ShellEnvConfig": { "add_avail_act": true }, "TEMP.TEAM2.ALGORITHM.ppo_ma.foundation.py->AlgorithmConfig": { "train_traj_needed": 64, "use_normalization": true, "load_specific_checkpoint": "", "gamma": 0.99, "gamma_in_reward_forwarding": "True", "gamma_in_reward_forwarding_value": 0.95, "prevent_batchsize_oom": "True", "lr": 0.0004, "ppo_epoch": 24, "policy_resonance": false, "debug": true, "n_entity_placeholder": 11 }, } ================================================ FILE: 
PythonExample/hmp_minimal_modules/ZDOCS/examples/uhmap/ppoma+uhmap10vs10hete.jsonc ================================================ { "config.py->GlobalConfig": { "note": "ppoma-uhmap10vs10", "env_name": "uhmap", "env_path": "MISSION.uhmap", "draw_mode": "Img", "num_threads": 16, // 请预留 num_threads * 1 GB 的内存空间 "report_reward_interval": 256, "test_interval": 5120, "test_epoch": 256, "interested_team": 0, "seed": 8834, "device": "cuda", "max_n_episode": 5000000, "fold": 1, "backup_files": [ "ALGORITHM/ppo_ma", "MISSION/uhmap" ] }, "MISSION.uhmap.uhmap_env_wrapper.py->ScenarioConfig": { "N_AGENT_EACH_TEAM": [10, 10], // update N_AGENT_EACH_TEAM "MaxEpisodeStep": 125, "StepGameTime": 0.5, "StateProvided": false, "render": false, "UElink2editor": false, "HeteAgents": true, "UnrealLevel": "UhmapLargeScale", "SubTaskSelection": "UhmapLargeScale", "UhmapVersion": "3.5", "UhmapRenderExe": "/home/hmp/UnrealHmapBinary/Version3.5/LinuxNoEditor/UHMP.sh", "UhmapServerExe": "/home/hmp/UnrealHmapBinary/Version3.5/LinuxServer/UHMPServer.sh", "TimeDilation": 64, "TEAM_NAMES": [ "ALGORITHM.ppo_ma.foundation->ReinforceAlgorithmFoundation", "ALGORITHM.script_ai.uhmap_ls->DummyAlgorithmLinedAttack" ] }, "MISSION.uhmap.SubTasks.UhmapLargeScaleConf.py->SubTaskConfig":{ "agent_list": [ { "team":0, "tid":0, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, { "team":0, "tid":1, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":2, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":3, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":4, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":5, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":6, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":7, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":8, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":9, "type":"RLA_UAV_Support", 
"init_fn_name":"init_air" }, { "team":1, "tid":0, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, { "team":1, "tid":1, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":2, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":3, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":4, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":5, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":6, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":7, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":8, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":9, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, ] }, // --- Part3: config ALGORITHM 1/2 --- "ALGORITHM.script_ai.uhmap_ls.py->DummyAlgConfig": { "reserve": "" }, // --- Part3: config ALGORITHM 2/2 --- "ALGORITHM.ppo_ma.shell_env.py->ShellEnvConfig": { "add_avail_act": true }, "ALGORITHM.ppo_ma.foundation.py->AlgorithmConfig": { "train_traj_needed": 256, "use_normalization": true, "load_specific_checkpoint": "", "gamma": 0.99, "gamma_in_reward_forwarding": "True", "gamma_in_reward_forwarding_value": 0.95, "prevent_batchsize_oom": "True", "lr": 0.0004, "ppo_epoch": 24, "policy_resonance": false, "debug": true, "n_entity_placeholder": 11 } } ================================================ FILE: PythonExample/hmp_minimal_modules/ZDOCS/examples/uhmap/ppoma_waterdrop.jsonc ================================================ { // --- Part1: config HMP core --- "config.py->GlobalConfig": { "note": "ppoma-waterdorp",// http://localhost:59547 "env_name": "uhmap", "env_path": "MISSION.uhmap", // "heartbeat_on": "False", "draw_mode": "Img", "num_threads": 1, // 请预留 num_threads * 1 GB 的内存空间 "report_reward_interval": 128, "test_interval": 1280, "mt_act_order": "new_method", "test_epoch": 512, "interested_team": 0, "seed": 10098, "device": "cpu", "max_n_episode": 
5000000, "fold": 1, "backup_files": [ "MISSION/uhmap" ] }, // --- Part2: config MISSION --- "MISSION.uhmap.uhmap_env_wrapper.py->ScenarioConfig": { "N_AGENT_EACH_TEAM": [ 10, 2 ], // 10 ships, 2 waterdrops "MaxEpisodeStep": 100, "StepGameTime": 0.5, "StateProvided": false, "render": false, // note: random seed has different impact on renderer and server "UElink2editor": true, "HeteAgents": true, "UnrealLevel": "UhmapWaterdrop", "SubTaskSelection": "UhmapWaterdrop", "UhmapVersion":"3.5", "UhmapRenderExe": "/home/hmp/UnrealHmapBinary/Version3.5/LinuxNoEditor/UHMP.sh", "UhmapServerExe": "/home/hmp/UnrealHmapBinary/Version3.5/LinuxServer/UHMPServer.sh", "TimeDilation": 64, // simulation time speed up, larger is faster "TEAM_NAMES": [ "ALGORITHM.my_ai.foundation->ReinforceAlgorithmFoundation", "TEMP.TEAM2.ALGORITHM.my_ai.foundation->ReinforceAlgorithmFoundation", ] }, // --- Part3: config ALGORITHM 1/2 --- "ALGORITHM.my_ai.foundation.py->AlgorithmConfig": { }, // --- Part3: config ALGORITHM 2/2 --- "TEMP.TEAM2.ALGORITHM.my_ai.foundation.py->AlgorithmConfig": { }, } ================================================ FILE: PythonExample/hmp_minimal_modules/ZDOCS/examples/uhmap/qmix+uhmap10vs10hete.jsonc ================================================ { "config.py->GlobalConfig": { "note": "RVE-drone2-qmix-fixstate-run1", "env_name": "uhmap", "env_path": "MISSION.uhmap", "draw_mode": "Img", "num_threads": 8, // 请预留 num_threads * 1 GB 的内存空间 "report_reward_interval": 256, "test_interval": 5120, "test_epoch": 256, "interested_team": 0, "seed": 8529, "device": "cuda", "max_n_episode": 5000000, "fold": 1, "backup_files": [ "ALGORITHM/pymarl2_compat", "MISSION/uhmap" ] }, "MISSION.uhmap.uhmap_env_wrapper.py->ScenarioConfig": { "N_AGENT_EACH_TEAM": [10, 10], // update N_AGENT_EACH_TEAM "MaxEpisodeStep": 125, "StepGameTime": 0.5, "StateProvided": false, "render": false, "UElink2editor": false, "HeteAgents": true, "UnrealLevel": "UhmapLargeScale", "SubTaskSelection": 
"UhmapLargeScale", "UhmapVersion": "3.5", "UhmapRenderExe": "/home/hmp/UnrealHmapBinary/Version3.5/LinuxNoEditor/UHMP.sh", "UhmapServerExe": "/home/hmp/UnrealHmapBinary/Version3.5/LinuxServer/UHMPServer.sh", "TimeDilation": 64, // simulation time speed up, larger is faster "TEAM_NAMES": [ "ALGORITHM.pymarl2_compat.pymarl2_compat->PymarlFoundation", "ALGORITHM.script_ai.uhmap_ls->DummyAlgorithmLinedAttack" ] }, "MISSION.uhmap.SubTasks.UhmapLargeScaleConf.py->SubTaskConfig":{ "agent_list": [ { "team":0, "tid":0, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, { "team":0, "tid":1, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":2, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":3, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":4, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":5, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":6, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":7, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":8, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":9, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, { "team":1, "tid":0, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, { "team":1, "tid":1, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":2, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":3, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":4, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":5, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":6, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":7, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":8, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":9, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, ] }, 
"ALGORITHM.script_ai.uhmap_ls.py->DummyAlgConfig": { "reserve": "" }, "ALGORITHM.pymarl2_compat.pymarl2_compat.py->AlgorithmConfig": { "use_shell": "mini_shell_uhmap", "state_compat": "pad", "pymarl_config_injection": { "controllers.my_n_controller.py->PymarlAlgorithmConfig": { "use_normalization": "True", "use_vae": "False" }, "config.py->GlobalConfig": { "batch_size": 128, "load_checkpoint": "False" } } } } ================================================ FILE: PythonExample/hmp_minimal_modules/ZDOCS/examples/uhmap/qmix+uhmap20vs20.jsonc ================================================ { // --- Part1: config HMP core --- "config.py->GlobalConfig": { "note": "qmix-uhmap50vs50",// "env_name": "uhmap", "env_path": "MISSION.uhmap", // "heartbeat_on": "False", "draw_mode": "Img", "num_threads": 8, // 请预留 num_threads * 1 GB 的内存空间 "report_reward_interval": 256, "test_interval": 5120, "test_epoch": 256, "interested_team": 0, "seed": 10098, "device": "cuda", "max_n_episode": 5000000, "fold": 1, "backup_files": [ "ALGORITHM/pymarl2_compat", "MISSION/uhmap" ] }, // --- Part2: config MISSION --- "MISSION.uhmap.uhmap_env_wrapper.py->ScenarioConfig": { "N_AGENT_EACH_TEAM": [20, 20], // update N_AGENT_EACH_TEAM "MaxEpisodeStep": 150, "StepGameTime": 0.5, "StateProvided": false, "render": false, "UElink2editor": false, "HeteAgents": true, "UnrealLevel": "UhmapLargeScale", "SubTaskSelection": "UhmapLargeScale", "UhmapVersion": "3.5", "UhmapRenderExe": "/home/hmp/UnrealHmapBinary/Version3.5/LinuxNoEditor/UHMP.sh", "UhmapServerExe": "/home/hmp/UnrealHmapBinary/Version3.5/LinuxServer/UHMPServer.sh", "TimeDilation": 64, // simulation time speed up, larger is faster "TEAM_NAMES": [ "ALGORITHM.pymarl2_compat.pymarl2_compat->PymarlFoundation", "ALGORITHM.script_ai.uhmap_ls->DummyAlgorithmLinedAttack", ] }, "MISSION.uhmap.SubTasks.UhmapLargeScaleConf.py->SubTaskConfig":{ "agent_list": [ { "team":0, "tid":0, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, { "team":0, "tid":1, 
"type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":2, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":3, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":4, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":5, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":6, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":7, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":8, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":9, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":10, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":11, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":12, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":13, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":14, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":15, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":16, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":17, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":18, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":19, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, { "team":1, "tid":0, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, { "team":1, "tid":1, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":2, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":3, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":4, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":5, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":6, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":7, "type":"RLA_CAR", 
"init_fn_name":"init_ground" }, { "team":1, "tid":8, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":9, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":10, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":11, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":12, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":13, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":14, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":15, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":16, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":17, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":18, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":19, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, ] }, // --- Part3: config ALGORITHM 1/2 --- "ALGORITHM.script_ai.uhmap_ls.py->DummyAlgConfig": { "reserve": "" }, // --- Part3: config ALGORITHM 2/2 --- "ALGORITHM.pymarl2_compat.pymarl2_compat.py->AlgorithmConfig": { "use_shell": "mini_shell_uhmap", "state_compat": "obs_mean", "pymarl_config_injection":{ "controllers.my_n_controller.py->PymarlAlgorithmConfig":{ "use_normalization": "True", "use_vae": "False", }, "config.py->GlobalConfig":{ "batch_size": 128, "load_checkpoint": "False", } } } } ================================================ FILE: PythonExample/hmp_minimal_modules/ZDOCS/examples/uhmap/qmix+uhmap50vs50+debug.jsonc ================================================ { // --- Part1: config HMP core --- "config.py->GlobalConfig": { "note": "qmix-uhmap50vs50",// "env_name": "uhmap", "env_path": "MISSION.uhmap", // "heartbeat_on": "False", "draw_mode": "Img", "num_threads": 1, // 请预留 num_threads * 1 GB 的内存空间 "report_reward_interval": 256, "test_interval": 5120, "test_epoch": 256, "interested_team": 0, "seed": 
10098, "device": "cuda", "max_n_episode": 5000000, "fold": 1, "backup_files": [ "ALGORITHM/pymarl2_compat", "MISSION/uhmap" ] }, // --- Part2: config MISSION --- "MISSION.uhmap.uhmap_env_wrapper.py->ScenarioConfig": { "N_AGENT_EACH_TEAM": [50, 50], // update N_AGENT_EACH_TEAM "MaxEpisodeStep": 150, "StepGameTime": 0.5, "StateProvided": false, "render": false, "UElink2editor": true, "HeteAgents": true, "UnrealLevel": "UhmapLargeScale", "SubTaskSelection": "UhmapHuge", "UhmapVersion": "3.5", "UhmapRenderExe": "/home/hmp/UnrealHmapBinary/Version3.5/LinuxNoEditor/UHMP.sh", "UhmapServerExe": "/home/hmp/UnrealHmapBinary/Version3.5/LinuxServer/UHMPServer.sh", "TimeDilation": 64, // simulation time speed up, larger is faster "TEAM_NAMES": [ "ALGORITHM.script_ai.uhmap_ls->DummyAlgorithmLinedAttack", "TEMP.TEAM2.ALGORITHM.script_ai.uhmap_ls->DummyAlgorithmLinedAttack", ] }, "MISSION.uhmap.SubTasks.UhmapHugeConf.py->SubTaskConfig":{ "agent_list": [ { "team":0, "tid":0, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, { "team":0, "tid":1, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":2, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":3, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":4, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":5, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":6, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":7, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":8, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":9, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":10, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":11, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":12, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":13, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { 
"team":0, "tid":14, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":15, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":16, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":17, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":18, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":19, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":20, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":21, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":22, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":23, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":24, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":25, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":26, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":27, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":28, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":29, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":30, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":31, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":32, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":33, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":34, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":35, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":36, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":37, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":38, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":39, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":40, "type":"RLA_CAR_Laser", 
"init_fn_name":"init_ground" }, { "team":0, "tid":41, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":42, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":43, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":44, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":45, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":46, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, { "team":0, "tid":47, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, { "team":0, "tid":48, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, { "team":0, "tid":49, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, { "team":1, "tid":0, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, { "team":1, "tid":1, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":2, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":3, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":4, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":5, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":6, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":7, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":8, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":9, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":10, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":11, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":12, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":13, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":14, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":15, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":16, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, 
"tid":17, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":18, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":19, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":20, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":21, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":22, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":23, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":24, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":25, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":26, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":27, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":28, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":29, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":30, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":31, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":32, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":33, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":34, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":35, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":36, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":37, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":38, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":39, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":40, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":41, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":42, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":43, "type":"RLA_CAR", 
"init_fn_name":"init_ground" }, { "team":1, "tid":44, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":45, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":46, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, { "team":1, "tid":47, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, { "team":1, "tid":48, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, { "team":1, "tid":49, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, ] }, // --- Part3: config ALGORITHM 1/2 --- "ALGORITHM.script_ai.uhmap_ls.py->DummyAlgConfig": { "reserve": "" }, // --- Part3: config ALGORITHM 1/2 --- "TEMP.TEAM2.ALGORITHM.script_ai.uhmap_ls.py->DummyAlgConfig": { "reserve": "" }, } ================================================ FILE: PythonExample/hmp_minimal_modules/ZDOCS/examples/uhmap/qmix+uhmap50vs50.jsonc ================================================ { // --- Part1: config HMP core --- "config.py->GlobalConfig": { "note": "qmix-uhmap50vs50",// "env_name": "uhmap", "env_path": "MISSION.uhmap", // "heartbeat_on": "False", "draw_mode": "Img", "num_threads": 8, // 请预留 num_threads * 1 GB 的内存空间 "report_reward_interval": 256, "test_interval": 5120, "test_epoch": 256, "interested_team": 0, "seed": 10098, "device": "cuda", "max_n_episode": 5000000, "fold": 1, "backup_files": [ "ALGORITHM/pymarl2_compat", "MISSION/uhmap" ] }, // --- Part2: config MISSION --- "MISSION.uhmap.uhmap_env_wrapper.py->ScenarioConfig": { "N_AGENT_EACH_TEAM": [100, 100], // update N_AGENT_EACH_TEAM "MaxEpisodeStep": 150, "StepGameTime": 0.5, "StateProvided": false, "render": false, "UElink2editor": false, "HeteAgents": true, "UnrealLevel": "UhmapLargeScale", "SubTaskSelection": "UhmapHuge", "UhmapVersion": "3.5", "UhmapRenderExe": "../WindowsNoEditor/UHMP.exe", "UhmapServerExe": "/home/hmp/UnrealHmapBinary/Version3.5/LinuxServer/UHMPServer.sh", "TimeDilation": 64, // simulation time speed up, larger is faster "TEAM_NAMES": [ 
"ALGORITHM.pymarl2_compat.pymarl2_compat->PymarlFoundation", "ALGORITHM.script_ai.uhmap_ls->DummyAlgorithmLinedAttack", ] }, "MISSION.uhmap.SubTasks.UhmapHugeConf.py->SubTaskConfig":{ "agent_list": [ { "team":0, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, { "team":0, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR", 
"init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, { "team":1, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, 
"type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR_Laser", 
"init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "type":"RLA_CAR", "init_fn_name":"init_ground" }, ] }, // --- Part3: config ALGORITHM 1/2 --- "ALGORITHM.script_ai.uhmap_ls.py->DummyAlgConfig": { "reserve": "" }, // --- Part3: config ALGORITHM 2/2 --- "ALGORITHM.pymarl2_compat.pymarl2_compat.py->AlgorithmConfig": { "use_shell": "mini_shell_uhmap", "state_compat": "obs_mean", "pymarl_config_injection":{ "controllers.my_n_controller.py->PymarlAlgorithmConfig":{ "use_normalization": "True", "use_vae": "False", }, "config.py->GlobalConfig":{ "batch_size": 128, "load_checkpoint": "False", } } } } ================================================ FILE: PythonExample/hmp_minimal_modules/ZDOCS/examples/uhmap/qplex+uhmap10vs10hete.jsonc ================================================ { "config.py->GlobalConfig": { "note": "qplex-uhmap", "env_name": "uhmap", "env_path": "MISSION.uhmap", "draw_mode": "Img", "num_threads": 8, // 请预留 num_threads * 1 GB 的内存空间 "report_reward_interval": 256, "test_interval": 5120, "test_epoch": 256, "interested_team": 0, "device": "cuda", "max_n_episode": 5000000, "fold": 1, "backup_files": [ "ALGORITHM/pymarl2_compat", "MISSION/uhmap" ] }, "MISSION.uhmap.uhmap_env_wrapper.py->ScenarioConfig": { "N_AGENT_EACH_TEAM": [10, 10], // update N_AGENT_EACH_TEAM "MaxEpisodeStep": 125, "StepGameTime": 0.5, "StateProvided": 
false, "render": false, "UElink2editor": false, "HeteAgents": true, "UnrealLevel": "UhmapLargeScale", "SubTaskSelection": "UhmapLargeScale", "UhmapVersion": "3.5", "UhmapRenderExe": "/home/hmp/UnrealHmapBinary/Version3.5/LinuxNoEditor/UHMP.sh", "UhmapServerExe": "/home/hmp/UnrealHmapBinary/Version3.5/LinuxServer/UHMPServer.sh", "TimeDilation": 64, "TEAM_NAMES": [ "ALGORITHM.pymarl2_compat.pymarl2_compat->PymarlFoundationOld", "ALGORITHM.script_ai.uhmap_ls->DummyAlgorithmLinedAttack" ] }, "MISSION.uhmap.SubTasks.UhmapLargeScaleConf.py->SubTaskConfig":{ "agent_list": [ { "team":0, "tid":0, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, { "team":0, "tid":1, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":2, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":3, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":4, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":5, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":6, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":7, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":8, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":9, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, { "team":1, "tid":0, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, { "team":1, "tid":1, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":2, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":3, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":4, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":5, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":6, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":7, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":8, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, 
"tid":9, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, ] }, "ALGORITHM.script_ai.uhmap_ls.py->DummyAlgConfig": { "reserve": "" }, "ALGORITHM.pymarl2_compat.pymarl2_compat.py->AlgorithmConfig": { "use_shell": "mini_shell_uhmap", "use_shell_normalization": true, "state_compat": "pad", "load_checkpoint": "False", "pymarl2_alg_select": "qplex", "pymarl2_runner_select": "parallel", "pymarl_config_injection":{ "config.py->GlobalConfig": { "batch_size": 128, "load_checkpoint": "False", "runner": "parallel", }, "controllers.my_n_controller.py->PymarlAlgorithmConfig":{ "use_normalization": "True", "use_vae": "False" }, } } } ================================================ FILE: PythonExample/hmp_minimal_modules/ZDOCS/examples/uhmap/qtran+uhmap10vs10hete.jsonc ================================================ { "config.py->GlobalConfig": { "note": "qtran-cat", "env_name": "uhmap", "env_path": "MISSION.uhmap", "draw_mode": "Img", "num_threads": 8, // 请预留 num_threads * 1 GB 的内存空间 "report_reward_interval": 256, "test_interval": 5120, "test_epoch": 256, "interested_team": 0, "device": "cuda", "max_n_episode": 5000000, "fold": 1, "backup_files": [ "ALGORITHM/pymarl2_compat", "MISSION/uhmap" ] }, "MISSION.uhmap.uhmap_env_wrapper.py->ScenarioConfig": { "N_AGENT_EACH_TEAM": [10, 10], // update N_AGENT_EACH_TEAM "MaxEpisodeStep": 125, "StepGameTime": 0.5, "StateProvided": false, "render": false, "UElink2editor": false, "HeteAgents": true, "UnrealLevel": "UhmapLargeScale", "SubTaskSelection": "UhmapLargeScale", "UhmapVersion": "3.5", "UhmapRenderExe": "/home/hmp/UnrealHmapBinary/Version3.5/LinuxNoEditor/UHMP.sh", "UhmapServerExe": "/home/hmp/UnrealHmapBinary/Version3.5/LinuxServer/UHMPServer.sh", "TimeDilation": 64, "TEAM_NAMES": [ "ALGORITHM.pymarl2_compat.pymarl2_compat->PymarlFoundationOld", "ALGORITHM.script_ai.uhmap_ls->DummyAlgorithmLinedAttack" ] }, "MISSION.uhmap.SubTasks.UhmapLargeScaleConf.py->SubTaskConfig":{ "agent_list": [ { "team":0, "tid":0, 
"type":"RLA_UAV_Support", "init_fn_name":"init_air" }, { "team":0, "tid":1, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":2, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":3, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":4, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":5, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":6, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":7, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":8, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":9, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, { "team":1, "tid":0, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, { "team":1, "tid":1, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":2, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":3, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":4, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":5, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":6, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":7, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":8, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":9, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, ] }, "ALGORITHM.script_ai.uhmap_ls.py->DummyAlgConfig": { "reserve": "" }, "ALGORITHM.pymarl2_compat.pymarl2_compat.py->AlgorithmConfig": { "use_shell": "mini_shell_uhmap", "use_shell_normalization": true, "state_compat": "obs_cat", "load_checkpoint": "False", "pymarl2_alg_select": "qtran", "pymarl2_runner_select": "parallel", "pymarl_config_injection":{ "config.py->GlobalConfig": { "batch_size": 128, "load_checkpoint": "False", "runner": "parallel", }, "controllers.my_n_controller.py->PymarlAlgorithmConfig":{ 
"use_normalization": "True", "use_vae": "False" }, } } } ================================================ FILE: PythonExample/hmp_minimal_modules/ZDOCS/examples/uhmap/random_waterdrop.jsonc ================================================ { // --- Part1: config HMP core --- "config.py->GlobalConfig": { "note": "ppoma-waterdorp",// http://localhost:59547 "env_name": "uhmap", "env_path": "MISSION.uhmap", // "heartbeat_on": "False", "draw_mode": "Img", "num_threads": 1, // 请预留 num_threads * 1 GB 的内存空间 "report_reward_interval": 128, "test_interval": 1280, "mt_act_order": "new_method", "test_epoch": 512, "interested_team": 0, "seed": 10098, "device": "cpu", "max_n_episode": 5000000, "fold": 1, "backup_files": [ "MISSION/uhmap" ] }, // --- Part2: config MISSION --- "MISSION.uhmap.uhmap_env_wrapper.py->ScenarioConfig": { "N_AGENT_EACH_TEAM": [ 10, 2 ], // 10 ships, 2 waterdrops "MaxEpisodeStep": 100, "StepGameTime": 0.5, "StateProvided": false, "render": false, // note: random seed has different impact on renderer and server "UElink2editor": true, "HeteAgents": true, "UnrealLevel": "UhmapWaterdrop", "SubTaskSelection": "UhmapWaterdrop", "UhmapVersion":"3.5", "UhmapRenderExe": "/home/hmp/UnrealHmapBinary/Version3.5/LinuxNoEditor/UHMP.sh", "UhmapServerExe": "/home/hmp/UnrealHmapBinary/Version3.5/LinuxServer/UHMPServer.sh", "TimeDilation": 64, // simulation time speed up, larger is faster "TEAM_NAMES": [ "ALGORITHM.my_ai.foundation->DiscreteRLFoundation", "TEMP.TEAM2.ALGORITHM.my_ai.foundation->DiscreteRLFoundation", ] }, // --- Part3: config ALGORITHM 1/2 --- "ALGORITHM.my_ai.foundation.py->AlgorithmConfig": { }, // --- Part3: config ALGORITHM 2/2 --- "TEMP.TEAM2.ALGORITHM.my_ai.foundation.py->AlgorithmConfig": { }, } ================================================ FILE: PythonExample/hmp_minimal_modules/ZDOCS/pip_requirement.md ================================================ # all pip requirements First of all, python version must >= 3.8, then, use pip to install 
following packages: ## Group 1: Common ``` numpy scipy torch gym scikit-learn pygame ``` ## Group 2: Unreal-HMAP and Starcraft ``` lz4 smac ``` ## Group 3: Visual ``` flask waitress colorama matplotlib ipykernel ``` ## Group 4: Performance ``` numba cython ``` ## Group 5: Functional ``` func_timeout commentjson PyYAML onedrivedownloader redis filelock ``` ## Group 6: Remote and management ``` paramiko psutil setproctitle sacred ``` ## install ``` pip install torch pip install numpy scipy gym scikit-learn pygame lz4 smac flask waitress colorama matplotlib ipykernel numba cython func_timeout commentjson PyYAML onedrivedownloader redis filelock paramiko psutil setproctitle sacred ``` ================================================ FILE: PythonExample/hmp_minimal_modules/ZDOCS/sc2checkversion ================================================ from __future__ import absolute_import from __future__ import division from __future__ import print_function from smac.env.multiagentenv import MultiAgentEnv from smac.env.starcraft2.maps import get_map_params import atexit import os from operator import attrgetter from copy import deepcopy import numpy as np import enum import math from absl import logging from pysc2 import maps from pysc2 import run_configs from pysc2.lib import protocol from s2clientprotocol import common_pb2 as sc_common from s2clientprotocol import sc2api_pb2 as sc_pb from s2clientprotocol import raw_pb2 as r_pb from s2clientprotocol import debug_pb2 as d_pb races = { "R": sc_common.Random, "P": sc_common.Protoss, "T": sc_common.Terran, "Z": sc_common.Zerg, } difficulties = { "1": sc_pb.VeryEasy, "2": sc_pb.Easy, "3": sc_pb.Medium, "4": sc_pb.MediumHard, "5": sc_pb.Hard, "6": sc_pb.Harder, "7": sc_pb.VeryHard, "8": sc_pb.CheatVision, "9": sc_pb.CheatMoney, "A": sc_pb.CheatInsane, } actions = { "move": 16, # target: PointOrUnit "attack": 23, # target: PointOrUnit "stop": 4, # target: None "heal": 386, # Unit } class Direction(enum.IntEnum): NORTH = 0 SOUTH = 1 
EAST = 2 WEST = 3 class StarCraft2Env(MultiAgentEnv): """The StarCraft II environment for decentralised multi-agent micromanagement scenarios. """ def __init__( self, sc_version, map_name="8m", step_mul=8, move_amount=2, difficulty="7", game_version=None, seed=None, continuing_episode=False, obs_all_health=True, obs_own_health=True, obs_last_action=False, obs_pathing_grid=False, obs_terrain_height=False, obs_instead_of_state=False, obs_timestep_number=False, state_last_action=True, state_timestep_number=False, reward_sparse=False, reward_only_positive=True, reward_death_value=10, reward_win=200, reward_defeat=0, reward_negative_scale=0.5, reward_scale=True, reward_scale_rate=20, reward_vec=False, return_mat=False, replay_dir="", replay_prefix="", window_size_x=1920, window_size_y=1200, heuristic_ai=False, heuristic_rest=False, debug=False, render=False, ): """ Create a StarCraftC2Env environment. Parameters ---------- map_name : str, optional The name of the SC2 map to play (default is "8m"). The full list can be found by running bin/map_list. step_mul : int, optional How many game steps per agent step (default is 8). None indicates to use the default map step_mul. move_amount : float, optional How far away units are ordered to move per step (default is 2). difficulty : str, optional The difficulty of built-in computer AI bot (default is "7"). game_version : str, optional StarCraft II game version (default is None). None indicates the latest version. seed : int, optional Random seed used during game initialisation. This allows to continuing_episode : bool, optional Whether to consider episodes continuing or finished after time limit is reached (default is False). obs_all_health : bool, optional Agents receive the health of all units (in the sight range) as part of observations (default is True). obs_own_health : bool, optional Agents receive their own health as a part of observations (default is False). This flag is ignored when obs_all_health == True. 
obs_last_action : bool, optional Agents receive the last actions of all units (in the sight range) as part of observations (default is False). obs_pathing_grid : bool, optional Whether observations include pathing values surrounding the agent (default is False). obs_terrain_height : bool, optional Whether observations include terrain height values surrounding the agent (default is False). obs_instead_of_state : bool, optional Use combination of all agents' observations as the global state (default is False). obs_timestep_number : bool, optional Whether observations include the current timestep of the episode (default is False). state_last_action : bool, optional Include the last actions of all agents as part of the global state (default is True). state_timestep_number : bool, optional Whether the state includes the current timestep of the episode (default is False). reward_sparse : bool, optional Receive 1/-1 reward for winning/losing an episode (default is False). The rest of reward parameters are ignored if True. reward_only_positive : bool, optional Reward is always positive (default is True). reward_death_value : float, optional The amount of reward received for killing an enemy unit (default is 10). This is also the negative penalty for having an allied unit killed if reward_only_positive == False. reward_win : float, optional The reward for winning in an episode (default is 200). reward_defeat : float, optional The reward for losing in an episode (default is 0). This value should be nonpositive. reward_negative_scale : float, optional Scaling factor for negative rewards (default is 0.5). This parameter is ignored when reward_only_positive == True. reward_scale : bool, optional Whether or not to scale the reward (default is True). reward_scale_rate : float, optional Reward scale rate (default is 20). 
When reward_scale == True, the reward received by the agents is divided by (max_reward / reward_scale_rate), where max_reward is the maximum possible reward per episode without considering the shield regeneration of Protoss units. replay_dir : str, optional The directory to save replays (default is None). If None, the replay will be saved in Replays directory where StarCraft II is installed. replay_prefix : str, optional The prefix of the replay to be saved (default is None). If None, the name of the map will be used. window_size_x : int, optional The length of StarCraft II window size (default is 1920). window_size_y: int, optional The height of StarCraft II window size (default is 1200). heuristic_ai: bool, optional Whether or not to use a non-learning heuristic AI (default False). heuristic_rest: bool, optional At any moment, restrict the actions of the heuristic AI to be chosen from actions available to RL agents (default is False). Ignored if heuristic_ai == False. debug: bool, optional Log messages about observations, state, actions and rewards for debugging purposes (default is False). 
""" if sc_version=='old' or sc_version=='2.4.6': os.environ.setdefault("SC2PATH", "~/StarCraftIII") if sc_version=='new' or sc_version=='2.4.10': os.environ.setdefault("SC2PATH", "~/StarCraftIII/Version2410/StarCraftII") # Map arguments self.map_name = map_name map_params = get_map_params(self.map_name) self.n_agents = map_params["n_agents"] self.n_enemies = map_params["n_enemies"] self.episode_limit = map_params["limit"] self._move_amount = move_amount self._step_mul = step_mul self.difficulty = difficulty # Observations and state self.obs_own_health = obs_own_health self.obs_all_health = obs_all_health self.obs_instead_of_state = obs_instead_of_state self.obs_last_action = obs_last_action self.obs_pathing_grid = obs_pathing_grid self.obs_terrain_height = obs_terrain_height self.obs_timestep_number = obs_timestep_number self.state_last_action = state_last_action self.state_timestep_number = state_timestep_number if self.obs_all_health: self.obs_own_health = True self.n_obs_pathing = 8 self.n_obs_height = 9 # Rewards args self.reward_sparse = reward_sparse self.reward_only_positive = reward_only_positive self.reward_negative_scale = reward_negative_scale self.reward_death_value = reward_death_value self.reward_win = reward_win self.reward_defeat = reward_defeat self.reward_scale = reward_scale self.reward_scale_rate = reward_scale_rate # Other self.game_version = game_version self.continuing_episode = continuing_episode self._seed = seed self.heuristic_ai = heuristic_ai self.heuristic_rest = heuristic_rest self.debug = debug self.window_size = (window_size_x, window_size_y) self.replay_dir = replay_dir self.replay_prefix = replay_prefix # Actions self.n_actions_no_attack = 6 self.n_actions_move = 4 self.n_actions = self.n_actions_no_attack + self.n_enemies # Map info self._agent_race = map_params["a_race"] self._bot_race = map_params["b_race"] self.shield_bits_ally = 1 if self._agent_race == "P" else 0 self.shield_bits_enemy = 1 if self._bot_race == "P" else 0 
self.unit_type_bits = map_params["unit_type_bits"] self.map_type = map_params["map_type"] self._unit_types = None self.max_reward = ( self.n_enemies * self.reward_death_value + self.reward_win ) # create lists containing the names of attributes returned in states self.ally_state_attr_names = [ "health", "energy/cooldown", "rel_x", "rel_y", ] self.enemy_state_attr_names = ["health", "rel_x", "rel_y"] if self.shield_bits_ally > 0: self.ally_state_attr_names += ["shield"] if self.shield_bits_enemy > 0: self.enemy_state_attr_names += ["shield"] if self.unit_type_bits > 0: bit_attr_names = [ "type_{}".format(bit) for bit in range(self.unit_type_bits) ] self.ally_state_attr_names += bit_attr_names self.enemy_state_attr_names += bit_attr_names self.agents = {} self.enemies = {} self._episode_count = 0 self._episode_steps = 0 self._total_steps = 0 self._obs = None self.battles_won = 0 self.battles_game = 0 self.timeouts = 0 self.force_restarts = 0 self.last_stats = None self.death_tracker_ally = np.zeros(self.n_agents) self.death_tracker_enemy = np.zeros(self.n_enemies) self.previous_ally_units = None self.previous_enemy_units = None self.last_action = np.zeros((self.n_agents, self.n_actions)) self._min_unit_type = 0 self.marine_id = self.marauder_id = self.medivac_id = 0 self.hydralisk_id = self.zergling_id = self.baneling_id = 0 self.stalker_id = self.colossus_id = self.zealot_id = 0 self.max_distance_x = 0 self.max_distance_y = 0 self.map_x = 0 self.map_y = 0 self.reward = 0 self.renderer = None self.terrain_height = None self.pathing_grid = None self._run_config = None self._sc2_proc = None self._controller = None self.return_mat = return_mat self.reward_vec = reward_vec self.enable_threejs_render = render # Try to avoid leaking SC2 processes on shutdown atexit.register(lambda: self.close()) def _launch(self): """Launch the StarCraft II game.""" self._run_config = run_configs.get(version=self.game_version) _map = maps.get(self.map_name) # Setting up the interface 
interface_options = sc_pb.InterfaceOptions(raw=True, score=False) self._sc2_proc = self._run_config.start( window_size=self.window_size, want_rgb=False ) self._controller = self._sc2_proc.controller # Request to create the game create = sc_pb.RequestCreateGame( local_map=sc_pb.LocalMap( map_path=_map.path, map_data=self._run_config.map_data(_map.path), ), realtime=False, random_seed=self._seed, ) create.player_setup.add(type=sc_pb.Participant) create.player_setup.add( type=sc_pb.Computer, race=races[self._bot_race], difficulty=difficulties[self.difficulty], ) self._controller.create_game(create) join = sc_pb.RequestJoinGame( race=races[self._agent_race], options=interface_options ) self._controller.join_game(join) game_info = self._controller.game_info() map_info = game_info.start_raw map_play_area_min = map_info.playable_area.p0 map_play_area_max = map_info.playable_area.p1 self.max_distance_x = map_play_area_max.x - map_play_area_min.x self.max_distance_y = map_play_area_max.y - map_play_area_min.y self.map_x = map_info.map_size.x self.map_y = map_info.map_size.y if map_info.pathing_grid.bits_per_pixel == 1: vals = np.array(list(map_info.pathing_grid.data)).reshape( self.map_x, int(self.map_y / 8) ) self.pathing_grid = np.transpose( np.array( [ [(b >> i) & 1 for b in row for i in range(7, -1, -1)] for row in vals ], dtype=np.bool, ) ) else: self.pathing_grid = np.invert( np.flip( np.transpose( np.array( list(map_info.pathing_grid.data), dtype=np.bool ).reshape(self.map_x, self.map_y) ), axis=1, ) ) self.terrain_height = ( np.flip( np.transpose( np.array(list(map_info.terrain_height.data)).reshape( self.map_x, self.map_y ) ), 1, ) / 255 ) def reset(self): """Reset the environment. Required after each full episode. Returns initial observations and states. 
""" self._episode_steps = 0 if self._episode_count == 0: # Launch StarCraft II self._launch() else: self._restart() # Information kept for counting the reward self.death_tracker_ally = np.zeros(self.n_agents) self.death_tracker_enemy = np.zeros(self.n_enemies) self.previous_ally_units = None self.previous_enemy_units = None self.win_counted = False self.defeat_counted = False self.last_action = np.zeros((self.n_agents, self.n_actions)) if self.heuristic_ai: self.heuristic_targets = [None] * self.n_agents try: self._obs = self._controller.observe() self.init_units() except (protocol.ProtocolError, protocol.ConnectionError): self.full_restart() if self.debug: logging.debug( "Started Episode {}".format(self._episode_count).center( 60, "*" ) ) return self.get_obs(), self.get_state() def _restart(self): """Restart the environment by killing all units on the map. There is a trigger in the SC2Map file, which restarts the episode when there are no units left. """ try: self._kill_all_units() self._controller.step(2) except (protocol.ProtocolError, protocol.ConnectionError): self.full_restart() def full_restart(self): """Full restart. 
        Closes the SC2 process and launches a new one."""
        self._sc2_proc.close()
        self._launch()
        self.force_restarts += 1

    def threejs_renderer(self):
        # Push current unit positions/health and this step's attack/heal
        # events to the three.js web visualiser via the mcom bridge.
        # NOTE(review): reads self.action_heal / self.action_blow, which are
        # (re)created in step() — presumably render only runs from within
        # step(); confirm it is never called before the first step.
        if not hasattr(self, 'threejs_bridge'):
            # Lazily initialise the bridge on first use.
            from VISUALIZE.mcom import mcom
            self.threejs_bridge = mcom(ip='127.0.0.1', port=12084, path='RECYCLE/v2d_logger/', digit=8, rapid_flush=False, draw_mode='Threejs')
            self.threejs_bridge.v2d_init()
            self.threejs_bridge.set_style('grid')
            # self.threejs_bridge.set_style('gray')
            self.threejs_bridge.set_style('star')
            self.threejs_bridge.geometry_rotate_scale_translate('ball', 0, 0, 0, 1, 1, 1, 0,0,0)
            self.threejs_bridge.geometry_rotate_scale_translate('box', 0, 0, 0, 1, 1, 1, 0,0,0)
        for a_id, a_unit in self.agents.items():
            # Allies render as balls: green while alive, yellow when dead.
            color = 'green' if a_unit.health != 0 else 'yellow'
            self.threejs_bridge.v2dx(
                'ball|%d|%s|0.25'%(a_id, color),
                a_unit.pos.x, a_unit.pos.y, a_unit.pos.z,
                vel_dir=a_unit.facing,
                label='%d'%a_unit.health,
                label_color='white',
                attack_range=0)
            # Draw a beam from each healer to this ally.
            for healer_id in self.action_heal[a_id]:
                self.threejs_bridge.flash('beam', src=healer_id, dst=a_id, dur=0.2, size=0.1, color='LimeGreen')
        for a_id, a_unit in self.enemies.items():
            # Enemies render as boxes: red while alive, yellow when dead.
            color = 'red' if a_unit.health != 0 else 'yellow'
            self.threejs_bridge.v2dx(
                'box|%d|%s|0.5'%(a_id+len(self.agents), color),
                a_unit.pos.x, a_unit.pos.y, a_unit.pos.z,
                label='%d'%a_unit.health,
                label_color='white',
                attack_range=0)
            # Draw a beam from each attacker to this enemy.
            for attack_id in self.action_blow[a_id]:
                self.threejs_bridge.flash('beam', src=attack_id, dst=a_id+len(self.agents), dur=0.2, size=0.2, color='HotPink')
        self.threejs_bridge.v2d_show()

    def step(self, actions):
        # print('-----step-----')
        """A single environment step.
        Returns reward, terminated, info."""
        actions_int = [int(a) for a in actions]

        # One-hot record of the actions just taken (exposed in obs/state).
        self.last_action = np.eye(self.n_actions)[np.array(actions_int)]

        # Per-target lists of which agents attacked/healed whom this step;
        # consumed by reward_battle() and the three.js renderer.
        self.action_blow = [[] for _ in self.enemies]
        self.action_heal = [[] for _ in self.agents]

        # Collect individual actions
        sc_actions = []
        if self.debug:
            logging.debug("Actions".center(60, "-"))

        for a_id, action in enumerate(actions_int):
            if not self.heuristic_ai:
                sc_action = self.get_agent_action(a_id, action)
            else:
                sc_action, action_num = self.get_agent_action_heuristic(
                    a_id, action
                )
                actions[a_id] = action_num
            if sc_action:
                sc_actions.append(sc_action)

        # Send action request
        req_actions = sc_pb.RequestAction(actions=sc_actions)
        try:
            self._controller.actions(req_actions)
            # Make step in SC2, i.e. apply actions
            self._controller.step(self._step_mul)
            # Observe here so that we know if the episode is over.
            self._obs = self._controller.observe()
        except (protocol.ProtocolError, protocol.ConnectionError):
            # Connection lost: restart SC2 and end the episode with a
            # zero reward (scalar or per-agent vector, as configured).
            self.full_restart()
            reward = 0 if not self.reward_vec else np.array([0.
                for _ in self.agents])
            return reward, True, {}

        self._total_steps += 1
        self._episode_steps += 1

        # Update units,
        # return None: game is still on,
        # return -1: lose the game
        # return +1: win the game
        # return 0: draw
        game_end_code = self.update_units()

        terminated = False
        # <1> Dense battle reward; not used when self.reward_sparse is set.
        reward = self.reward_battle()
        info = {"battle_won": False}

        # count units that are still alive
        dead_allies, dead_enemies = 0, 0
        for _al_id, al_unit in self.agents.items():
            if al_unit.health == 0:
                dead_allies += 1
        for _e_id, e_unit in self.enemies.items():
            if e_unit.health == 0:
                dead_enemies += 1

        info["dead_allies"] = dead_allies
        info["dead_enemies"] = dead_enemies

        if self.enable_threejs_render:
            self.threejs_renderer()

        if game_end_code is not None:
            # Battle is over
            terminated = True
            self.battles_game += 1
            if game_end_code == 1 and not self.win_counted:
                # Victory
                print('\t [starcraft]: win')
                self.battles_won += 1
                self.win_counted = True
                info["battle_won"] = True
                if not self.reward_sparse:
                    if self.reward_vec:
                        # vector-reward path disabled; dead code below
                        assert False
                        reward += 4  # <2> extra reward for winning
                    else:
                        reward += self.reward_win  # <2> extra reward for winning
                else:
                    reward = 1
            elif game_end_code == -1 and not self.defeat_counted:
                # Defeat
                self.defeat_counted = True
                if not self.reward_sparse:
                    if self.reward_vec:
                        # vector-reward path disabled; dead code below
                        assert False
                        reward += -4  # <2> extra penalty for losing
                    else:
                        reward += self.reward_defeat
                else:
                    reward = -1

        elif self._episode_steps >= self.episode_limit:
            # Episode limit reached
            terminated = True
            if self.continuing_episode:
                info["episode_limit"] = True
            self.battles_game += 1
            self.timeouts += 1

        if self.debug:
            logging.debug("Reward = {}".format(reward).center(60, '-'))

        if terminated:
            self._episode_count += 1

        # print(reward)
        # <3> Scale the scalar reward so a perfect episode is worth
        # reward_scale_rate (vector rewards are left unscaled).
        if self.reward_scale and (not self.reward_vec):
            reward /= self.max_reward / self.reward_scale_rate
            # reward = reward / (self.max_reward/self.reward_scale_rate)
        self.reward = reward
        return reward, terminated, info

    def get_agent_action(self, a_id, action):
        """Construct the action for agent a_id."""
        avail_actions = self.get_avail_agent_actions(a_id)
        assert (
            avail_actions[action] == 1
        ), "Agent {} cannot perform action {}".format(a_id, action)
        unit = self.get_unit_by_id(a_id)
        tag = unit.tag
        x = unit.pos.x
        y = unit.pos.y

        if action == 0:
            # no-op (valid only when dead)
            assert unit.health == 0, "No-op only available for dead agents."
if self.debug: logging.debug("Agent {}: Dead".format(a_id)) return None elif action == 1: # stop cmd = r_pb.ActionRawUnitCommand( ability_id=actions["stop"], unit_tags=[tag], queue_command=False, ) if self.debug: logging.debug("Agent {}: Stop".format(a_id)) elif action == 2: # move north cmd = r_pb.ActionRawUnitCommand( ability_id=actions["move"], target_world_space_pos=sc_common.Point2D( x=x, y=y + self._move_amount ), unit_tags=[tag], queue_command=False, ) if self.debug: logging.debug("Agent {}: Move North".format(a_id)) elif action == 3: # move south cmd = r_pb.ActionRawUnitCommand( ability_id=actions["move"], target_world_space_pos=sc_common.Point2D( x=x, y=y - self._move_amount ), unit_tags=[tag], queue_command=False, ) if self.debug: logging.debug("Agent {}: Move South".format(a_id)) elif action == 4: # move east cmd = r_pb.ActionRawUnitCommand( ability_id=actions["move"], target_world_space_pos=sc_common.Point2D( x=x + self._move_amount, y=y ), unit_tags=[tag], queue_command=False, ) if self.debug: logging.debug("Agent {}: Move East".format(a_id)) elif action == 5: # move west cmd = r_pb.ActionRawUnitCommand( ability_id=actions["move"], target_world_space_pos=sc_common.Point2D( x=x - self._move_amount, y=y ), unit_tags=[tag], queue_command=False, ) if self.debug: logging.debug("Agent {}: Move West".format(a_id)) else: # attack/heal units that are in range target_id = action - self.n_actions_no_attack if self.map_type == "MMM" and unit.unit_type == self.medivac_id: target_unit = self.agents[target_id] action_name = "heal" self.action_heal[target_id].append(a_id) else: target_unit = self.enemies[target_id] action_name = "attack" self.action_blow[target_id].append(a_id) action_id = actions[action_name] target_tag = target_unit.tag cmd = r_pb.ActionRawUnitCommand( ability_id=action_id, target_unit_tag=target_tag, unit_tags=[tag], queue_command=False, ) if self.debug: logging.debug( "Agent {} {}s unit # {}".format( a_id, action_name, target_id ) ) sc_action = 
sc_pb.Action(action_raw=r_pb.ActionRaw(unit_command=cmd)) return sc_action def get_agent_action_heuristic(self, a_id, action): unit = self.get_unit_by_id(a_id) tag = unit.tag target = self.heuristic_targets[a_id] if unit.unit_type == self.medivac_id: if ( target is None or self.agents[target].health == 0 or self.agents[target].health == self.agents[target].health_max ): min_dist = math.hypot(self.max_distance_x, self.max_distance_y) min_id = -1 for al_id, al_unit in self.agents.items(): if al_unit.unit_type == self.medivac_id: continue if ( al_unit.health != 0 and al_unit.health != al_unit.health_max ): dist = self.distance( unit.pos.x, unit.pos.y, al_unit.pos.x, al_unit.pos.y, ) if dist < min_dist: min_dist = dist min_id = al_id self.heuristic_targets[a_id] = min_id if min_id == -1: self.heuristic_targets[a_id] = None return None, 0 action_id = actions["heal"] target_tag = self.agents[self.heuristic_targets[a_id]].tag else: if target is None or self.enemies[target].health == 0: min_dist = math.hypot(self.max_distance_x, self.max_distance_y) min_id = -1 for e_id, e_unit in self.enemies.items(): if ( unit.unit_type == self.marauder_id and e_unit.unit_type == self.medivac_id ): continue if e_unit.health > 0: dist = self.distance( unit.pos.x, unit.pos.y, e_unit.pos.x, e_unit.pos.y ) if dist < min_dist: min_dist = dist min_id = e_id self.heuristic_targets[a_id] = min_id if min_id == -1: self.heuristic_targets[a_id] = None return None, 0 action_id = actions["attack"] target_tag = self.enemies[self.heuristic_targets[a_id]].tag action_num = self.heuristic_targets[a_id] + self.n_actions_no_attack # Check if the action is available if ( self.heuristic_rest and self.get_avail_agent_actions(a_id)[action_num] == 0 ): # Move towards the target rather than attacking/healing if unit.unit_type == self.medivac_id: target_unit = self.agents[self.heuristic_targets[a_id]] else: target_unit = self.enemies[self.heuristic_targets[a_id]] delta_x = target_unit.pos.x - unit.pos.x delta_y = 
target_unit.pos.y - unit.pos.y if abs(delta_x) > abs(delta_y): # east or west if delta_x > 0: # east target_pos = sc_common.Point2D( x=unit.pos.x + self._move_amount, y=unit.pos.y ) action_num = 4 else: # west target_pos = sc_common.Point2D( x=unit.pos.x - self._move_amount, y=unit.pos.y ) action_num = 5 else: # north or south if delta_y > 0: # north target_pos = sc_common.Point2D( x=unit.pos.x, y=unit.pos.y + self._move_amount ) action_num = 2 else: # south target_pos = sc_common.Point2D( x=unit.pos.x, y=unit.pos.y - self._move_amount ) action_num = 3 cmd = r_pb.ActionRawUnitCommand( ability_id=actions["move"], target_world_space_pos=target_pos, unit_tags=[tag], queue_command=False, ) else: # Attack/heal the target cmd = r_pb.ActionRawUnitCommand( ability_id=action_id, target_unit_tag=target_tag, unit_tags=[tag], queue_command=False, ) sc_action = sc_pb.Action(action_raw=r_pb.ActionRaw(unit_command=cmd)) return sc_action, action_num def reward_battle(self): """Reward function when self.reward_spare==False. Returns accumulative hit/shield point damage dealt to the enemy + reward_death_value per enemy unit killed, and, in case self.reward_only_positive == False, - (damage dealt to ally units + reward_death_value per ally unit killed) * self.reward_negative_scale """ if self.reward_sparse: return 0 reward = 0 delta_deaths = 0 delta_ally = 0 delta_enemy = 0 neg_scale = self.reward_negative_scale if self.reward_vec: assert False reward_awise_compat = [0. 
                for _ in self.agents]

        # update deaths, al_id begin from 0
        for al_id, al_unit in self.agents.items():
            if not self.death_tracker_ally[al_id]:
                # did not die so far
                prev_health = (
                    self.previous_ally_units[al_id].health
                    + self.previous_ally_units[al_id].shield
                )
                if al_unit.health == 0:
                    # just died
                    self.death_tracker_ally[al_id] = 1
                    if not self.reward_only_positive:
                        delta_deaths -= self.reward_death_value * neg_scale
                        # reward_death_value=10, reward_negative_scale=0.5, (-5)
                        if self.reward_vec:
                            reward_awise_compat[al_id] -= 1
                    delta_ally += prev_health * neg_scale
                else:
                    # still alive
                    delta_ally += neg_scale * (
                        prev_health - al_unit.health - al_unit.shield
                    )

        # e_id also begin from 0
        for e_id, e_unit in self.enemies.items():
            if not self.death_tracker_enemy[e_id]:
                prev_health = (
                    self.previous_enemy_units[e_id].health
                    + self.previous_enemy_units[e_id].shield
                )
                if e_unit.health == 0:
                    # just died
                    self.death_tracker_enemy[e_id] = 1
                    delta_deaths += self.reward_death_value
                    delta_enemy += prev_health
                else:
                    delta_enemy += prev_health - e_unit.health - e_unit.shield

                if self.reward_vec:
                    # vector-reward path disabled; dead code below splits
                    # credit among the agents that attacked this enemy.
                    assert False
                    if e_unit.health == 0:
                        # just died
                        for al_id in self.action_blow[e_id]:
                            reward_awise_compat[al_id] += (-1) / len(self.action_blow[e_id])
                    else:
                        for al_id in self.action_blow[e_id]:
                            scale_= 0.01
                            reward_awise_compat[al_id] += (prev_health - (e_unit.health+e_unit.shield))*scale_ / len(self.action_blow[e_id])

        if self.reward_only_positive:
            reward = abs(delta_enemy + delta_deaths)  # shield regeneration
        else:
            reward = delta_enemy + delta_deaths - delta_ally

        if self.reward_vec:
            assert False
            reward = np.array(reward_awise_compat)

        return reward

    def get_total_actions(self):
        """Returns the total number of actions an agent could ever take."""
        return self.n_actions

    @staticmethod
    def distance(x1, y1, x2, y2):
        """Distance between two points."""
        return math.hypot(x2 - x1, y2 - y1)

    def unit_shoot_range(self, agent_id):
        """Returns the shooting range for an agent."""
        return 6

    def unit_sight_range(self,
agent_id): """Returns the sight range for an agent.""" return 9 def unit_max_cooldown(self, unit): """Returns the maximal cooldown for a unit.""" switcher = { self.marine_id: 15, self.marauder_id: 25, self.medivac_id: 200, # max energy self.stalker_id: 35, self.zealot_id: 22, self.colossus_id: 24, self.hydralisk_id: 10, self.zergling_id: 11, self.baneling_id: 1, } return switcher.get(unit.unit_type, 15) def save_replay(self): """Save a replay.""" prefix = self.replay_prefix or self.map_name replay_dir = self.replay_dir or "" replay_path = self._run_config.save_replay( self._controller.save_replay(), replay_dir=replay_dir, prefix=prefix, ) logging.info("Replay saved at: %s" % replay_path) def unit_max_shield(self, unit): """Returns maximal shield for a given unit.""" if unit.unit_type == 74 or unit.unit_type == self.stalker_id: return 80 # Protoss's Stalker if unit.unit_type == 73 or unit.unit_type == self.zealot_id: return 50 # Protoss's Zaelot if unit.unit_type == 4 or unit.unit_type == self.colossus_id: return 150 # Protoss's Colossus def can_move(self, unit, direction): """Whether a unit can move in a given direction.""" m = self._move_amount / 2 if direction == Direction.NORTH: x, y = int(unit.pos.x), int(unit.pos.y + m) elif direction == Direction.SOUTH: x, y = int(unit.pos.x), int(unit.pos.y - m) elif direction == Direction.EAST: x, y = int(unit.pos.x + m), int(unit.pos.y) else: x, y = int(unit.pos.x - m), int(unit.pos.y) if self.check_bounds(x, y) and self.pathing_grid[x, y]: return True return False def get_surrounding_points(self, unit, include_self=False): """Returns the surrounding points of the unit in 8 directions.""" x = int(unit.pos.x) y = int(unit.pos.y) ma = self._move_amount points = [ (x, y + 2 * ma), (x, y - 2 * ma), (x + 2 * ma, y), (x - 2 * ma, y), (x + ma, y + ma), (x - ma, y - ma), (x + ma, y - ma), (x - ma, y + ma), ] if include_self: points.append((x, y)) return points def check_bounds(self, x, y): """Whether a point is within the map 
bounds.""" return (0 <= x < self.map_x and 0 <= y < self.map_y) def get_surrounding_pathing(self, unit): """Returns pathing values of the grid surrounding the given unit.""" points = self.get_surrounding_points(unit, include_self=False) vals = [ self.pathing_grid[x, y] if self.check_bounds(x, y) else 1 for x, y in points ] return vals def get_surrounding_height(self, unit): """Returns height values of the grid surrounding the given unit.""" points = self.get_surrounding_points(unit, include_self=True) vals = [ self.terrain_height[x, y] if self.check_bounds(x, y) else 1 for x, y in points ] return vals @staticmethod def stack_vec_with_padding(arr_list, max_len=None, padding=0): _len = [len(arr) for arr in arr_list] if max_len is None: max_len = max(_len) n_subject = len(arr_list) dtype = arr_list[0].dtype arr_np = np.zeros(shape=(n_subject, max_len), dtype=dtype) + padding for i, arr in enumerate(arr_list): arr_np[i,:_len[i]] = arr return arr_np def get_obs_agent(self, agent_id, return_mat=False): """Returns observation for agent_id. The observation is composed of: - agent movement features (where it can move to, height information and pathing grid) - enemy features (available_to_attack, health, relative_x, relative_y, shield, unit_type) - ally features (visible, distance, relative_x, relative_y, shield, unit_type) - agent unit features (health, shield, unit_type) All of this information is flattened and concatenated into a list, in the aforementioned order. To know the sizes of each of the features inside the final list of features, take a look at the functions ``get_obs_move_feats_size()``, ``get_obs_enemy_feats_size()``, ``get_obs_ally_feats_size()`` and ``get_obs_own_feats_size()``. The size of the observation vector may vary, depending on the environment configuration and type of units present in the map. 
        For instance, non-Protoss units will not have shields, movement
        features may or may not include terrain height and pathing grid,
        unit_type is not included if there is only one type of unit in the
        map etc.).

        NOTE: Agents should have access only to their local observations
        during decentralised execution.
        """
        unit = self.get_unit_by_id(agent_id)

        move_feats_dim = self.get_obs_move_feats_size()
        enemy_feats_dim = self.get_obs_enemy_feats_size()
        ally_feats_dim = self.get_obs_ally_feats_size()
        own_feats_dim = self.get_obs_own_feats_size()

        move_feats = np.zeros(move_feats_dim, dtype=np.float32)
        enemy_feats = np.zeros(enemy_feats_dim, dtype=np.float32)
        ally_feats = np.zeros(ally_feats_dim, dtype=np.float32)
        own_feats = np.zeros(own_feats_dim, dtype=np.float32)

        if unit.health > 0:  # otherwise dead, return all zeros
            x = unit.pos.x
            y = unit.pos.y
            sight_range = self.unit_sight_range(agent_id)

            # Movement features
            avail_actions = self.get_avail_agent_actions(agent_id)
            for m in range(self.n_actions_move):
                move_feats[m] = avail_actions[m + 2]

            ind = self.n_actions_move

            if self.obs_pathing_grid:
                move_feats[
                    ind : ind + self.n_obs_pathing  # noqa
                ] = self.get_surrounding_pathing(unit)
                ind += self.n_obs_pathing

            if self.obs_terrain_height:
                move_feats[ind:] = self.get_surrounding_height(unit)

            # Enemy features
            for e_id, e_unit in self.enemies.items():
                e_x = e_unit.pos.x
                e_y = e_unit.pos.y
                dist = self.distance(x, y, e_x, e_y)

                if (
                    dist < sight_range and e_unit.health > 0
                ):  # visible and alive
                    # Sight range > shoot range
                    enemy_feats[e_id, 0] = avail_actions[
                        self.n_actions_no_attack + e_id
                    ]  # available
                    enemy_feats[e_id, 1] = dist / sight_range  # distance
                    enemy_feats[e_id, 2] = (
                        e_x - x
                    ) / sight_range  # relative X
                    enemy_feats[e_id, 3] = (
                        e_y - y
                    ) / sight_range  # relative Y

                    ind = 4
                    if self.obs_all_health:
                        enemy_feats[e_id, ind] = (
                            e_unit.health / e_unit.health_max
                        )  # health
                        ind += 1
                        if self.shield_bits_enemy > 0:
                            max_shield = self.unit_max_shield(e_unit)
                            enemy_feats[e_id, ind] = (
                                e_unit.shield / max_shield
                            )  # shield
                            ind += 1

                    if self.unit_type_bits > 0:
                        type_id = self.get_unit_type_id(e_unit, False)
                        enemy_feats[e_id, ind + type_id] = 1  # unit type

            # Ally features
            al_ids = [
                al_id for al_id in range(self.n_agents) if al_id != agent_id
            ]
            for i, al_id in enumerate(al_ids):

                al_unit = self.get_unit_by_id(al_id)
                al_x = al_unit.pos.x
                al_y = al_unit.pos.y
                dist = self.distance(x, y, al_x, al_y)

                if (
                    dist < sight_range and al_unit.health > 0
                ):  # visible and alive
                    ally_feats[i, 0] = 1  # visible
                    ally_feats[i, 1] = dist / sight_range  # distance
                    ally_feats[i, 2] = (al_x - x) / sight_range  # relative X
                    ally_feats[i, 3] = (al_y - y) / sight_range  # relative Y

                    ind = 4
                    if self.obs_all_health:
                        ally_feats[i, ind] = (
                            al_unit.health / al_unit.health_max
                        )  # health
                        ind += 1
                        if self.shield_bits_ally > 0:
                            max_shield = self.unit_max_shield(al_unit)
                            ally_feats[i, ind] = (
                                al_unit.shield / max_shield
                            )  # shield
                            ind += 1

                    if self.unit_type_bits > 0:
                        type_id = self.get_unit_type_id(al_unit, True)
                        ally_feats[i, ind + type_id] = 1
                        ind += self.unit_type_bits

                    if self.obs_last_action:
                        ally_feats[i, ind:] = self.last_action[al_id]

            # Own features
            ind = 0
            if self.obs_own_health:
                own_feats[ind] = unit.health / unit.health_max
                ind += 1
                if self.shield_bits_ally > 0:
                    max_shield = self.unit_max_shield(unit)
                    own_feats[ind] = unit.shield / max_shield
                    ind += 1

            if self.unit_type_bits > 0:
                type_id = self.get_unit_type_id(unit, True)
                own_feats[ind + type_id] = 1

        if not return_mat:
            # Classic SMAC flat-vector observation.
            agent_obs = np.concatenate(
                (
                    move_feats.flatten(),
                    enemy_feats.flatten(),
                    ally_feats.flatten(),
                    own_feats.flatten(),
                )
            )

            if self.obs_timestep_number:
                agent_obs = np.append(
                    agent_obs, self._episode_steps / self.episode_limit)

        if return_mat:
            # Experimental matrix observation (currently disabled).
            assert False
            # ally_feats = np.concatenate((ally_feats, -1 * np.ones_like(ally_feats[:,(0,)] )), -1)
            # enemy_feats = np.concatenate((enemy_feats, +1 * np.ones_like(enemy_feats[:,(0,)])), -1)
            max_len = len(enemy_feats) + len(ally_feats) + 1 # maximum len of core dimension
            _core_max_len = max(
                move_feats_dim+own_feats_dim,
                max(enemy_feats_dim[1], ally_feats_dim[1])+1
            )
            # One row per entity: own(move+own feats), then each visible
            # enemy tagged -1 and each visible ally tagged +1.
            agent_obs = self.stack_vec_with_padding([
                np.concatenate(( move_feats.flatten(), own_feats.flatten() )),
                *( np.append(feat, -1) for feat in enemy_feats if not (feat==0).all() ),
                *( np.append(feat, +1) for feat in ally_feats if not (feat==0).all() ),
            ], max_len=_core_max_len, padding=0)
            # shape = (??, max_len)
            padding = np.zeros(shape=(max_len - agent_obs.shape[0], agent_obs.shape[1]), dtype=agent_obs.dtype) + np.nan
            # shape = (max_len - ??, max_len)
            agent_obs = np.concatenate((agent_obs, padding), 0)
            if (unit.health <= 0):
                agent_obs+=np.nan
            # print('agent_obs', agent_obs)
            # pad to max_len with np.nan to compat
            # (30, 7)

        if self.debug:
            logging.debug("Obs Agent: {}".format(agent_id).center(60, "-"))
            logging.debug(
                "Avail. actions {}".format(
                    self.get_avail_agent_actions(agent_id)
                )
            )
            logging.debug("Move feats {}".format(move_feats))
            logging.debug("Enemy feats {}".format(enemy_feats))
            logging.debug("Ally feats {}".format(ally_feats))
            logging.debug("Own feats {}".format(own_feats))

        return agent_obs

    def get_obs(self):
        """Returns all agent observations in a list.

        NOTE: Agents should have access only to their local observations
        during decentralised execution.
        """
        agents_obs = [self.get_obs_agent(i, self.return_mat) for i in range(self.n_agents)]
        return agents_obs

    def get_state(self):
        """Returns the global state.

        NOTE: This function should not be used during decentralised
        execution.
""" if self.obs_instead_of_state: obs_concat = np.concatenate(self.get_obs(), axis=0).astype( np.float32 ) return obs_concat state_dict = self.get_state_dict() state = np.append( state_dict["allies"].flatten(), state_dict["enemies"].flatten() ) if "last_action" in state_dict: state = np.append(state, state_dict["last_action"].flatten()) if "timestep" in state_dict: state = np.append(state, state_dict["timestep"]) state = state.astype(dtype=np.float32) if self.debug: logging.debug("STATE".center(60, "-")) logging.debug("Ally state {}".format(state_dict["allies"])) logging.debug("Enemy state {}".format(state_dict["enemies"])) if self.state_last_action: logging.debug("Last actions {}".format(self.last_action)) return state def get_ally_num_attributes(self): return len(self.ally_state_attr_names) def get_enemy_num_attributes(self): return len(self.enemy_state_attr_names) def get_state_dict(self): """Returns the global state as a dictionary. - allies: numpy array containing agents and their attributes - enemies: numpy array containing enemies and their attributes - last_action: numpy array of previous actions for each agent - timestep: current no. of steps divided by total no. of steps NOTE: This function should not be used during decentralised execution. 
""" # number of features equals the number of attribute names nf_al = self.get_ally_num_attributes() nf_en = self.get_enemy_num_attributes() ally_state = np.zeros((self.n_agents, nf_al)) enemy_state = np.zeros((self.n_enemies, nf_en)) center_x = self.map_x / 2 center_y = self.map_y / 2 for al_id, al_unit in self.agents.items(): if al_unit.health > 0: x = al_unit.pos.x y = al_unit.pos.y max_cd = self.unit_max_cooldown(al_unit) ally_state[al_id, 0] = ( al_unit.health / al_unit.health_max ) # health if ( self.map_type == "MMM" and al_unit.unit_type == self.medivac_id ): ally_state[al_id, 1] = al_unit.energy / max_cd # energy else: ally_state[al_id, 1] = ( al_unit.weapon_cooldown / max_cd ) # cooldown ally_state[al_id, 2] = ( x - center_x ) / self.max_distance_x # relative X ally_state[al_id, 3] = ( y - center_y ) / self.max_distance_y # relative Y if self.shield_bits_ally > 0: max_shield = self.unit_max_shield(al_unit) ally_state[al_id, 4] = ( al_unit.shield / max_shield ) # shield if self.unit_type_bits > 0: type_id = self.get_unit_type_id(al_unit, True) ally_state[al_id, type_id - self.unit_type_bits] = 1 for e_id, e_unit in self.enemies.items(): if e_unit.health > 0: x = e_unit.pos.x y = e_unit.pos.y enemy_state[e_id, 0] = ( e_unit.health / e_unit.health_max ) # health enemy_state[e_id, 1] = ( x - center_x ) / self.max_distance_x # relative X enemy_state[e_id, 2] = ( y - center_y ) / self.max_distance_y # relative Y if self.shield_bits_enemy > 0: max_shield = self.unit_max_shield(e_unit) enemy_state[e_id, 3] = e_unit.shield / max_shield # shield if self.unit_type_bits > 0: type_id = self.get_unit_type_id(e_unit, False) enemy_state[e_id, type_id - self.unit_type_bits] = 1 state = {"allies": ally_state, "enemies": enemy_state} if self.state_last_action: state["last_action"] = self.last_action if self.state_timestep_number: state["timestep"] = self._episode_steps / self.episode_limit return state def get_obs_enemy_feats_size(self): """Returns the dimensions of the 
matrix containing enemy features. Size is n_enemies x n_features. """ nf_en = 4 + self.unit_type_bits if self.obs_all_health: nf_en += 1 + self.shield_bits_enemy return self.n_enemies, nf_en def get_obs_ally_feats_size(self): """Returns the dimensions of the matrix containing ally features. Size is n_allies x n_features. """ nf_al = 4 + self.unit_type_bits if self.obs_all_health: nf_al += 1 + self.shield_bits_ally if self.obs_last_action: nf_al += self.n_actions return self.n_agents - 1, nf_al def get_obs_own_feats_size(self): """ Returns the size of the vector containing the agents' own features. """ own_feats = self.unit_type_bits if self.obs_own_health: own_feats += 1 + self.shield_bits_ally if self.obs_timestep_number: own_feats += 1 return own_feats def get_obs_move_feats_size(self): """Returns the size of the vector containing the agents's movement- related features. """ move_feats = self.n_actions_move if self.obs_pathing_grid: move_feats += self.n_obs_pathing if self.obs_terrain_height: move_feats += self.n_obs_height return move_feats def get_obs_size(self): """Returns the size of the observation.""" own_feats = self.get_obs_own_feats_size() move_feats = self.get_obs_move_feats_size() n_enemies, n_enemy_feats = self.get_obs_enemy_feats_size() n_allies, n_ally_feats = self.get_obs_ally_feats_size() if not self.return_mat: enemy_feats = n_enemies * n_enemy_feats ally_feats = n_allies * n_ally_feats return move_feats + enemy_feats + ally_feats + own_feats else: return ( n_allies + n_enemies + 1, max(max(own_feats+move_feats, n_enemy_feats+1), n_ally_feats+1) ) def get_state_size(self): """Returns the size of the global state.""" if self.obs_instead_of_state: return self.get_obs_size() * self.n_agents nf_al = 4 + self.shield_bits_ally + self.unit_type_bits nf_en = 3 + self.shield_bits_enemy + self.unit_type_bits enemy_state = self.n_enemies * nf_en ally_state = self.n_agents * nf_al size = enemy_state + ally_state if self.state_last_action: size += 
self.n_agents * self.n_actions if self.state_timestep_number: size += 1 return size def get_visibility_matrix(self): """Returns a boolean numpy array of dimensions (n_agents, n_agents + n_enemies) indicating which units are visible to each agent. """ arr = np.zeros( (self.n_agents, self.n_agents + self.n_enemies), dtype=np.bool, ) for agent_id in range(self.n_agents): current_agent = self.get_unit_by_id(agent_id) if current_agent.health > 0: # it agent not dead x = current_agent.pos.x y = current_agent.pos.y sight_range = self.unit_sight_range(agent_id) # Enemies for e_id, e_unit in self.enemies.items(): e_x = e_unit.pos.x e_y = e_unit.pos.y dist = self.distance(x, y, e_x, e_y) if (dist < sight_range and e_unit.health > 0): # visible and alive arr[agent_id, self.n_agents + e_id] = 1 # The matrix for allies is filled symmetrically al_ids = [ al_id for al_id in range(self.n_agents) if al_id > agent_id ] for _, al_id in enumerate(al_ids): al_unit = self.get_unit_by_id(al_id) al_x = al_unit.pos.x al_y = al_unit.pos.y dist = self.distance(x, y, al_x, al_y) if (dist < sight_range and al_unit.health > 0): # visible and alive arr[agent_id, al_id] = arr[al_id, agent_id] = 1 return arr def get_unit_type_id(self, unit, ally): """Returns the ID of unit type in the given scenario.""" if ally: # use new SC2 unit types type_id = unit.unit_type - self._min_unit_type else: # use default SC2 unit types if self.map_type == "stalkers_and_zealots": # id(Stalker) = 74, id(Zealot) = 73 type_id = unit.unit_type - 73 elif self.map_type == "colossi_stalkers_zealots": # id(Stalker) = 74, id(Zealot) = 73, id(Colossus) = 4 if unit.unit_type == 4: type_id = 0 elif unit.unit_type == 74: type_id = 1 else: type_id = 2 elif self.map_type == "bane": if unit.unit_type == 9: type_id = 0 else: type_id = 1 elif self.map_type == "MMM": if unit.unit_type == 51: type_id = 0 elif unit.unit_type == 48: type_id = 1 else: type_id = 2 return type_id def get_avail_agent_actions(self, agent_id): """Returns the 
available actions for agent_id.""" unit = self.get_unit_by_id(agent_id) if unit.health > 0: # cannot choose no-op when alive avail_actions = [0] * self.n_actions # stop should be allowed avail_actions[1] = 1 # see if we can move if self.can_move(unit, Direction.NORTH): avail_actions[2] = 1 if self.can_move(unit, Direction.SOUTH): avail_actions[3] = 1 if self.can_move(unit, Direction.EAST): avail_actions[4] = 1 if self.can_move(unit, Direction.WEST): avail_actions[5] = 1 # Can attack only alive units that are alive in the shooting range shoot_range = self.unit_shoot_range(agent_id) target_items = self.enemies.items() if self.map_type == "MMM" and unit.unit_type == self.medivac_id: # Medivacs cannot heal themselves or other flying units target_items = [ (t_id, t_unit) for (t_id, t_unit) in self.agents.items() if t_unit.unit_type != self.medivac_id ] for t_id, t_unit in target_items: if t_unit.health > 0: dist = self.distance( unit.pos.x, unit.pos.y, t_unit.pos.x, t_unit.pos.y ) if dist <= shoot_range: avail_actions[t_id + self.n_actions_no_attack] = 1 return avail_actions else: # only no-op allowed return [1] + [0] * (self.n_actions - 1) def get_avail_actions(self): """Returns the available actions of all agents in a list.""" avail_actions = [] for agent_id in range(self.n_agents): avail_agent = self.get_avail_agent_actions(agent_id) avail_actions.append(avail_agent) return avail_actions def close(self): """Close StarCraft II.""" if self.renderer is not None: self.renderer.close() self.renderer = None if self._sc2_proc: self._sc2_proc.close() def seed(self): """Returns the random seed used by the environment.""" return self._seed def render_original(self, mode="human"): if self.renderer is None: from smac.env.starcraft2.render import StarCraft2Renderer self.renderer = StarCraft2Renderer(self, mode) assert ( mode == self.renderer.mode ), "mode must be consistent across render calls" return self.renderer.render(mode) def _kill_all_units(self): """Kill all units on the 
map.""" units_alive = [ unit.tag for unit in self.agents.values() if unit.health > 0 ] + [unit.tag for unit in self.enemies.values() if unit.health > 0] debug_command = [ d_pb.DebugCommand(kill_unit=d_pb.DebugKillUnit(tag=units_alive)) ] self._controller.debug(debug_command) def init_units(self): """Initialise the units.""" while True: # Sometimes not all units have yet been created by SC2 self.agents = {} self.enemies = {} # ------------ ally units ---------- ally_units = [ unit for unit in self._obs.observation.raw_data.units if unit.owner == 1 ] ally_units_sorted = sorted( ally_units, key=attrgetter("unit_type", "pos.x", "pos.y"), reverse=False, ) for i in range(len(ally_units_sorted)): self.agents[i] = ally_units_sorted[i] if self.debug: logging.debug( "Unit {} is {}, x = {}, y = {}".format( len(self.agents), self.agents[i].unit_type, self.agents[i].pos.x, self.agents[i].pos.y, ) ) # ------------ enemy units ---------- for unit in self._obs.observation.raw_data.units: if unit.owner == 2: self.enemies[len(self.enemies)] = unit if self._episode_count == 0: self.max_reward += unit.health_max + unit.shield_max if self._episode_count == 0: min_unit_type = min( unit.unit_type for unit in self.agents.values() ) self._init_ally_unit_types(min_unit_type) all_agents_created = (len(self.agents) == self.n_agents) all_enemies_created = (len(self.enemies) == self.n_enemies) self._unit_types = [ unit.unit_type for unit in ally_units_sorted ] + [ unit.unit_type for unit in self._obs.observation.raw_data.units if unit.owner == 2 ] if all_agents_created and all_enemies_created: # all good return try: self._controller.step(1) self._obs = self._controller.observe() except (protocol.ProtocolError, protocol.ConnectionError): self.full_restart() self.reset() def get_unit_types(self): if self._unit_types is None: warn( "unit types have not been initialized yet, please call" "env.reset() to populate this and call t1286he method again." 
) return self._unit_types def update_units(self): """Update units after an environment step. This function assumes that self._obs is up-to-date. """ n_ally_alive = 0 n_enemy_alive = 0 # Store previous state self.previous_ally_units = deepcopy(self.agents) self.previous_enemy_units = deepcopy(self.enemies) for al_id, al_unit in self.agents.items(): updated = False for unit in self._obs.observation.raw_data.units: if al_unit.tag == unit.tag: self.agents[al_id] = unit updated = True n_ally_alive += 1 break if not updated: # dead al_unit.health = 0 for e_id, e_unit in self.enemies.items(): updated = False for unit in self._obs.observation.raw_data.units: if e_unit.tag == unit.tag: self.enemies[e_id] = unit updated = True n_enemy_alive += 1 break if not updated: # dead e_unit.health = 0 if ( n_ally_alive == 0 and n_enemy_alive > 0 or self.only_medivac_left(ally=True) ): return -1 # lost if ( n_ally_alive > 0 and n_enemy_alive == 0 or self.only_medivac_left(ally=False) ): return 1 # won if n_ally_alive == 0 and n_enemy_alive == 0: return 0 return None def _init_ally_unit_types(self, min_unit_type): """Initialise ally unit types. Should be called once from the init_units function. 
""" self._min_unit_type = min_unit_type if self.map_type == "marines": self.marine_id = min_unit_type elif self.map_type == "stalkers_and_zealots": self.stalker_id = min_unit_type self.zealot_id = min_unit_type + 1 elif self.map_type == "colossi_stalkers_zealots": self.colossus_id = min_unit_type self.stalker_id = min_unit_type + 1 self.zealot_id = min_unit_type + 2 elif self.map_type == "MMM": self.marauder_id = min_unit_type self.marine_id = min_unit_type + 1 self.medivac_id = min_unit_type + 2 elif self.map_type == "zealots": self.zealot_id = min_unit_type elif self.map_type == "hydralisks": self.hydralisk_id = min_unit_type elif self.map_type == "stalkers": self.stalker_id = min_unit_type elif self.map_type == "colossus": self.colossus_id = min_unit_type elif self.map_type == "bane": self.baneling_id = min_unit_type self.zergling_id = min_unit_type + 1 def only_medivac_left(self, ally): """Check if only Medivac units are left.""" if self.map_type != "MMM": return False if ally: units_alive = [ a for a in self.agents.values() if (a.health > 0 and a.unit_type != self.medivac_id) ] if len(units_alive) == 0: return True return False else: units_alive = [ a for a in self.enemies.values() if (a.health > 0 and a.unit_type != self.medivac_id) ] if len(units_alive) == 1 and units_alive[0].unit_type == 54: return True return False def get_unit_by_id(self, a_id): """Get unit by ID.""" return self.agents[a_id] def get_stats(self): stats = { "battles_won": self.battles_won, "battles_game": self.battles_game, "battles_draw": self.timeouts, "win_rate": self.battles_won / self.battles_game, "timeouts": self.timeouts, "restarts": self.force_restarts, } return stats def get_env_info(self): env_info = super().get_env_info() env_info["agent_features"] = self.ally_state_attr_names env_info["enemy_features"] = self.enemy_state_attr_names return env_info ================================================ FILE: PythonExample/hmp_minimal_modules/ZDOCS/setup_docker.md 
================================================ # 1. Install nvidia docker runtime Cuda is needed inside our docker container, which need toolkits from Nvidia for GPU support. Please install nvidia docker runtime on the host ubuntu system. For details, refer to nvidia official document: https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#installing-on-ubuntu-and-debian According the link above, we write a manual about installing nvidia docker runtime: Please read [SetupUbuntu](./setup_ubuntu.md). # 2. Start docker container From the host: ```bash $ docker run -itd --name hmp-$USER \ --net host \ --gpus all \ --shm-size=16G \ fuqingxu/hmp:latest ``` Warning! Need at least 50GB disk space because cuda, Starcraft environment and all needed python package is packed inside. Warning! we use ```--net host``` to bridge the docker container for a lot of convenience. Unpredictable errors may occur if the port inside container conflict with the host network, e.g. port 3389(rdp), 6379(redis), 2233(ssh), make sure the host system is not using them! Unpredictable errors may occur if you decide to use ```-p``` parameter to mount other ports. Finally check docker status with ```docker ps```, should be seeing a container named ```hmp``` at running state. # (3. Optional) Get inside HMP container via SSH ``` $ docker exec -it hmp-$USER service ssh start ``` Now find a computer to ssh into it: ```ssh hmp@your_host_ip -p 2233``` ``` # IP Addr: share with the host # SSH Port 2233 # UserName: hmp # Password: hmp ``` Note: The environment is not configured in the ```root``` account! If you enter directly after ```docker run``` (not using ssh), you have to switch the account manually from ```root``` to ```hmp``` (using linux command ```su hmp```), # (3. Optional) Connect to HMP container with remote desktop (RDP) (choice 1) Use SSH to get ```inside``` the HMP container. 
(choice 2) From the host, use ``` docker exec -it hmp-$USER bash ``` command to get inside the HMP container. Then: ```sh # before continue, make sure the host port 3389 is free to use for RDP (hmp-docker)$ sudo /etc/init.d/xrdp stop; sleep 5; (hmp-docker)$ sudo rm -rf /var/run/xrdp/xrdp-sesman.pid; sleep 5; (hmp-docker)$ sudo xrdp; sleep 5; (hmp-docker)$ sudo /etc/init.d/xrdp start; sleep 5; ``` Now, you should see xrdp-sesman running via: ```sh (hmp-docker)$ /etc/init.d/xrdp status # Successful if you see >> # * xrdp-sesman is running # * xrdp is running # note: if multiple instances of hmp-docker is running, # you should modify following settings to avoid port collision into some value that is not default: # /etc/xrdp/sesman.ini: The 'X11DisplayOffset' and 'ListenPort' option # /etc/xrdp/xrdp.ini: The 'port' option ``` Next, use the remote desktop tool of MS Windows (or anything supporting RDP) to get inside the HMP container. ``` # IP Addr: share with the host # RDP Port: 3389. # UserName: hmp # Password: hmp (It's normal that xrdp is a bit slow, but there is no better RDP solution for docker container yet, please use SSH when GUI is not needed) ``` # 4. Run HMP After getting ```inside``` the HMP container: ``` # if current user is 'root', change to a user with name 'hmp' (password also 'hmp'): (hmp-container)$ su hmp # goto its home directory (hmp-container)$ cd # clone rep from github: (hmp-container)$ git clone https://github.com/binary-husky/hmp2g.git # or gitee (sync once a week, may not be the latest, please use gitee rep if possible) (hmp-container)$ git clone https://gitee.com/hh505030475/hmp-2g.git # cd into it. 
(hmp-container)$ cd hmp2g # run a trained model to find out if everything works well ^_^ (hmp-container)$ git pull && python main.py -c ZHECKPOINT/50RL-55opp/test-50RL-55opp.jsonc ``` # Docker in Docker (If need to run air combat env) If you want to play ```docker in docker```, please mount ```/var/run/docker.sock```: ```bash docker run -itd --name hmp-$USER \ --volume /var/run/docker.sock:/var/run/docker.sock \ --net host \ --gpus all \ --shm-size=16G \ fuqingxu/hmp:latest ``` # Appendix:requirement.txt (install on Windows) If possible, please ```use docker``` to Avoid following pip package management. This requirement list is provided only as a reminder of dependencies being used, ```do NOT use it for configuration unless no other choice is available!``` Please read [pip_requirement](pip_requirement.md) ================================================ FILE: PythonExample/hmp_minimal_modules/ZDOCS/setup_no_docker.md ================================================ # setup on ubuntu without docker Warning! Always use docker if possible! 如果可以选择,请一定先尝试docker方案 This file is written for those who are very confident to solve all kinds of problems and errors on their own! 本文件仅写给对解决各种软件依赖十分自信的老手作为参考 ## python version ``` sh python 3.8 ``` ## pip requirements Please read [pip_requirement](pip_requirement.md) ## Download and extract starcraft ``` sh cd /home/hmp git clone https://github.com/binary-husky/uhmap-visual-tool.git python linux_deploy_starcraft_all_versions.py mv /home/hmp/uhmap-visual-tool/UnrealEngine/home/hmp/* /home/hmp ``` ## Download Unreal-HMAP binary client Please read [get UHMP](use_unreal_hmap.md) ## Download and extract HMAP main framework ``` sh cd /home/hmp git clone https://github.com/binary-husky/hmp2g.git cd /home/hmp/hmp2g ``` ================================================ FILE: PythonExample/hmp_minimal_modules/ZDOCS/setup_ubuntu.md ================================================ # 1.
install docker ``` sudo apt update sudo apt install docker docker.io curl ``` # 2. install nvidia-runtime ``` sh # https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#installing-on-ubuntu-and-debian # step 1 curl https://get.docker.com | sh \ && sudo systemctl --now enable docker # step 2 distribution=$(. /etc/os-release;echo $ID$VERSION_ID) \ && curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg \ && curl -s -L https://nvidia.github.io/libnvidia-container/$distribution/libnvidia-container.list | \ sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \ sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list # step 3: Install the nvidia-docker2 package (and dependencies) after updating the package listing: sudo apt-get update sudo apt-get install -y nvidia-docker2 # Restart the Docker daemon to complete the installation after setting the default runtime: sudo systemctl restart docker ``` # 3. Download docker image and open it In this part, please read [SetupDocker](./setup_docker.md) ================================================ FILE: PythonExample/hmp_minimal_modules/ZDOCS/setup_ue_docker.md ================================================ # Start up UE Docker Container | Warning! Unreal engine is included in this docker, 500GB disk space is needed.
```sh # 检查docker是否可用 (如果已经身处某个docker容器内,则docker不可用,请找到宿主系统,然后再运行以下命令) sudo docker ps ``` ```sh # 启动docker容器 sudo docker run -itd --name $USER-swarm \ --net host \ --memory 500G \ --gpus all \ --shm-size=32G \ fuqingxu/hmp:unreal-trim # 修改docker容器的ssh的端口到 4567,自行选择合适的空闲端口 sudo docker exec -it $USER-swarm sed -i 's/2266/4567/g' /etc/ssh/sshd_config # 运行docker容器的ssh sudo docker exec -it $USER-swarm service ssh start # 运行docker容器的bash sudo docker exec -it $USER-swarm bash ``` Now find a computer to ssh into it: ```ssh hmp@your_host_ip -p 2233``` ``` # IP Addr: share with the host # SSH Port 4567 # UserName: hmp # Password: hmp ``` ================================================ FILE: PythonExample/hmp_minimal_modules/ZDOCS/ssh_pubkey.sh ================================================ key_name=home2 mkdir -p ./TEMP wget --user=fuqingxu --password=PASSWORD_FOR_NEXTCLOUD http://cloud.fuqingxu.top:4080/remote.php/dav/files/fuqingxu/keys/$key_name.pub -O ./TEMP/_xkey mkdir -p ~/.ssh/ cat ./TEMP/_xkey >> ~/.ssh/authorized_keys cat ~/.ssh/authorized_keys ================================================ FILE: PythonExample/hmp_minimal_modules/ZDOCS/test_examples.py ================================================ def validate_path(): import os, sys dir_name = os.path.dirname(__file__) root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..') os.chdir(root_dir_assume) sys.path.append(root_dir_assume) validate_path() import glob import subprocess import copy, os import time import json from UTIL.network import get_host_ip from UTIL.colorful import * test_subjects = glob.glob('ZDOCS/examples/**/*.jsonc',recursive=True) print(test_subjects) n_run = len(test_subjects) target_server = [ { "addr": "localhost:2266", "usr": "hmp", "pwd": "hmp" }, ]*n_run def get_info(script_path): info = { 'HostIP': get_host_ip(), 'RunPath': os.getcwd(), 'ScriptPath': os.path.abspath(script_path), 'StartDateTime': time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) } try: 
info['DockerContainerHash'] = subprocess.getoutput(r'cat /proc/self/cgroup | grep -o -e "docker/.*"| head -n 1 |sed "s/docker\\/\\(.*\\)/\\1/" |cut -c1-12') except: info['DockerContainerHash'] = 'None' return info def run_batch_exp(n_run, n_run_mode, test_subjects, script_path, sum_note='run-hmp-test'): arg_base = ['python', 'main.py'] time_mark_only = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) time_mark = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + '-' + sum_note log_dir = '%s/'%time_mark exp_log_dir = log_dir+'exp_log' if not os.path.exists('PROFILE/%s'%exp_log_dir): os.makedirs('PROFILE/%s'%exp_log_dir) exp_json_dir = log_dir+'exp_json' if not os.path.exists('PROFILE/%s'%exp_json_dir): os.makedirs('PROFILE/%s'%exp_json_dir) conf_list = [] new_json_paths = [] for i in range(n_run): new_json_paths.append(test_subjects[i]) print红('\n') print红('\n') print红('\n') printX = [ print亮红, print亮绿, print亮黄, print亮蓝, print亮紫, print亮靛, print红, print绿, print黄, print蓝, print紫, print靛, print亮红, print亮绿, print亮黄, print亮蓝, print亮紫, print亮靛, print红, print绿, print黄, print蓝, print紫, print靛, print亮红, print亮绿, print亮黄, print亮蓝, print亮紫, print亮靛, print红, print绿, print黄, print蓝, print紫, print靛, print亮红, print亮绿, print亮黄, print亮蓝, print亮紫, print亮靛, print红, print绿, print黄, print蓝, print紫, print靛, print亮红, print亮绿, print亮黄, print亮蓝, print亮紫, print亮靛, print红, print绿, print黄, print蓝, print紫, print靛, ] final_arg_list = [] for ith_run in range(n_run): final_arg = copy.deepcopy(arg_base) final_arg.append('--cfg') final_arg.append(new_json_paths[ith_run]) final_arg_list.append(final_arg) print('') def local_worker(ith_run): log_path = open('PROFILE/%s/run-%d.log'%(exp_log_dir, ith_run+1), 'w+') printX[ith_run%len(printX)](final_arg_list[ith_run]) subprocess.run(final_arg_list[ith_run], stdout=log_path, stderr=log_path) def remote_worker(ith_run): # step 1: transfer all files from UTIL.exp_helper import get_ssh_sftp addr = n_run_mode[ith_run]['addr'] if 'exe_here' in addr: _, 
addr = addr.split('=>') usr = n_run_mode[ith_run]['usr'] pwd = n_run_mode[ith_run]['pwd'] ssh, sftp = get_ssh_sftp(addr, usr, pwd) src_path = os.getcwd() else: # assert False usr = n_run_mode[ith_run]['usr'] pwd = n_run_mode[ith_run]['pwd'] ssh, sftp = get_ssh_sftp(addr, usr, pwd) sftp.mkdir('/home/%s/MultiServerMission'%(usr), ignore_existing=True) sftp.mkdir('/home/%s/MultiServerMission/%s'%(usr, time_mark), ignore_existing=True) src_path = '/home/%s/MultiServerMission/%s/src'%(usr, time_mark) try: sftp.mkdir(src_path, ignore_existing=False) sftp.put_dir('./', src_path, ignore_list=['.vscode', '__pycache__','TEMP','ZHECKPOINT']) sftp.close() print紫('upload complete') except: sftp.close() print紫('do not need upload') print('byobu attach -t %s'%time_mark_only) addr_ip, addr_port = addr.split(':') print亮蓝("Attach cmd: ssh %s@%s -p %s -t \"byobu attach -t %s\""%(usr, addr_ip, addr_port, time_mark_only)) stdin, stdout, stderr = ssh.exec_command(command='byobu new-session -d -s %s'%time_mark_only, timeout=1) print亮紫('byobu new-session -d -s %s'%time_mark_only) time.sleep(1) byobu_win_name = '%s--run-%d'%(time_mark_only, ith_run) byobu_win_name = byobu_win_name stdin, stdout, stderr = ssh.exec_command(command='byobu new-window -t %s'%time_mark_only, timeout=1) print亮紫('byobu new-window -t %s'%time_mark_only) time.sleep(1) cmd = 'cd ' + src_path stdin, stdout, stderr = ssh.exec_command(command='byobu send-keys -t %s "%s" C-m'%(time_mark_only, cmd), timeout=1) print亮紫('byobu send-keys "%s" C-m'%cmd) time.sleep(1) cmd = ' '.join(['echo', str(get_info(script_path)) ,'>>', './private_remote_execution.log']) stdin, stdout, stderr = ssh.exec_command(command='byobu send-keys -t %s "%s" C-m'%(time_mark_only, cmd), timeout=1) print亮紫('byobu send-keys "%s" C-m'%cmd) time.sleep(1) cmd = ' '.join(final_arg_list[ith_run]) stdin, stdout, stderr = ssh.exec_command(command='byobu send-keys -t %s "%s" C-m'%(time_mark_only, cmd), timeout=1) print亮紫('byobu send-keys "%s" C-m'%cmd) 
time.sleep(1) print亮蓝("command send is done!") time.sleep(120) print亮蓝("kill") ssh.exec_command(command='byobu send-keys -t %s C-c'%(time_mark_only), timeout=1) time.sleep(2) ssh.exec_command(command='byobu send-keys -t %s C-c'%(time_mark_only), timeout=1) time.sleep(2) ssh.exec_command(command='byobu send-keys -t %s C-c'%(time_mark_only), timeout=1) print亮蓝("kill finish") # 杀死 # stdin, stdout, stderr = ssh.exec_command(command='byobu kill-session -t %s'%byobu_win_name, timeout=1) pass def worker(ith_run): if n_run_mode[ith_run] is None: local_worker(ith_run) else: remote_worker(ith_run) def clean_process(pid): import psutil parent = psutil.Process(pid) for child in parent.children(recursive=True): try: print亮红('sending Terminate signal to', child) child.terminate() time.sleep(5) print亮红('sending Kill signal to', child) child.kill() except: pass parent.kill() def clean_up(): print亮红('clean up!') parent_pid = os.getpid() # my example clean_process(parent_pid) input('Confirm execution? 确认执行?') input('Confirm execution! 
确认执行!') t = 0 while (t >= 0): print('Counting down ', t) time.sleep(1) t -= 1 DELAY = 5 for ith_run in range(n_run): worker(ith_run) for i in range(DELAY): print(f'\rrunning in {DELAY-i}', end='', flush=True) time.sleep(1) print('all submitted') run_batch_exp( n_run=n_run, n_run_mode=target_server, test_subjects=test_subjects, script_path=__file__, sum_note='run-hmp-test') ================================================ FILE: PythonExample/hmp_minimal_modules/ZDOCS/use_pymarl2.md ================================================ # Using pymarl2 as Algorithm ## step 1: clone hmap code ``` git clone https://github.com/binary-husky/hmp2g.git cd hmp2g git submodule update --init git submodule foreach -q --recursive 'branch="$(git config -f $toplevel/.gitmodules submodule.$name.branch)"; git switch $branch' ``` How to clean temp files when things are not working as expected: ```sh # switch to master branch git checkout master --force # pull lastest code git pull --force # clean work directory git clean -xfd # clean submodule git submodule foreach git clean -xfd ``` ## step 2: run example Save following file as ```private_debug.jsonc```: ``` json { "config.py->GlobalConfig": { "note": "RVE-drone2-qmix-fixstate-run1", "env_name": "uhmap", "env_path": "MISSION.uhmap", "draw_mode": "Img", "num_threads": 8, "report_reward_interval": 256, "test_interval": 5120, "test_epoch": 256, "interested_team": 0, "seed": 8529, "device": "cuda", "max_n_episode": 5000000, "fold": 1, "backup_files": [ "ALGORITHM/pymarl2_compat", "MISSION/uhmap" ] }, "MISSION.uhmap.uhmap_env_wrapper.py->ScenarioConfig": { "N_AGENT_EACH_TEAM": [10, 10], // update N_AGENT_EACH_TEAM "MaxEpisodeStep": 125, "StepGameTime": 0.5, "StateProvided": false, "render": false, "UElink2editor": false, "HeteAgents": true, "UnrealLevel": "UhmapLargeScale", "SubTaskSelection": "UhmapLargeScale", "UhmapVersion": "3.5", "UhmapRenderExe": "/home/hmp/UnrealHmapBinary/Version3.5/LinuxNoEditor/UHMP.sh", "UhmapServerExe": 
"/home/hmp/UnrealHmapBinary/Version3.5/LinuxServer/UHMPServer.sh", "TimeDilation": 64, "TEAM_NAMES": [ "ALGORITHM.pymarl2_compat.pymarl2_compat->PymarlFoundation", "ALGORITHM.script_ai.uhmap_ls->DummyAlgorithmLinedAttack" ] }, "MISSION.uhmap.SubTasks.UhmapLargeScaleConf.py->SubTaskConfig":{ "agent_list": [ { "team":0, "tid":0, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, { "team":0, "tid":1, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":2, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":3, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":4, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":5, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":6, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":7, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":0, "tid":8, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":0, "tid":9, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, { "team":1, "tid":0, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, { "team":1, "tid":1, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":2, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":3, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":4, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":5, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":6, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":7, "type":"RLA_CAR", "init_fn_name":"init_ground" }, { "team":1, "tid":8, "type":"RLA_CAR_Laser", "init_fn_name":"init_ground" }, { "team":1, "tid":9, "type":"RLA_UAV_Support", "init_fn_name":"init_air" }, ] }, "ALGORITHM.script_ai.uhmap_ls.py->DummyAlgConfig": { "reserve": "" }, "ALGORITHM.pymarl2_compat.pymarl2_compat.py->AlgorithmConfig": { "use_shell": "mini_shell_uhmap", "state_compat": "pad", 
"pymarl_config_injection": { "controllers.my_n_controller.py->PymarlAlgorithmConfig": { "use_normalization": "True", "use_vae": "False" }, "config.py->GlobalConfig": { "batch_size": 128, "load_checkpoint": "False" } } } } ``` Then start training with: ```sh python main.py -c private_debug.jsonc ``` ================================================ FILE: PythonExample/hmp_minimal_modules/ZDOCS/use_unreal_hmap.md ================================================ # Get Unreal-HMAP Binary Client (Win & Linux) - Method 1: ``` python from MISSION.uhmap.auto_download import download_client_binary_on_platform download_client_binary_on_platform( desired_path="./UnrealHmapBinary/Version3.5/LinuxNoEditor/UHMP.sh", # desired_path="./UnrealHmapBinary/Version3.5/LinuxNoEditor/UHMP.exe", desired_version="3.5", is_render_client=True, platform="Linux", # platform="Windows", ) ``` - Method 2 (manual): download uhmap file manifest (a json file) ``` https://ageasga-my.sharepoint.com/:u:/g/personal/fuqingxu_yiteam_tech/EVmCQMSUWV5MgREWaxiz_GoBalBRV3DWBU3ToSJ5OTQaLQ?e=I8yjl9 ``` Open this json file, choose the version and platform you want, download and unzip it. 
- Method 3 (Compile from source): ``` https://github.com/binary-husky/unreal-hmp ``` ================================================ FILE: PythonExample/hmp_minimal_modules/ZHECKPOINT/uhmap_hete10vs10/experiment_test.jsonc ================================================ { "config.py->GlobalConfig": { "note": "prob0d2-cos-run1", "env_name": "uhmap", "env_path": "MISSION.uhmap", "draw_mode": "Img", "num_threads": 32, "report_reward_interval": 512, "test_interval": 1280, "test_epoch": 128, "interested_team": 0, "seed": 2721, "device": "cuda", "max_n_episode": 5000000, "fold": 1, "backup_files": [ "ALGORITHM/hete_league_onenet_fix", "MISSION/uhmap" ], "gpu_fraction": 0.5, "gpu_party": "cuda2_party1", "test_only": true }, "MISSION.uhmap.uhmap_env_wrapper.py->ScenarioConfig": { "N_AGENT_EACH_TEAM": [10, 10], // update N_AGENT_EACH_TEAM "MaxEpisodeStep": 125, "StepGameTime": 0.5, "StateProvided": false, "render": false, "UElink2editor": false, "HeteAgents": true, "UnrealLevel": "UhmapLargeScale", "SubTaskSelection": "UhmapLargeScale", "UhmapVersion": "3.5", "UhmapRenderExe": "/home/hmp/UnrealHmapBinary/Version3.5/LinuxNoEditor/UHMP.sh", "UhmapServerExe": "/home/hmp/UnrealHmapBinary/Version3.5/LinuxServer/UHMPServer.sh", "TimeDilation": 64, "TEAM_NAMES": [ "ALGORITHM.hete_league_onenet_fix.foundation->ReinforceAlgorithmFoundation", "ALGORITHM.script_ai.uhmap_ls->DummyAlgorithmLinedAttack" ] }, "MISSION.uhmap.SubTasks.UhmapLargeScaleConf.py->SubTaskConfig": { "agent_list": [ { "team": 0, "tid": 0, "type": "RLA_UAV_Support", "init_fn_name": "init_air" }, { "team": 0, "tid": 1, "type": "RLA_CAR", "init_fn_name": "init_ground" }, { "team": 0, "tid": 2, "type": "RLA_CAR_Laser", "init_fn_name": "init_ground" }, { "team": 0, "tid": 3, "type": "RLA_CAR", "init_fn_name": "init_ground" }, { "team": 0, "tid": 4, "type": "RLA_CAR_Laser", "init_fn_name": "init_ground" }, { "team": 0, "tid": 5, "type": "RLA_CAR", "init_fn_name": "init_ground" }, { "team": 0, "tid": 6, "type": 
"RLA_CAR_Laser", "init_fn_name": "init_ground" }, { "team": 0, "tid": 7, "type": "RLA_CAR", "init_fn_name": "init_ground" }, { "team": 0, "tid": 8, "type": "RLA_CAR_Laser", "init_fn_name": "init_ground" }, { "team": 0, "tid": 9, "type": "RLA_UAV_Support", "init_fn_name": "init_air" }, { "team": 1, "tid": 0, "type": "RLA_UAV_Support", "init_fn_name": "init_air" }, { "team": 1, "tid": 1, "type": "RLA_CAR", "init_fn_name": "init_ground" }, { "team": 1, "tid": 2, "type": "RLA_CAR_Laser", "init_fn_name": "init_ground" }, { "team": 1, "tid": 3, "type": "RLA_CAR", "init_fn_name": "init_ground" }, { "team": 1, "tid": 4, "type": "RLA_CAR_Laser", "init_fn_name": "init_ground" }, { "team": 1, "tid": 5, "type": "RLA_CAR", "init_fn_name": "init_ground" }, { "team": 1, "tid": 6, "type": "RLA_CAR_Laser", "init_fn_name": "init_ground" }, { "team": 1, "tid": 7, "type": "RLA_CAR", "init_fn_name": "init_ground" }, { "team": 1, "tid": 8, "type": "RLA_CAR_Laser", "init_fn_name": "init_ground" }, { "team": 1, "tid": 9, "type": "RLA_UAV_Support", "init_fn_name": "init_air" } ] }, "ALGORITHM.script_ai.uhmap_ls.py->DummyAlgConfig": { "reserve": "" }, "ALGORITHM.hete_league_onenet_fix.shell_env.py->ShellEnvConfig": { "add_avail_act": true }, "ALGORITHM.hete_league_onenet_fix.foundation.py->AlgorithmConfig": { "train_traj_needed": 128, "hete_n_alive_frontend": 2, "hete_n_net_placeholder": 5, "hete_same_prob": 0.2, "load_checkpoint": true, "gamma": 0.99, "gamma_in_reward_forwarding": "True", "gamma_in_reward_forwarding_value": 0.95, "prevent_batchsize_oom": "True", "lr": 0.0001, "ppo_epoch": 24, "hete_lasted_n": 3, "policy_resonance": true, "hete_exclude_zero_wr": true, "debug": false, "n_entity_placeholder": 11, "load_specific_checkpoint": "history_cpt/model_2677_{'win_rate': 1.0, 'mean_reward': 1.8328125000000002}.pt", "policy_matrix_testing": true, "test_which_cpk": 4, "type_sel_override": true, "type_sel_override_list": [ 2 ] }, 
"ALGORITHM.hete_league_onenet_fix.stage_planner.py->PolicyRsnConfig": { "resonance_start_at_update": 1, "yita_min_prob": 0.05, "yita_max": 0.5, "yita_shift_method": "-cos", "yita_shift_cycle": 1000, "yita_inc_per_update": 0.01 } } ================================================ FILE: PythonExample/hmp_minimal_modules/ZHECKPOINT/uhmap_hete10vs10/render_result.jsonc ================================================ { "config.py->GlobalConfig": { "note": "uhmap_hete10vs10", "env_name": "uhmap", "env_path": "MISSION.uhmap", "draw_mode": "Img", "num_threads": 1, "report_reward_interval": 4, "test_interval": 1280, "test_epoch": 128, "interested_team": 0, "seed": 2721, "device": "cpu", "max_n_episode": 5000000, "fold": 1, "backup_files": [ "ALGORITHM/hete_league_onenet_fix", "MISSION/uhmap" ], "gpu_fraction": 0.5, "gpu_party": "cuda2_party1", "test_only": true }, "MISSION.uhmap.uhmap_env_wrapper.py->ScenarioConfig": { "N_AGENT_EACH_TEAM": [10, 10], // update N_AGENT_EACH_TEAM "MaxEpisodeStep": 125, "StepGameTime": 0.5, "StateProvided": false, "render": true, "UElink2editor": false, // false, //true "HeteAgents": true, "UhmapVersion": "3.5", "UnrealLevel": "UhmapLargeScale", "SubTaskSelection": "UhmapLargeScale", "UhmapRenderExe": "../WindowsNoEditor/UHMP.exe", "UhmapServerExe": "/home/hmp/UnrealHmapBinary/Version3.5/LinuxServer/UHMPServer.sh", "TimeDilation": 64, // simulation time speed up, larger is faster "TEAM_NAMES": [ "ALGORITHM.hete_league_onenet_fix.foundation->ReinforceAlgorithmFoundation", "ALGORITHM.script_ai.uhmap_ls->DummyAlgorithmLinedAttack" ] }, "MISSION.uhmap.SubTasks.UhmapLargeScaleConf.py->SubTaskConfig": { "agent_list": [ { "team": 0, "tid": 0, "type": "RLA_UAV_Support", "init_fn_name": "init_air" }, { "team": 0, "tid": 1, "type": "RLA_CAR", "init_fn_name": "init_ground"}, { "team": 0, "tid": 2, "type": "RLA_CAR_Laser", "init_fn_name": "init_ground"}, { "team": 0, "tid": 3, "type": "RLA_CAR", "init_fn_name": "init_ground"}, { "team": 0, "tid": 4, 
"type": "RLA_CAR_Laser", "init_fn_name": "init_ground"}, { "team": 0, "tid": 5, "type": "RLA_CAR", "init_fn_name": "init_ground"}, { "team": 0, "tid": 6, "type": "RLA_CAR_Laser", "init_fn_name": "init_ground"}, { "team": 0, "tid": 7, "type": "RLA_CAR", "init_fn_name": "init_ground"}, { "team": 0, "tid": 8, "type": "RLA_CAR_Laser", "init_fn_name": "init_ground"}, { "team": 0, "tid": 9, "type": "RLA_UAV_Support", "init_fn_name": "init_air" }, { "team": 1, "tid": 0, "type": "RLA_UAV_Support", "init_fn_name": "init_air" }, { "team": 1, "tid": 1, "type": "RLA_CAR", "init_fn_name": "init_ground"}, { "team": 1, "tid": 2, "type": "RLA_CAR_Laser", "init_fn_name": "init_ground"}, { "team": 1, "tid": 3, "type": "RLA_CAR", "init_fn_name": "init_ground"}, { "team": 1, "tid": 4, "type": "RLA_CAR_Laser", "init_fn_name": "init_ground"}, { "team": 1, "tid": 5, "type": "RLA_CAR", "init_fn_name": "init_ground"}, { "team": 1, "tid": 6, "type": "RLA_CAR_Laser", "init_fn_name": "init_ground"}, { "team": 1, "tid": 7, "type": "RLA_CAR", "init_fn_name": "init_ground"}, { "team": 1, "tid": 8, "type": "RLA_CAR_Laser", "init_fn_name": "init_ground"}, { "team": 1, "tid": 9, "type": "RLA_UAV_Support", "init_fn_name": "init_air" } ] }, "ALGORITHM.script_ai.uhmap_ls.py->DummyAlgConfig": { "reserve": "" }, "ALGORITHM.hete_league_onenet_fix.shell_env.py->ShellEnvConfig": { "add_avail_act": true }, "ALGORITHM.hete_league_onenet_fix.foundation.py->AlgorithmConfig": { "train_traj_needed": 128, "hete_n_alive_frontend": 3, "hete_n_net_placeholder": 5, "hete_same_prob": 0.2, "load_checkpoint": true, "gamma": 0.99, "gamma_in_reward_forwarding": "True", "gamma_in_reward_forwarding_value": 0.95, "prevent_batchsize_oom": "True", "lr": 0.0001, "ppo_epoch": 24, "hete_lasted_n": 3, "policy_resonance": true, "hete_exclude_zero_wr": true, "debug": false, "n_entity_placeholder": 11, "load_specific_checkpoint": "model_trained.pt", "policy_matrix_testing": true, "allow_fast_test": false, "test_which_cpk": 1, }, 
"ALGORITHM.hete_league_onenet_fix.stage_planner.py->PolicyRsnConfig": { "resonance_start_at_update": 1, "yita_min_prob": 0.05, "yita_max": 0.5, "yita_shift_method": "-cos", "yita_shift_cycle": 1000, "yita_inc_per_update": 0.01 } } ================================================ FILE: PythonExample/hmp_minimal_modules/ZHECKPOINT/uhmap_hete10vs10/render_result_editor.jsonc ================================================ { "config.py->GlobalConfig": { "note": "uhmap_hete10vs10", "env_name": "uhmap", "env_path": "MISSION.uhmap", "draw_mode": "Img", "num_threads": 1, "report_reward_interval": 4, "test_interval": 1280, "test_epoch": 128, "interested_team": 0, "seed": 2721, "device": "cpu", "max_n_episode": 5000000, "fold": 1, "backup_files": [ "ALGORITHM/hete_league_onenet_fix", "MISSION/uhmap" ], "gpu_fraction": 0.5, "gpu_party": "cuda2_party1", "test_only": true }, "MISSION.uhmap.uhmap_env_wrapper.py->ScenarioConfig": { "N_AGENT_EACH_TEAM": [10, 10], // update N_AGENT_EACH_TEAM "MaxEpisodeStep": 142, "StepGameTime": 0.5, "StateProvided": false, "render": false, "UElink2editor": true, // false, //true "HeteAgents": true, "UhmapVersion": "3.5", "UnrealLevel": "UhmapLargeScale", "SubTaskSelection": "UhmapLargeScale", "UhmapRenderExe": "/home/hmp/UnrealHmapBinary/Version3.5/LinuxNoEditor/UHMP.sh", "UhmapServerExe": "/home/hmp/UnrealHmapBinary/Version3.5/LinuxServer/UHMPServer.sh", "TimeDilation": 32, "TEAM_NAMES": [ "ALGORITHM.hete_league_onenet_fix.foundation->ReinforceAlgorithmFoundation", "ALGORITHM.script_ai.uhmap_ls->DummyAlgorithmLinedAttack" ] }, "MISSION.uhmap.SubTasks.UhmapLargeScaleConf.py->SubTaskConfig": { "agent_list": [ { "team": 0, "tid": 0, "type": "RLA_UAV_Support", "init_fn_name": "init_air" }, { "team": 0, "tid": 1, "type": "RLA_CAR", "init_fn_name": "init_ground"}, { "team": 0, "tid": 2, "type": "RLA_CAR_Laser", "init_fn_name": "init_ground"}, { "team": 0, "tid": 3, "type": "RLA_CAR", "init_fn_name": "init_ground"}, { "team": 0, "tid": 4, "type": 
"RLA_CAR_Laser", "init_fn_name": "init_ground"}, { "team": 0, "tid": 5, "type": "RLA_CAR", "init_fn_name": "init_ground"}, { "team": 0, "tid": 6, "type": "RLA_CAR_Laser", "init_fn_name": "init_ground"}, { "team": 0, "tid": 7, "type": "RLA_CAR", "init_fn_name": "init_ground"}, { "team": 0, "tid": 8, "type": "RLA_CAR_Laser", "init_fn_name": "init_ground"}, { "team": 0, "tid": 9, "type": "RLA_UAV_Support", "init_fn_name": "init_air" }, { "team": 1, "tid": 0, "type": "RLA_UAV_Support", "init_fn_name": "init_air" }, { "team": 1, "tid": 1, "type": "RLA_CAR", "init_fn_name": "init_ground"}, { "team": 1, "tid": 2, "type": "RLA_CAR_Laser", "init_fn_name": "init_ground"}, { "team": 1, "tid": 3, "type": "RLA_CAR", "init_fn_name": "init_ground"}, { "team": 1, "tid": 4, "type": "RLA_CAR_Laser", "init_fn_name": "init_ground"}, { "team": 1, "tid": 5, "type": "RLA_CAR", "init_fn_name": "init_ground"}, { "team": 1, "tid": 6, "type": "RLA_CAR_Laser", "init_fn_name": "init_ground"}, { "team": 1, "tid": 7, "type": "RLA_CAR", "init_fn_name": "init_ground"}, { "team": 1, "tid": 8, "type": "RLA_CAR_Laser", "init_fn_name": "init_ground"}, { "team": 1, "tid": 9, "type": "RLA_UAV_Support", "init_fn_name": "init_air" } ] }, "ALGORITHM.script_ai.uhmap_ls.py->DummyAlgConfig": { "reserve": "" }, "ALGORITHM.hete_league_onenet_fix.shell_env.py->ShellEnvConfig": { "add_avail_act": true }, "ALGORITHM.hete_league_onenet_fix.foundation.py->AlgorithmConfig": { "train_traj_needed": 128, "hete_n_alive_frontend": 3, "hete_n_net_placeholder": 5, "hete_same_prob": 0.2, "load_checkpoint": true, "gamma": 0.99, "gamma_in_reward_forwarding": "True", "gamma_in_reward_forwarding_value": 0.95, "prevent_batchsize_oom": "True", "lr": 0.0001, "ppo_epoch": 24, "hete_lasted_n": 3, "policy_resonance": true, "hete_exclude_zero_wr": true, "debug": false, "n_entity_placeholder": 11, "load_specific_checkpoint": "model_trained.pt", "policy_matrix_testing": true, "allow_fast_test": false, "test_which_cpk": 1, }, 
"ALGORITHM.hete_league_onenet_fix.stage_planner.py->PolicyRsnConfig": { "resonance_start_at_update": 1, "yita_min_prob": 0.05, "yita_max": 0.5, "yita_shift_method": "-cos", "yita_shift_cycle": 1000, "yita_inc_per_update": 0.01 } } ================================================ FILE: PythonExample/hmp_minimal_modules/ZHECKPOINT/uhmap_hete10vs10/render_result_editor2.jsonc ================================================ { "config.py->GlobalConfig": { "note": "uhmap_hete10vs10", "env_name": "uhmap", "env_path": "MISSION.uhmap", "draw_mode": "Img", "num_threads": 1, "report_reward_interval": 4, "test_interval": 1280, "test_epoch": 128, "interested_team": 0, "seed": 2721, "device": "cpu", "max_n_episode": 5000000, "fold": 1, "backup_files": [ "ALGORITHM/hete_league_onenet_fix", "MISSION/uhmap" ], "gpu_fraction": 0.5, "gpu_party": "cuda2_party1", "test_only": true }, "MISSION.uhmap.uhmap_env_wrapper.py->ScenarioConfig": { "N_AGENT_EACH_TEAM": [10, 10], // update N_AGENT_EACH_TEAM "MaxEpisodeStep": 142, "StepGameTime": 0.5, "StateProvided": false, "render": false, "UElink2editor": true, // false, //true "HeteAgents": true, "UhmapVersion": "3.5", "UnrealLevel": "UhmapLargeScale", "SubTaskSelection": "UhmapAdversial", "UhmapRenderExe": "/home/hmp/UnrealHmapBinary/Version3.5/LinuxNoEditor/UHMP.sh", "UhmapServerExe": "/home/hmp/UnrealHmapBinary/Version3.5/LinuxServer/UHMPServer.sh", "TimeDilation": 32, "TEAM_NAMES": [ "ALGORITHM.hete_league_onenet_fix.foundation->ReinforceAlgorithmFoundation", "ALGORITHM.script_ai.uhmap_ls->DummyAlgorithmLinedAttack" ] }, "MISSION.uhmap.SubTasks.UhmapAdversialConf.py->SubTaskConfig": { "agent_list": [ { "team": 0, "tid": 0, "type": "RLA_UAV_Support", "init_fn_name": "init_air" }, { "team": 0, "tid": 1, "type": "RLA_CAR", "init_fn_name": "init_ground"}, { "team": 0, "tid": 2, "type": "RLA_CAR_Laser", "init_fn_name": "init_ground"}, { "team": 0, "tid": 3, "type": "RLA_CAR", "init_fn_name": "init_ground"}, { "team": 0, "tid": 4, "type": 
"RLA_CAR_Laser", "init_fn_name": "init_ground"}, { "team": 0, "tid": 5, "type": "RLA_CAR", "init_fn_name": "init_ground"}, { "team": 0, "tid": 6, "type": "RLA_CAR_Laser", "init_fn_name": "init_ground"}, { "team": 0, "tid": 7, "type": "RLA_CAR", "init_fn_name": "init_ground"}, { "team": 0, "tid": 8, "type": "RLA_CAR_Laser", "init_fn_name": "init_ground"}, { "team": 0, "tid": 9, "type": "RLA_UAV_Support", "init_fn_name": "init_air" }, { "team": 1, "tid": 0, "type": "RLA_UAV_Support", "init_fn_name": "init_air" }, { "team": 1, "tid": 1, "type": "RLA_CAR", "init_fn_name": "init_ground"}, { "team": 1, "tid": 2, "type": "RLA_CAR_Laser", "init_fn_name": "init_ground"}, { "team": 1, "tid": 3, "type": "RLA_CAR", "init_fn_name": "init_ground"}, { "team": 1, "tid": 4, "type": "RLA_CAR_Laser", "init_fn_name": "init_ground"}, { "team": 1, "tid": 5, "type": "RLA_CAR", "init_fn_name": "init_ground"}, { "team": 1, "tid": 6, "type": "RLA_CAR_Laser", "init_fn_name": "init_ground"}, { "team": 1, "tid": 7, "type": "RLA_CAR", "init_fn_name": "init_ground"}, { "team": 1, "tid": 8, "type": "RLA_CAR_Laser", "init_fn_name": "init_ground"}, { "team": 1, "tid": 9, "type": "RLA_UAV_Support", "init_fn_name": "init_air" } ] }, "ALGORITHM.script_ai.uhmap_ls.py->DummyAlgConfig": { "reserve": "" }, "ALGORITHM.hete_league_onenet_fix.shell_env.py->ShellEnvConfig": { "add_avail_act": true }, "ALGORITHM.hete_league_onenet_fix.foundation.py->AlgorithmConfig": { "train_traj_needed": 128, "hete_n_alive_frontend": 3, "hete_n_net_placeholder": 5, "hete_same_prob": 0.2, "load_checkpoint": true, "gamma": 0.99, "gamma_in_reward_forwarding": "True", "gamma_in_reward_forwarding_value": 0.95, "prevent_batchsize_oom": "True", "lr": 0.0001, "ppo_epoch": 24, "hete_lasted_n": 3, "policy_resonance": true, "hete_exclude_zero_wr": true, "debug": false, "n_entity_placeholder": 11, "load_specific_checkpoint": "model_trained.pt", "policy_matrix_testing": true, "allow_fast_test": false, "test_which_cpk": 1, }, 
"ALGORITHM.hete_league_onenet_fix.stage_planner.py->PolicyRsnConfig": { "resonance_start_at_update": 1, "yita_min_prob": 0.05, "yita_max": 0.5, "yita_shift_method": "-cos", "yita_shift_cycle": 1000, "yita_inc_per_update": 0.01 } } ================================================ FILE: PythonExample/hmp_minimal_modules/agent_with_sensor.jsonc ================================================ { "config.py->GlobalConfig": { "note": "ppoma-uhmap10vs10", "env_name": "uhmap", "env_path": "MISSION.uhmap", "draw_mode": "Img", "num_threads": 1, // 请预留 num_threads * 1 GB 的内存空间 "report_reward_interval": 256, "test_interval": 5120, "test_epoch": 256, "interested_team": 0, "seed": 8834, "device": "cuda", "mt_act_order": "new_method", "max_n_episode": 5000000, "fold": 1, "backup_files": [ "ALGORITHM/ppo_ma", "MISSION/uhmap" ] }, "MISSION.uhmap.uhmap_env_wrapper.py->ScenarioConfig": { "N_AGENT_EACH_TEAM": [4, 4], // update N_AGENT_EACH_TEAM "MaxEpisodeStep": 125, "StepGameTime": 0.5, "StateProvided": false, "render": false, "UElink2editor": true, "HeteAgents": true, "UnrealLevel": "UhmapLargeScale", "SubTaskSelection": "UhmapLargeScale", "UhmapVersion": "3.5", "UhmapRenderExe": "/home/hmp/UnrealHmapBinary/Version3.5/LinuxNoEditor/UHMP.sh", "UhmapServerExe": "/home/hmp/UnrealHmapBinary/Version3.5/LinuxServer/UHMPServer.sh", "TimeDilation": 64, "TEAM_NAMES": [ "ALGORITHM.random.foundation->DummyRandomControllerWithActionSetV1", "TEMP.ALGORITHM.random.foundation->DummyRandomControllerWithActionSetV1", ] }, "MISSION.uhmap.SubTasks.UhmapLargeScaleConf.py->SubTaskConfig":{ "agent_list": [ { "team":0, "tid":0, "type":"Lv2_TestAgentSensor", "init_fn_name":"init_air" }, { "team":0, "tid":1, "type":"Lv2_TestAgentSensor", "init_fn_name":"init_ground" }, { "team":0, "tid":2, "type":"Lv2_TestAgentSensor", "init_fn_name":"init_ground" }, { "team":0, "tid":3, "type":"Lv2_TestAgentSensor", "init_fn_name":"init_ground" }, { "team":1, "tid":0, "type":"Lv2_TestAgentSensor", "init_fn_name":"init_air" 
}, { "team":1, "tid":1, "type":"Lv2_TestAgentSensor", "init_fn_name":"init_ground" }, { "team":1, "tid":2, "type":"Lv2_TestAgentSensor", "init_fn_name":"init_ground" }, { "team":1, "tid":3, "type":"Lv2_TestAgentSensor", "init_fn_name":"init_ground" }, ], "ActionFormat": "ASCII" }, // --- Part3: config ALGORITHM 1/2 --- "ALGORITHM.random.foundation.py->AlgorithmConfig": { "preserve": "" }, // --- Part3: config ALGORITHM 2/2 --- "ALGORITHM.ppo_ma.shell_env.py->ShellEnvConfig": { "add_avail_act": true }, "ALGORITHM.ppo_ma.foundation.py->AlgorithmConfig": { "train_traj_needed": 256, "use_normalization": true, "load_specific_checkpoint": "", "gamma": 0.99, "gamma_in_reward_forwarding": "True", "gamma_in_reward_forwarding_value": 0.95, "prevent_batchsize_oom": "True", "lr": 0.0004, "ppo_epoch": 24, "policy_resonance": false, "debug": true, "n_entity_placeholder": 11 } } ================================================ FILE: PythonExample/hmp_minimal_modules/attack_post.jsonc ================================================ { // --- Part1: config HMP core --- "config.py->GlobalConfig": { "note": "random-attackpost",// http://localhost:59547 "env_name": "uhmap", "env_path": "MISSION.uhmap", // "heartbeat_on": "False", "draw_mode": "Img", "num_threads": 1, // 请预留 num_threads * 1 GB 的内存空间 "report_reward_interval": 128, "test_interval": 1280, "mt_act_order": "new_method", "test_epoch": 512, "interested_team": 0, "seed": 10098, "device": "cpu", "max_n_episode": 5000000, "fold": 1, "backup_files": [ "MISSION/uhmap" ] }, // --- Part2: config MISSION --- "MISSION.uhmap.uhmap_env_wrapper.py->ScenarioConfig": { "N_AGENT_EACH_TEAM": [ 8, 1 ], // 10 ships, 2 waterdrops "MaxEpisodeStep": 100, "StepGameTime": 0.5, "StateProvided": false, "render": true, // note: random seed has different impact on renderer and server "UElink2editor": false, "HeteAgents": false, "UnrealLevel": "UhmapAttackPost", "SubTaskSelection": "UhmapAttackPost", "UhmapVersion":"3.8", "UhmapRenderExe": 
"../../Build/WindowsNoEditor/UHMP.exe", "UhmapServerExe": "../../Build/WindowsServer/UHMPServer.exe", // "UhmapRenderExe": "/home/hmp/UnrealHmapBinary/Version3.8/LinuxNoEditor/UHMP.sh", // "UhmapServerExe": "/home/hmp/UnrealHmapBinary/Version3.8/LinuxServer/UHMPServer.sh", "TimeDilation": 64, // simulation time speed up, larger is faster "TEAM_NAMES": [ "ALGORITHM.script_ai.a_attackpost->AttackPostPreprogramBaseline", // "ALGORITHM.random.foundation->RandomControllerWithActionSetV2", "TEMP.TEAM2.ALGORITHM.random.foundation->RandomControllerWithActionSetV4", ] }, // --- Part3: config ALGORITHM 1/2 --- "ALGORITHM.script_ai.a_attackpost.py->AlgorithmConfig": { }, // --- Part3: config ALGORITHM 2/2 --- "TEMP.TEAM2.ALGORITHM.random.foundation.py->AlgorithmConfig": { }, } ================================================ FILE: PythonExample/hmp_minimal_modules/carrier.jsonc ================================================ { // --- Part1: config HMP core --- "config.py->GlobalConfig": { "note": "random-attackpost",// http://localhost:59547 "env_name": "uhmap", "env_path": "MISSION.uhmap", // "heartbeat_on": "False", "draw_mode": "Img", "num_threads": 1, // 请预留 num_threads * 1 GB 的内存空间 "report_reward_interval": 128, "test_interval": 1280, "mt_act_order": "new_method", "test_epoch": 512, "interested_team": 0, "seed": 10098, "device": "cpu", "max_n_episode": 5000000, "fold": 1, "backup_files": [ "MISSION/uhmap" ] }, // --- Part2: config MISSION --- "MISSION.uhmap.uhmap_env_wrapper.py->ScenarioConfig": { "N_AGENT_EACH_TEAM": [ 20, 20 ], // 10 ships, 2 waterdrops "MaxEpisodeStep": 100, "StepGameTime": 0.5, "StateProvided": false, "render": false, // note: random seed has different impact on renderer and server "UElink2editor": true, "HeteAgents": false, "UnrealLevel": "UhmapCarrier", "SubTaskSelection": "UhmapCarrier", "UhmapVersion":"3.8", "UhmapRenderExe": "/home/hmp/UnrealHmapBinary/Version3.8/LinuxNoEditor/UHMP.sh", "UhmapServerExe": 
"/home/hmp/UnrealHmapBinary/Version3.8/LinuxServer/UHMPServer.sh", "TimeDilation": 64, // simulation time speed up, larger is faster "TEAM_NAMES": [ "ALGORITHM.random.foundation->RandomControllerWithActionSetV1", // "ALGORITHM.random.foundation->RandomControllerWithActionSetV2", "TEMP.TEAM2.ALGORITHM.random.foundation->RandomControllerWithActionSetV1", ] }, // --- Part3: config ALGORITHM 1/2 --- "ALGORITHM.random.foundation.py->AlgorithmConfig": { }, // --- Part3: config ALGORITHM 2/2 --- "TEMP.TEAM2.ALGORITHM.random.foundation.py->AlgorithmConfig": { }, } ================================================ FILE: PythonExample/hmp_minimal_modules/config.py ================================================ import time import numpy as np from UTIL.colorful import * from UTIL.config_args import ChainVar ''' GlobalConfig: This config class will be 'injected' with new settings from JSONC. (E.g., override configs with ```python main.py --cfg example.jsonc```) (As the name indicated, ChainVars will change WITH vars it 'chained_with' during config injection) (please see UTIL.config_args to find out how this advanced trick works out.) * Explaining a very important setting option: - align_episode (True/False): In align mode, all threads begin new episode synchronously, which means a env thread that ends early has to 'wait' for other threads before restarting If set to 'False', threads will not wait for each others, and will 'reset' immediately on 'done' - note (str): Name you experiment carefully with note setting. The note defines where the results of a single experiment will go. for example, if note='conc', everything produced in the experiment will be save in ZHECKPOINT/conc/*, including images, saved pytorch model, - env_name: Which mission/environment/task to use, See ./MISSION/env_router.py for the dictionary of available envs. - env_path: The path of selected mission. 
In fact, hmp do not need this setting at all, it exists here only to Double Check that you have chosen the correct mission env. * Why the Algorithm selection is not here ?! It is the missions (envs) that choose the algorithm(s)! Please go to mission configuration! Important to remember: - hmp selects a mission, - mission selects algorithm(s). In fact, if you have two teams in env, you can choose two different algorithms to fight each other in the same env! - Please goto ./MISSION/env_router.py to find out where the ScenarioConfig of your env is written, - Please set ```TEAM_NAMES``` to include the path of your favored algorithm(s) ''' class GlobalConfig(object): # ADD_TO_CONF_SYSTEM //DO NOT remove this comment// align_episode = True # ! please try to understand this with TOP priority env_name = 'sr_tasks->cargo' # which environment, see ./MISSION/env_router.py env_path = 'MISSION.sr_tasks.multiagent.cargo' # path of environment, double check to prevent mistake draw_mode = 'OFF' # 'Img','Threejs','Web','Native' activate_logger = True # activate data plotting (Tensorboard is not used because I do not like it) data_logger = 'auto load, do not change this var!' # activate data plotting (Tensorboard is not used because I do not like it) resume_mod = False # resume unfinished mt_act_order = 'old_method' # resume unfinished mt_parallel = False # resume unfinished seed = np.random.randint(0, 100000) # seed for numpy and pytorch # ! 
warning, the note also determines where the experiment log is stored, typically at ./ZHECKPOINT/$note/* note = 'more_testing' # in case you forget the purpose of this training session, write a note logdir = './ZHECKPOINT/%s/'%note logdir_cv = ChainVar(lambda note: './ZHECKPOINT/%s/'%note, chained_with=['note']) recall_previous_session = False # continue previously interrupted training session test_only = False # only testing and no training, it controls a flag sending to Alg side test_logger = 'test_only_profile.txt' # logger path, experimental, writing win rate in a file device = 'cuda' # choose from 'cpu' (no GPU), 'cuda' (auto select GPU), 'cuda:3' (manual select GPU) gpu_party = 'off' # GPU memory is precious! assign multiple training processes to a 'party', they will share GPU memory manual_gpu_ctl = False # auto variable, do not change! gpu_fraction = 1.0 # maximum GPU memory usage percent, e.g., 0.5 means using half GPU memory num_threads = 64 # run N parallel envs, an 'env' is referred to as a 'thread' fold = 1 # A 'linux process' can handle multiple envs ('thread'), run N parallel envs, on (N//fold) processes # this 'folding' is designed for IPC efficiency, you can thank python GIL for such a strange design... 
n_parallel_frame = int(5e6) # Number of frames to run (in each frame, all parallel-envs step once) max_n_episode = int(2e5) # max number of episodes use_float64 = False # force float64 when converting numpy->tensor interested_team = 0 # the interested agents, used in reward recording interested_agent_num = 50 # the interested agents, used in reward recording interested_agent_uid = range(0,50) # the indices of interested agents, used in reward recording interested_agent_uid_cv = ChainVar(lambda interested_agent_num:range(0,interested_agent_num), chained_with=['interested_agent_num']) report_reward_interval = num_threads # reporting interval report_reward_interval_cv = ChainVar(lambda num_threads:num_threads, chained_with=['num_threads']) train_time_testing = True # allow hmp to test algorithm policies every test_interval episodes test_interval = 32*num_threads # test interval test_interval_cv = ChainVar(lambda num_threads:4*num_threads, chained_with=['num_threads']) test_epoch = 32 if num_threads <= 32 else num_threads # test epoch test_epoch_cv = ChainVar(lambda num_threads: 32 if num_threads <= 32 else num_threads, chained_with=['num_threads']) ScenarioConfig = 'This ScenarioConfig var will be automatically linked to task configuration later in ./MISSION/env_router.py' backup_files = [] # a list of files that need to be backed up at each run hmap_logger = None # this is just a global logger heartbeat_on = True # some fancy commandline visual effect to show that environment is running cfg_ready = False # DO NOT change! automatically set to True when Json configuration is all locked-and-loaded # ! uploading "./ZHECKPOINT/$note" to a data storage server allow_res_upload = True # upload results to a data storage server when exiting upload_after_test = False # upload results to a data storage server when completing a test run machine_info = 'auto load, do not change this var!' 
remote_server_ops = "" # KEY = {"addr": None, "usr":None, "pwd":None} # data storage server ip addr, username and password ================================================ FILE: PythonExample/hmp_minimal_modules/cradle.ipynb ================================================ { "cells": [ { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "new port: 60459\n" ] } ], "source": [ "from UTIL.network import find_free_port_no_repeat\n", "\n", "port, release_fn = find_free_port_no_repeat()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "--2022-09-16 16:40:35-- http://cloud.fuqingxu.top:4080/remote.php/dav/files/fuqingxu/keys/notebook_old.pub\n", "Resolving cloud.fuqingxu.top (cloud.fuqingxu.top)... 43.154.70.224\n", "Connecting to cloud.fuqingxu.top (cloud.fuqingxu.top)|43.154.70.224|:4080... connected.\n", "HTTP request sent, awaiting response... 401 Unauthorized\n", "Authentication selected: Basic realm=\"Nextcloud\", charset=\"UTF-8\"\n", "Reusing existing connection to cloud.fuqingxu.top:4080.\n", "HTTP request sent, awaiting response... 
200 OK\n", "Length: 403 [application/octet-stream]\n", "Saving to: ‘./TEMP/_xkey’\n", "\n", "./TEMP/_xkey 100%[===================>] 403 --.-KB/s in 0s \n", "\n", "2022-09-16 16:40:38 (76.4 MB/s) - ‘./TEMP/_xkey’ saved [403/403]\n", "\n", "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCnPRZXNNBttb6XvGf/W5rf9YKKCXCjzffhAV241Gbx/m/KA3niQR/1Y5U9YC3S+Paia2wvVJpKX1dTOwJKCIZuWvBv4ynHgaQ4occrrc2t4SAW1BzD/YeQM+Y/KrqRQ38emUartiYVDzgsCq1euE4pw1dESh9uazv9pZyS3ieEz+UVCiDeyeXXcI3hlKba7ARLA15txrxp/1em4T8nnGsPaWsuF/pXGmoYO+6P4nlWKXgJxvaHrEZUZSIR0JNOUjLPNjITHWv2eqdhXo+18svTqYkAp/4knVHwJpAxUKJdU7j1tYsDjtzrv8WWr9lf/DqrZFINsbZ11WZn0Q71aG9h Qingxu@DESKTOP-Qingxu\n", "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFcsiud8tYOVUuszyoe9M0ymlIdgvK27f2fviAdrxkf5 fqxmax@hotmail.com\n", "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFcsiud8tYOVUuszyoe9M0ymlIdgvK27f2fviAdrxkf5 fqxmax@hotmail.com\n" ] } ], "source": [ "def arrange_id(N_AGENT_EACH_TEAM):\n", " N_AGENT_EACH_TEAM = np.array(N_AGENT_EACH_TEAM) \n", " AGENT_ID_EACH_TEAM_cv = []\n", " begin = 0\n", " for _, n in enumerate(N_AGENT_EACH_TEAM):\n", " b = begin\n", " s = begin + n\n", " AGENT_ID_EACH_TEAM_cv.append(range(b, s))\n", " begin = s\n", " return AGENT_ID_EACH_TEAM_cv\n", "arrange_id([25,25])\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": 168, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "7\n" ] } ], "source": [ "import numpy as np\n", "ckpg_info = [\n", " {'win_rate':0.0},\n", " {'win_rate':0.2},\n", " {'win_rate':0.2},\n", " {'win_rate':0.5},\n", " {'win_rate':0.7},\n", " {'win_rate':0.8},\n", " {'win_rate':0.9},\n", "]\n", "\n", "def random_select():\n", " \"\"\"randomly select a group index\n", "\n", " Args:\n", " 
AlgorithmConfig.hete_same_prob: a probability about choosing the frontier net as the teammate\n", "\n", " Returns:\n", " int: a group index\n", " \"\"\"\n", " # when random win rate is high, direct to frontend nets\n", " hete_same_prob = 0.0\n", " if np.random.rand() < hete_same_prob:\n", " return 0\n", " \n", " n_option = len(ckpg_info)\n", " rand_winrate = np.random.randint(low=1, high=n_option+1)\n", " \n", " return rand_winrate\n", "\n", "print(random_select())" ] }, { "cell_type": "markdown", "metadata": {}, "source": [] }, { "cell_type": "code", "execution_count": 53, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "tensor(True)\n", "tensor(True)\n", "tensor(True)\n", "tensor(True)\n", "torch.Size([4, 30, 7])\n", "torch.Size([4, 3, 2, 5, 7])\n", "torch.Size([4, 3, 2, 5, 7])\n", "torch.Size([4, 6, 45]) [4,6,45]\n", "torch.Size([4, 6, 90]) [4,6,90]\n", "torch.Size([4, 6, 90, 5]) [4,6,90, 5]\n", "torch.Size([4, 6, 90, 5]) [4,6,90, 5]\n", "torch.Size([4, 30, 18, 5]) [4,30,18, 5]\n", "torch.Size([4, 30, 18]) [4,30,18]\n", "torch.Size([30, 18, 5]) [30,18, 5]\n", "torch.Size([4, 30, 18, 5]) [4,30,18, 5]\n", "torch.Size([4, 30, 18]) [4,30,18]\n", "torch.Size([30, 18, 5]) [30,18, 5]\n", "torch.Size([4, 30, 18, 5]) [4,30,18, 5]\n", "torch.Size([4, 30, 18]) [4,30,18]\n", "torch.Size([30, 18, 5]) [30,18, 5]\n", "torch.Size([12, 5, 6])\n" ] } ], "source": [ "import torch, copy\n", "import numpy as np\n", "\"\"\"\n", " improve np.reshape and torch.view function\n", " If a dim is assigned with 0, it will keep its original dimension\n", " eg.1 x.shape = (4, 5, 6, 7); new_shape = [0, 0, -1]\n", " y = my_view(x, new_shape)\n", " y.shape = (4, 5, 6*7)\n", "\n", " eg.2 x.shape = (4, 5, 6, 7); new_shape = [-1, 0, 0]\n", " y = my_view(x, new_shape)\n", " y.shape = (4*5, 6, 7)\n", "\n", " eg.3 x.shape = (4, 5, 6); new_shape = [0, 0, -1, 3]\n", " y = my_view(x, new_shape)\n", " y.shape = [4, 5, 2, 3]\n", "\n", " eg.4 x.shape = (3, 4, 5, 6); 
new_shape = [0, 2, -1, 0, 0]\n", " y = my_view(x, new_shape)\n", " y.shape = [3, 2, 2, 5, 6]\n", "\n", "\n", "\"\"\"\n", "def my_view(x, shape):\n", " if -1 in shape[1:-1]: return my_view_test(x, shape)\n", " reverse_lookup = True if shape[0] == -1 else False\n", " if not reverse_lookup:\n", " for i, dim in enumerate(shape):\n", " if dim == 0:\n", " shape[i] = x.shape[i]\n", " else:\n", " for i in range(len(shape)):\n", " ni = -(i + 1) # iter -1,-2,-3,...\n", " dim = shape[ni]\n", " if dim == 0:\n", " shape[ni] = x.shape[ni]\n", " if isinstance(x, np.ndarray):\n", " return x.reshape(*shape)\n", " return x.view(*shape)\n", "\n", "# def my_view_test(x, shape):\n", "# # fill both way until meet -1 \n", "# for i, dim in enumerate(shape):\n", "# if dim == 0: shape[i] = x.shape[i]\n", "# elif dim == -1: break\n", "# elif dim != x.shape[i]: break\n", "# for i in range(len(shape)):\n", "# ni = -(i + 1); dim = shape[ni]\n", "# if dim == 0: shape[ni] = x.shape[ni]\n", "# elif dim == -1: break\n", "# # print(shape)\n", "# if isinstance(x, np.ndarray):\n", "# return x.reshape(*shape)\n", "# return x.view(*shape)\n", "\n", "def my_view_test(x, shape):\n", " # fill both way until meet -1 \n", " for i, dim in enumerate(shape):\n", " if dim == 0: shape[i] = x.shape[i]\n", " elif dim == -1: break\n", " elif dim != x.shape[i]: break\n", " for i in range(len(shape)):\n", " ni = -(i + 1); dim = shape[ni]\n", " if dim == 0: shape[ni] = x.shape[ni]\n", " elif dim == -1: break\n", " # print(shape)\n", " if isinstance(x, np.ndarray):\n", " return x.reshape(*shape)\n", " return x.view(*shape)\n", "\n", "\n", "x = torch.rand(4, 5, 6, 7); \n", "new_shape = [0, 0, -1]\n", "print((my_view_test(x, copy.copy(new_shape))==my_view(x, copy.copy(new_shape))).all())\n", "\n", "x = torch.rand(4, 5, 6, 7); \n", "new_shape = [-1, 0, 0]\n", "print((my_view_test(x, copy.copy(new_shape))==my_view(x, copy.copy(new_shape))).all())\n", "\n", "\n", "x = torch.rand(4, 5, 6); \n", "new_shape = [0, 0, -1, 3]\n", 
"print((my_view_test(x, copy.copy(new_shape))==my_view(x, copy.copy(new_shape))).all())\n", "\n", "\n", "x = torch.rand(3, 4, 5, 6); \n", "new_shape = [0, 2, -1, 0, 0]\n", "print((my_view_test(x, copy.copy(new_shape))==my_view(x, copy.copy(new_shape))).all())\n", "\n", "\n", "x = torch.rand(4, 5, 6, 7)\n", "new_shape = [0,30,0]\n", "print(my_view_test(x, copy.copy(new_shape)).shape)\n", "\n", "x = torch.rand(4, 6, 5, 7)\n", "new_shape = [0, 3, -1, 0, 0]\n", "print(my_view_test(x, copy.copy(new_shape)).shape)\n", "\n", "x = torch.rand(4, 6, 5, 7)\n", "new_shape = [0, 3, 2, 0, 0]\n", "print(my_view_test(x, copy.copy(new_shape)).shape)\n", "\n", "x = torch.rand(4, 6, 5, 9)\n", "new_shape = [0, -1, 45]\n", "print((my_view_test(x, copy.copy(new_shape)).shape), '[4,6,45]')\n", "\n", "x = torch.rand(4, 6, 5, 9, 2)\n", "new_shape = [0, -1, 90]\n", "print((my_view_test(x, copy.copy(new_shape)).shape), '[4,6,90]')\n", "# x = torch.rand(4, 6, 5, 7)\n", "# new_shape = [0, 3, 0, 0, 0] # error\n", "# print(my_view_test(x, copy.copy(new_shape)).shape)\n", "\n", "# 规则: 异变数字(!=, -1)最多两个,如果是两个,则必须相连\n", "\n", "# x = torch.rand(4, 5, 6, 7, 8, 9)\n", "# new_shape = [0,30,0,-1] # error\n", "# print(my_view_test(x, copy.copy(new_shape)).shape)\n", "\n", "\n", "x = torch.rand(4, 6, 5, 9, 2, 5)\n", "new_shape = [0, -1, 90, 0]\n", "print((my_view_test(x, copy.copy(new_shape)).shape), '[4,6,90, 5]')\n", "\n", "x = torch.rand(4, 6, 5, 9, 2, 5)\n", "new_shape = [0, 0, 90, 0]\n", "print((my_view_test(x, copy.copy(new_shape)).shape), '[4,6,90, 5]')\n", "\n", "x = torch.rand(4, 6, 5, 9, 2, 5)\n", "new_shape = [0, 30, 18, 0]\n", "print((my_view_test(x, copy.copy(new_shape)).shape), '[4,30,18, 5]')\n", "\n", "x = torch.rand(4, 6, 5, 9, 2)\n", "new_shape = [0, 30, 18]\n", "print((my_view_test(x, copy.copy(new_shape)).shape), '[4,30,18]')\n", "\n", "x = torch.rand(6, 5, 9, 2, 5)\n", "new_shape = [30, 18, 0]\n", "print((my_view_test(x, copy.copy(new_shape)).shape), '[30,18, 5]')\n", "\n", "\n", "x = 
torch.rand(4, 6, 5, 9, 2, 5)\n", "new_shape = [0, -1, 18, 0]\n", "print((my_view_test(x, copy.copy(new_shape)).shape), '[4,30,18, 5]')\n", "\n", "x = torch.rand(4, 6, 5, 9, 2)\n", "new_shape = [0, -1, 18]\n", "print((my_view_test(x, copy.copy(new_shape)).shape), '[4,30,18]')\n", "\n", "x = torch.rand(6, 5, 9, 2, 5)\n", "new_shape = [-1, 18, 0]\n", "print((my_view_test(x, copy.copy(new_shape)).shape), '[30,18, 5]')\n", "\n", "x = torch.rand(4, 6, 5, 9, 2, 5)\n", "new_shape = [0, 30, -1, 0]\n", "print((my_view_test(x, copy.copy(new_shape)).shape), '[4,30,18, 5]')\n", "\n", "x = torch.rand(4, 6, 5, 9, 2)\n", "new_shape = [0, 30, -1]\n", "print((my_view_test(x, copy.copy(new_shape)).shape), '[4,30,18]')\n", "\n", "x = torch.rand(6, 5, 9, 2, 5)\n", "new_shape = [30, -1, 0]\n", "print((my_view_test(x, copy.copy(new_shape)).shape), '[30,18, 5]')\n", "'''\n", " x.shape = (3, 4, 5, 6); new_shape = [12, 0, -1]\n", " Error: 12(!=3) and -1 must stick together!\n", " Fix: new_shape = [12, 0, 0]\n", " Fix: new_shape = [12, -1, 6]\n", " Fix: new_shape = [12, -1, 0]\n", "'''\n", "x = torch.rand(3, 4, 5, 6)\n", "new_shape = [12, 0, 0]\n", "new_shape = [12, -1, 6]\n", "new_shape = [12, -1, 0]\n", "print((my_view_test(x, copy.copy(new_shape)).shape))\n" ] }, { "cell_type": "code", "execution_count": 20, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "tensor([[[True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, 
True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True]],\n", "\n", " [[True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, 
True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True]],\n", "\n", " [[True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True]],\n", "\n", " [[True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, 
True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True],\n", " [True, True, True, True, True, True, True]]])" ] }, "execution_count": 20, "metadata": {}, "output_type": "execute_result" } ], "source": [ "my_view_test(x, new_shape)==my_view(x, new_shape)" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([0, 0, 0, 0, 0, 1, 0, 0, 8, 6])" ] }, "execution_count": 6, "metadata": {}, "output_type": "execute_result" } ], "source": [ "import torch, time\n", "import numpy as np\n", "def tear_apart(x, n_digit):\n", " out = np.zeros(n_digit, dtype=x.dtype)\n", " p = n_digit\n", " for _ in range(n_digit):\n", " p-=1\n", " tmp = x % 10\n", 
" out[p] = tmp\n", " x = x // 10\n", " return out\n", "\n", "tear_apart(np.array([10086]), 10)" ] }, { "cell_type": "code", "execution_count": 109, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "The Cython extension is already loaded. To reload it, use:\n", " %reload_ext Cython\n" ] } ], "source": [ "%load_ext Cython\n" ] }, { "cell_type": "code", "execution_count": 181, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "In file included from /home/hmp/.local/lib/python3.8/site-packages/numpy/core/include/numpy/ndarraytypes.h:1969,\n", " from /home/hmp/.local/lib/python3.8/site-packages/numpy/core/include/numpy/ndarrayobject.h:12,\n", " from /home/hmp/.local/lib/python3.8/site-packages/numpy/core/include/numpy/arrayobject.h:4,\n", " from /home/hmp/.cache/ipython/cython/_cython_magic_05262793914bae67f251919f56f690a1.c:683:\n", "/home/hmp/.local/lib/python3.8/site-packages/numpy/core/include/numpy/npy_1_7_deprecated_api.h:17:2: warning: #warning \"Using deprecated NumPy API, disable it with \" \"#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION\" [-Wcpp]\n", " 17 | #warning \"Using deprecated NumPy API, disable it with \" \\\n", " | ^~~~~~~\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "[-0. -0. -0. -0. -0. -0. -1. -2. -0. 
-0.]\n", "-1200.0\n" ] } ], "source": [ "\n", "%%cython\n", "import numpy as np\n", "cimport numpy as np\n", "cimport cython\n", "np.import_array()\n", "\n", "\n", "ctypedef fused DTYPE_all_t:\n", " np.float32_t\n", " np.float64_t\n", " np.int64_t\n", " np.int32_t # to compat Wi\n", "ctypedef fused DTYPE_t:\n", " np.float32_t\n", " np.float64_t\n", "\n", "\n", "ctypedef fused DTYPE_intlong_t:\n", " np.int64_t\n", " np.int32_t # to compat Windows\n", " \n", " \n", "# x: input\n", "# n_digit: output dimension\n", "# base: 进制\n", "@cython.boundscheck(False)\n", "@cython.wraparound(False)\n", "@cython.nonecheck(False)\n", "def tear_number_apart(np.float64_t x, DTYPE_intlong_t n_digit, DTYPE_intlong_t base=16, DTYPE_all_t mv_left=8):\n", " cdef np.ndarray out = np.zeros(n_digit, dtype=float)\n", " cdef int p = n_digit\n", " cdef float tmp = 0\n", " reverse = x < 0\n", "\n", " cdef float m_init = base\n", " if reverse: x = -x\n", " m_init = m_init ** mv_left\n", " x = x * m_init\n", " for _ in range(n_digit):\n", " p -= 1\n", " if p==0: \n", " out[p] = x\n", " break\n", " tmp = x % base\n", " out[p] = tmp\n", " x = x // base\n", " if reverse: out = -out\n", " return out\n", "\n", "\n", "\n", "# [-1. 9. 9. 9. 9. 9. 8. 8. 0. 
0.]\n", "# -1200.0\n", "# parts = tear_number_apart(-1200, n_digit=10, base=10, mv_left=0)\n", "# print(parts)\n", "# comb_num_back(parts, n_digit=10, base=10, mv_left=0)\n", "\n", "\n", "# def put_number_together(parts, n_digit, base):\n", "# out = 0\n", "# z = n_digit//2\n", "# mp = base**(n_digit - n_digit//2 - 1)\n", "# for p in range(n_digit):\n", "# out += parts[p] * mp\n", "# mp = mp / base\n", " \n", "# return out\n", "def comb_num_back(arr, n_digit, base, mv_left):\n", " out = 0\n", " tmp = base ** (n_digit - mv_left - 1)\n", " for x in arr:\n", " out += x * tmp\n", " tmp = tmp/base\n", " \n", " return out\n", "\n", "def tear_num_arr(arr, DTYPE_intlong_t n_digit, DTYPE_intlong_t base, DTYPE_all_t mv_left):\n", " return np.concatenate([tear_number_apart(x, n_digit, base, mv_left) for x in arr], axis=0)\n", "\n", "parts = tear_number_apart(-1200, n_digit=10, base=10, mv_left=0)\n", "print(parts)\n", "print(comb_num_back(parts, n_digit=10, base=10, mv_left=0))\n" ] }, { "cell_type": "code", "execution_count": 272, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "230.82640393813236 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 3. 0.82640392]\n", "492.6766723569501 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 9. 2.67667246]\n", "59.956808644741066 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 5. 9.95680904]\n", "-59.651731736030555 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -5. -9.65173149]\n", "-238.3352484366702 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -3. -8.33524799]\n", "5.7337278365590105 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 0. 5.73372793]\n", "275.885923960165 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 7. 5.88592386]\n", "-98.07044541018584 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -9. -8.07044506]\n", "489.5286973606359 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 8. 9.52869701]\n", "137.54638532913899 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 3. 7.54638529]\n", "-2.103895639164466 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -0. -2.10389566]\n", "26.845777550867478 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 2. 
6.84577751]\n", "6.174535651712332 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 0. 6.17453575]\n", "39.318904285643555 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 3. 9.31890392]\n", "-143.12573808130801 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -4. -3.12573814]\n", "-347.5528761176796 [-0. -0. -0. -0. -0. -0. -0.\n", " -3. -4. -7.552876]\n", "188.97558479563136 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 8. 8.97558498]\n", "46.795050181884015 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 4. 6.79505014]\n", "16.406017666173778 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 1. 6.40601778]\n", "376.55079371029865 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 7. 6.55079365]\n", "170.80500847078605 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 7. 0.80500847]\n", "349.25103126825076 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 4. 9.25103092]\n", "-473.4017140290807 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -7. -3.40171409]\n", "60.37018421759788 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 6. 0.37018421]\n", "188.6137912927477 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 8. 8.61379147]\n", "356.75735183505543 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 5. 6.75735188]\n", "299.86878482237387 [0. 0. 0. 0. 0. 0. 0.\n", " 2. 9. 9.8687849]\n", "355.80549476673696 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 5. 5.80549479]\n", "-282.5901315597383 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -8. -2.59013152]\n", "337.46020036885136 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 3. 7.46020031]\n", "-293.7165480768427 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -9. -3.71654797]\n", "256.9969188086662 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 5. 6.99691868]\n", "-443.913267928026 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -4. -3.91326785]\n", "-265.0478090022421 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -6. -5.04780912]\n", "-471.2142031060221 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -7. -1.21420312]\n", "-369.7175572019752 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -6. -9.71755695]\n", "-449.1111081489547 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -4. -9.11110783]\n", "-64.89185973656608 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -6. -4.89185953]\n", "404.7852267269422 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 0. 
4.78522682]\n", "-484.6104562950904 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -8. -4.61045647]\n", "-491.8318958035397 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -9. -1.83189583]\n", "341.75084747935693 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 4. 1.75084746]\n", "-161.9878143217315 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -6. -1.98781431]\n", "-0.5227803408573983 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -0. -0.52278036]\n", "-495.7167584916426 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -9. -5.71675873]\n", "392.18254810438526 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 9. 2.18254805]\n", "355.7805804652482 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 5. 5.78058052]\n", "121.58216570700253 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 2. 1.58216572]\n", "-85.9208312664893 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -8. -5.9208312]\n", "-221.66995828488245 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -2. -1.66995823]\n", "-463.12324927995627 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -6. -3.12324929]\n", "220.16909036312236 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 2. 0.16909036]\n", "479.67136964284293 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 7. 9.67136955]\n", "-434.6094126740596 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -3. -4.60941267]\n", "369.8775505932793 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 6. 9.87755013]\n", "239.68600116266592 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 3. 9.68600082]\n", "-94.02128223839445 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -9. -4.0212822]\n", "-392.4472062144585 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -9. -2.44720626]\n", "-257.83747123068747 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -5. -7.83747101]\n", "149.2744757568001 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 4. 9.27447605]\n", "221.5218708247577 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 2. 1.52187085]\n", "-135.48763858373147 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -3. -5.48763847]\n", "65.61677451503122 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 6. 5.61677456]\n", "298.6917627464725 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 9. 8.69176292]\n", "244.83693698559205 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 4. 4.83693695]\n", "336.7943197636023 [0. 0. 0. 0. 0. 
0.\n", " 0. 3. 3. 6.79431963]\n", "137.48377484776475 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 3. 7.48377466]\n", "360.82992727136497 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 6. 0.82992727]\n", "-375.2222241123636 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -7. -5.22222424]\n", "27.965407036407818 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 2. 7.96540689]\n", "428.3203546424583 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 2. 8.32035446]\n", "-246.49758950751533 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -4. -6.49758959]\n", "-329.003990686788 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -2. -9.00399113]\n", "377.65676905990983 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 7. 7.65676928]\n", "-452.3022625244916 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -5. -2.30226254]\n", "-55.50855235104668 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -5. -5.50855255]\n", "328.05721372694904 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 2. 8.05721378]\n", "-131.62576089992484 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -3. -1.62576091]\n", "-428.25905917247667 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -2. -8.25905895]\n", "44.60946453956227 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 4. 4.60946465]\n", "434.7273175713673 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 3. 4.72731733]\n", "-21.22929536962248 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -2. -1.22929537]\n", "-334.72156136835383 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -3. -4.72156143]\n", "-247.26735284192014 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -4. -7.26735306]\n", "-259.4297610097589 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -5. -9.42976093]\n", "-332.7135076625024 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -3. -2.71350765]\n", "-91.13378156422525 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -9. -1.13378155]\n", "-80.06103945114351 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -8. -0.06103945]\n", "313.4959717860629 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 1. 3.49597168]\n", "-220.0728630675326 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -2. -0.07286306]\n", "-326.7995034605826 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -2. -6.79950333]\n", "-243.43991102652728 [-0. -0. -0. -0. -0. -0.\n", " -0. 
-2. -4. -3.43991113]\n", "340.15380656736306 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 4. 0.15380657]\n", "487.6076045926337 [0. 0. 0. 0. 0. 0. 0.\n", " 4. 8. 7.6076045]\n", "127.04895777989222 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 2. 7.04895782]\n", "468.21983556578226 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 6. 8.21983528]\n", "102.79211050610738 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 0. 2.79211044]\n", "55.88231540426791 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 5. 5.88231564]\n", "-73.19657120602452 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -7. -3.19657111]\n", "345.8591615643367 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 4. 5.85916138]\n", "391.4506532621457 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 9. 1.45065331]\n", "-233.4536250607684 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -3. -3.45362496]\n", "395.765201751574 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 9. 5.76520157]\n", "421.03272289044156 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 2. 1.03272295]\n", "-45.95333744524888 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -4. -5.95333767]\n", "41.915363806866665 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 4. 1.91536379]\n", "112.51040994637785 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 1. 2.51040983]\n", "291.98255116790705 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 9. 1.98255122]\n", "-474.77035527455513 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -7. -4.77035522]\n", "253.77934048125195 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 5. 3.77934051]\n", "-79.86958361799856 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -7. -9.86958408]\n", "-299.4443655644471 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -9. -9.4443655]\n", "-215.8931536851104 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -1. -5.89315367]\n", "-0.9617640788727178 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -0. -0.9617641]\n", "-348.44507577573216 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -4. -8.44507599]\n", "83.11253482463665 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 8. 3.11253476]\n", "280.67948490762774 [0. 0. 0. 0. 0. 0. 0.\n", " 2. 8. 0.6794849]\n", "-183.39057881777788 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -8. -3.39057875]\n", "-38.99148157777588 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. 
-3. -8.99148178]\n", "442.4752907907672 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 4. 2.47529078]\n", "161.6157902424954 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 6. 1.61579025]\n", "-318.77332001786885 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -1. -8.7733202]\n", "366.9611132273823 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 6. 6.96111345]\n", "-353.7128895196047 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -5. -3.71288943]\n", "-105.41674095739417 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -0. -5.41674089]\n", "-67.51418672344033 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -6. -7.51418686]\n", "369.25465887217155 [0. 0. 0. 0. 0. 0. 0.\n", " 3. 6. 9.2546587]\n", "245.16029222778346 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 4. 5.16029215]\n", "-55.14855658542428 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -5. -5.14855671]\n", "77.6565780498072 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 7. 7.65657806]\n", "353.1104473172967 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 5. 3.11044741]\n", "-312.7778325343301 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -1. -2.77783251]\n", "187.22779530008071 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 8. 7.22779512]\n", "-53.79208269188496 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -5. -3.79208279]\n", "-210.29741353576236 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -1. -0.29741353]\n", "-381.91815621412127 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -8. -1.91815627]\n", "81.84209252296482 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 8. 1.84209251]\n", "-277.2724126200516 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -7. -7.27241278]\n", "217.41065477658893 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 1. 7.41065454]\n", "-188.72875372469466 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -8. -8.72875404]\n", "170.70329876620394 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 7. 0.70329875]\n", "-164.395594201303 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -6. -4.39559412]\n", "-222.6287168406985 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -2. -2.62871695]\n", "-256.9310806257938 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -5. -6.93108082]\n", "-264.4290492903771 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -6. 
-4.42904949]\n", "215.122617709276 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 1. 5.12261772]\n", "-382.11248326728185 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -8. -2.11248326]\n", "499.5267383052591 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 9. 9.52673817]\n", "129.86317393748737 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 2. 9.86317348]\n", "-75.89369305940852 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -7. -5.89369297]\n", "394.9595561956092 [0. 0. 0. 0. 0. 0. 0.\n", " 3. 9. 4.9595561]\n", "-401.5964692613657 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -0. -1.59646928]\n", "-413.3471785648082 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -1. -3.34717846]\n", "-348.4194415279612 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -4. -8.41944122]\n", "260.9353986701871 [0. 0. 0. 0. 0. 0. 0.\n", " 2. 6. 0.9353987]\n", "-275.33039367908185 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -7. -5.33039379]\n", "426.2687566040506 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 2. 6.26875639]\n", "-330.3729411219386 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -3. -0.37294114]\n", "-355.3817148053097 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -5. -5.38171482]\n", "-65.4056922555991 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -6. -5.4056921]\n", "218.73429117003363 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 1. 8.73429108]\n", "-73.74184204527012 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -7. -3.74184203]\n", "33.62706825943218 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 3. 3.62706828]\n", "-372.5021613209377 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -7. -2.50216126]\n", "-68.80320608490165 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -6. -8.80320644]\n", "2.7182951368065478 [0. 0. 0. 0. 0. 0. 0.\n", " 0. 0. 2.7182951]\n", "-150.5381485291709 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -5. -0.53814852]\n", "-280.1988088180758 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -8. -0.19880882]\n", "-486.8794519220411 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -8. -6.87945175]\n", "39.433947408987976 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 3. 9.43394756]\n", "229.14300188862046 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 2. 9.14300156]\n", "5.826423428465399 [0. 
0. 0. 0. 0. 0.\n", " 0. 0. 0. 5.82642365]\n", "-344.6744864969875 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -4. -4.67448664]\n", "135.97333273783929 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 3. 5.97333288]\n", "103.52874775866606 [0. 0. 0. 0. 0. 0. 0.\n", " 1. 0. 3.5287478]\n", "64.3924450077713 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 6. 4.39244509]\n", "-75.75051244361086 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -7. -5.7505126]\n", "-274.7153755980284 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -7. -4.71537542]\n", "255.29646524195104 [0. 0. 0. 0. 0. 0. 0.\n", " 2. 5. 5.2964654]\n", "401.32474918608807 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 0. 1.32474923]\n", "-253.3440567391716 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -5. -3.34405684]\n", "-365.8168835853613 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -6. -5.81688356]\n", "111.35007160127886 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 1. 1.35007155]\n", "340.3595673726175 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 4. 0.35956737]\n", "141.2416975473597 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 4. 1.24169755]\n", "41.9582763714772 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 4. 1.95827639]\n", "-342.1534272676331 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -4. -2.15342736]\n", "-15.11909178977644 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -1. -5.11909199]\n", "-0.7494675660728589 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -0. -0.74946755]\n", "-29.96639377147026 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -2. -9.96639347]\n", "188.75365570851156 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 8. 8.75365543]\n", "-134.1307149460681 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -3. -4.13071489]\n", "-22.04566163712185 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -2. -2.04566169]\n", "162.98835728088645 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 6. 2.98835731]\n", "-237.32906203766657 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -3. -7.32906199]\n", "143.69109320357998 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 4. 3.69109321]\n", "-427.9069244344028 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -2. -7.90692425]\n", "-119.05578467604805 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -1. 
-9.05578423]\n", "477.95205588886824 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 7. 7.95205593]\n", "-134.60817487303999 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -3. -4.6081748]\n", "248.595822682531 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 4. 8.59582233]\n", "-6.1861013460756675 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -0. -6.18610144]\n", "204.28005914300783 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 0. 4.28005934]\n", "-301.24513736058566 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -0. -1.24513733]\n", "395.40034818484907 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 9. 5.40034819]\n", "-279.5330324753561 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -7. -9.53303242]\n", "-308.16000650304653 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -0. -8.16000652]\n", "-193.97612080123028 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -9. -3.97612071]\n", "279.11887354516483 [0. 0. 0. 0. 0. 0. 0.\n", " 2. 7. 9.1188736]\n", "449.5502759692573 [0. 0. 0. 0. 0. 0. 0.\n", " 4. 4. 9.5502758]\n", "-56.21456194255825 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -5. -6.21456194]\n", "249.6582448928404 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 4. 9.65824509]\n", "38.85054021639822 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 3. 8.85054016]\n", "275.2262080837393 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 7. 5.22620821]\n", "99.4580298448201 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 9. 9.45802975]\n", "-428.93773114042585 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -2. -8.93773079]\n", "-304.8547723931375 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -0. -4.85477257]\n", "-205.72042913948187 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -0. -5.72042894]\n", "-242.9435511123559 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -4. -2.94355106]\n", "154.31539917899352 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 5. 4.31539917]\n", "140.32973007812433 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 4. 0.32973006]\n", "24.71974023528267 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 2. 4.71974039]\n", "-428.36942650681954 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -2. -8.36942673]\n", "-4.580391401526551 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -0. -4.58039141]\n", "-215.62973227728955 [-0. -0. -0. 
-0. -0. -0.\n", " -0. -2. -1. -5.62973213]\n", "-21.064262976175364 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -2. -1.06426299]\n", "112.12558604645817 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 1. 2.12558603]\n", "-427.6535546042114 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -2. -7.65355444]\n", "382.57639839635937 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 8. 2.57639837]\n", "313.92626332910214 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 1. 3.92626333]\n", "-1.0876759854191453 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -0. -1.08767593]\n", "343.13521909309964 [0. 0. 0. 0. 0. 0. 0.\n", " 3. 4. 3.1352191]\n", "305.237482528453 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 0. 5.23748255]\n", "-116.1804176856277 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -1. -6.18041754]\n", "-350.90531670170844 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -5. -0.90531671]\n", "467.1961516052979 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 6. 7.19615173]\n", "106.27341464948992 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 0. 6.27341461]\n", "-396.271931063796 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -9. -6.27193117]\n", "-268.56477381913425 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -6. -8.56477356]\n", "265.773743173759 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 6. 5.77374315]\n", "95.06479874326656 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 9. 5.06479883]\n", "409.1636418464082 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 0. 9.16364193]\n", "-67.29429973668488 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -6. -7.2942996]\n", "152.36388169623393 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 5. 2.36388159]\n", "-354.8097704105878 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -5. -4.80977058]\n", "-199.38158235100366 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -9. -9.38158226]\n", "-129.93805959805604 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -2. -9.93805981]\n", "-106.8290890645831 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -0. -6.82908916]\n", "319.3416568477493 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 1. 9.34165668]\n", "-5.080162704495295 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -0. -5.08016253]\n", "344.7352473389096 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 4. 
4.73524714]\n", "162.27433620440402 [0. 0. 0. 0. 0. 0. 0.\n", " 1. 6. 2.2743361]\n", "-383.3693688889532 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -8. -3.36936879]\n", "-378.32840355299004 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -7. -8.32840347]\n", "-275.99695802067305 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -7. -5.99695826]\n", "-327.0515893701671 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -2. -7.05158949]\n", "-168.24713922298395 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -6. -8.24713898]\n", "321.67135518040755 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 2. 1.67135513]\n", "163.22357991678726 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 6. 3.22357988]\n", "71.91056099916793 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 7. 1.91056097]\n", "498.814643990679 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 9. 8.81464386]\n", "-223.0083297010419 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -2. -3.00832963]\n", "-398.987968189617 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -9. -8.98796844]\n", "-244.32367001227072 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -4. -4.32366991]\n", "118.17755856479472 [0. 0. 0. 0. 0. 0. 0.\n", " 1. 1. 8.1775589]\n", "8.548031675793322 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 0. 8.54803181]\n", "371.4827803057329 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 7. 1.48278034]\n", "-79.22767431119327 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -7. -9.22767448]\n", "173.9483971602199 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 7. 3.94839716]\n", "-417.47146232437274 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -1. -7.47146225]\n", "331.54890549942206 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 3. 1.54890549]\n", "125.99311892439235 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 2. 5.99311876]\n", "88.75680410553277 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 8. 8.75680447]\n", "-135.71750819537488 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -3. -5.71750832]\n", "69.58315804562842 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 6. 9.58315849]\n", "114.92769886847631 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 1. 4.92769909]\n", "480.92408090108694 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 8. 0.92408091]\n", "243.31667585358463 [0. 0. 0. 0. 0. 0. 0.\n", " 2. 4. 
3.3166759]\n", "394.5451591577459 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 9. 4.54515934]\n", "460.64826220185904 [0. 0. 0. 0. 0. 0. 0.\n", " 4. 6. 0.6482622]\n", "162.64211846754228 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 6. 2.64211845]\n", "-495.27673648536796 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -9. -5.27673626]\n", "0.642284945905458 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 0. 0.64228493]\n", "-359.02190943847677 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -5. -9.02190971]\n", "305.5005243310127 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 0. 5.50052452]\n", "-396.49363348366853 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -9. -6.49363327]\n", "-55.26309863901702 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -5. -5.26309872]\n", "-446.62208851308816 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -4. -6.62208843]\n", "358.92521179047253 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 5. 8.92521191]\n", "-72.97871803885269 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -7. -2.97871804]\n", "121.43397712575864 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 2. 1.43397713]\n", "330.8118891252132 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 3. 0.81188911]\n", "496.83282294944155 [0. 0. 0. 0. 0. 0. 0.\n", " 4. 9. 6.8328228]\n", "107.7720563432073 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 0. 7.77205658]\n", "-9.159264060972006 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -0. -9.15926361]\n", "428.27568758691683 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 2. 8.27568722]\n", "-398.10858735870505 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -9. -8.10858727]\n", "159.6271524098143 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 5. 9.62715244]\n", "-474.64083828355854 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -7. -4.64083815]\n", "-154.07682304830328 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -5. -4.07682323]\n", "-422.49255259174004 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -2. -2.49255252]\n", "-393.99154773458775 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -9. -3.99154782]\n", "-43.680149041343206 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -4. -3.68014908]\n", "196.38240634887373 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 9. 6.38240623]\n", "130.71341992735174 [0. 0. 0. 0. 
0. 0.\n", " 0. 1. 3. 0.71341991]\n", "-480.39908467459304 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -8. -0.39908469]\n", "48.9476828347416 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 4. 8.94768238]\n", "-165.91167076139158 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -6. -5.91167068]\n", "-14.69766907147163 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -1. -4.69766903]\n", "224.21995340736567 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 2. 4.21995354]\n", "208.94993270938622 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 0. 8.94993305]\n", "-387.40332146149905 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -8. -7.40332127]\n", "229.51442547683266 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 2. 9.51442528]\n", "-207.23020490028986 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -0. -7.23020506]\n", "52.78104204933109 [0. 0. 0. 0. 0. 0. 0.\n", " 0. 5. 2.7810421]\n", "-335.923602395204 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -3. -5.92360258]\n", "109.35836256575948 [0. 0. 0. 0. 0. 0. 0.\n", " 1. 0. 9.3583622]\n", "-207.1579309132331 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -0. -7.15793085]\n", "-459.55903378031746 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -5. -9.55903339]\n", "-49.90224371661034 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -4. -9.90224361]\n", "46.39335776746378 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 4. 6.39335775]\n", "235.77923410562752 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 3. 5.77923393]\n", "-32.191817634135475 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -3. -2.19181752]\n", "-233.93915567101863 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -3. -3.93915558]\n", "-192.36199899619766 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -9. -2.36199903]\n", "226.79709439660357 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 2. 6.79709435]\n", "71.75980737462272 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 7. 1.75980735]\n", "-438.0500776346408 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -3. -8.05007744]\n", "274.9978681145407 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 7. 4.99786806]\n", "-363.2875978673524 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -6. -3.28759789]\n", "-100.31595210478217 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -0. 
-0.31595209]\n", "-184.2750594078917 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -8. -4.27505922]\n", "339.6683586151754 [0. 0. 0. 0. 0. 0. 0.\n", " 3. 3. 9.6683588]\n", "-368.91351031988637 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -6. -8.91351032]\n", "490.1801885420003 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 9. 0.18018854]\n", "497.70422280087377 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 9. 7.70422268]\n", "124.87402441437656 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 2. 4.87402439]\n", "149.71164764510536 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 4. 9.71164799]\n", "366.94536955771116 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 6. 6.94536972]\n", "130.05211944266736 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 3. 0.05211944]\n", "91.97282170601173 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 9. 1.97282171]\n", "-121.41741765030145 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -2. -1.41741765]\n", "-428.35317935634953 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -2. -8.35317898]\n", "-264.62772070879726 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -6. -4.62772083]\n", "-121.26872275912415 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -2. -1.26872277]\n", "433.35537740412076 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 3. 3.35537744]\n", "-62.60810641060033 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -6. -2.60810637]\n", "317.4220234092721 [0. 0. 0. 0. 0. 0. 0.\n", " 3. 1. 7.4220233]\n", "-459.77532719992377 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -5. -9.77532673]\n", "-235.69328173167213 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -3. -5.69328165]\n", "295.76992501258815 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 9. 5.76992512]\n", "-46.5767514360983 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -4. -6.57675123]\n", "-16.260424008672338 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -1. -6.26042414]\n", "6.166947614218987 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 0. 6.16694784]\n", "189.46045326550276 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 8. 9.46045303]\n", "243.6327416956484 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 4. 3.63274169]\n", "-489.5028098630434 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -8. -9.50280952]\n", "-273.52312248930224 [-0. -0. -0. -0. -0. 
-0.\n", " -0. -2. -7. -3.52312255]\n", "457.1623504602803 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 5. 7.16235065]\n", "466.9010169501754 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 6. 6.90101671]\n", "494.18583154539607 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 9. 4.18583155]\n", "408.5891265885484 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 0. 8.58912659]\n", "441.58131108547485 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 4. 1.58131111]\n", "-179.5921687462062 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -7. -9.59216881]\n", "141.9769862849909 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 4. 1.97698629]\n", "385.88868398228004 [0. 0. 0. 0. 0. 0. 0.\n", " 3. 8. 5.8886838]\n", "-316.4533324461213 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -1. -6.45333242]\n", "-209.6742744982063 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -0. -9.67427444]\n", "-171.54918451534817 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -7. -1.54918456]\n", "-70.36953060501439 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -7. -0.36953062]\n", "99.05283762290674 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 9. 9.05283737]\n", "129.11174458588494 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 2. 9.11174488]\n", "471.9882523727814 [0. 0. 0. 0. 0. 0. 0.\n", " 4. 7. 1.9882524]\n", "122.81049861332927 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 2. 2.81049871]\n", "345.85173314635733 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 4. 5.85173321]\n", "314.8918967802342 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 1. 4.89189672]\n", "-478.6498395683444 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -7. -8.6498394]\n", "386.0198720794964 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 8. 6.01987219]\n", "-123.95955841412554 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -2. -3.95955849]\n", "494.51049724132514 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 9. 4.51049709]\n", "172.75407652243436 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 7. 2.75407648]\n", "-52.06992109410802 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -5. -2.06992102]\n", "49.11503170494424 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 4. 9.11503124]\n", "327.6199769564614 [0. 0. 0. 0. 0. 0. 0. 3.\n", " 2. 7.619977]\n", "-378.4125093278964 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -7. 
-8.41250896]\n", "396.35038149695924 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 9. 6.35038137]\n", "-51.12398474363622 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -5. -1.12398469]\n", "-186.9925750721937 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -8. -6.99257517]\n", "-48.225954355239686 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -4. -8.22595406]\n", "239.83349163488643 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 3. 9.83349133]\n", "90.65102730698594 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 9. 0.65102732]\n", "144.00594786281783 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 4. 4.00594807]\n", "-308.2166339847694 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -0. -8.2166338]\n", "-18.85589619473538 [-0. -0. -0. -0. -0. -0. -0.\n", " -0. -1. -8.855896]\n", "195.90798108890607 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 9. 5.90798092]\n", "176.2255162850873 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 7. 6.22551632]\n", "-314.52933019395766 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -1. -4.52933025]\n", "-172.38655866728948 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -7. -2.38655877]\n", "-127.60038582325383 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -2. -7.60038567]\n", "338.1476394665358 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 3. 8.14763927]\n", "377.3202896801408 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 7. 7.32028961]\n", "225.2908821372236 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 2. 5.29088211]\n", "398.27315425365197 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 9. 8.27315426]\n", "197.89010977643008 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 9. 7.89010954]\n", "212.1869640821229 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 1. 2.18696404]\n", "-297.8495214165556 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -9. -7.84952164]\n", "334.087286195963 [0. 0. 0. 0. 0. 0. 0. 3.\n", " 3. 4.087286]\n", "-267.64883785397984 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -6. -7.64883804]\n", "-426.6585009294657 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -2. -6.65850115]\n", "-6.859845581155377 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -0. -6.85984564]\n", "6.879865961109011 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 0. 6.87986612]\n", "-267.4203692982318 [-0. -0. -0. -0. -0. 
-0.\n", " -0. -2. -6. -7.42036915]\n", "418.06422348770377 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 1. 8.06422329]\n", "-301.8162366756212 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -0. -1.81623673]\n", "-125.05642239899085 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -2. -5.05642223]\n", "327.4884713877 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 2. 7.48847151]\n", "-130.26327510920964 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -3. -0.26327512]\n", "39.365344674485605 [0. 0. 0. 0. 0. 0. 0. 0.\n", " 3. 9.365345]\n", "-209.2202559447528 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -0. -9.22025585]\n", "-310.788863663181 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -1. -0.78886366]\n", "470.15433177138135 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 7. 0.15433177]\n", "197.09087017260273 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 9. 7.09087038]\n", "-157.44386775374596 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -5. -7.44386768]\n", "-3.4882267684137602 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -0. -3.48822665]\n", "-356.50216883390726 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -5. -6.50216866]\n", "-9.005468952834605 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -0. -9.00546932]\n", "-118.97439881329319 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -1. -8.97439861]\n", "-46.04439057162946 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -4. -6.04439068]\n", "-115.50529705053869 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -1. -5.50529718]\n", "-425.5952571650743 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -2. -5.59525728]\n", "199.96211810021137 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 9. 9.96211815]\n", "397.40966357564855 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 9. 7.40966368]\n", "-405.7589344648791 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -0. -5.7589345]\n", "34.827961432374096 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 3. 4.82796144]\n", "-474.35956707656635 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -7. -4.35956717]\n", "16.834656684333705 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 1. 6.83465672]\n", "-35.660457992899076 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -3. -5.66045809]\n", "-311.96448873569926 [-0. -0. -0. -0. -0. 
-0.\n", " -0. -3. -1. -1.96448874]\n", "489.259269306771 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 8. 9.25926971]\n", "493.1925497565006 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 9. 3.19254971]\n", "457.42217600037515 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 5. 7.42217588]\n", "-135.93972282352595 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -3. -5.93972301]\n", "-472.5457513949065 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -7. -2.54575133]\n", "325.6627341198152 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 2. 5.66273403]\n", "282.0289312264831 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 8. 2.02893114]\n", "234.32099350960888 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 3. 4.32099342]\n", "292.828610585384 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 9. 2.82861066]\n", "-326.3856640208916 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -2. -6.38566399]\n", "10.804692295551144 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 1. 0.80469227]\n", "-441.1245524698034 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -4. -1.12455249]\n", "-352.92610149182815 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -5. -2.92610145]\n", "-306.05287975475017 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -0. -6.05287981]\n", "88.20061988262783 [0. 0. 0. 0. 0. 0. 0.\n", " 0. 8. 8.2006197]\n", "110.71823236530875 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 1. 0.71823239]\n", "379.1400278528342 [0. 0. 0. 0. 0. 0. 0. 3.\n", " 7. 9.140028]\n", "470.83819699184846 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 7. 0.83819699]\n", "84.1104530993394 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 8. 4.11045313]\n", "302.85687596131163 [0. 0. 0. 0. 0. 0. 0.\n", " 3. 0. 2.8568759]\n", "341.9254378436502 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 4. 1.92543781]\n", "-313.73801347392197 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -1. -3.73801351]\n", "-72.01769439978145 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -7. -2.01769447]\n", "10.43345122831385 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 1. 0.43345124]\n", "385.9296140253349 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 8. 5.92961407]\n", "-146.20214860066295 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -4. -6.20214844]\n", "73.31352959894366 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 7. 
3.31352949]\n", "153.2274750443986 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 5. 3.22747493]\n", "305.67975078487177 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 0. 5.67975092]\n", "47.57379073652168 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 4. 7.57379055]\n", "8.30135737734583 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 0. 8.30135727]\n", "366.29442354304643 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 6. 6.29442358]\n", "-259.68484993223996 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -5. -9.68484974]\n", "71.4172154540963 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 7. 1.41721547]\n", "-4.087571782114163 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -0. -4.08757162]\n", "340.79668512308956 [0. 0. 0. 0. 0. 0. 0.\n", " 3. 4. 0.7966851]\n", "47.76956945993349 [0. 0. 0. 0. 0. 0. 0.\n", " 0. 4. 7.7695694]\n", "379.402201676632 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 7. 9.40220165]\n", "66.14579617955829 [0. 0. 0. 0. 0. 0. 0.\n", " 0. 6. 6.1457963]\n", "104.63457822421118 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 0. 4.63457823]\n", "-261.37880830073647 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -6. -1.37880826]\n", "-423.59819062256577 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -2. -3.59819055]\n", "411.4182070993884 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 1. 1.41820705]\n", "-140.84572606160106 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -4. -0.84572607]\n", "57.43353414905128 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 5. 7.43353415]\n", "171.03041113953887 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 7. 1.03041112]\n", "455.54189799823587 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 5. 5.54189777]\n", "389.7395132102449 [0. 0. 0. 0. 0. 0. 0.\n", " 3. 8. 9.7395134]\n", "-274.30231915207514 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -7. -4.30231905]\n", "226.60923836197455 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 2. 6.60923815]\n", "35.941904822470505 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 3. 5.94190502]\n", "-402.48016862471223 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -0. -2.48016858]\n", "386.33079194287455 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 8. 6.33079195]\n", "-201.2661261311176 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -0. -1.26612616]\n", "-48.55932367702054 [-0. 
-0. -0. -0. -0. -0.\n", " -0. -0. -4. -8.55932331]\n", "103.48067678220197 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 0. 3.48067689]\n", "225.54703433267932 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 2. 5.54703426]\n", "108.78328524535807 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 0. 8.78328514]\n", "-320.5949347591963 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -2. -0.59493476]\n", "193.1564032041887 [0. 0. 0. 0. 0. 0. 0.\n", " 1. 9. 3.1564033]\n", "44.8716004344385 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 4. 4.87160063]\n", "-404.5633448600623 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -0. -4.56334496]\n", "138.5945354883208 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 3. 8.59453583]\n", "-360.9242133110592 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -6. -0.92421329]\n", "-431.8853225262498 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -3. -1.88532257]\n", "10.807255056181075 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 1. 0.80725503]\n", "288.2804734986652 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 8. 8.28047371]\n", "-34.12729907827239 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -3. -4.12729931]\n", "137.80427629118375 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 3. 7.80427647]\n", "-425.0523126102047 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -2. -5.05231237]\n", "309.63960681270106 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 0. 9.63960648]\n", "71.93690515422402 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 7. 1.93690515]\n", "-359.4661713091961 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -5. -9.46617126]\n", "399.53249184298534 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 9. 9.53249168]\n", "-373.4253367614285 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -7. -3.42533684]\n", "311.5071441848546 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 1. 1.50714421]\n", "263.0624469974201 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 6. 3.06244707]\n", "-461.8864901848915 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -6. -1.88649023]\n", "-238.18212203013601 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -3. -8.18212223]\n", "-317.1419303897158 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -1. -7.14193058]\n", "-480.99883370464494 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -8. 
-0.99883372]\n", "-95.41177786914845 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -9. -5.41177797]\n", "4.878546263106243 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 0. 4.87854624]\n", "-101.32305327041213 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -0. -1.32305324]\n", "53.479420394845654 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 5. 3.47942042]\n", "-234.61908169376343 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -3. -4.6190815]\n", "-146.93795373841567 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -4. -6.93795395]\n", "44.7290546594219 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 4. 4.72905445]\n", "308.70834180778706 [0. 0. 0. 0. 0. 0. 0.\n", " 3. 0. 8.7083416]\n", "-196.22550483649437 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -9. -6.22550488]\n", "200.09594322188295 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 0. 0.09594322]\n", "62.65719450099716 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 6. 2.65719461]\n", "-211.02589183964338 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -1. -1.02589178]\n", "-32.17613642473127 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -3. -2.17613649]\n", "-465.6062206787892 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -6. -5.60622072]\n", "24.318124079337533 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 2. 4.31812429]\n", "236.5532985394688 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 3. 6.55329847]\n", "206.0116616269524 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 0. 6.01166153]\n", "132.9122159541447 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 3. 2.91221595]\n", "-21.491944375434446 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -2. -1.49194443]\n", "-457.1630075606471 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -5. -7.16300774]\n", "-358.65396737134006 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -5. -8.6539669]\n", "-220.31138050353405 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -2. -0.31138051]\n", "58.80276180020616 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 5. 8.80276203]\n", "-371.3043637295676 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -7. -1.30436373]\n", "-288.04379491785556 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -8. -8.04379463]\n", "47.5609333876138 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 4. 7.56093359]\n", "-165.17873331391297 [-0. 
-0. -0. -0. -0. -0.\n", " -0. -1. -6. -5.17873335]\n", "472.18912374704723 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 7. 2.18912363]\n", "-148.00154848946946 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -4. -8.00154877]\n", "-180.17147310350768 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -8. -0.1714731]\n", "-201.01143982044323 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -0. -1.0114398]\n", "293.0313394483371 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 9. 3.03133941]\n", "-203.19987866093214 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -0. -3.19987869]\n", "-428.45731044982847 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -2. -8.45731068]\n", "258.5409222909009 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 5. 8.54092216]\n", "-365.91094966668726 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -6. -5.91094971]\n", "391.5745414704725 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 9. 1.57454145]\n", "-19.358951098474964 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -1. -9.35895157]\n", "-365.13664320740094 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -6. -5.13664341]\n", "-328.83735435850815 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -2. -8.83735466]\n", "300.8552257779958 [0. 0. 0. 0. 0. 0. 0.\n", " 3. 0. 0.8552258]\n", "491.6456938544073 [0. 0. 0. 0. 0. 0. 0.\n", " 4. 9. 1.6456939]\n", "-79.41072618300538 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -7. -9.41072655]\n", "-474.5273943174483 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -7. -4.52739429]\n", "104.68058862767504 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 0. 4.68058872]\n", "492.4059287026351 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 9. 2.40592861]\n", "-441.7927029096442 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -4. -1.79270291]\n", "-102.98490382098657 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -0. -2.98490381]\n", "-205.54803118720577 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -0. -5.54803133]\n", "-64.36046111296689 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -6. -4.36046124]\n", "-150.98068675020514 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -5. -0.98068672]\n", "143.1061801692821 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 4. 3.10618019]\n", "-178.17113607606805 [-0. -0. -0. 
-0. -0. -0.\n", " -0. -1. -7. -8.1711359]\n", "-341.5878848752122 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -4. -1.5878849]\n", "362.760938432983 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 6. 2.76093841]\n", "-379.9962298423384 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -7. -9.99623013]\n", "-103.11938011302234 [-0. -0. -0. -0. -0. -0. -0. -1.\n", " -0. -3.11938]\n", "477.42939591800706 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 7. 7.42939615]\n", "-69.41533124228216 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -6. -9.41533089]\n", "-294.723730601968 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -9. -4.72373056]\n", "-415.06480182477065 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -1. -5.06480169]\n", "347.62520010890177 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 4. 7.62520027]\n", "189.32811560034435 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 8. 9.32811546]\n", "-123.19111691481832 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -2. -3.19111681]\n", "100.67749296427242 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 0. 0.67749298]\n", "-301.6772565952325 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -0. -1.67725658]\n", "-108.16104999908393 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -0. -8.16104984]\n", "-301.1506007896884 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -0. -1.15060079]\n", "287.8108980948163 [0. 0. 0. 0. 0. 0. 0.\n", " 2. 8. 7.8108983]\n", "-35.27037324554594 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -3. -5.27037334]\n", "-281.92453604819434 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -8. -1.92453599]\n", "-28.102911894934635 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -2. -8.10291195]\n", "174.11574576855938 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 7. 4.11574554]\n", "382.01137637649305 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 8. 2.01137638]\n", "-112.08165918164481 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -1. -2.08165908]\n", "188.17603492061784 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 8. 8.17603493]\n", "60.01896569090337 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 6. 0.01896569]\n", "-101.31680523070929 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -0. -1.31680524]\n", "112.75970294458837 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 1. 
2.75970292]\n", "104.46314602901296 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 0. 4.46314621]\n", "362.5516029067584 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 6. 2.55160284]\n", "-205.58743971276715 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -0. -5.58743954]\n", "-268.00667395274627 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -6. -8.00667381]\n", "36.690715978517254 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 3. 6.69071579]\n", "23.5690772212096 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 2. 3.56907725]\n", "65.02841533574288 [0. 0. 0. 0. 0. 0. 0.\n", " 0. 6. 5.0284152]\n", "499.28861154004267 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 9. 9.28861141]\n", "341.65278350232853 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 4. 1.65278351]\n", "470.5095001667428 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 7. 0.50950015]\n", "46.3727180448521 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 4. 6.37271786]\n", "-198.01066491754293 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -9. -8.01066494]\n", "-49.82980921035684 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -4. -9.82980919]\n", "-306.1319919199889 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -0. -6.13199186]\n", "-196.86872839699376 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -9. -6.86872816]\n", "324.3794098489308 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 2. 4.37940979]\n", "-278.2084491978215 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -7. -8.20844936]\n", "235.47179515598194 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 3. 5.47179508]\n", "58.03797353363982 [0. 0. 0. 0. 0. 0. 0.\n", " 0. 5. 8.0379734]\n", "-397.79281652253826 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -9. -7.79281664]\n", "452.21342032158884 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 5. 2.21342039]\n", "-464.8700016266698 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -6. -4.87000179]\n", "88.35434029491918 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 8. 8.35434055]\n", "-140.4241094999209 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -4. -0.42410949]\n", "394.5904337294258 [0. 0. 0. 0. 0. 0. 0.\n", " 3. 9. 4.5904336]\n", "-205.40100120911265 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -0. -5.40100098]\n", "235.81069972655078 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 3. 
5.81069994]\n", "-421.29376600962377 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -2. -1.29376602]\n", "-423.18550473600123 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -2. -3.18550467]\n", "-236.3946824884371 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -3. -6.39468241]\n", "494.2223525873546 [0. 0. 0. 0. 0. 0. 0.\n", " 4. 9. 4.2223525]\n", "381.6328274813765 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 8. 1.63282752]\n", "160.32544415298966 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 6. 0.32544416]\n", "432.9686448287368 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 3. 2.96864486]\n", "-103.2079355574025 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -0. -3.20793557]\n", "116.80507371838999 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 1. 6.80507374]\n", "70.99137575797998 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 7. 0.99137574]\n", "411.40926054906066 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 1. 1.40926051]\n", "352.0239038603196 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 5. 2.02390385]\n", "-360.23523315880067 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -6. -0.23523316]\n", "380.90689592642013 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 8. 0.90689594]\n", "105.76604677715318 [0. 0. 0. 0. 0. 0. 0. 1.\n", " 0. 5.766047]\n", "-271.30788701930487 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -7. -1.30788708]\n", "264.18151238517487 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 6. 4.18151236]\n", "158.51205801624957 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 5. 8.51205826]\n", "-50.77058559724201 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -5. -0.7705856]\n", "-491.1597928611564 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -9. -1.1597929]\n", "-108.18441580996607 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -0. -8.18441582]\n", "-224.23337553895306 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -2. -4.23337555]\n", "301.97830446396404 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 0. 1.97830451]\n", "-72.52668100497195 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -7. -2.52668095]\n", "490.30939283798614 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 9. 0.30939284]\n", "-376.5117261858708 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -7. -6.51172638]\n", "106.29975463503527 [0. 0. 0. 0. 0. 0.\n", " 0. 
1. 0. 6.29975462]\n", "371.3995006582238 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 7. 1.39950061]\n", "190.87484167790313 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 9. 0.87484169]\n", "481.5972895318178 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 8. 1.59728956]\n", "37.042906798599226 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 3. 7.04290676]\n", "-498.16110408782555 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -9. -8.1611042]\n", "-340.7034462841348 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -4. -0.70344627]\n", "-180.09826116680705 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -8. -0.09826117]\n", "-301.0064676597545 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -0. -1.0064677]\n", "-33.64829914217948 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -3. -3.64829922]\n", "-279.48708729432036 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -7. -9.48708725]\n", "392.2456714659056 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 9. 2.24567151]\n", "220.31526648905563 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 2. 0.31526649]\n", "-377.5600848337253 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -7. -7.56008482]\n", "-257.65576973408133 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -5. -7.65576982]\n", "-258.3493022672273 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -5. -8.34930229]\n", "332.94619520724655 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 3. 2.94619513]\n", "163.71114802500176 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 6. 3.71114802]\n", "101.52961442734143 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 0. 1.52961445]\n", "-407.9271620860048 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -0. -7.92716217]\n", "-8.786333525146128 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -0. -8.78633308]\n", "-18.765718774419792 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -1. -8.76571846]\n", "278.8577992776409 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 7. 8.85779953]\n", "160.81692063288355 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 6. 0.81692064]\n", "116.29082650264489 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 1. 6.29082632]\n", "15.354438862586584 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 1. 5.35443878]\n", "-244.13062770494653 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -4. -4.13062763]\n", "345.72118579963063 [0. 0. 
0. 0. 0. 0.\n", " 0. 3. 4. 5.72118568]\n", "-385.8418560723539 [-0. -0. -0. -0. -0. -0. -0.\n", " -3. -8. -5.841856]\n", "179.57264217101542 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 7. 9.57264233]\n", "-190.29411175963108 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -9. -0.29411176]\n", "377.9705994586952 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 7. 7.97059965]\n", "10.74081593825149 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 1. 0.74081594]\n", "-221.33604579579458 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -2. -1.33604574]\n", "-224.37603698222864 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -2. -4.37603712]\n", "-25.74779840048913 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -2. -5.74779844]\n", "-265.90666459915957 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -6. -5.90666437]\n", "-221.5273254263177 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -2. -1.52732539]\n", "88.69715149587343 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 8. 8.69715118]\n", "68.06962777988757 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 6. 8.06962776]\n", "311.2903515355647 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 1. 1.29035151]\n", "-202.4858709614571 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -0. -2.48587108]\n", "-228.39104049882974 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -2. -8.3910408]\n", "95.44997394878784 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 9. 5.44997406]\n", "181.3291146362741 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 8. 1.32911468]\n", "393.13724016604414 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 9. 3.13724017]\n", "72.59507633738771 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 7. 2.59507632]\n", "-134.14668103307827 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -3. -4.14668083]\n", "233.2852735772447 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 3. 3.28527355]\n", "1.3510576139152963 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 0. 1.35105765]\n", "457.68828007852005 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 5. 7.68828011]\n", "270.5911285372213 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 7. 0.59112853]\n", "-382.6205955318277 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -8. -2.62059546]\n", "408.4696338037548 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 0. 8.46963406]\n", "-11.14007244084525 [-0. -0. -0. 
-0. -0. -0.\n", " -0. -0. -1. -1.14007246]\n", "-293.3081269608415 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -9. -3.30812693]\n", "-163.6039698433006 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -6. -3.60396981]\n", "-254.26318054075148 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -5. -4.26318073]\n", "174.83879607000398 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 7. 4.83879614]\n", "106.23780564272745 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 0. 6.23780584]\n", "-84.8229966527515 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -8. -4.82299662]\n", "-141.69614787497665 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -4. -1.69614792]\n", "364.7043829364108 [0. 0. 0. 0. 0. 0. 0.\n", " 3. 6. 4.7043829]\n", "-316.7102987810195 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -1. -6.71029902]\n", "271.08194095388825 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 7. 1.08194101]\n", "95.2499405764915 [0. 0. 0. 0. 0. 0. 0.\n", " 0. 9. 5.2499404]\n", "240.42990997904124 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 4. 0.42990997]\n", "10.099758959124294 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 1. 0.09975896]\n", "-158.24553829506982 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -5. -8.24553871]\n", "459.1243318559735 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 5. 9.12433147]\n", "-107.1290847183185 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -0. -7.12908459]\n", "-234.13374234176632 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -3. -4.13374233]\n", "225.9945626272164 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 2. 5.99456263]\n", "-445.7044734883838 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -4. -5.7044735]\n", "11.711333233610844 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 1. 1.71133327]\n", "-313.5278872521896 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -1. -3.52788734]\n", "-245.57377380966838 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -4. -5.57377386]\n", "481.9432620693226 [0. 0. 0. 0. 0. 0. 0.\n", " 4. 8. 1.9432621]\n", "58.027183812352945 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 5. 8.02718353]\n", "-46.29274176582443 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -4. -6.29274178]\n", "-415.1533099910012 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -1. 
-5.15330982]\n", "-119.0978430425732 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -1. -9.09784317]\n", "472.89082460701724 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 7. 2.89082456]\n", "280.1018121441957 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 8. 0.10181215]\n", "33.73377210998707 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 3. 3.73377204]\n", "-474.6513747504427 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -7. -4.65137482]\n", "210.96719424741684 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 1. 0.96719426]\n", "-469.63333909932436 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -6. -9.63333893]\n", "-472.11731052673514 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -7. -2.11731052]\n", "336.4889093884846 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 3. 6.48890924]\n", "275.8937723814684 [0. 0. 0. 0. 0. 0. 0.\n", " 2. 7. 5.8937726]\n", "-126.70359558978927 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -2. -6.70359564]\n", "-277.1860044695068 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -7. -7.18600464]\n", "41.37815043077031 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 4. 1.37815046]\n", "-357.4853082320448 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -5. -7.48530817]\n", "190.95213411216662 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 9. 0.95213413]\n", "-121.99755794429856 [-0. -0. -0. -0. -0. -0. -0.\n", " -1. -2. -1.997558]\n", "70.31055912391382 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 7. 0.31055912]\n", "-134.50271167202166 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -3. -4.50271177]\n", "89.4283156533564 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 8. 9.42831612]\n", "96.58569028528862 [0. 0. 0. 0. 0. 0. 0.\n", " 0. 9. 6.5856905]\n", "421.30848490271757 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 2. 1.30848491]\n", "225.71324140417624 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 2. 5.71324158]\n", "-4.9080859728388715 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -0. -4.90808582]\n", "285.9187254104432 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 8. 5.91872549]\n", "245.27993769584745 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 4. 5.27993774]\n", "-457.04608698499993 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -5. -7.04608679]\n", "407.9682434706801 [0. 0. 0. 0. 0. 0. 0.\n", " 4. 0. 
7.9682436]\n", "-173.49391504849444 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -7. -3.49391508]\n", "-66.09086824199484 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -6. -6.09086847]\n", "-230.71325918409258 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -3. -0.71325916]\n", "-30.59336711767213 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -3. -0.5933671]\n", "-162.73952346776878 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -6. -2.73952341]\n", "-498.8933641318919 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -9. -8.89336395]\n", "-40.50105805535187 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -4. -0.50105804]\n", "97.73430280409312 [0. 0. 0. 0. 0. 0. 0. 0.\n", " 9. 7.734303]\n", "-469.6364262835193 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -6. -9.63642597]\n", "-442.10781656949763 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -4. -2.10781646]\n", "-415.1294428520661 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -1. -5.12944269]\n", "-244.63706206242807 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -4. -4.63706207]\n", "-478.8184347994656 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -7. -8.81843472]\n", "113.76390205962028 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 1. 3.76390195]\n", "-165.80498162082168 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -6. -5.80498171]\n", "-477.01756480154376 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -7. -7.01756477]\n", "177.5672718556065 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 7. 7.56727171]\n", "298.6300529265613 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 9. 8.63005257]\n", "389.4414749026629 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 8. 9.44147491]\n", "-196.24800245632025 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -9. -6.24800253]\n", "-121.34320151865907 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -2. -1.34320152]\n", "-355.17597945261326 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -5. -5.17597961]\n", "433.4172276355376 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 3. 3.41722775]\n", "215.49303268636777 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 1. 5.49303246]\n", "379.5396987256737 [0. 0. 0. 0. 0. 0. 0.\n", " 3. 7. 9.5396986]\n", "381.3216895372549 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 8. 
1.32168949]\n", "150.01414475568342 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 5. 0.01414476]\n", "-31.51192967511207 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -3. -1.51192963]\n", "-5.620754014434737 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -0. -5.62075424]\n", "-336.31629800530646 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -3. -6.31629801]\n", "89.9300951468136 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 8. 9.93009472]\n", "172.2096775060038 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 7. 2.20967746]\n", "-33.89668864072615 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -3. -3.8966887]\n", "-498.165187442586 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -9. -8.16518784]\n", "-437.0412877435752 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -3. -7.0412879]\n", "257.19761033967904 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 5. 7.19761038]\n", "353.3705562335615 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 5. 3.37055612]\n", "-258.18931443980586 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -5. -8.18931484]\n", "436.80120640806985 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 3. 6.80120659]\n", "-141.30037629492975 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -4. -1.3003763]\n", "-69.45342368370999 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -6. -9.4534235]\n", "458.75025124038405 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 5. 8.75025082]\n", "-398.99433139442743 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -9. -8.99433136]\n", "428.66915108701767 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 2. 8.66915131]\n", "234.44315025863872 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 3. 4.44315004]\n", "147.87793561776962 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 4. 7.87793541]\n", "35.96613662923942 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 3. 5.96613646]\n", "55.87558309793705 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 5. 5.87558317]\n", "-126.8375896195243 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -2. -6.83758974]\n", "336.2080549585532 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 3. 6.20805502]\n", "324.81818437858954 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 2. 4.81818438]\n", "-419.2280071575505 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -1. -9.22800732]\n", "-12.993722074701953 [-0. -0. -0. -0. -0. 
-0.\n", " -0. -0. -1. -2.99372196]\n", "129.2746234810782 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 2. 9.27462387]\n", "93.96696567690654 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 9. 3.96696568]\n", "-79.43104670953916 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -7. -9.43104649]\n", "397.4631631979908 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 9. 7.46316338]\n", "-411.0499939449011 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -1. -1.04999399]\n", "156.82618719745656 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 5. 6.82618713]\n", "-432.18986528418736 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -3. -2.18986535]\n", "133.33898326946704 [0. 0. 0. 0. 0. 0. 0.\n", " 1. 3. 3.3389833]\n", "171.02163402545912 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 7. 1.02163398]\n", "-142.99856433430503 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -4. -2.99856424]\n", "-248.38987568060267 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -4. -8.38987541]\n", "92.99514887321736 [0. 0. 0. 0. 0. 0. 0.\n", " 0. 9. 2.9951489]\n", "154.68059142436286 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 5. 4.68059158]\n", "52.79489364924517 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 5. 2.79489374]\n", "-145.38723418396282 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -4. -5.38723421]\n", "-232.52545737247422 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -3. -2.52545738]\n", "173.58605343321975 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 7. 3.58605337]\n", "336.5748128599669 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 3. 6.57481289]\n", "-251.04467989675783 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -5. -1.04467988]\n", "441.15290990796296 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 4. 1.15290987]\n", "236.5576867062137 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 3. 6.55768681]\n", "-172.27894632282704 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -7. -2.2789464]\n", "-116.79032879409135 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -1. -6.79032898]\n", "-443.077562417462 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -4. -3.07756233]\n", "157.23964975224425 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 5. 7.23964977]\n", "-38.638314660690895 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -3. 
-8.63831425]\n", "-171.17955822688901 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -7. -1.17955828]\n", "346.861445315422 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 4. 6.86144543]\n", "-308.8485358266215 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -0. -8.84853554]\n", "-409.73564844750973 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -0. -9.73564816]\n", "-191.42459846372884 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -9. -1.42459846]\n", "-235.43165365193218 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -3. -5.4316535]\n", "-413.3038378598505 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -1. -3.30383778]\n", "76.87509437531736 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 7. 6.87509441]\n", "-96.81345882273662 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -9. -6.81345892]\n", "-408.30475144713915 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -0. -8.3047514]\n", "202.7470809432891 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 0. 2.74708104]\n", "-82.55633168472276 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -8. -2.55633163]\n", "-58.13794238350567 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -5. -8.13794231]\n", "-152.64386206860158 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -5. -2.64386201]\n", "262.7060240999891 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 6. 2.70602417]\n", "-56.17949312419257 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -5. -6.17949295]\n", "403.1904456013009 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 0. 3.19044566]\n", "19.85620831549251 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 1. 9.85620785]\n", "115.61117698472667 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 1. 5.61117697]\n", "5.954821188715931 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 0. 5.95482111]\n", "-44.95711342367026 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -4. -4.95711327]\n", "-100.67725463828936 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -0. -0.67725462]\n", "-211.86179807254547 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -1. -1.86179805]\n", "-232.02071228253985 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -3. -2.02071238]\n", "452.8033935987449 [0. 0. 0. 0. 0. 0. 0.\n", " 4. 5. 2.8033936]\n", "380.0109961604446 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 8. 
0.01099616]\n", "-89.77200613038261 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -8. -9.77200603]\n", "226.21404998082906 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 2. 6.21404982]\n", "288.15834778539727 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 8. 8.15834808]\n", "-497.5317910316236 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -9. -7.53179121]\n", "-272.0713161267645 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -7. -2.07131624]\n", "143.56966761942368 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 4. 3.56966758]\n", "454.4934444948675 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 5. 4.49344444]\n", "-198.87129385867652 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -9. -8.87129402]\n", "-356.0281315070064 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -5. -6.02813148]\n", "-209.26119665170862 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -0. -9.26119709]\n", "232.39983586563872 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 3. 2.39983582]\n", "480.5631414035888 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 8. 0.56314141]\n", "394.96687400636864 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 9. 4.96687412]\n", "-453.4609836586365 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -5. -3.46098375]\n", "378.2462428908964 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 7. 8.24624252]\n", "258.0182442421155 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 5. 8.01824379]\n", "-102.14994978984527 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -0. -2.14994979]\n", "277.4427246086756 [0. 0. 0. 0. 0. 0. 0.\n", " 2. 7. 7.4427247]\n", "-463.875069729917 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -6. -3.87506962]\n", "230.90193659704616 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 3. 0.90193659]\n", "30.566272369639712 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 3. 0.56627238]\n", "194.86368437412972 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 9. 4.86368418]\n", "470.3914816217548 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 7. 0.39148161]\n", "-193.0088758835553 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -9. -3.00887585]\n", "-87.04280875524573 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -8. -7.04280853]\n", "397.5523441408838 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 9. 7.55234432]\n", "-157.17261149315786 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. 
-5. -7.17261171]\n", "-13.495223681279978 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -1. -3.49522376]\n", "45.91450753882454 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 4. 5.91450739]\n", "-242.00682692464602 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -4. -2.00682688]\n", "-413.44643357154473 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -1. -3.44643354]\n", "-448.6542663401468 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -4. -8.65426636]\n", "365.01672831561183 [0. 0. 0. 0. 0. 0. 0.\n", " 3. 6. 5.0167284]\n", "69.6478840660425 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 6. 9.64788437]\n", "477.2856547118553 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 7. 7.28565454]\n", "-432.7183458207543 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -3. -2.71834588]\n", "-488.51688782640645 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -8. -8.51688766]\n", "140.6537677427525 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 4. 0.65376776]\n", "-26.499627754888124 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -2. -6.49962759]\n", "-185.76791507179902 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -8. -5.76791525]\n", "388.36605431568637 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 8. 8.36605453]\n", "498.5655137517721 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 9. 8.56551361]\n", "477.7796663580064 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 7. 7.77966642]\n", "-233.24559426102465 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -3. -3.24559426]\n", "-347.3476787667839 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -4. -7.34767866]\n", "282.3281198398767 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 8. 2.32811975]\n", "322.1302301206603 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 2. 2.13023019]\n", "450.6465134105924 [0. 0. 0. 0. 0. 0. 0.\n", " 4. 5. 0.6465134]\n", "-345.32896707859106 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -4. -5.32896709]\n", "117.36118525673544 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 1. 7.36118507]\n", "-380.3425054209237 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -8. -0.34250543]\n", "-269.96716558903364 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -6. -9.96716595]\n", "-310.5571318531801 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -1. 
-0.55713183]\n", "65.39532355399791 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 6. 5.39532375]\n", "211.23856910410933 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 1. 1.23856914]\n", "184.2989127204634 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 8. 4.29891253]\n", "357.526682424392 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 5. 7.52668238]\n", "101.73886601326609 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 0. 1.73886597]\n", "-179.88644722942837 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -7. -9.88644695]\n", "-77.33659457028908 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -7. -7.33659458]\n", "-54.813704913663216 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -5. -4.81370497]\n", "-365.11512396755097 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -6. -5.11512375]\n", "411.2206101227943 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 1. 1.22061014]\n", "-88.41044490114581 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -8. -8.41044521]\n", "-100.48127503076132 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -0. -0.48127502]\n", "430.5512495149471 [0. 0. 0. 0. 0. 0. 0.\n", " 4. 3. 0.5512495]\n", "-4.3239491069823055 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -0. -4.32394934]\n", "-191.3621257537933 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -9. -1.36212575]\n", "288.89857550374467 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 8. 8.89857578]\n", "-327.0922637515912 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -2. -7.0922637]\n", "409.32301266971814 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 0. 9.32301235]\n", "420.1764706960616 [0. 0. 0. 0. 0. 0. 0.\n", " 4. 2. 0.1764707]\n", "-471.73346827703165 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -7. -1.73346829]\n", "316.0729322906193 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 1. 6.07293224]\n", "73.45207015585386 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 7. 3.45207024]\n", "75.40587770092611 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 7. 5.40587759]\n", "-106.70699783494142 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -0. -6.70699787]\n", "-111.47554540304449 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -1. -1.47554541]\n", "-301.3739792165889 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -0. -1.37397921]\n", "120.17488173866109 [0. 0. 0. 0. 0. 
0.\n", " 0. 1. 2. 0.17488174]\n", "25.42114751157365 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 2. 5.42114735]\n", "464.8770355159141 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 6. 4.87703562]\n", "395.93832035682965 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 9. 5.93832016]\n", "214.01871926757877 [0. 0. 0. 0. 0. 0. 0.\n", " 2. 1. 4.0187192]\n", "0.4973925977069804 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 0. 0.49739259]\n", "-167.3829570995724 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -6. -7.38295698]\n", "-112.63641501670385 [-0. -0. -0. -0. -0. -0. -0.\n", " -1. -1. -2.636415]\n", "-268.0081724943796 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -6. -8.00817204]\n", "-181.12118351102558 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -8. -1.12118351]\n", "35.33764749968393 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 3. 5.33764744]\n", "336.0466576310549 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 3. 6.04665756]\n", "91.70690447870089 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 9. 1.70690453]\n", "-469.47684008939564 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -6. -9.47684002]\n", "237.38162439186615 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 3. 7.38162422]\n", "-250.18701711814197 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -5. -0.18701711]\n", "-79.68666730609564 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -7. -9.68666744]\n", "484.1756040385994 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 8. 4.17560387]\n", "-276.73411216809484 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -7. -6.73411226]\n", "340.53919140067103 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 4. 0.53919142]\n", "-473.67346749490116 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -7. -3.6734674]\n", "146.76415447671408 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 4. 6.76415443]\n", "266.20026024672217 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 6. 6.20026016]\n", "197.73846828991026 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 9. 7.73846817]\n", "-250.88250452010652 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -5. -0.88250452]\n", "284.8622493449217 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 8. 4.86224937]\n", "-202.9958574458821 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -0. -2.99585748]\n", "-338.2227458407002 [-0. -0. -0. -0. 
-0. -0.\n", " -0. -3. -3. -8.2227459]\n", "162.09954343107947 [0. 0. 0. 0. 0. 0.\n", " 0. 1. 6. 2.09954333]\n", "-444.65383112843625 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -4. -4.65383101]\n", "335.58028584878366 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 3. 5.58028603]\n", "-98.31265625987339 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -9. -8.3126564]\n", "47.82935429848023 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 4. 7.82935429]\n", "-432.5156562439786 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -3. -2.51565623]\n", "279.2032389827309 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 7. 9.20323944]\n", "398.4135792571383 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 9. 8.41357899]\n", "230.36124762659583 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 3. 0.36124763]\n", "-361.1756393388362 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -6. -1.17563939]\n", "227.5747455308219 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 2. 7.57474566]\n", "112.4548255051081 [0. 0. 0. 0. 0. 0. 0.\n", " 1. 1. 2.4548254]\n", "-155.3119163680934 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -5. -5.31191635]\n", "-11.714341492229718 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -1. -1.71434152]\n", "-269.436760941968 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -6. -9.4367609]\n", "-22.859242111159418 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -2. -2.8592422]\n", "-129.76794512250322 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -2. -9.76794529]\n", "-499.09822388770175 [-0. -0. -0. -0. -0. -0.\n", " -0. -4. -9. -9.09822369]\n", "248.0023273172286 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 4. 8.00232697]\n", "348.18563198628715 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 4. 8.18563175]\n", "-241.11132893795008 [-0. -0. -0. -0. -0. -0.\n", " -0. -2. -4. -1.11132896]\n", "-62.970755518410805 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -6. -2.97075558]\n", "-178.78620921577294 [-0. -0. -0. -0. -0. -0.\n", " -0. -1. -7. -8.78620911]\n", "298.5002801056519 [0. 0. 0. 0. 0. 0.\n", " 0. 2. 9. 8.50028038]\n", "-384.6884195872957 [-0. -0. -0. -0. -0. -0.\n", " -0. -3. -8. -4.68841982]\n", "43.49249121452192 [0. 0. 0. 0. 0. 0.\n", " 0. 0. 4. 
3.49249125]\n", "-98.15937336954849 [-0. -0. -0. -0. -0. -0.\n", " -0. -0. -9. -8.15937328]\n", "404.4605816247577 [0. 0. 0. 0. 0. 0.\n", " 0. 4. 0. 4.46058178]\n", "363.14678611459215 [0. 0. 0. 0. 0. 0.\n", " 0. 3. 6. 3.14678621]\n" ] } ], "source": [ "for i in range(1000):\n", " q = (np.random.rand() - 0.5)*1e3\n", " parts = tear_number_apart(q, n_digit=10, base=10, mv_left=0)\n", " print(q, parts)\n", " res = np.abs(comb_num_back(parts, n_digit=10, base=10, mv_left=0)-q) < 1e-6\n", " if not res:\n", " print('??? np.abs(comb_num_back(parts, n_digit=10, base=10, mv_left=0)-q)', np.abs(comb_num_back(parts, n_digit=10, base=10, mv_left=0)-q))\n", " assert False" ] }, { "cell_type": "code", "execution_count": 184, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "[-0. -0. -0. -0. -0. -0. -1. -2. -0. -0.]\n", "-1200.0\n" ] } ], "source": [ "parts = tear_number_apart(-1200, n_digit=10, base=10, mv_left=0)\n", "print(parts)\n", "print(comb_num_back(parts, n_digit=10, base=10, mv_left=0))\n" ] }, { "cell_type": "code", "execution_count": 185, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "[0. 1. 1. 1. 1. 1. 1. 1. 1. 0.]\n" ] }, { "data": { "text/plain": [ "255.0" ] }, "execution_count": 185, "metadata": {}, "output_type": "execute_result" } ], "source": [ "\n", "\n", "parts = tear_number_apart(255, n_digit=10, base=2, mv_left=1)\n", "print(parts)\n", "comb_num_back(parts, n_digit=10, base=2, mv_left=1)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "client.send_targeted_dgram('ddd')" ] }, { "cell_type": "code", "execution_count": 186, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "[0. 0. 0. 0. 0. 0.\n", " 0. 0. 2. 
5.57780027]\n" ] }, { "data": { "text/plain": [ "255.77800273895264" ] }, "execution_count": 186, "metadata": {}, "output_type": "execute_result" } ], "source": [ "\n", "\n", "parts = tear_number_apart(255.778, n_digit=10, base=10, mv_left=-1)\n", "print(parts)\n", "comb_num_back(parts, n_digit=10, base=10, mv_left=-1)" ] }, { "cell_type": "code", "execution_count": 187, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "[-0. -0. -0. -0. -0. -0. -1. -2. -0. -0.]\n" ] }, { "data": { "text/plain": [ "-1200.0" ] }, "execution_count": 187, "metadata": {}, "output_type": "execute_result" } ], "source": [ "\n", "parts = tear_number_apart(-1200, n_digit=10, base=10, mv_left=0)\n", "print(parts)\n", "comb_num_back(parts, n_digit=10, base=10, mv_left=0)" ] }, { "cell_type": "code", "execution_count": 54, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "tensor([[0.0000, 0.0226, 0.0226],\n", " [0.0216, 0.0000, 0.0000],\n", " [0.0216, 0.0000, 0.0000]])" ] }, "execution_count": 54, "metadata": {}, "output_type": "execute_result" } ], "source": [ "import torch\n", "from torch.distributions.categorical import Categorical\n", "from torch.nn.functional import kl_div\n", "import torch.nn.functional as F\n", "from UTIL.tensor_ops import repeat_at\n", "probs = torch.Tensor(\n", " [\n", " [0.4, 0.6],\n", " [0.3, 0.7],\n", " [0.3, 0.7]\n", " ]\n", " )\n", "\n", "\n", "# probs.S # (?, n_agent, n_action)\n", "n_agent = probs.shape[-2]\n", "probs_rep = repeat_at(tensor=probs, insert_dim=-2, n_times=n_agent)\n", "# probs_rep.S # (?, n_agent, n_agent, n_action)\n", "probs_rep_transpose = probs_rep.swapaxes(-2,-3)\n", "mat = (probs_rep*probs_rep.log()-probs_rep*probs_rep_transpose.log()).sum(-1)\n", "mat # (?, n_agent, n_agent)\n", "# F.kl_div(probs_rep.log(), probs_rep_transpose, reduction='batchmean')\n", "\n" ] }, { "cell_type": "code", "execution_count": 110, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ 
"[[0. 2. 1. 1. 2. 1. 1. 2. 2. 2. 0. 2. 1. 0. 2. 0. 2. 2.]\n", " [2. 0. 2. 2. 1. 2. 2. 0. 0. 1. 2. 1. 2. 2. 0. 2. 1. 1.]\n", " [1. 2. 0. 0. 2. 0. 0. 2. 2. 2. 1. 2. 0. 1. 2. 1. 2. 2.]\n", " [1. 2. 0. 0. 2. 0. 0. 2. 2. 2. 1. 2. 0. 1. 2. 1. 2. 2.]\n", " [2. 1. 2. 2. 0. 2. 2. 1. 1. 0. 2. 0. 2. 2. 1. 2. 0. 0.]\n", " [1. 2. 0. 0. 2. 0. 0. 2. 2. 2. 1. 2. 0. 1. 2. 1. 2. 2.]\n", " [1. 2. 0. 0. 2. 0. 0. 2. 2. 2. 1. 2. 0. 1. 2. 1. 2. 2.]\n", " [2. 0. 2. 2. 1. 2. 2. 0. 0. 1. 2. 1. 2. 2. 0. 2. 1. 1.]\n", " [2. 0. 2. 2. 1. 2. 2. 0. 0. 1. 2. 1. 2. 2. 0. 2. 1. 1.]\n", " [2. 1. 2. 2. 0. 2. 2. 1. 1. 0. 2. 0. 2. 2. 1. 2. 0. 0.]\n", " [0. 2. 1. 1. 2. 1. 1. 2. 2. 2. 0. 2. 1. 0. 2. 0. 2. 2.]\n", " [2. 1. 2. 2. 0. 2. 2. 1. 1. 0. 2. 0. 2. 2. 1. 2. 0. 0.]\n", " [1. 2. 0. 0. 2. 0. 0. 2. 2. 2. 1. 2. 0. 1. 2. 1. 2. 2.]\n", " [0. 2. 1. 1. 2. 1. 1. 2. 2. 2. 0. 2. 1. 0. 2. 0. 2. 2.]\n", " [2. 0. 2. 2. 1. 2. 2. 0. 0. 1. 2. 1. 2. 2. 0. 2. 1. 1.]\n", " [0. 2. 1. 1. 2. 1. 1. 2. 2. 2. 0. 2. 1. 0. 2. 0. 2. 2.]\n", " [2. 1. 2. 2. 0. 2. 2. 1. 1. 0. 2. 0. 2. 2. 1. 2. 0. 0.]\n", " [2. 1. 2. 2. 0. 2. 2. 1. 1. 0. 2. 0. 2. 2. 1. 2. 0. 
0.]]\n", "[[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]\n", " [ 1 0 1 1 0 1 1 0 0 0 1 0 1 1 0 1 0 0]\n", " [ 3 2 1 1 0 1 1 2 2 0 3 0 1 3 2 3 0 0]\n", " [ 3 5 6 6 4 1 1 5 2 0 7 0 1 7 2 3 4 0]\n", " [ 3 11 6 13 9 1 1 5 10 0 7 0 12 15 2 14 4 8]\n", " [ 3 11 6 13 9 1 17 5 10 16 7 0 12 15 2 14 4 8]]\n" ] } ], "source": [ "# n_agent = 16\n", "tree = get_division_tree(18)\n", "# current_level = 3\n", "# blood_distance = np.ones(shape=(n_agent,n_agent), dtype=np.float64) * np.nan\n", "# for i in range(n_agent):\n", "# for j in range(n_agent):\n", "# if i==j:blood_distance[i,j] = 0\n", "# if not np.isnan(blood_distance[i,j]):\n", "# continue\n", "# for t in range(current_level+1):\n", "# investigate_level = (current_level) - t\n", "# if tree[investigate_level, i] == tree[investigate_level, j]:\n", "# blood_distance[i,j] = t\n", "# blood_distance[j,i] = t\n", "# break\n", "\n", "\n", "blood_distance = get_blood_distance(tree, 2)\n", "print(blood_distance)\n", "print(tree)" ] }, { "cell_type": "code", "execution_count": 72, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n", " [ 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0],\n", " [ 1, 0, 3, 2, 3, 1, 1, 1, 2, 3, 0, 2, 2, 3, 0, 0],\n", " [ 1, 0, 3, 5, 7, 6, 1, 6, 2, 7, 4, 5, 2, 3, 4, 0],\n", " [12, 0, 3, 11, 15, 6, 1, 13, 10, 7, 9, 5, 2, 14, 4, 8]])" ] }, "execution_count": 72, "metadata": {}, "output_type": "execute_result" } ], "source": [ "\n", "[[ 0. 2. 2. nan 2. nan 2. nan nan 2. nan 2. nan nan 2. nan]\n", " [ 2. 0. 2. nan 2. nan 2. nan nan 2. nan 2. nan nan 2. nan]\n", " [ 2. 2. 0. nan 2. nan 2. nan nan 2. nan 2. nan nan 2. nan]\n", " [nan nan nan 0. nan 2. nan 2. 2. nan 2. nan 2. 2. nan 2.]\n", " [ 2. 2. 2. nan 0. nan 2. nan nan 2. nan 2. nan nan 2. nan]\n", " [nan nan nan 2. nan 0. nan 2. 2. nan 2. nan 2. 2. nan 2.]\n", " [ 2. 2. 2. nan 2. nan 0. nan nan 2. nan 2. nan nan 2. nan]\n", " [nan nan nan 2. nan 2. nan 0. 2. nan 2. nan 2. 2. 
nan 2.]\n", " [nan nan nan 2. nan 2. nan 2. 0. nan 2. nan 2. 2. nan 2.]\n", " [ 2. 2. 2. nan 2. nan 2. nan nan 0. nan 2. nan nan 2. nan]\n", " [nan nan nan 2. nan 2. nan 2. 2. nan 0. nan 2. 2. nan 2.]\n", " [ 2. 2. 2. nan 2. nan 2. nan nan 2. nan 0. nan nan 2. nan]\n", " [nan nan nan 2. nan 2. nan 2. 2. nan 2. nan 0. 2. nan 2.]\n", " [nan nan nan 2. nan 2. nan 2. 2. nan 2. nan 2. 0. nan 2.]\n", " [ 2. 2. 2. nan 2. nan 2. nan nan 2. nan 2. nan nan 0. nan]\n", " [nan nan nan 2. nan 2. nan 2. 2. nan 2. nan 2. 2. nan 0.]]\n", "\n", " " ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "{'d1': tensor([1.1618e-13, 4.3868e-16, 3.4678e-18, 0.0000e+00, 1.0000e+00, 8.3701e-14,\n", " 1.8273e-12], device='cuda:0', requires_grad=True),\n", " 'd2': tensor([0.0756, 0.2109, 0.0628, 0.0104, 0.3506, 0.1273, 0.1624],\n", " device='cuda:0', requires_grad=True)}" ] }, "execution_count": 2, "metadata": {}, "output_type": "execute_result" } ], "source": [ "import pickle\n", "with open('TEMP/wifi', 'rb') as f:\n", " p_list = pickle.load(f)\n", "\n", "p_list" ] }, { "cell_type": "code", "execution_count": 32, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "tensor([5.0909e-02, 1.9223e-04, 1.5197e-06, 8.2279e-11, 1.1147e-01, 3.6681e-02,\n", " 8.0075e-01], dtype=torch.float64)\n", "tensor([0.0756, 0.2109, 0.0628, 0.0104, 0.3506, 0.1273, 0.1624],\n", " dtype=torch.float64)\n", "tensor(1.0829, dtype=torch.float64)\n", "tensor(2.6698, dtype=torch.float64)\n" ] } ], "source": [ "import torch\n", "import torch.nn.functional as F\n", "from torch.distributions.categorical import Categorical\n", "from torch.nn.functional import kl_div\n", "\n", "\n", "logits1 = torch.Tensor([ 4.4786, -1.1005, -5.9407, -15.7646, 5.2623, 4.1508, 7.2341]).type(torch.DoubleTensor)\n", "logits2 = torch.Tensor([ -0.5480, 0.4779, -0.7330, -2.5290, 0.9862, -0.0273, 0.2163]).type(torch.DoubleTensor)\n", "\n", "dist1 = 
Categorical(logits = logits1)\n", "dist2 = Categorical(logits = logits2)\n", "print(dist1.probs)\n", "print(dist2.probs)\n", "from torch.distributions import kl_divergence\n", "\n", "print(kl_divergence(dist1, dist2))\n", "print(kl_divergence(dist2, dist1))\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "tensor([1.1618e-13, 4.3868e-16, 3.4678e-18, 0.0000e+00, 1.0000e+00, 8.3701e-14,\n", " 1.8273e-12], device='cuda:0', requires_grad=True)" ] }, "execution_count": 7, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# probs = torch.Tensor(\n", "# [0, 1])\n", "# probs2 = torch.Tensor(\n", "# [0.3, 0.7])\n", "\n", "# probs = p_list['d1']\n", "# probs2 = p_list['d2']\n", "\n", "# print(kl_div(probs.log(), probs2))\n", "# print(kl_div(probs2.log(), probs))\n", "\n", "# probs = torch.Tensor(\n", "# [0, 1])\n", "# probs2 = torch.Tensor(\n", "# [0.3, 0.7])\n", "\n", "# probs = p_list['d1']\n", "# probs2 = p_list['d2']\n", "\n", "# print(kl_div(probs.log(), probs2))\n", "# print(kl_div(probs2.log(), probs))\n", "\n", "# probs = torch.Tensor(\n", "# [0, 1])\n", "# probs2 = torch.Tensor(\n", "# [0.3, 0.7])\n", "\n", "# probs = p_list['d1']\n", "# probs2 = p_list['d2']\n", "\n", "# print(kl_div(probs.log(), probs2))\n", "# print(kl_div(probs2.log(), probs))\n", "\n", "# probs = torch.Tensor(\n", "# [0, 1])\n", "# probs2 = torch.Tensor(\n", "# [0.3, 0.7])\n", "\n", "# probs = p_list['d1']\n", "# probs2 = p_list['d2']\n", "\n", "# print(kl_div(probs.log(), probs2))\n", "# print(kl_div(probs2.log(), probs))" ] }, { "cell_type": "code", "execution_count": 64, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "tensor([[[ 0., 1., 2.],\n", " [ 3., 4., 5.]],\n", "\n", " [[ 6., 7., 8.],\n", " [ 9., 10., 11.]],\n", "\n", " [[12., 13., 14.],\n", " [15., 16., 17.]]])\n", "tensor([[[ 
0., 0., 0.],\n", " [ 3., 4., 5.]],\n", "\n", " [[ 6., 7., 8.],\n", " [ 0., 0., 0.]],\n", "\n", " [[ 0., 0., 0.],\n", " [15., 16., 17.]]])\n" ] } ], "source": [ "import torch\n", "import numpy as np\n", "from UTIL.tensor_ops import gather_righthand, repeat_at\n", "def scatter_righthand(scatter_into, src, index, check=True):\n", " index = index.long()\n", " i_dim = index.dim()\n", " s_dim = src.dim()\n", " t_dim = i_dim - 1\n", " index_new_shape = list(src.shape)\n", " index_new_shape[t_dim] = index.shape[t_dim]\n", " for _ in range(i_dim, s_dim):\n", " index = index.unsqueeze(-1)\n", " index_expand = index.expand(index_new_shape) # only this two line matters\n", " return scatter_into.scatter(t_dim, index_expand, src)\n", "\n", "orig = torch.Tensor([[[ 0, 1, 2], [ 3, 4, 5]],\n", " [[ 6, 7, 8], [ 9, 10, 11]],\n", " [[12, 13, 14], [15, 16, 17]]])\n", "index = torch.Tensor([[0], [1], [0]])\n", "print(orig)\n", "\n", "res = gather_righthand(src=orig, index=index)\n", "res[:] = 0\n", "\n", "orig_fix = orig.clone().detach()\n", "orig_fix = scatter_righthand(orig_fix, src=res, index=index)\n", "print(orig_fix)\n", "\n", "\n", "# orig.scatter(dim=1,index=index.long(),src=res)" ] }, { "cell_type": "code", "execution_count": 71, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "s1\n", "s1\n", "s2\n", "s2\n", "torch.Size([64, 16, 8, 88, 888])\n" ] } ], "source": [ "index = torch.randint(high=16,size=(64,5))\n", "print('s1')\n", "res = gather_righthand(src,index)\n", "print('s1')\n", "res[:] = 0\n", "print('s2')\n", "resX = scatter_righthand(scatter_into=src, src=res, index=index)\n", "print('s2')\n", "print(resX.S)\n" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "x2 tensor([[101., 101., 101., 101., 100., 100., 100.],\n", " [202., 202., 202., 200., 201., 200., 200.],\n", " [303., 303., 303., 300., 300., 301., 300.],\n", " [404., 404., 404., 400., 400., 
400., 401.]])\n", "perm_index tensor([2, 1, 3, 0])\n", "confact_x2 tensor([[401., 401., 401., 400., 400., 400., 401.],\n", " [202., 202., 202., 200., 201., 200., 200.],\n", " [103., 103., 103., 101., 100., 100., 100.],\n", " [304., 304., 304., 300., 300., 301., 300.]])\n" ] } ], "source": [ "import torch\n", "import numpy as np\n", "from UTIL.tensor_ops import gather_righthand, repeat_at\n", "from UTIL.tensor_ops import add_onehot_id_at_last_dim, repeat_at, _2tensor, gather_righthand, scatter_righthand\n", "x_in = torch.Tensor([\n", " [1,1,1],\n", " [2,2,2],\n", " [3,3,3],\n", " [4,4,4],\n", "])\n", "n_agent = 4\n", "\n", "\n", "nets = [\n", " lambda x: x+100,\n", " lambda x: x+200,\n", " lambda x: x+300,\n", " lambda x: x+400\n", "]\n", "\n", "\n", "x0 = add_onehot_id_at_last_dim(x_in)\n", "# x1 = self.shared_net(x0)\n", "res = []\n", "for i in range(n_agent):\n", " res.append(nets[i](x0[..., i, :]))\n", "x2 = torch.stack(res, -2)\n", "# x22 = self.nets[0](x1)\n", "print('x2',x2)\n", "\n", "######### forward twice: shuffle forward\n", "perm_index = torch.randperm(n_agent, device=x_in.device) # shape = (n_agent)\n", "print('perm_index',perm_index)\n", "\n", "perm_index = perm_index.expand(x_in.shape[:-1]) # shape = (...?, n_agent)\n", "# x_in shape = (...?, n_agent, coredim)\n", "perm_x_in = gather_righthand(src=x_in, index=perm_index, check=False)\n", "perm_x0 = add_onehot_id_at_last_dim(perm_x_in)\n", "perm_res = []\n", "for i in range(n_agent):\n", " perm_res.append(nets[i](perm_x0[..., i, :]))\n", "perm_x2 = torch.stack(perm_res, -2)\n", "\n", "# 103\n", "# 202\n", "# 304\n", "# 401\n", "\n", "\n", "# 401 vs 101\n", "# 202 vs 202\n", "# 103 vs 303\n", "# 304 vs 404\n", "\n", "confact_x2 = torch.zeros_like(perm_x2) + np.nan\n", "confact_x2 = scatter_righthand(scatter_into=confact_x2, src=perm_x2, index=perm_index)\n", "print('confact_x2',confact_x2)" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": 
"stream", "text": [ "tensor([False, False, False, False, True])\n", "tensor([ True, False, True, False, False])\n" ] } ], "source": [ "import torch\n", "\n", "res=torch.isnan(torch.tensor([1, float('inf'), 2, float('-inf'), float('nan')]))\n", "print(res)\n", "\n", "res=torch.isfinite(torch.tensor([1, float('inf'), 2, float('-inf'), float('nan')]))\n", "print(res)\n", "\n", "\n", "\n", "\n", "\n" ] } ], "metadata": { "interpreter": { "hash": "916dbcbb3f70747c44a77c7bcd40155683ae19c65e1c03b4aa3499c5328201f1" }, "kernelspec": { "display_name": "Python 3.8.10 64-bit", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.10" }, "orig_nbformat": 4, "vscode": { "interpreter": { "hash": "916dbcbb3f70747c44a77c7bcd40155683ae19c65e1c03b4aa3499c5328201f1" } } }, "nbformat": 4, "nbformat_minor": 2 } ================================================ FILE: PythonExample/hmp_minimal_modules/cradle.py ================================================ from UTIL.network import UnixTcpClientP2P, UnixTcpServerP2P import time ================================================ FILE: PythonExample/hmp_minimal_modules/escape.jsonc ================================================ { // --- Part1: config HMP core --- "config.py->GlobalConfig": { "note": "random-escape",// http://localhost:59547 "env_name": "uhmap", "env_path": "MISSION.uhmap", // "heartbeat_on": "False", "draw_mode": "Img", "num_threads": 1, // 请预留 num_threads * 1 GB 的内存空间 "report_reward_interval": 128, "test_interval": 1280, "mt_act_order": "new_method", "test_epoch": 512, "interested_team": 0, "seed": 10098, "device": "cpu", "max_n_episode": 5000000, "fold": 1, "backup_files": [ "MISSION/uhmap" ] }, // --- Part2: config MISSION --- "MISSION.uhmap.uhmap_env_wrapper.py->ScenarioConfig": { "N_AGENT_EACH_TEAM": [ 8, 4 
], "MaxEpisodeStep": 100, "StepGameTime": 0.5, "StateProvided": false, "render": true, // note: random seed has different impact on renderer and server "UElink2editor": true, "HeteAgents": false, "UnrealLevel": "UhmapAttackPost", "SubTaskSelection": "UhmapEscape", "UhmapVersion":"3.8", "UhmapRenderExe": "../../Build/WindowsNoEditor/UHMP.exe", "UhmapServerExe": "../../Build/WindowsServer/UHMPServer.exe", // "UhmapRenderExe": "/home/hmp/UnrealHmapBinary/Version3.8/LinuxNoEditor/UHMP.sh", // "UhmapServerExe": "/home/hmp/UnrealHmapBinary/Version3.8/LinuxServer/UHMPServer.sh", "TimeDilation": 64, // simulation time speed up, larger is faster "TEAM_NAMES": [ "ALGORITHM.script_ai.a_escape->EscapeGreenPreprogramBaseline", // "ALGORITHM.random.foundation->RandomControllerWithActionSetV2", "ALGORITHM.script_ai.a_escape->EscapeRedPreprogramBaseline", ] }, "ALGORITHM.script_ai.a_escape.py->AlgorithmConfig": { }, } ================================================ FILE: PythonExample/hmp_minimal_modules/formation.jsonc ================================================ { // --- Part1: config HMP core --- "config.py->GlobalConfig": { "note": "random-attackpost",// http://localhost:59547 "env_name": "uhmap", "env_path": "MISSION.uhmap", // "heartbeat_on": "False", "draw_mode": "Img", "num_threads": 1, // 请预留 num_threads * 1 GB 的内存空间 "report_reward_interval": 128, "test_interval": 1280, "mt_act_order": "new_method", "test_epoch": 512, "interested_team": 0, "seed": 10098, "device": "cpu", "max_n_episode": 5000000, "fold": 1, "backup_files": [ "MISSION/uhmap" ] }, // --- Part2: config MISSION --- "MISSION.uhmap.uhmap_env_wrapper.py->ScenarioConfig": { "N_AGENT_EACH_TEAM": [ 20, 20 ], // 10 ships, 2 waterdrops "MaxEpisodeStep": 100, "StepGameTime": 0.5, "StateProvided": false, "render": false, // note: random seed has different impact on renderer and server "UElink2editor": true, "HeteAgents": false, "UnrealLevel": "UhmapFormation", "SubTaskSelection": "UhmapFormation", 
"UhmapVersion":"3.8", "UhmapRenderExe": "/home/hmp/UnrealHmapBinary/Version3.8/LinuxNoEditor/UHMP.sh", "UhmapServerExe": "/home/hmp/UnrealHmapBinary/Version3.8/LinuxServer/UHMPServer.sh", "TimeDilation": 64, // simulation time speed up, larger is faster "TEAM_NAMES": [ "ALGORITHM.random.foundation->RandomControllerWithMomentumAgent", // "ALGORITHM.random.foundation->RandomControllerWithActionSetV2", "TEMP.TEAM2.ALGORITHM.random.foundation->RandomControllerWithMomentumAgent" ] }, // --- Part3: config ALGORITHM 1/2 --- "ALGORITHM.random.foundation.py->AlgorithmConfig": { }, // --- Part3: config ALGORITHM 2/2 --- "TEMP.TEAM2.ALGORITHM.random.foundation.py->AlgorithmConfig": { } } ================================================ FILE: PythonExample/hmp_minimal_modules/main.py ================================================ # This Python file uses the following encoding: utf-8 """ Author: Fu Qingxu,CASIA Description: Enterance for everything in HMP In this file you can find: 1.Config-Parsing; 2.Multiprocess-Initilization 3.GPU-Selection; 4.Seed-Setting If you are interested in something, you may continue to read: Handling parallel environment --> task_runner.py & shm_env.py Link between teams and diverse algorithms --> multi_team.py Adding new env --> MISSION.env_router.py Adding algorithm --> ALGORITHM.example_foundation.py Configuring by writing py files --> config.py Configuring by json --> xx.json colorful printing --> colorful.py auto pip deployer --> pip_find_missing.py efficient parallel execting --> shm_pool.pyx auto gpu selection --> auto_gpu.py hmap logging/plotting bridge --> mcom.py & mcom_rec.py experiment batch executor --> mprofile.py """ import os, atexit, platform def SET_NUM_THREADS(internal_threads): os.environ['NUM_THREADS'] = str(internal_threads) os.environ['OPENBLAS_NUM_THREADS'] = str(internal_threads) os.environ['MKL_NUM_THREADS'] = str(internal_threads) os.environ['OMP_NUM_THREADS'] = str(internal_threads) SET_NUM_THREADS(1) # do NOT edit this 
func def pytorch_gpu_init(cfg): import torch from UTIL.auto_gpu import sel_gpu torch.set_num_threads(int(os.environ['NUM_THREADS'])) seed = cfg.seed; device = cfg.device torch.manual_seed(seed) torch.set_printoptions(precision=4, sci_mode=False) # e.g. device='cpu' os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" if not 'cuda' in device: return if device == 'cuda': gpu_index = sel_gpu().auto_choice() else: # e.g. device='cuda:0' gpu_index = int(device.split(':')[-1]) cfg.manual_gpu_ctl = True if cfg.gpu_fraction!=1: torch.cuda.set_per_process_memory_fraction(cfg.gpu_fraction, gpu_index) os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_index) cfg.device = 'cuda' # remove ':x', the selected gpu is cuda:0 from now on torch.cuda.manual_seed(seed) if cfg.use_float64: torch.set_default_dtype(torch.float64) def register_daemon(cfg): from UTIL.hmp_daemon import start_periodic_daemon start_periodic_daemon(cfg) if __name__ == '__main__': import numpy import pyximport; pyximport.install(build_dir='./TEMP/build/', inplace=True, language_level=3, setup_args={'include_dirs': numpy.get_include()}) from UTIL.colorful import * from UTIL.config_args import prepare_args from UTIL.shm_pool import SmartPool cfg = prepare_args() register_daemon(cfg) # Set numpy seed numpy.random.seed(cfg.seed) numpy.set_printoptions(3, suppress=True) # Get mem-sharing process pool assert cfg.num_threads % cfg.fold == 0, ('Use n process to run n*m parallel threads!') smart_pool = SmartPool(fold=cfg.fold, proc_num=cfg.num_threads // cfg.fold, base_seed=cfg.seed) atexit.register(smart_pool.party_over) # failsafe, handles shm leak # Pytorch has to be init AFTER the process pool starts, set pytorch seed pytorch_gpu_init(cfg=cfg) # Prepare everything else from task_runner import Runner runner = Runner(process_pool=smart_pool) # GO! GO! GO! runner.run() # DONE! 
print绿('--- All jobs finished ---') smart_pool.party_over() elif platform.system()!="Linux": # Linux uses fork for multi-processing, but Windows does not, reload config for Windows from UTIL.config_args import prepare_args cfg = prepare_args(vb=False) ================================================ FILE: PythonExample/hmp_minimal_modules/multi_server.py ================================================ base = """ { "config.py->GlobalConfig": { "note": "sc-MMM2-conc-db1", // experiment note, also means the log saving directory // "train_time_testing": "False", // do not manage train time testing, pymarl env manage the testing itself "env_name":"sc2", // starcraft 2 "env_path":"MISSION.starcraft.sc2_env_wrapper", // starcraft 2 // "interested_agent_num":100, // only for reward logging, **not needed because sc2 use uniform team reward "draw_mode": "Img", // plot curlves as image "num_threads": "16", // number of parallel envs "report_reward_interval": "128", // report the reward averaging x episodes "test_interval": "512", // begin a test run every x episodes, test run is managed by pymarl side "test_epoch": "64", // begin a test run every x episodes, test run is managed by pymarl side "device": "cuda", "max_n_episode": 1500000, "fold": "1", // each linux process handle x parallel envs "backup_files":[ ] }, "MISSION.starcraft.sc2_env_wrapper.py->ScenarioConfig": { "map_": "MMM2", "sc_version": "2.4.6", // "map_": "5m_vs_6m", // "SINGLE_TEAM_N_AGENT": 5, // "episode_limit": 60, // "reward_vec": true, "TEAM_NAMES": [ "ALGORITHM.conc_4hist_scdb.foundation->ReinforceAlgorithmFoundation" ] }, "ALGORITHM.conc_4hist_scdb.foundation.py->AlgorithmConfig": { "train_traj_needed": "128", "n_focus_on": 3, "actor_attn_mod": "False", "lr": 0.0001, "ppo_epoch": 24, "load_checkpoint": "False" } } """ import commentjson as json import numpy as np base_conf = json.loads(base) n_run = 4 n_run_mode = [ { "addr": "localhost:2266", "usr": "hmp", "pwd": "hmp" }, ]*n_run assert 
len(n_run_mode)==n_run sum_note = "MMM2-conc4hist" conf_override = { "config.py->GlobalConfig-->seed": [ np.random.randint(0, 10000) for _ in range(n_run) ], "config.py->GlobalConfig-->device": [ 'cuda', 'cuda', 'cuda', 'cuda', ], "config.py->GlobalConfig-->note": [ "n_focus_on_run1_3focus", "n_focus_on_run2_3focus", "n_focus_on_run1_5focus", "n_focus_on_run2_5focus", ], "ALGORITHM.conc_4hist_scdb.foundation.py->AlgorithmConfig-->n_focus_on": [ 3, 3, 5, 5, ], } if __name__ == '__main__': # copy the experiments import shutil, os shutil.copyfile(__file__, os.path.join(os.path.dirname(__file__), 'batch_experiment_backup.py')) # run experiments remotely from UTIL.batch_exp import run_batch_exp run_batch_exp(sum_note, n_run, n_run_mode, base_conf, conf_override, __file__) ================================================ FILE: PythonExample/hmp_minimal_modules/multi_team.py ================================================ import numpy as np import importlib from UTIL.data_struct import UniqueList class MMPlatform(object): def __init__(self, mcv, envs): from config import GlobalConfig self.n_t = GlobalConfig.ScenarioConfig.N_TEAM # n_t => n_teams n_agents_each_t = GlobalConfig.ScenarioConfig.N_AGENT_EACH_TEAM # n_agents_each_t => n_agents_each_team self.t_member_list = GlobalConfig.ScenarioConfig.AGENT_ID_EACH_TEAM self.t_name = GlobalConfig.ScenarioConfig.TEAM_NAMES assert self.n_t == len(self.t_name), 'Team does not match agent id' # check N_TEAM assert self.n_t == len(UniqueList(self.t_name)), 'Team name must not repeat' # please duplicate algorithm if needed self.align_episode = GlobalConfig.align_episode self.n_thread = GlobalConfig.num_threads self.legacy_act_order = True if GlobalConfig.mt_act_order == 'new_method': self.legacy_act_order = False self.RewardAsUnity = False # env give reward of each team instead of agent if hasattr(GlobalConfig.ScenarioConfig, 'RewardAsUnity'): self.RewardAsUnity = GlobalConfig.ScenarioConfig.RewardAsUnity self.ActAsUnity = False if 
hasattr(GlobalConfig.ScenarioConfig, 'ActAsUnity'): self.ActAsUnity = GlobalConfig.ScenarioConfig.ActAsUnity self.ObsAsUnity = False if hasattr(GlobalConfig.ScenarioConfig, 'ObsAsUnity'): self.ObsAsUnity = GlobalConfig.ScenarioConfig.ObsAsUnity space = envs.get_space() # get observation space and action space self.algo_foundations = [] # import and initialize algorithms for t in range(self.n_t): assert len(self.t_member_list[t]) == n_agents_each_t[t] assert '->' in self.t_name[t] module_, class_ = self.t_name[t].split('->') init_f = getattr(importlib.import_module(module_), class_) self.algo_foundations.append( init_f(n_agent=n_agents_each_t[t], n_thread=self.n_thread, space=space, mcv=mcv, team=t) ) pass def act(self, runner_info): actions_list = [] for t_name, t_members, algo_fdn, t_index in zip(self.t_name, self.t_member_list, self.algo_foundations, range(self.n_t)): # split intel such as reward and observation into different teams _t_intel_ = self._split_intel(runner_info, t_members, t_name, t_index) # each team (controlled by different algorithms) interacts with env and act _act_, _t_intel_ = algo_fdn.interact_with_env(_t_intel_) # concat actions of each agent ('_act_' --> 'actions_list') actions_list = self._append_act_to_list(_act_, actions_list, t_members) # loop back internal states registered in _t_intel_ (e.g._division_obs_) if _t_intel_ is None: continue # process internal states loop back, featured with keys that startswith and endswith '_' for key in _t_intel_: if key.startswith('_') and key.endswith('_'): self._update_runner(runner_info, runner_info['ENV-PAUSE'], t_name, key, _t_intel_[key]) pass # swapaxes: [n_agent(n_teams if ActAsUnity), n_thread] --> [n_thread, $n_agent(n_teams if ActAsUnity)] actions_list = np.swapaxes(np.array(actions_list, dtype=np.double), 0, 1) # in align_episode mod, threads that are paused are forced to give NaN action ENV_PAUSE = runner_info['ENV-PAUSE'] if ENV_PAUSE.any() and self.align_episode: actions_list[ENV_PAUSE,:] 
= np.nan return actions_list, runner_info def before_terminate(self, runner_info): for t_name, t_members, t_index in zip(self.t_name, self.t_member_list, range(self.n_t)): # split info such as reward and observation self._split_intel(runner_info, t_members, t_name, t_index) def _update_runner(self, runner_info, ENV_PAUSE, t_name, key, content): u_key = t_name+key if (u_key in runner_info) and hasattr(content, '__len__') and \ len(content)==self.n_thread and ENV_PAUSE.any(): runner_info[u_key][~ENV_PAUSE] = content[~ENV_PAUSE] return runner_info[u_key] = content return # seperate observation between teams def _split_intel(self, runner_info, t_members, t_name, t_index): # RUNNING = ~runner_info['ENV-PAUSE'] # Team_Info and ter_obs_echo are None when runner_info['Latest-Team-Info'] is absent Team_Info = None ter_obs_echo = None # load Team_Info and ter_obs_echo if runner_info['Latest-Team-Info'] is not None: assert isinstance(runner_info['Latest-Team-Info'][0], dict) Team_Info = runner_info['Latest-Team-Info'] # if a env just ended ('Env-Suffered-Reset'), the final step obs can be acquired here ter_obs_echo = np.array([self.__split_obs_thread(Team_Info[thread_idx]['obs-echo'], t_index) if done and ('obs-echo' in Team_Info[thread_idx]) else None for thread_idx, done in enumerate(runner_info['Env-Suffered-Reset'])], dtype=object) o = self.__split_obs(runner_info['Latest-Obs'], t_index) reward = runner_info['Latest-Reward'] # summary t_intel_basic = { 'Team_Name': t_name, 'Latest-Obs': o, 'Latest-Team-Info': Team_Info, 'Env-Suffered-Reset': runner_info['Env-Suffered-Reset'], 'Terminal-Obs-Echo': ter_obs_echo, 'ENV-PAUSE': runner_info['ENV-PAUSE'], 'Test-Flag': runner_info['Test-Flag'], 'Latest-Reward': reward[:, t_members] if not self.RewardAsUnity else reward[:, t_index], 'Current-Obs-Step': runner_info['Current-Obs-Step'] } for key in runner_info: if not (t_name in key): continue # otherwise t_name in key s_key = key.replace(t_name, '') t_intel_basic[s_key] = 
runner_info[key] if (s_key != '_hook_'): continue # otherwise deal with _hook_ if t_intel_basic['_hook_'] is not None: self.deal_with_hook(t_intel_basic['_hook_'], t_intel_basic) runner_info[key] = None t_intel_basic['_hook_'] = None # remove _hook_ key t_intel_basic.pop('_hook_') # t_intel_basic = self.filter_running(t_intel_basic, RUNNING) return t_intel_basic def _append_act_to_list(self, _act_, actions_list, t_members): if not self.legacy_act_order: _act_ = np.swapaxes(_act_, 0, 1) assert _act_.shape[0]==len(t_members), ('number of actions differs number of agents!') append_op = actions_list.append if self.ActAsUnity else actions_list.extend append_op(_act_) return actions_list def deal_with_hook(self, hook, t_intel_basic): # use the hook left by algorithm to callback some function # to deliver reward and reset signals # assert self.L_RUNNING is not None # t_intel_basic = self.filter_running(t_intel_basic, self.L_RUNNING) hook({ 'reward':t_intel_basic['Latest-Reward'], 'done': t_intel_basic['Env-Suffered-Reset'], 'info': t_intel_basic['Latest-Team-Info'], 'Latest-Obs':t_intel_basic['Latest-Obs'], 'Terminal-Obs-Echo': t_intel_basic['Terminal-Obs-Echo'], }) def notify_teams(self, message, **kargs): for t_index, algo_fdn in enumerate(self.algo_foundations): if (not hasattr(algo_fdn, 'on_notify')) or (not callable(algo_fdn.on_notify)): continue team_kargs = {k:v[t_index] for k,v in kargs.items()} algo_fdn.on_notify(message, **team_kargs) def __split_obs(self, obs, t_index): # obs [n_thread, n_team/n_agent, coredim] if obs[0] is None: o = None elif self.ObsAsUnity: o = obs[:, t_index] else: # in most cases o = obs[:, self.t_member_list[t_index]] return o def __split_obs_thread(self, obs, t_index): # obs [n_thread, n_team/n_agent, coredim] if self.ObsAsUnity: o = obs[t_index] else: # in most cases o = obs[self.t_member_list[t_index]] return o ================================================ FILE: PythonExample/hmp_minimal_modules/multi_team_parallel.py 
================================================

import numpy as np
import importlib, copy
from UTIL.data_struct import UniqueList
from UTIL.shm_pool import SmartPool

class alg_parallel_wrapper(object):
    # Runs one team's algorithm inside its own worker process (via SmartPool),
    # so that each team's decision making executes in parallel.
    def __init__(self, t_name, n_agent, n_thread, space, mcv, team) -> None:
        # t_name: "module.path->ClassName" string locating the algorithm class.
        # n_agent/n_thread/space/mcv/team: forwarded to the algorithm foundation.
        self.team = team
        if mcv is None: mcv = self.init_alg_logger()
        module_, class_ = t_name.split('->')
        init_f = getattr(importlib.import_module(module_), class_)
        self.alg = init_f(n_agent, n_thread, space, mcv, team)
        # Callback left behind by the algorithm; invoked later from the parent
        # process by name ('call_hook_deligate') because a bound function
        # cannot cross the process boundary.
        self._hook_deligate_ = None

    def interact_with_env(self, _input_):
        # Forward one decision step to the wrapped algorithm.
        # Strips every non-internal key (those not starting with '_') from the
        # loop-back dict so only internal state crosses back to the parent.
        _act_, _t_intel_ = self.alg.interact_with_env(_input_)
        for k in list(_t_intel_.keys()):
            if not k.startswith('_'): _t_intel_.pop(k)
        # _act_.shape=(n_thread, n_agent, action_dim)
        if '_hook_' in _t_intel_ and _t_intel_['_hook_'] is not None:
            # Replace the unpicklable callable with a sentinel string; the
            # parent will call back via call_hook_deligate().
            self._hook_deligate_ = _t_intel_.pop('_hook_')
            _t_intel_['_hook_'] = 'call_hook_deligate'
        return _act_, _t_intel_

    def call_hook_deligate(self, callback_arg):
        # Deliver reward/done/obs feedback to the previously stored hook,
        # then clear it (each hook is single-use).
        assert self._hook_deligate_ is not None
        self._hook_deligate_(callback_arg)
        self._hook_deligate_ = None

    # -- you may delete it or replace it with Tensorboard --
    def init_alg_logger(self):
        # Build a per-team mcom logger writing under cfg.logdir.
        # NOTE(review): if cfg.activate_logger is False, `mcv` is never bound
        # and the trailing mcv.rec_init/return raise UnboundLocalError --
        # confirm intended behavior upstream.
        from config import GlobalConfig as cfg
        from VISUALIZE.mcom import mcom
        logdir = cfg.logdir
        if cfg.activate_logger:
            mcv = mcom( path=f'{logdir}/logger/{self.team}/',
                        image_path=f'{logdir}/team-{self.team}.jpg',
                        rapid_flush=True,
                        draw_mode=cfg.draw_mode,
                        tag='[multi_team_parallel.py]',
                        resume_mod=cfg.resume_mod)
        mcv.rec_init(color='k')
        return mcv

class MMPlatform(object):
    # Parallel variant of multi_team.MMPlatform: blocks information access
    # between teams and dispatches each team's algorithm to its own process.
    def __init__(self, mcv, envs):
        from config import GlobalConfig
        self.n_t = GlobalConfig.ScenarioConfig.N_TEAM               # n_t => n_teams
        n_agents_each_t = GlobalConfig.ScenarioConfig.N_AGENT_EACH_TEAM  # n_agents_each_t => n_agents_each_team
        self.t_member_list = GlobalConfig.ScenarioConfig.AGENT_ID_EACH_TEAM
        self.t_name = GlobalConfig.ScenarioConfig.TEAM_NAMES
        assert self.n_t == len(self.t_name), 'Team does not match agent id' # check N_TEAM
        assert self.n_t == len(UniqueList(self.t_name)),
'Team name must not repeat' # please duplicate algorithm if needed self.align_episode = GlobalConfig.align_episode self.n_thread = GlobalConfig.num_threads self.legacy_act_order = True if GlobalConfig.mt_act_order == 'new_method': self.legacy_act_order = False self.RewardAsUnity = False # env give reward of each team instead of agent if hasattr(GlobalConfig.ScenarioConfig, 'RewardAsUnity'): self.RewardAsUnity = GlobalConfig.ScenarioConfig.RewardAsUnity self.ActAsUnity = False if hasattr(GlobalConfig.ScenarioConfig, 'ActAsUnity'): self.ActAsUnity = GlobalConfig.ScenarioConfig.ActAsUnity self.ObsAsUnity = False if hasattr(GlobalConfig.ScenarioConfig, 'ObsAsUnity'): self.ObsAsUnity = GlobalConfig.ScenarioConfig.ObsAsUnity space = envs.get_space() # get observation space and action space arg_list = [] self.algo_foundations = [] # import and initialize algorithms for t in range(self.n_t): assert len(self.t_member_list[t]) == n_agents_each_t[t] assert '->' in self.t_name[t] arg_list.append(( self.t_name[t], # 't_name' n_agents_each_t[t], # 'n_agent' self.n_thread, # 'n_thread' space, # 'space' None, # 'mcv' t, # 'team' )) print('[multi_team_parallel] distributing algorithm to independent process') self.alg_parallel_exe = SmartPool(fold=1, proc_num=self.n_t, base_seed=GlobalConfig.seed) self.alg_parallel_exe.add_target( name='alg_parallel_exe', lam=alg_parallel_wrapper, args_list=arg_list ) print('[multi_team_parallel] distribution is done') pass def act(self, runner_info): actions_list = [] _t_intel_feed_list_ = [] for t_name, t_members, t_index in zip(self.t_name, self.t_member_list, range(self.n_t)): # split intel such as reward and observation into different teams _t_intel_ = self._split_intel(runner_info, t_members, t_name, t_index) _t_intel_feed_list_.append(_t_intel_) results = self.alg_parallel_exe.exec_target(name='alg_parallel_exe', dowhat='interact_with_env', args_list=_t_intel_feed_list_, ensure_safe=True) # each team (controlled by different algorithms) 
interacts with env and act # _act_, _t_intel_ = algo_fdn.interact_with_env(_t_intel_) _act_mt_, _t_intel_mt_ = zip(*results) for t_name, t_members, _act_, _t_intel_, t_index in zip(self.t_name, self.t_member_list, _act_mt_, _t_intel_mt_, range(self.n_t)): # concat actions of each agent ('_act_' --> 'actions_list') actions_list = self._append_act_to_list(_act_, actions_list, t_members) # loop back internal states registered in _t_intel_ (e.g._division_obs_) if _t_intel_ is None: continue # process internal states loop back, featured with keys that startswith and endswith '_' for key in _t_intel_: if key.startswith('_') and key.endswith('_'): self._update_runner(runner_info, runner_info['ENV-PAUSE'], t_name, key, _t_intel_[key]) pass # swapaxes: [n_agent(n_teams if ActAsUnity), n_thread] --> [n_thread, $n_agent(n_teams if ActAsUnity)] actions_list = np.swapaxes(np.array(actions_list, dtype=np.double), 0, 1) # in align_episode mod, threads that are paused are forced to give NaN action ENV_PAUSE = runner_info['ENV-PAUSE'] if ENV_PAUSE.any() and self.align_episode: actions_list[ENV_PAUSE,:] = np.nan return actions_list, runner_info def before_terminate(self, runner_info): for t_name, t_members, t_index in zip(self.t_name, self.t_member_list, range(self.n_t)): # split info such as reward and observation self._split_intel(runner_info, t_members, t_name, t_index) def _update_runner(self, runner_info, ENV_PAUSE, t_name, key, content): u_key = t_name+key if (u_key in runner_info) and hasattr(content, '__len__') and \ len(content)==self.n_thread and ENV_PAUSE.any(): runner_info[u_key][~ENV_PAUSE] = content[~ENV_PAUSE] return runner_info[u_key] = content return # seperate observation between teams def _split_intel(self, runner_info, t_members, t_name, t_index): # RUNNING = ~runner_info['ENV-PAUSE'] # Team_Info and ter_obs_echo are None when runner_info['Latest-Team-Info'] is absent Team_Info = None ter_obs_echo = None # load Team_Info and ter_obs_echo if 
runner_info['Latest-Team-Info'] is not None: assert isinstance(runner_info['Latest-Team-Info'][0], dict) Team_Info = runner_info['Latest-Team-Info'] # if a env just ended ('Env-Suffered-Reset'), the final step obs can be acquired here ter_obs_echo = np.array([None for _ in range(self.n_thread)], dtype=object) for thread_idx, done in enumerate(runner_info['Env-Suffered-Reset']): if done and ('obs-echo' in Team_Info[thread_idx]): ter_obs_echo[thread_idx] = self.__split_obs_thread(Team_Info[thread_idx]['obs-echo'], t_index) Team_Info_Downstream = copy.deepcopy(Team_Info) for i in range(len(Team_Info_Downstream)): if 'obs-echo' in Team_Info_Downstream[i]: Team_Info_Downstream[i].pop('obs-echo') o = self.__split_obs(runner_info['Latest-Obs'], t_index) reward = runner_info['Latest-Reward'] # summary t_intel_basic = { 'Team_Name': t_name, 'Latest-Obs': o, 'Latest-Team-Info': Team_Info_Downstream, 'Env-Suffered-Reset': runner_info['Env-Suffered-Reset'], 'Terminal-Obs-Echo': ter_obs_echo, 'ENV-PAUSE': runner_info['ENV-PAUSE'], 'Test-Flag': runner_info['Test-Flag'], 'Latest-Reward': reward[:, t_members] if not self.RewardAsUnity else reward[:, t_index], 'Current-Obs-Step': runner_info['Current-Obs-Step'] } # deal with algorithm callback key = f'{t_name}_hook_' if (key in runner_info) and (runner_info[key] is not None): t_intel_basic['_hook_'] = runner_info[key] self.deal_with_hook(t_intel_basic['_hook_'], t_intel_basic, t_index) runner_info[key] = None t_intel_basic['_hook_'] = None # remove _hook_ key t_intel_basic.pop('_hook_') # t_intel_basic = self.filter_running(t_intel_basic, RUNNING) return t_intel_basic def _append_act_to_list(self, _act_, actions_list, t_members): if not self.legacy_act_order: _act_ = np.swapaxes(_act_, 0, 1) assert _act_.shape[0]==len(t_members), ('number of actions differs number of agents!') append_op = actions_list.append if self.ActAsUnity else actions_list.extend append_op(_act_) return actions_list def deal_with_hook(self, hook, 
t_intel_basic, t_index): # use the hook left by algorithm to callback some function # to deliver reward and reset signals # assert self.L_RUNNING is not None # t_intel_basic = self.filter_running(t_intel_basic, self.L_RUNNING) arg = { 'reward':t_intel_basic['Latest-Reward'], 'done': t_intel_basic['Env-Suffered-Reset'], 'info': t_intel_basic['Latest-Team-Info'], 'Latest-Obs':t_intel_basic['Latest-Obs'], 'Terminal-Obs-Echo': t_intel_basic['Terminal-Obs-Echo'], } if hook == 'call_hook_deligate': # name, dowhat, args_list index_list self.alg_parallel_exe.exec_target( name='alg_parallel_exe', dowhat='call_hook_deligate', args_list=[arg], index_list=[t_index], ensure_safe=True ) else: hook(arg) def notify_teams(self, message, **kargs): for algo_fdn in self.algo_foundations: if (not hasattr(algo_fdn, 'on_notify')) or (not callable(algo_fdn.on_notify)): continue algo_fdn.on_notify(message, **kargs) def __split_obs(self, obs, t_index): # obs [n_thread, n_team/n_agent, coredim] if obs[0] is None: o = None elif self.ObsAsUnity: o = obs[:, t_index] else: # in most cases o = obs[:, self.t_member_list[t_index]] return o def __split_obs_thread(self, obs, t_index): # obs [n_thread, n_team/n_agent, coredim] if self.ObsAsUnity: o = obs[t_index] else: # in most cases o = obs[self.t_member_list[t_index]] return o ================================================ FILE: PythonExample/hmp_minimal_modules/reproduce.jsonc ================================================ { // --- Part1: config HMP core --- "config.py->GlobalConfig": { "note": "random-reproduce",// http://localhost:59547 "env_name": "uhmap", "env_path": "MISSION.uhmap", // "heartbeat_on": "False", "draw_mode": "Img", "num_threads": 1, // 请预留 num_threads * 1 GB 的内存空间 "report_reward_interval": 128, "test_interval": 1280, "mt_act_order": "new_method", "test_epoch": 512, "interested_team": 0, "seed": 10098, "device": "cpu", "max_n_episode": 5000000, "fold": 1, "backup_files": [ "MISSION/uhmap" ] }, // --- Part2: config MISSION 
--- "MISSION.uhmap.uhmap_env_wrapper.py->ScenarioConfig": { "N_AGENT_EACH_TEAM": [ 200 ], // 8 agent, single team "MaxEpisodeStep": 50, "StepGameTime": 0.5, "StateProvided": false, "render": false, // note: random seed has different impact on renderer and server "UElink2editor": false, "HeteAgents": false, "UnrealLevel": "UhmapReproduce", "SubTaskSelection": "UhmapReproduce", "UhmapVersion":"3.8", "UhmapRenderExe": "../../Build/WindowsNoEditor/UHMP.exe", "UhmapServerExe": "../../Build/WindowsServer/UHMPServer.exe", // "UhmapRenderExe": "/home/hmp/UnrealHmapBinary/Version3.8/LinuxNoEditor/UHMP.sh", // "UhmapServerExe": "/home/hmp/UnrealHmapBinary/Version3.8/LinuxServer/UHMPServer.sh", "TimeDilation": 1, // simulation time speed up, larger is faster "TEAM_NAMES": [ "ALGORITHM.script_ai.a_test_reproduce->TestReproduce", ] }, // --- Part3: config ALGORITHM 1/2 --- "ALGORITHM.script_ai.a_test_reproduce.py->AlgorithmConfig": { }, } ================================================ FILE: PythonExample/hmp_minimal_modules/task_runner.py ================================================ """ Author: Fu Qingxu,CASIA Description: HMP task runner, coordinates environments and algorithms Notes before you read code: In general, HMP task runner can operate two ways: self.align_episode = False: threads immediately restart at terminal state, threads do not wait each other self.align_episode = True: threads pause at terminal state, waiting until all threads terminate, then reset """ import time, os import numpy as np from UTIL.colorful import * from UTIL.exp_helper import upload_exp from config import GlobalConfig as cfg from MISSION.env_router import make_parallel_envs class Runner(object): def __init__(self, process_pool): self.process_pool = process_pool self.envs = make_parallel_envs(process_pool) # parallel environments start self.mcv = self.get_a_logger(cfg.note) # multiagent silent logging bridge active if cfg.mt_parallel: from multi_team_parallel import MMPlatform # parallel 
the decision process else: from multi_team import MMPlatform self.platform_controller = MMPlatform(self.mcv, self.envs) # block infomation access between teams self.info_runner = {} # dict of realtime obs, reward, reward, info et.al. self.n_agent = sum(cfg.ScenarioConfig.N_AGENT_EACH_TEAM) self.n_team = len(cfg.ScenarioConfig.N_AGENT_EACH_TEAM) # please specify: env gives reward of each team instead of agent ? self.RewardAsUnity = False if hasattr(cfg.ScenarioConfig, 'RewardAsUnity'): self.RewardAsUnity = cfg.ScenarioConfig.RewardAsUnity # let test env sleep (when not used) to save memory ? self.test_env_sleepy = False if hasattr(cfg.ScenarioConfig, 'CanTurnOff'): self.test_env_sleepy = cfg.ScenarioConfig.CanTurnOff self.n_thread = cfg.num_threads self.n_frame = cfg.n_parallel_frame self.note = cfg.note # experiment note self.hb_on = cfg.heartbeat_on and stdout.isatty() # show the environment stepping heartbeat self.current_n_frame = 0 self.current_n_episode = 0 self.max_n_episode = cfg.max_n_episode # Reward monitoring for agents of your interest self.train_time_testing = cfg.train_time_testing self.test_interval = cfg.test_interval self.test_only = cfg.test_only self.align_episode = cfg.align_episode self._exit_early_ = False self._init_interested_agent_logging() # ------------------------------------------------------------------------- # ------------------------------ Major Loop ------------------------------- # ------------------------------------------------------------------------- def run(self): # all item in self.info_runner: shape =(n_thread, n_agent/n_team, ...) 
self.init_runner() # test machine performance tic = time.time() # start simulation for cnt in range(self.n_frame): # line 1: get action, block infomation access between teams (LINK to ARGORITHM) # (The controller can also handle algorithm internal state loopback by following simple rules) actions_list, self.info_runner = self.platform_controller.act(self.info_runner) # line 2: multi-thread environment step (LINK to MISSION) # (When thread align is needed, NaN actions will be used to make envs freeze for a step) obs, reward, done, info = self.envs.step(actions_list) # line 3: prepare obs and reward for next round # (If required, a test run will be started at proper time) self.info_runner = self.update_runner(done, obs, reward, info) toc=time.time(); dt = toc-tic; tic = toc if self.hb_on: print('\r [task runner]: FPS %d, episode steping %s '%( self.get_fps(dt), self.heartbeat()), end='', flush=True) if self._exit_early_: print('exit_early'); break # All task done! Time to shut down return def init_runner(self): self.info_runner['Test-Flag'] = self.test_only # not testing mode for rl methods self.info_runner['Recent-Reward-Sum'] = [] self.info_runner['Recent-Win'] = [] self.info_runner['Recent-Team-Ranking'] = [] obs_info = self.envs.reset() # assumes only the first time reset is manual self.info_runner['Latest-Obs'], self.info_runner['Latest-Team-Info'] = obs_info if isinstance(obs_info, tuple) else (obs_info, None) self.info_runner['Env-Suffered-Reset'] = np.array([True for _ in range(self.n_thread)]) self.info_runner['ENV-PAUSE'] = np.array([False for _ in range(self.n_thread)]) self.info_runner['Current-Obs-Step'] = np.array([0 for _ in range(self.n_thread)]) self.info_runner['Latest-Reward'] = np.zeros(shape=(self.n_thread, self.n_agent)) self.info_runner['Latest-Reward-Sum'] = np.zeros(shape=(self.n_thread, self.n_agent)) self.info_runner['Thread-Episode-Cnt'] = np.array([0 for _ in range(self.n_thread)]) if self.RewardAsUnity: self.info_runner['Latest-Reward'] 
= np.zeros(shape=(self.n_thread, self.n_team)) self.info_runner['Latest-Reward-Sum'] = np.zeros(shape=(self.n_thread, self.n_team)) return def update_runner(self, done, obs, reward, info): P = self.info_runner['ENV-PAUSE'] R = ~P assert info is not None if self.info_runner['Latest-Team-Info'] is None: self.info_runner['Latest-Team-Info'] = info self.info_runner['Latest-Obs'][R] = obs[R] self.info_runner['Latest-Team-Info'][R] = info[R] self.info_runner['Latest-Reward'][R] = reward[R] # note, reward shape: (thread, n-team\n-agent) self.info_runner['Latest-Reward-Sum'][R] += reward[R] self.info_runner['Current-Obs-Step'][R] += 1 for i in range(self.n_thread): self.info_runner['Env-Suffered-Reset'][i] = done[i].all() # if the environment has not been reset, do nothing if P[i] or (not self.info_runner['Env-Suffered-Reset'][i]): continue # otherwise, the environment just been reset self.current_n_frame += self.info_runner['Current-Obs-Step'][i] self.current_n_episode += 1 self.info_runner['Recent-Reward-Sum'].append(self.info_runner['Latest-Reward-Sum'][i].copy()) term_info = self.info_runner['Latest-Team-Info'][i] # update win/lose (single-team), or team ranking (multi-team) win = 1 if 'win' in term_info and term_info['win']==True else 0 self.info_runner['Recent-Win'].append(win) if 'team_ranking' in term_info: self.info_runner['Recent-Team-Ranking'].append(term_info['team_ranking'].copy()) self.info_runner['Latest-Reward-Sum'][i] = 0 self.info_runner['Current-Obs-Step'][i] = 0 self.info_runner['Thread-Episode-Cnt'][i] += 1 # hault finished threads to wait unfinished ones if self.align_episode: self.info_runner['ENV-PAUSE'][i] = True # monitoring agents/team of interest if self.current_n_episode % self.report_interval == 0: self._checkout_interested_agents(self.info_runner) # monitor rewards for some specific agents self.info_runner['Recent-Reward-Sum'] = [] self.info_runner['Recent-Win'] = [] self.info_runner['Recent-Team-Ranking'] = [] # begin a testing session? 
if self.train_time_testing and (not self.test_only) and (self.current_n_episode % self.test_interval == 0): self.platform_controller.before_terminate(self.info_runner) self.start_a_test_run() # all threads haulted, finished and Aligned, then restart all thread if self.align_episode and self.info_runner['ENV-PAUSE'].all(): self.info_runner['ENV-PAUSE'][:] = False # when too many episode is done, Terminate flag on. if self.current_n_episode >= self.max_n_episode: self._exit_early_ = True return self.info_runner # ------------------------------------------------------------------------------------------------------------------------------------------ # ------------------------------------------ About TEST RUN routine, almost a Mirror of above ---------------------------------------------- # ------------------------------------------------------------------------------------------------------------------------------------------ # -- I know these code below might merge with above for simplicity -- # -- But I decide not, in order to make it easier to read and debug -- if cfg.train_time_testing: def start_a_test_run(self): print靛('\r[task runner]: test run started!') self.init_test_runner() # loop until all env is done assert cfg.test_epoch%self.n_thread == 0, ('please set test_epoch as (n_thread * N)!') num_runs = cfg.test_epoch // self.n_thread print靛('\r[task runner]: test run is going to run %d episode'%cfg.test_epoch) while True: actions_list, self.test_info_runner = self.platform_controller.act(self.test_info_runner) obs, reward, done, info = self.test_envs.step(actions_list) self.test_info_runner = self.update_test_runner(done, obs, reward, info) if self.hb_on: print('\r [task runner]: testing %s '%self.heartbeat( style=3, beat=self.test_info_runner['Current-Obs-Step']), end='', flush=True) # If the test run reach its end, record the reward and win-rate: if (self.test_info_runner['Thread-Episode-Cnt']>=num_runs).all(): # get the reward average t_win_rates, 
t_rewards = self._checkout_interested_agents(self.test_info_runner, testing=True) self.platform_controller.before_terminate(self.test_info_runner) self.platform_controller.notify_teams('test done', win_rate=t_win_rates, mean_reward=t_rewards) # close all if self.test_env_sleepy: self.test_envs.sleep() return def init_test_runner(self): if not hasattr(self, 'test_envs'): self.test_envs = make_parallel_envs(self.process_pool, marker='test') # 平行环境 self.test_info_runner = {} # dict of realtime obs, reward, reward, info et.al. self.test_info_runner['ENV-PAUSE'] = np.array([False for _ in range(self.n_thread)]) self.test_info_runner['Test-Flag'] = True self.test_info_runner['Recent-Win'] = [] self.test_info_runner['Recent-Reward-Sum'] = [] self.test_info_runner['Recent-Team-Ranking'] = [] test_obs_info = self.test_envs.reset() # assume only the first time reset is manual self.test_info_runner['Latest-Obs'], self.test_info_runner['Latest-Team-Info'] = test_obs_info if isinstance(test_obs_info, tuple) else (test_obs_info, None) self.test_info_runner['Env-Suffered-Reset'] = np.array([True for _ in range(self.n_thread)]) self.test_info_runner['Latest-Reward'] = np.zeros(shape=(self.n_thread, self.n_agent)) self.test_info_runner['Latest-Reward-Sum'] = np.zeros(shape=(self.n_thread, self.n_agent)) self.test_info_runner['Current-Obs-Step'] = np.array([0 for _ in range(self.n_thread)]) self.test_info_runner['Thread-Episode-Cnt'] = np.array([0 for _ in range(self.n_thread)]) if self.RewardAsUnity: self.test_info_runner['Latest-Reward'] = np.zeros(shape=(self.n_thread, self.n_team)) self.test_info_runner['Latest-Reward-Sum'] = np.zeros(shape=(self.n_thread, self.n_team)) return def update_test_runner(self, done, obs, reward, info): P = self.test_info_runner['ENV-PAUSE'] R = ~P assert info is not None if self.test_info_runner['Latest-Team-Info'] is None: self.test_info_runner['Latest-Team-Info'] = info self.test_info_runner['Latest-Obs'][R] = obs[R] 
self.test_info_runner['Latest-Team-Info'][R] = info[R] self.test_info_runner['Latest-Reward'][R] = reward[R] self.test_info_runner['Latest-Reward-Sum'][R] += reward[R] self.test_info_runner['Current-Obs-Step'][R] += 1 for i in range(self.n_thread): self.test_info_runner['Env-Suffered-Reset'][i] = done[i].all() # if the environment has not been reset, do nothing if P[i] or (not self.test_info_runner['Env-Suffered-Reset'][i]): continue # otherwise, the environment just been reset self.test_info_runner['Recent-Reward-Sum'].append(self.test_info_runner['Latest-Reward-Sum'][i].copy()) self.test_info_runner['Latest-Reward-Sum'][i] = 0 self.test_info_runner['Current-Obs-Step'][i] = 0 self.test_info_runner['Thread-Episode-Cnt'][i] += 1 term_info = self.test_info_runner['Latest-Team-Info'][i] win = 1 if 'win' in term_info and term_info['win']==True else 0 self.test_info_runner['Recent-Win'].append(win) if 'team_ranking' in term_info: self.test_info_runner['Recent-Team-Ranking'].append(term_info['team_ranking'].copy()) if self.align_episode: self.test_info_runner['ENV-PAUSE'][i] = True if self.align_episode and self.test_info_runner['ENV-PAUSE'].all(): self.test_info_runner['ENV-PAUSE'][:] = False return self.test_info_runner # -- If you care much about the agents running your algorthm... -- # -- you may delete them if monitering is established in ALGORITHM level -- def _init_interested_agent_logging(self): self.report_interval = cfg.report_reward_interval self.interested_agents_uid = cfg.interested_agent_uid self.interested_team = cfg.interested_team self.top_rewards = None self.test_top_rewards = None return def _checkout_interested_agents(self, info_runner, testing=False): # (1). 
record mean reward if not testing: self.mcv.rec(self.current_n_episode, 'time') prefix = 'test' if testing else '' recent_rewards = np.stack(info_runner['Recent-Reward-Sum']) mean_reward_each_team = [] if self.RewardAsUnity: for interested_team in range(self.n_team): mean_reward_each_team.append(recent_rewards[:, interested_team].mean().copy()) else: for interested_team in range(self.n_team): tean_agent_uid = cfg.ScenarioConfig.AGENT_ID_EACH_TEAM[interested_team] mean_reward_each_team.append(recent_rewards[:, tean_agent_uid].mean().copy()) for team in range(self.n_team): self.mcv.rec(mean_reward_each_team[team], f'{prefix} reward of=team-{team}') # (2).reflesh historical top reward if not testing: if self.top_rewards is None: self.top_rewards = mean_reward_each_team top_rewards_list_pointer = self.top_rewards else: if self.test_top_rewards is None: self.test_top_rewards = mean_reward_each_team top_rewards_list_pointer = self.test_top_rewards for team in range(self.n_team): if mean_reward_each_team[team] > top_rewards_list_pointer[team]: top_rewards_list_pointer[team] = mean_reward_each_team[team] self.mcv.rec(top_rewards_list_pointer[team], f'{prefix} top reward of=team-{team}') # (3).record winning rate (single-team) or record winning rate (multi-team) # for team in range(self.n_team): teams_ranking = info_runner['Recent-Team-Ranking'] win_rate_each_team = [0]*self.n_team if len(teams_ranking)>0: for team in range(self.n_team): rank_itr_team = np.array(teams_ranking)[:, team] win_rate = (rank_itr_team==0).mean() # 0 means rank first win_rate_each_team[team] = win_rate self.mcv.rec(win_rate, f'{prefix} top-rank ratio of=team-{team}') else: team = 0; assert self.n_team == 1, "There is only one team" win_rate_each_team[team] = np.array(info_runner['Recent-Win']).mean() win_rate = np.array(info_runner['Recent-Win']).mean() self.mcv.rec(win_rate, f'{prefix} win rate of=team-{team}') # plot the figure self.mcv.rec_show() if testing: print_info = ['\r[task runner]: Test 
result at episode %d.'%(self.current_n_episode)] else: print_info = ['\r[task runner]: (%s) Finished episode %d, frame %d.'%(self.note, self.current_n_episode, self.current_n_frame)] for team in range(self.n_team): print_info.append(' | team-%d: win rate: %.3f, recent reward %.3f'%(team, win_rate_each_team[team], mean_reward_each_team[team])) print靛(''.join(print_info)) return win_rate_each_team, mean_reward_each_team # -- below is nothing of importance -- # -- you may delete it or replace it with Tensorboard -- @staticmethod def get_a_logger(note): from VISUALIZE.mcom import mcom logdir = cfg.logdir if cfg.activate_logger: mcv = mcom( path='%s/logger/'%logdir, digit=16, rapid_flush=True, draw_mode=cfg.draw_mode, tag='[task_runner.py]', resume_mod=cfg.resume_mod) cfg.data_logger = mcv mcv.rec_init(color='b') return mcv def heartbeat(self, style=0, beat=None): # default ⠁⠈⠐⠠⢀⡀⠄⠂ width = os.get_terminal_size().columns if style==0: sym = ['⠁','⠈','⠐','⠠','⢀','⡀','⠄','⠂',] elif style==1: sym = ['◐ ','◓ ','◑ ','◒ '] elif style==2: sym = ['▁','▂','▃','▄','▅','▆','▇','█'] elif style==3: sym = ['💐','🌷','🌸','🌹','🌺','🌻','🌼',] if beat is None: beat = self.info_runner['Current-Obs-Step'] beat = beat % len(sym) beat = beat[:int(width*0.2)] beat.astype(int) beat = [sym[t] for t in beat] return ''.join(beat) def get_fps(self, dt): new_fps = int(self.n_thread/dt) if not hasattr(self, 'fps_smooth'): self.fps_smooth = new_fps else: self.fps_smooth = self.fps_smooth*0.98 + new_fps*0.02 return int(self.fps_smooth) ================================================ FILE: README.md ================================================ # Unreal-MAP [English](README.md) | [中文](README_CN.md) [![Version](https://img.shields.io/badge/version-3.14-blue)](https://github.com/binary-husky/unreal-map) [![License](https://img.shields.io/badge/license-MIT-green)](LICENSE) [![Python](https://img.shields.io/badge/python-3.7+-blue)](https://www.python.org/) [![Unreal 
Engine](https://img.shields.io/badge/Unreal%20Engine-4.27-blue)](https://www.unrealengine.com/) [![stars](https://img.shields.io/github/stars/binary-husky/unreal-map)](https://github.com/binary-husky/unreal-map) [![Documentation](https://img.shields.io/badge/docs-English-blue)](README.md) This is **Unreal Multi-Agent Playground** (Unreal-Map), a multi-agent general platform based on [Unreal Engine](https://www.unrealengine.com/). Here you can use all the capabilities of Unreal Engine (Blueprints, Behavior tree, Physics engine, AI navigation, 3D models/animations and Plugin resources, etc) to build elegant (but also computational efficient) and magnificent (but also experimentally reproducible) multi-agent environments. Unreal-MAP can not only be used to develop conventional multi-agent simulation environments, but has also been optimized for Multi-Agent Reinforcement Learning (MARL) simulation. You can use it to develop various realistic and complex MARL scenarios. You can also use Unreal-MAP together with our developed [HMAP](https://github.com/binary-husky/hmp2g) (a powerful MARL-specific experimental framework) to easily develop MARL scenarios and quickly deploy cutting-edge algorithms. > The present study aims to identify potential collaboration partners. If interested in this research project, please feel free to contact our office at CASIA: tenghai.qiu@ia.ac.cn, hutianyi2021@ia.ac.cn. > **Please ```star``` the Github project. Your encouragement is extremely important to us as researchers: ```https://github.com/binary-husky/unreal-map```** !
# 1. Introduction ### 1.1 Basic Introduction Unreal-based Multi-Agent Playground (Unreal-MAP) is a new generation of multi-agent general platform based on the Unreal Engine. This platform supports adversial training between swarms & algorithms, and it is the first (and currently the only) extensible RL/MARL environment based on the Unreal Engine to support multi-team training. ### 1.2 Architecture
Unreal-MAP employs a hierarchical five-layer architecture, where each layer builds upon the previous one. From bottom to top,the five layers are: *native layer*, *specification layer*, *base class layer*, ***advanced module layer***, and ***interface layer***. layer. **You only need to focus on the *advanced module layer* (Blueprint) and the *interface layer* (Python).** From the perspective of creating a standard MARL scenario, using these two layers is sufficient to modify all elements in the task (e.g., POMDP) such as states, actions, observations, transitions, etc. ### 1.3 Features Unreal-MAP can be used to develop various multi-agent simulation scenarios. Our case studies have already included scenarios with large-scale, heterogeneous, and multi-team characteristics. **Compared to other RL general platforms** such as [Unity ML-Agents](https://unity-technologies.github.io/ml-agents/), Unreal-MAP has the following advantages in terms of scientific research and experiment: **(1) Fully Open-Source and Easily Modifiable**: Unreal-MAP utilizes a layered design, and all components from the bottom-level engine to the top-level interfaces are open-sourced. **(2) Optimized Specifically for MARL**: The underlying engine of Unreal-MAP has been optimized to enhance efficiency in large-scale agent simulations and data transmission. **(3) Parallel Multi-Process Execution and Controllable Single-Process Time Flow**: Unreal-MAP supports the parallel execution of multiple simulation processes as well as the adjustment of the simulation time flow speed in a single process. You can accelerate simulations to speed up training or decelerate simulations for detailed slow-motion analysis. **Compared to all current MARL simulation environments**, Unreal-MAP has advantages in terms of scientific research and experiment: - **Freely build realistic tasks** using the massive resources available in the [Unreal Engine Marketplace](https://www.fab.com/). 
- Simultaneously supports **large-scale, heterogeneous, multi-team** simulations. - **Highly efficient training** with TPS (Timesteps per second) up to 10k+ and FPS (Frames per second) up to 10M+. - **Controllable simulation time**: you can accelerate simulation to speed up training (until CPU is fully utilized, acceleration doesn't consume extra memory or VRAM), or decelerate for slow-motion analysis. - **Strong reproducibility**: eliminated various butterfly effect factors in Unreal Engine that could cause experimental irreproducibility. - **Multi-platform support**: compile both Headless mode and rendering mode clients on Windows, Linux, and MacOS. - **Rich rendering mechanisms**: supports a) rendering in the UE editor, b) on a compiled pure rendering client, c) cross-platform real-time rendering. You can train on a Linux server and render on Windows host at the same time!
### 1.4 Some Future Works Unreal-MAP introduces modern game engines into the MARL field with tremendous potential. This potential is mainly reflected in two dimensions: **Scalability** and **Realism**. In terms of scalability, users can not only ***freely*** construct environments using the extremely rich resources from the Unreal Engine community, but can also ***quickly*** build environments according to their ideas using Unreal Engine's future generative AI plugins (such as [ACE](https://developer.nvidia.com/ace.)). In terms of realism, users can leverage Unreal-MAP to build ***highly realistic*** MARL environments and even develop ***digital twins*** of real-world scenarios. We attempted a sim2real demo using Unreal-MAP. In this demo, we first deployed a multi-UAV-UGV gaming scenario in the experimental field, then recreated the scenario using Unreal-MAP (including model proportions, agent kinematics and dynamics, etc.). We conducted training in the sim environment and then validated it in the real-world scenario, achieving preliminary positive results. In the current solution, Unreal-MAP not only serves as a simulation environment creator, but also acts as a data transmission intermediary, connecting data from the real-world scenario with the algorithmic side.
# 2. How to Install ## 2.1 Professional version - Step 1, you must install the Unreal Engine from the source code. For details, see the official document of the Unreal Engine: ```https://docs.unrealengine.com/4.27/zh-CN/ProductionPipelines/DevelopmentSetup/BuildingUnrealEngine/``` - Step 2: Clone the git resp ```git clone https://github.com/binary-husky/unreal-hmp.git``` - Step 3: Download large files that github cannot manage. Run ```python Please_ Run_ This_ First_ To_ Fetch_ Big_ Files.py``` - Step 4: Right click the ```UHMP.upproject``` downloaded in step 3, select ```switch unreal engine version```, and then select ```source build at xxxxx``` to confirm. Then open the generated ```UHMP. sln``` and compile it - Finally, double-click ```UHMP. upproject``` to enter the Unreal Engine Editor. Note that steps 1 and 4 are difficult. It is recommended to refer to the following video (the 0:00->1:46 in the video is the steps 1, and 1:46->end is steps 4): ```https://ageasga-my.sharepoint.com/:v:/g/personal/fuqingxu_yiteam_tech/EawfqsV2jF5Nsv3KF7X1-woBH-VTvELL6FSRX4cIgUboLg?e=Vmp67E``` ## 2.2 Only compiled binary version ```https://github.com/binary-husky/hmp2g/blob/master/ZDOCS/use_unreal_hmap.md``` # 3. Tutorial The document is being improved. For the video tutorial of simple demo, see ```EnvDesignTutorial.pptx``` (you need to complete step 3 of installation to download this pptx file) Directory: - Chapter I. Unreal Engine - - Build a map (Level) ```https://www.bilibili.com/video/BV1U24y1D7i4/?spm_id_from=333.999.0.0&vd_source=e3bc3eddd1d2414cb64ae72b6a64df55``` - - Establish Agent Actor - - Design agent blueprint program logic - - Episode key event notification mechanism - - Define Custom actions (Unreal Engine side) - - The Python side controls the custom parameters of the agent - Chapter II. 
Python Interface - - Create a task file (SubTask) - - Modify agent initialization code - - Modify the agent reward code - - Select the control algorithm of each team - - Full closed loop debugging method - Chapter III. Appendix - - Headless acceleration and cross-compiling Linux package - - Define Custom actions (Need to be familiar with the full closed-loop debugging method first) - - - Draft a list of actions - - - Python side action generation - - - UE-side action parse and execution - - - Action discretization - - Installation guide for cross compilation tool chain # 4. How to Build Binary Client Run following scripts. ``` python BuildlinuxRender.py python BuildLinuxServer.py python BuildWinRender.py python BuildWinServer.py ``` - Among them, ```Render/Server``` represents ```including graphic rendering / only computing```, the later is generally used for RL training. - Among them, ```Windows/linux``` represents the target operating system. Note that you need to install ```Unreal Engine Cross Compilation Tool``` to compile Linux programs on Windows. - After adding new ActionSets in ```Content/Assets/DefAction/ParseAction.uasset```, you may encounter ```Ensure condition failed: !FindPin(FFunctionEntryHelper::GetWorldContextPinName())``` error during packaging. If so, find and remove an extra blueprint function parameter named ```__WorldContext``` that you created by accident in ```ParseAction.uasset```. For more details: ```https://forums.unrealengine.com/t/ensure-condition-failed-on-project-start/469587``` - If you encounter BuildCMakeLib.Automation.cs(45,54): error CS1002 after project migration, please **Rebuild** (not Build!) the AutomationTool in Visual Studio. For more details: ```https://forums.unrealengine.com/t/unreal-engine-version-4-27-2-i-get-an-error-when-trying-to-package-any-project/270627``` # Cite this project ! 
``` @article{unrealmap, title={Unreal-MAP: Unreal-Engine-Based General Platform for Multi-Agent Reinforcement Learning}, author={Hu, Tianyi and Fu, Qingxu and Pu, Zhiqiang and Wang, Yuan and Qiu, Tenghai}, journal={arXiv preprint arXiv:2503.15947}, year={2025} } ``` ================================================ FILE: README_CN.md ================================================ # Unreal-MAP [English](README.md) | [中文](README_CN.md) [![Version](https://img.shields.io/badge/version-3.14-blue)](https://github.com/binary-husky/unreal-map) [![License](https://img.shields.io/badge/license-MIT-green)](LICENSE) [![Python](https://img.shields.io/badge/python-3.7+-blue)](https://www.python.org/) [![Unreal Engine](https://img.shields.io/badge/Unreal%20Engine-4.27-blue)](https://www.unrealengine.com/) [![stars](https://img.shields.io/github/stars/binary-husky/unreal-map)](https://github.com/binary-husky/unreal-map) [![Documentation](https://img.shields.io/badge/docs-中文-blue)](README_CN.md) 这是**虚幻多智能体游乐场**(Unreal-MAP),一个基于[虚幻引擎](https://www.unrealengine.com/)的多智能体通用平台。 在这里,您可以使用虚幻引擎的所有功能(蓝图、行为树、物理引擎、AI导航、3D模型/动画和插件资源等)来构建优雅(但也计算高效)和宏伟(但也实验可重现)的多智能体环境。 Unreal-MAP不仅可以用于开发常规的多智能体仿真环境,还针对多智能体强化学习(MARL)仿真进行了特殊优化。您可以使用它来开发各种真实和复杂的MARL场景。您还可以将Unreal-MAP与我们开发的[HMAP](https://github.com/binary-husky/hmp2g)(一个强大的MARL专用实验框架)一起使用,轻松开发MARL场景并快速部署前沿算法。 > 本研究旨在寻找潜在的合作伙伴。如果对这个研究项目感兴趣,请随时联系我们中科院自动化研究所的办公室:tenghai.qiu@ia.ac.cn, hutianyi2021@ia.ac.cn。 > **请为Github项目点亮```star```。作为研究人员,您的鼓励对我们来说极其重要:```https://github.com/binary-husky/unreal-hmp```** !
# 简介 基于虚幻引擎的多智能体游乐场(Unreal-MAP)是一个基于虚幻引擎的新一代多智能体通用平台。 该平台支持群体与算法之间的对抗训练,是第一个(也是目前唯一一个)支持多团队训练的基于虚幻引擎的可扩展RL/MARL环境。
Unreal-MAP采用分层五层架构,每一层都建立在前一层之上。从底层到顶层,这五层分别是:*原生层*、*规范层*、*基类层*、***高级模块层***和***接口层***。 **您只需要关注*高级模块层*(蓝图)和*接口层*(Python)**。 从创建标准MARL环境的角度来看,使用这两层就足以修改任务中的所有元素(例如POMDP),如状态、动作、观察、转换等。 Unreal-MAP可用于开发各种多智能体仿真场景。我们的案例研究已经包括了具有大规模、异构和多团队特征的场景。 **与其他RL通用平台**相比,如[Unity ML-Agents](https://unity-technologies.github.io/ml-agents/),Unreal-MAP在科研和实验方面具有以下优势: **(1) 完全开源且易于修改**:Unreal-MAP采用分层设计,从底层引擎到顶层接口的所有组件都是开源的。 **(2) 专门针对MARL优化**:Unreal-MAP的底层引擎已经过优化,以提高大规模智能体仿真和数据传输的效率。 **(3) 并行多进程执行和可控的单进程时间流**:Unreal-MAP支持多个仿真进程的并行执行以及单个进程中仿真时间流速度的调整。您可以加速仿真以加快训练速度,或减速仿真以进行详细的慢动作分析。 **与目前所有的MARL仿真环境相比**,Unreal-MAP在科研和实验方面具有优势: - 使用[虚幻引擎市场](https://www.fab.com/)中的海量资源**自由构建真实任务**。 - 同时支持**大规模、异构、多团队**仿真。 - **高效训练**,TPS(每秒时间步数)高达10k+,FPS(每秒帧数)高达10M+。 - **可控的仿真时间**:您可以加速仿真以加快训练速度(直到CPU完全利用,加速不会消耗额外的内存或显存),或减速以进行慢动作分析。 - **强大的可重现性**:消除了虚幻引擎中可能导致实验不可重现的各种蝴蝶效应因素。 - **多平台支持**:在Windows、Linux和MacOS上编译无头模式和渲染模式客户端。 - **丰富的渲染机制**:支持a)在UE编辑器中渲染,b)在编译的纯渲染客户端上渲染,c)跨平台实时渲染。
# 如何安装 ## 完整版本 - 步骤1,您必须从源代码安装虚幻引擎。详情请参见虚幻引擎官方文档:```https://docs.unrealengine.com/4.27/zh-CN/ProductionPipelines/DevelopmentSetup/BuildingUnrealEngine/``` - 步骤2:克隆git仓库 ```git clone https://github.com/binary-husky/unreal-hmp.git``` - 步骤3:下载github无法管理的大文件。运行 ```python Please_ Run_ This_ First_ To_ Fetch_ Big_ Files.py``` - 步骤4:右键点击步骤3中下载的```UHMP.upproject```,选择```切换虚幻引擎版本```,然后选择```source build at xxxxx```确认。然后打开生成的```UHMP.sln```并编译 - 最后,双击```UHMP.upproject```进入虚幻引擎编辑器。 注意,步骤1和4比较困难。建议参考以下视频(视频中0:00->1:46是步骤1,1:46->结束是步骤4):```https://ageasga-my.sharepoint.com/:v:/g/personal/fuqingxu_yiteam_tech/EawfqsV2jF5Nsv3KF7X1-woBH-VTvELL6FSRX4cIgUboLg?e=Vmp67E``` ## 仅编译二进制版本 ```https://github.com/binary-husky/hmp2g/blob/master/ZDOCS/use_unreal_hmap.md``` # 教程 文档正在完善中。关于简单演示的视频教程,请参见```EnvDesignTutorial.pptx```(您需要完成安装步骤3才能下载此pptx文件) 目录: - 第一章 虚幻引擎 - - 构建地图(Level)```https://www.bilibili.com/video/BV1U24y1D7i4/?spm_id_from=333.999.0.0&vd_source=e3bc3eddd1d2414cb64ae72b6a64df55``` - - 建立智能体Actor - - 设计智能体蓝图程序逻辑 - - Episode关键事件通知机制 - - 定义自定义动作(虚幻引擎端) - - Python端控制智能体的自定义参数 - 第二章 Python接口 - - 创建任务文件(SubTask) - - 修改智能体初始化代码 - - 修改智能体奖励代码 - - 选择每个团队的控制算法 - - 完整闭环调试方法 - 第三章 附录 - - 无头加速和交叉编译Linux包 - - 定义自定义动作(需要先熟悉完整闭环调试方法) - - - 起草动作列表 - - - Python端动作生成 - - - UE端动作解析和执行 - - - 动作离散化 - - 交叉编译工具链安装指南 # 如何构建二进制客户端 运行以下脚本。 ``` python BuildlinuxRender.py python BuildLinuxServer.py python BuildWinRender.py python BuildWinServer.py ``` - 其中,```Render/Server```代表```包含图形渲染/仅计算```,后者通常用于RL训练。 - 其中,```Windows/linux```代表目标操作系统。注意,您需要安装```虚幻引擎交叉编译工具```才能在Windows上编译Linux程序。 # 常见问题 - 在```Content/Assets/DefAction/ParseAction.uasset```中添加新的ActionSets后,打包时可能遇到```Ensure condition failed: !FindPin(FFunctionEntryHelper::GetWorldContextPinName())```错误。如果出现这种情况,请在```ParseAction.uasset```中找到并删除您不小心创建的名为```__WorldContext```的额外蓝图函数参数。更多详情:```https://forums.unrealengine.com/t/ensure-condition-failed-on-project-start/469587``` - 如果在项目迁移后遇到BuildCMakeLib.Automation.cs(45,54): error CS1002,请在Visual 
Studio中**重新构建**(不是构建!)AutomationTool。更多详情:```https://forums.unrealengine.com/t/unreal-engine-version-4-27-2-i-get-an-error-when-trying-to-package-any-project/270627``` # 引用 ``` @article{unrealmap, title={Unreal-MAP: Unreal-Engine-Based General Platform for Multi-Agent Reinforcement Learning}, author={Hu, Tianyi and Fu, Qingxu and Pu, Zhiqiang and Wang, Yuan and Qiu, Tenghai}, journal={arXiv preprint arXiv:2503.15947}, year={2025} } ``` # Dev log 项目开发日志 - 2023-10-18 版本3.14 - 2023-4-30 版本3.8,引入标准化的高效感知模块 - 2023-3-9 正在尝试用共享内存通讯替换tcp通讯,以提高IO效率,待上传到4.0版本 - 2023-3-1 实现高效感知模块,待上传到4.0版本 - 2023-2-15 版本3.7融入master分支 - 2023-2-14 3.7上传中 - 2023-2-14 ```EnvDesignTutorial.pptx```中更新了自定义动作的文档 - 2023-2-14 上传了一个微缩版的hmp代码,作为入门用的U-MAP驱动,文档待写 - 2023-2-1 将读起来蹩脚的UHMAP缩写名称改为U-Map - 2023-1-8 update readme - 2023-12-25 covid is not a flu /(ㄒoㄒ)/ - 2022-12-22 版本3.6融入master分支 - 2022-12-21 解决智能体scale!=1的情况下,飞行智能体高度越来越低的问题 - 2022-12-21 修复超大规模智能体数量情况下缓存区溢出的问题 - 2022-12-18 优化大文件下载脚本 - 2022-12-17 版本3.5融入master分支 ================================================ FILE: Source/Jsonx/Jsonx.Build.cs ================================================ // Copyright Epic Games, Inc. All Rights Reserved. namespace UnrealBuildTool.Rules { public class Jsonx : ModuleRules { public Jsonx(ReadOnlyTargetRules Target) : base(Target) { PublicDependencyModuleNames.AddRange( new string[] { "Core", } ); PrivateIncludePaths.AddRange( new string[] { "Jsonx/Private", } ); } } } ================================================ FILE: Source/Jsonx/Private/Dom/JsonxObject.cpp ================================================ // Copyright Epic Games, Inc. All Rights Reserved. 
#include "Dom/JsonxObject.h"

// NOTE(review): the text extraction of this file stripped every template
// argument list (e.g. "TSharedPtr" where "TSharedPtr<FJsonxValue>" must
// appear). They are restored below using this module's Jsonx type names,
// mirroring Epic's FJsonObject implementation — confirm against
// Dom/JsonxObject.h.

// Adds (or replaces) the field under FieldName.
void FJsonxObject::SetField( const FString& FieldName, const TSharedPtr<FJsonxValue>& Value )
{
	this->Values.Add(FieldName, Value);
}


// Removes the field under FieldName; no-op if absent.
void FJsonxObject::RemoveField( const FString& FieldName )
{
	this->Values.Remove(FieldName);
}


// Checked accessor: reports an error (via GetField/AsNumber) on missing or
// non-numeric fields and returns a fallback value.
double FJsonxObject::GetNumberField( const FString& FieldName ) const
{
	return GetField<EJsonx::None>(FieldName)->AsNumber();
}


// Try-accessors return false (leaving the out-param untouched) instead of
// reporting an error, when the field is missing or of the wrong type.
bool FJsonxObject::TryGetNumberField( const FString& FieldName, double& OutNumber ) const
{
	TSharedPtr<FJsonxValue> Field = TryGetField(FieldName);
	return Field.IsValid() && Field->TryGetNumber(OutNumber);
}


bool FJsonxObject::TryGetNumberField( const FString& FieldName, int32& OutNumber ) const
{
	TSharedPtr<FJsonxValue> Field = TryGetField(FieldName);
	return Field.IsValid() && Field->TryGetNumber(OutNumber);
}


bool FJsonxObject::TryGetNumberField( const FString& FieldName, uint32& OutNumber ) const
{
	TSharedPtr<FJsonxValue> Field = TryGetField(FieldName);
	return Field.IsValid() && Field->TryGetNumber(OutNumber);
}


bool FJsonxObject::TryGetNumberField(const FString& FieldName, int64& OutNumber) const
{
	TSharedPtr<FJsonxValue> Field = TryGetField(FieldName);
	return Field.IsValid() && Field->TryGetNumber(OutNumber);
}


void FJsonxObject::SetNumberField( const FString& FieldName, double Number )
{
	this->Values.Add(FieldName, MakeShared<FJsonxValueNumber>(Number));
}


FString FJsonxObject::GetStringField( const FString& FieldName ) const
{
	return GetField<EJsonx::None>(FieldName)->AsString();
}


bool FJsonxObject::TryGetStringField( const FString& FieldName, FString& OutString ) const
{
	TSharedPtr<FJsonxValue> Field = TryGetField(FieldName);
	return Field.IsValid() && Field->TryGetString(OutString);
}


// Succeeds only if the field is an array whose elements are ALL convertible
// to strings; fails wholesale on the first non-string element.
bool FJsonxObject::TryGetStringArrayField( const FString& FieldName, TArray<FString>& OutArray ) const
{
	TSharedPtr<FJsonxValue> Field = TryGetField(FieldName);

	if (!Field.IsValid())
	{
		return false;
	}

	const TArray< TSharedPtr<FJsonxValue> > *Array;

	if (!Field->TryGetArray(Array))
	{
		return false;
	}

	for (int Idx = 0; Idx < Array->Num(); Idx++)
	{
		FString Element;

		if (!(*Array)[Idx]->TryGetString(Element))
		{
			return false;
		}

		OutArray.Add(Element);
	}

	return true;
}


void FJsonxObject::SetStringField( const FString& FieldName, const FString& StringValue )
{
	this->Values.Add(FieldName, MakeShared<FJsonxValueString>(StringValue));
}


bool FJsonxObject::GetBoolField( const FString& FieldName ) const
{
	return GetField<EJsonx::None>(FieldName)->AsBool();
}


bool FJsonxObject::TryGetBoolField( const FString& FieldName, bool& OutBool ) const
{
	TSharedPtr<FJsonxValue> Field = TryGetField(FieldName);
	return Field.IsValid() && Field->TryGetBool(OutBool);
}


void FJsonxObject::SetBoolField( const FString& FieldName, bool InValue )
{
	this->Values.Add(FieldName, MakeShared<FJsonxValueBoolean>(InValue));
}


const TArray< TSharedPtr<FJsonxValue> >& FJsonxObject::GetArrayField( const FString& FieldName ) const
{
	return GetField<EJsonx::None>(FieldName)->AsArray();
}


bool FJsonxObject::TryGetArrayField(const FString& FieldName, const TArray< TSharedPtr<FJsonxValue> >*& OutArray) const
{
	TSharedPtr<FJsonxValue> Field = TryGetField(FieldName);
	return Field.IsValid() && Field->TryGetArray(OutArray);
}


void FJsonxObject::SetArrayField( const FString& FieldName, const TArray< TSharedPtr<FJsonxValue> >& Array )
{
	this->Values.Add(FieldName, MakeShared<FJsonxValueArray>(Array));
}


const TSharedPtr<FJsonxObject>& FJsonxObject::GetObjectField( const FString& FieldName ) const
{
	return GetField<EJsonx::None>(FieldName)->AsObject();
}


bool FJsonxObject::TryGetObjectField( const FString& FieldName, const TSharedPtr<FJsonxObject>*& OutObject ) const
{
	TSharedPtr<FJsonxValue> Field = TryGetField(FieldName);
	return Field.IsValid() && Field->TryGetObject(OutObject);
}


void FJsonxObject::SetObjectField( const FString& FieldName, const TSharedPtr<FJsonxObject>& JsonxObject )
{
	if (JsonxObject.IsValid())
	{
		this->Values.Add(FieldName, MakeShared<FJsonxValueObject>(JsonxObject.ToSharedRef()));
	}
	else
	{
		// A null object pointer is stored as an explicit JSON null value.
		this->Values.Add(FieldName, MakeShared<FJsonxValueNull>());
	}
}

================================================ FILE: Source/Jsonx/Private/Dom/JsonxValue.cpp ================================================
// Copyright Epic Games, Inc. All Rights Reserved.
#include "Dom/JsonxValue.h"
#include "Dom/JsonxObject.h"

// NOTE(review): the text extraction of this file stripped every template
// argument list (e.g. "TNumericLimits::Min()" where "TNumericLimits<T>::Min()"
// must appear). They are restored below, mirroring Epic's FJsonValue
// implementation — confirm against Dom/JsonxValue.h.

// Checked conversion: reports an error via ErrorMessage and returns 0.0 when
// this value is not a number.
double FJsonxValue::AsNumber() const
{
	double Number = 0.0;

	if (!TryGetNumber(Number))
	{
		ErrorMessage(TEXT("Number"));
	}

	return Number;
}


// Checked conversion: reports an error and returns an empty string when this
// value is not a string.
FString FJsonxValue::AsString() const
{
	FString String;

	if (!TryGetString(String))
	{
		ErrorMessage(TEXT("String"));
	}

	return String;
}


// Checked conversion: reports an error and returns false when this value is
// not a boolean.
bool FJsonxValue::AsBool() const
{
	bool Bool = false;

	if (!TryGetBool(Bool))
	{
		ErrorMessage(TEXT("Boolean"));
	}

	return Bool;
}


// Checked conversion: on failure, reports an error and returns a reference to
// a shared static empty array so callers always receive a valid reference.
const TArray< TSharedPtr<FJsonxValue> >& FJsonxValue::AsArray() const
{
	const TArray< TSharedPtr<FJsonxValue> >* Array = nullptr;

	if (!TryGetArray(Array))
	{
		static const TArray< TSharedPtr<FJsonxValue> > EmptyArray;
		Array = &EmptyArray;
		ErrorMessage(TEXT("Array"));
	}

	return *Array;
}


// Checked conversion: on failure, reports an error and returns a reference to
// a shared static empty object.
const TSharedPtr<FJsonxObject>& FJsonxValue::AsObject() const
{
	const TSharedPtr<FJsonxObject>* Object = nullptr;

	if (!TryGetObject(Object))
	{
		static const TSharedPtr<FJsonxObject> EmptyObject = MakeShared<FJsonxObject>();
		Object = &EmptyObject;
		ErrorMessage(TEXT("Object"));
	}

	return *Object;
}

// -----------------------------------

// Converts a JSON number to the integral type T, rejecting values outside T's
// representable range; rounds half away from zero.
template <typename T>
bool TryConvertNumber(const FJsonxValue& InValue, T& OutNumber)
{
	double Double;

	if (InValue.TryGetNumber(Double) && (Double >= TNumericLimits<T>::Min()) && (Double <= static_cast<double>(TNumericLimits<T>::Max())))
	{
		OutNumber = static_cast<T>(FMath::RoundHalfFromZero(Double));
		return true;
	}

	return false;
}

// Need special handling for int64/uint64, due to overflow in the numeric limits.
// 2^63-1 and 2^64-1 cannot be exactly represented as a double, so TNumericLimits<>::Max() gets rounded up to exactly 2^63 or 2^64 by the compiler's implicit cast to double.
// This breaks the overflow check in TryConvertNumber. We use "<" rather than "<=" along with the exact power-of-two double literal to fix this.
template <> bool TryConvertNumber(const FJsonxValue& InValue, uint64& OutNumber) { double Double; if (InValue.TryGetNumber(Double) && Double >= 0.0 && Double < 18446744073709551616.0) { OutNumber = static_cast(FMath::RoundHalfFromZero(Double)); return true; } return false; } template <> bool TryConvertNumber(const FJsonxValue& InValue, int64& OutNumber) { double Double; if (InValue.TryGetNumber(Double) && Double >= -9223372036854775808.0 && Double < 9223372036854775808.0) { OutNumber = static_cast(FMath::RoundHalfFromZero(Double)); return true; } return false; } // ----------------------------------- bool FJsonxValue::TryGetNumber(float& OutNumber) const { double Double; if (TryGetNumber(Double)) { OutNumber = static_cast(Double); return true; } return false; } bool FJsonxValue::TryGetNumber(uint8& OutNumber) const { return TryConvertNumber(*this, OutNumber); } bool FJsonxValue::TryGetNumber(uint16& OutNumber) const { return TryConvertNumber(*this, OutNumber); } bool FJsonxValue::TryGetNumber(uint32& OutNumber) const { return TryConvertNumber(*this, OutNumber); } bool FJsonxValue::TryGetNumber(uint64& OutNumber) const { return TryConvertNumber(*this, OutNumber); } bool FJsonxValue::TryGetNumber(int8& OutNumber) const { return TryConvertNumber(*this, OutNumber); } bool FJsonxValue::TryGetNumber(int16& OutNumber) const { return TryConvertNumber(*this, OutNumber); } bool FJsonxValue::TryGetNumber(int32& OutNumber) const { return TryConvertNumber(*this, OutNumber); } bool FJsonxValue::TryGetNumber(int64& OutNumber) const { return TryConvertNumber(*this, OutNumber); } //static bool FJsonxValue::CompareEqual( const FJsonxValue& Lhs, const FJsonxValue& Rhs ) { if (Lhs.Type != Rhs.Type) { return false; } switch (Lhs.Type) { case EJsonx::None: case EJsonx::Null: return true; case EJsonx::String: return Lhs.AsString() == Rhs.AsString(); case EJsonx::Number: return Lhs.AsNumber() == Rhs.AsNumber(); case EJsonx::Boolean: return Lhs.AsBool() == Rhs.AsBool(); case EJsonx::Array: 
{ const TArray< TSharedPtr >& LhsArray = Lhs.AsArray(); const TArray< TSharedPtr >& RhsArray = Rhs.AsArray(); if (LhsArray.Num() != RhsArray.Num()) { return false; } // compare each element for (int32 i = 0; i < LhsArray.Num(); ++i) { if (!CompareEqual(*LhsArray[i], *RhsArray[i])) { return false; } } } return true; case EJsonx::Object: { const TSharedPtr& LhsObject = Lhs.AsObject(); const TSharedPtr& RhsObject = Rhs.AsObject(); if (LhsObject.IsValid() != RhsObject.IsValid()) { return false; } if (LhsObject.IsValid()) { if (LhsObject->Values.Num() != RhsObject->Values.Num()) { return false; } // compare each element for (const auto& It : LhsObject->Values) { const FString& Key = It.Key; const TSharedPtr* RhsValue = RhsObject->Values.Find(Key); if (RhsValue == NULL) { // not found in both objects return false; } const TSharedPtr& LhsValue = It.Value; if (LhsValue.IsValid() != RhsValue->IsValid()) { return false; } if (LhsValue.IsValid()) { if (!CompareEqual(*LhsValue.Get(), *RhsValue->Get())) { return false; } } } } } return true; default: return false; } } void FJsonxValue::ErrorMessage(const FString& InType) const { UE_LOG(LogJsonx, Error, TEXT("Jsonx Value of type '%s' used as a '%s'."), *GetType(), *InType); } ================================================ FILE: Source/Jsonx/Private/JsonxModule.cpp ================================================ // Copyright Epic Games, Inc. All Rights Reserved. #include "CoreMinimal.h" #include "JsonxGlobals.h" #include "Modules/ModuleInterface.h" #include "Modules/ModuleManager.h" DEFINE_LOG_CATEGORY(LogJsonx); /** * Implements the Jsonx module. 
*/ class FJsonxModule : public IModuleInterface { public: // IModuleInterface interface virtual void StartupModule( ) override { } virtual void ShutdownModule( ) override { } virtual bool SupportsDynamicReloading( ) override { return false; } }; IMPLEMENT_MODULE(FJsonxModule, Jsonx); ================================================ FILE: Source/Jsonx/Private/Tests/JsonxTests.cpp ================================================ // Copyright Epic Games, Inc. All Rights Reserved. #include "CoreMinimal.h" #include "Misc/AutomationTest.h" #include "Policies/CondensedJsonxPrintPolicy.h" #include "Serialization/JsonxTypes.h" #include "Serialization/JsonxReader.h" #include "Policies/PrettyJsonxPrintPolicy.h" #include "Serialization/JsonxSerializer.h" #if WITH_DEV_AUTOMATION_TESTS /** * FJsonxAutomationTest * Simple unit test that runs Jsonx's in-built test cases */ IMPLEMENT_SIMPLE_AUTOMATION_TEST(FJsonxAutomationTest, "System.Engine.FileSystem.JSONX", EAutomationTestFlags::ApplicationContextMask | EAutomationTestFlags::SmokeFilter ) typedef TJsonxWriterFactory< TCHAR, TCondensedJsonxPrintPolicy > FCondensedJsonxStringWriterFactory; typedef TJsonxWriter< TCHAR, TCondensedJsonxPrintPolicy > FCondensedJsonxStringWriter; typedef TJsonxWriterFactory< TCHAR, TPrettyJsonxPrintPolicy > FPrettyJsonxStringWriterFactory; typedef TJsonxWriter< TCHAR, TPrettyJsonxPrintPolicy > FPrettyJsonxStringWriter; /** * Execute the Jsonx test cases * * @return true if the test was successful, false otherwise */ bool FJsonxAutomationTest::RunTest(const FString& Parameters) { // Null Case { const FString InputString = TEXT(""); TSharedRef< TJsonxReader<> > Reader = TJsonxReaderFactory<>::Create( InputString ); TSharedPtr Object; check( FJsonxSerializer::Deserialize( Reader, Object ) == false ); check( !Object.IsValid() ); } // Empty Object Case { const FString InputString = TEXT("{}"); TSharedRef< TJsonxReader<> > Reader = TJsonxReaderFactory<>::Create( InputString ); TSharedPtr Object; check( 
FJsonxSerializer::Deserialize( Reader, Object ) ); check( Object.IsValid() ); FString OutputString; TSharedRef< FCondensedJsonxStringWriter > Writer = FCondensedJsonxStringWriterFactory::Create( &OutputString ); check( FJsonxSerializer::Serialize( Object.ToSharedRef(), Writer ) ); check( InputString == OutputString ); } // Empty Array Case { const FString InputString = TEXT("[]"); TSharedRef< TJsonxReader<> > Reader = TJsonxReaderFactory<>::Create( InputString ); TArray< TSharedPtr > Array; check( FJsonxSerializer::Deserialize( Reader, Array ) ); check( Array.Num() == 0 ); FString OutputString; TSharedRef< FCondensedJsonxStringWriter > Writer = FCondensedJsonxStringWriterFactory::Create( &OutputString ); check( FJsonxSerializer::Serialize( Array, Writer ) ); check( InputString == OutputString ); } // Simple Array Case { const FString InputString = TEXT("[") TEXT( "{") TEXT( "\"Value\":\"Some String\"") TEXT( "}") TEXT("]"); TSharedRef< TJsonxReader<> > Reader = TJsonxReaderFactory<>::Create( InputString ); TArray< TSharedPtr > Array; bool bSuccessful = FJsonxSerializer::Deserialize(Reader, Array); check(bSuccessful); check( Array.Num() == 1 ); check( Array[0].IsValid() ); TSharedPtr< FJsonxObject > Object = Array[0]->AsObject(); check( Object.IsValid() ); check( Object->GetStringField( TEXT("Value") ) == TEXT("Some String") ); FString OutputString; TSharedRef< FCondensedJsonxStringWriter > Writer = FCondensedJsonxStringWriterFactory::Create( &OutputString ); check( FJsonxSerializer::Serialize( Array, Writer ) ); check( InputString == OutputString ); } // Object Array Case { const FString InputString = TEXT("[") TEXT( "{") TEXT( "\"Value\":\"Some String1\"") TEXT( "},") TEXT( "{") TEXT( "\"Value\":\"Some String2\"") TEXT( "},") TEXT( "{") TEXT( "\"Value\":\"Some String3\"") TEXT( "}") TEXT("]"); TSharedRef< TJsonxReader<> > Reader = TJsonxReaderFactory<>::Create(InputString); TArray< TSharedPtr > Array; bool bSuccessful = FJsonxSerializer::Deserialize(Reader, 
Array); check(bSuccessful); check(Array.Num() == 3); check(Array[0].IsValid()); check(Array[1].IsValid()); check(Array[2].IsValid()); TSharedPtr< FJsonxObject > Object = Array[0]->AsObject(); check(Object.IsValid()); check(Object->GetStringField(TEXT("Value")) == TEXT("Some String1")); Object = Array[1]->AsObject(); check(Object.IsValid()); check(Object->GetStringField(TEXT("Value")) == TEXT("Some String2")); Object = Array[2]->AsObject(); check(Object.IsValid()); check(Object->GetStringField(TEXT("Value")) == TEXT("Some String3")); FString OutputString; TSharedRef< FCondensedJsonxStringWriter > Writer = FCondensedJsonxStringWriterFactory::Create(&OutputString); check(FJsonxSerializer::Serialize(Array, Writer)); check(InputString == OutputString); } // Number Array Case { const FString InputString = TEXT("[") TEXT("10,") TEXT("20,") TEXT("30,") TEXT("40") TEXT("]"); TSharedRef< TJsonxReader<> > Reader = TJsonxReaderFactory<>::Create(InputString); TArray< TSharedPtr > Array; bool bSuccessful = FJsonxSerializer::Deserialize(Reader, Array); check(bSuccessful); check(Array.Num() == 4); check(Array[0].IsValid()); check(Array[1].IsValid()); check(Array[2].IsValid()); check(Array[3].IsValid()); double Number = Array[0]->AsNumber(); check(Number == 10); Number = Array[1]->AsNumber(); check(Number == 20); Number = Array[2]->AsNumber(); check(Number == 30); Number = Array[3]->AsNumber(); check(Number == 40); FString OutputString; TSharedRef< FCondensedJsonxStringWriter > Writer = FCondensedJsonxStringWriterFactory::Create(&OutputString); check(FJsonxSerializer::Serialize(Array, Writer)); check(InputString == OutputString); } // String Array Case { const FString InputString = TEXT("[") TEXT("\"Some String1\",") TEXT("\"Some String2\",") TEXT("\"Some String3\",") TEXT("\"Some String4\"") TEXT("]"); TSharedRef< TJsonxReader<> > Reader = TJsonxReaderFactory<>::Create(InputString); TArray< TSharedPtr > Array; bool bSuccessful = FJsonxSerializer::Deserialize(Reader, Array); 
check(bSuccessful); check(Array.Num() == 4); check(Array[0].IsValid()); check(Array[1].IsValid()); check(Array[2].IsValid()); check(Array[3].IsValid()); FString Text = Array[0]->AsString(); check(Text == TEXT("Some String1")); Text = Array[1]->AsString(); check(Text == TEXT("Some String2")); Text = Array[2]->AsString(); check(Text == TEXT("Some String3")); Text = Array[3]->AsString(); check(Text == TEXT("Some String4")); FString OutputString; TSharedRef< FCondensedJsonxStringWriter > Writer = FCondensedJsonxStringWriterFactory::Create(&OutputString); check(FJsonxSerializer::Serialize(Array, Writer)); check(InputString == OutputString); } // Complex Array Case { const FString InputString = TEXT("[") TEXT( "\"Some String1\",") TEXT( "10,") TEXT( "{") TEXT( "\"Value\":\"Some String3\"") TEXT( "},") TEXT( "[") TEXT( "\"Some String4\",") TEXT( "\"Some String5\"") TEXT( "],") TEXT( "true,") TEXT( "null") TEXT("]"); TSharedRef< TJsonxReader<> > Reader = TJsonxReaderFactory<>::Create(InputString); TArray< TSharedPtr > Array; bool bSuccessful = FJsonxSerializer::Deserialize(Reader, Array); check(bSuccessful); check(Array.Num() == 6); check(Array[0].IsValid()); check(Array[1].IsValid()); check(Array[2].IsValid()); check(Array[3].IsValid()); check(Array[4].IsValid()); check(Array[5].IsValid()); FString Text = Array[0]->AsString(); check(Text == TEXT("Some String1")); double Number = Array[1]->AsNumber(); check(Number == 10); TSharedPtr< FJsonxObject > Object = Array[2]->AsObject(); check(Object.IsValid()); check(Object->GetStringField(TEXT("Value")) == TEXT("Some String3")); const TArray>& InnerArray = Array[3]->AsArray(); check(InnerArray.Num() == 2); check(Array[0].IsValid()); check(Array[1].IsValid()); Text = InnerArray[0]->AsString(); check(Text == TEXT("Some String4")); Text = InnerArray[1]->AsString(); check(Text == TEXT("Some String5")); bool Boolean = Array[4]->AsBool(); check(Boolean == true); check(Array[5]->IsNull() == true); FString OutputString; TSharedRef< 
FCondensedJsonxStringWriter > Writer = FCondensedJsonxStringWriterFactory::Create(&OutputString); check(FJsonxSerializer::Serialize(Array, Writer)); check(InputString == OutputString); } // String Test { const FString InputString = TEXT("{") TEXT( "\"Value\":\"Some String, Escape Chars: \\\\, \\\", \\/, \\b, \\f, \\n, \\r, \\t, \\u002B\"") TEXT("}"); TSharedRef< TJsonxReader<> > Reader = TJsonxReaderFactory<>::Create( InputString ); TSharedPtr Object; bool bSuccessful = FJsonxSerializer::Deserialize(Reader, Object); check(bSuccessful); check( Object.IsValid() ); const TSharedPtr* Value = Object->Values.Find(TEXT("Value")); check(Value && (*Value)->Type == EJsonx::String); const FString String = (*Value)->AsString(); check(String == TEXT("Some String, Escape Chars: \\, \", /, \b, \f, \n, \r, \t, +")); FString OutputString; TSharedRef< FCondensedJsonxStringWriter > Writer = FCondensedJsonxStringWriterFactory::Create( &OutputString ); check( FJsonxSerializer::Serialize( Object.ToSharedRef(), Writer ) ); const FString TestOutput = TEXT("{") TEXT( "\"Value\":\"Some String, Escape Chars: \\\\, \\\", /, \\b, \\f, \\n, \\r, \\t, +\"") TEXT("}"); check(OutputString == TestOutput); } //// Number Test //{ // const FString InputString = // TEXT("{") // TEXT( "\"Value1\":2.544e+15,") // TEXT( "\"Value2\":-0.544E-2,") // TEXT( "\"Value3\":251e3,") // TEXT( "\"Value4\":-0.0,") // TEXT( "\"Value5\":843") // TEXT("}"); // TSharedRef< TJsonxReader<> > Reader = TJsonxReaderFactory<>::Create( InputString ); // TSharedPtr Object; // bool bSuccessful = FJsonxSerializer::Deserialize(Reader, Object); // check(bSuccessful); // check( Object.IsValid() ); // double TestValues[] = {2.544e+15, -0.544e-2, 251e3, -0.0, 843}; // for (int32 i = 0; i < 5; ++i) // { // const TSharedPtr* Value = Object->Values.Find(FString::Printf(TEXT("Value%i"), i + 1)); // check(Value && (*Value)->Type == EJsonx::Number); // const double Number = (*Value)->AsNumber(); // check(Number == TestValues[i]); // } // 
FString OutputString; // TSharedRef< FCondensedJsonxStringWriter > Writer = FCondensedJsonxStringWriterFactory::Create( &OutputString ); // check( FJsonxSerializer::Serialize( Object.ToSharedRef(), Writer ) ); // // %g isn't standardized, so we use the same %g format that is used inside PrintJsonx instead of hardcoding the values here // const FString TestOutput = FString::Printf( // TEXT("{") // TEXT( "\"Value1\":%.17g,") // TEXT( "\"Value2\":%.17g,") // TEXT( "\"Value3\":%.17g,") // TEXT( "\"Value4\":%.17g,") // TEXT( "\"Value5\":%.17g") // TEXT("}"), // TestValues[0], TestValues[1], TestValues[2], TestValues[3], TestValues[4]); // check(OutputString == TestOutput); //} // Boolean/Null Test { const FString InputString = TEXT("{") TEXT( "\"Value1\":true,") TEXT( "\"Value2\":true,") TEXT( "\"Value3\":faLsE,") TEXT( "\"Value4\":null,") TEXT( "\"Value5\":NULL") TEXT("}"); TSharedRef< TJsonxReader<> > Reader = TJsonxReaderFactory<>::Create( InputString ); TSharedPtr Object; bool bSuccessful = FJsonxSerializer::Deserialize(Reader, Object); check(bSuccessful); check( Object.IsValid() ); bool TestValues[] = {true, true, false}; for (int32 i = 0; i < 5; ++i) { const TSharedPtr* Value = Object->Values.Find(FString::Printf(TEXT("Value%i"), i + 1)); check(Value); if (i < 3) { check((*Value)->Type == EJsonx::Boolean); const bool Bool = (*Value)->AsBool(); check(Bool == TestValues[i]); } else { check((*Value)->Type == EJsonx::Null); check((*Value)->IsNull()); } } FString OutputString; TSharedRef< FCondensedJsonxStringWriter > Writer = FCondensedJsonxStringWriterFactory::Create( &OutputString ); check( FJsonxSerializer::Serialize( Object.ToSharedRef(), Writer ) ); const FString TestOutput = TEXT("{") TEXT( "\"Value1\":true,") TEXT( "\"Value2\":true,") TEXT( "\"Value3\":false,") TEXT( "\"Value4\":null,") TEXT( "\"Value5\":null") TEXT("}"); check(OutputString == TestOutput); } // Object Test && extra whitespace test { const FString InputStringWithExtraWhitespace = TEXT(" \n\r\n 
{") TEXT( "\"Object\":") TEXT( "{") TEXT( "\"NestedValue\":null,") TEXT( "\"NestedObject\":{}") TEXT( "},") TEXT( "\"Value\":true") TEXT("} \n\r\n "); const FString InputString = TEXT("{") TEXT( "\"Object\":") TEXT( "{") TEXT( "\"NestedValue\":null,") TEXT( "\"NestedObject\":{}") TEXT( "},") TEXT( "\"Value\":true") TEXT("}"); TSharedRef< TJsonxReader<> > Reader = TJsonxReaderFactory<>::Create( InputStringWithExtraWhitespace ); TSharedPtr Object; bool bSuccessful = FJsonxSerializer::Deserialize(Reader, Object); check(bSuccessful); check( Object.IsValid() ); const TSharedPtr* InnerValueFail = Object->Values.Find(TEXT("InnerValue")); check(!InnerValueFail); const TSharedPtr* ObjectValue = Object->Values.Find(TEXT("Object")); check(ObjectValue && (*ObjectValue)->Type == EJsonx::Object); const TSharedPtr InnerObject = (*ObjectValue)->AsObject(); check(InnerObject.IsValid()); { const TSharedPtr* NestedValueValue = InnerObject->Values.Find(TEXT("NestedValue")); check(NestedValueValue && (*NestedValueValue)->Type == EJsonx::Null); check((*NestedValueValue)->IsNull()); const TSharedPtr* NestedObjectValue = InnerObject->Values.Find(TEXT("NestedObject")); check(NestedObjectValue && (*NestedObjectValue)->Type == EJsonx::Object); const TSharedPtr InnerInnerObject = (*NestedObjectValue)->AsObject(); check(InnerInnerObject.IsValid()); { const TSharedPtr* NestedValueValueFail = InnerInnerObject->Values.Find(TEXT("NestedValue")); check(!NestedValueValueFail); } } const TSharedPtr* ValueValue = Object->Values.Find(TEXT("Value")); check(ValueValue && (*ValueValue)->Type == EJsonx::Boolean); const bool Bool = (*ValueValue)->AsBool(); check(Bool); FString OutputString; TSharedRef< FCondensedJsonxStringWriter > Writer = FCondensedJsonxStringWriterFactory::Create( &OutputString ); check( FJsonxSerializer::Serialize( Object.ToSharedRef(), Writer ) ); check(OutputString == InputString); } // Array Test { const FString InputString = TEXT("{") TEXT( "\"Array\":") TEXT( "[") TEXT( "[],") 
TEXT( "\"Some String\",") TEXT( "\"Another String\",") TEXT( "null,") TEXT( "true,") TEXT( "false,") TEXT( "45,") TEXT( "{}") TEXT( "]") TEXT("}"); TSharedRef< TJsonxReader<> > Reader = TJsonxReaderFactory<>::Create( InputString ); TSharedPtr Object; bool bSuccessful = FJsonxSerializer::Deserialize(Reader, Object); check(bSuccessful); check( Object.IsValid() ); const TSharedPtr* InnerValueFail = Object->Values.Find(TEXT("InnerValue")); check(!InnerValueFail); const TSharedPtr* ArrayValue = Object->Values.Find(TEXT("Array")); check(ArrayValue && (*ArrayValue)->Type == EJsonx::Array); const TArray< TSharedPtr > Array = (*ArrayValue)->AsArray(); check(Array.Num() == 8); EJsonx ValueTypes[] = {EJsonx::Array, EJsonx::String, EJsonx::String, EJsonx::Null, EJsonx::Boolean, EJsonx::Boolean, EJsonx::Number, EJsonx::Object}; for (int32 i = 0; i < Array.Num(); ++i) { const TSharedPtr& Value = Array[i]; check(Value.IsValid()); check(Value->Type == ValueTypes[i]); } const TArray< TSharedPtr >& InnerArray = Array[0]->AsArray(); check(InnerArray.Num() == 0); check(Array[1]->AsString() == TEXT("Some String")); check(Array[2]->AsString() == TEXT("Another String")); check(Array[3]->IsNull()); check(Array[4]->AsBool()); check(!Array[5]->AsBool()); check(FMath::Abs(Array[6]->AsNumber() - 45.f) < KINDA_SMALL_NUMBER); const TSharedPtr InnerObject = Array[7]->AsObject(); check(InnerObject.IsValid()); FString OutputString; TSharedRef< FCondensedJsonxStringWriter > Writer = FCondensedJsonxStringWriterFactory::Create( &OutputString ); check( FJsonxSerializer::Serialize( Object.ToSharedRef(), Writer ) ); check(OutputString == InputString); } // Pretty Print Test { const FString InputString = TEXT("{") LINE_TERMINATOR TEXT(" \"Data1\": \"value\",") LINE_TERMINATOR TEXT(" \"Data2\": \"value\",") LINE_TERMINATOR TEXT(" \"Array\": [") LINE_TERMINATOR TEXT(" {") LINE_TERMINATOR TEXT(" \"InnerData1\": \"value\"") LINE_TERMINATOR TEXT(" },") LINE_TERMINATOR TEXT(" [],") LINE_TERMINATOR TEXT(" [ 1, 
2, 3, 4 ],") LINE_TERMINATOR TEXT(" {") LINE_TERMINATOR TEXT(" },") LINE_TERMINATOR TEXT(" \"value\",") LINE_TERMINATOR TEXT(" \"value\"") LINE_TERMINATOR TEXT(" ],") LINE_TERMINATOR TEXT(" \"Object\":") LINE_TERMINATOR TEXT(" {") LINE_TERMINATOR TEXT(" }") LINE_TERMINATOR TEXT("}"); TSharedRef< TJsonxReader<> > Reader = TJsonxReaderFactory<>::Create( InputString ); TSharedPtr Object; check( FJsonxSerializer::Deserialize( Reader, Object ) ); check( Object.IsValid() ); FString OutputString; TSharedRef< FPrettyJsonxStringWriter > Writer = FPrettyJsonxStringWriterFactory::Create( &OutputString ); check( FJsonxSerializer::Serialize( Object.ToSharedRef(), Writer ) ); check(OutputString == InputString); } // Line and Character # test { const FString InputString = TEXT("{") LINE_TERMINATOR TEXT(" \"Data1\": \"value\",") LINE_TERMINATOR TEXT(" \"Array\":") LINE_TERMINATOR TEXT(" [") LINE_TERMINATOR TEXT(" 12345,") LINE_TERMINATOR TEXT(" True") LINE_TERMINATOR TEXT(" ],") LINE_TERMINATOR TEXT(" \"Object\":") LINE_TERMINATOR TEXT(" {") LINE_TERMINATOR TEXT(" }") LINE_TERMINATOR TEXT("}"); TSharedRef< TJsonxReader<> > Reader = TJsonxReaderFactory<>::Create( InputString ); EJsonxNotation Notation = EJsonxNotation::Null; check( Reader->ReadNext( Notation ) && Notation == EJsonxNotation::ObjectStart ); check( Reader->GetLineNumber() == 1 && Reader->GetCharacterNumber() == 1 ); check( Reader->ReadNext( Notation ) && Notation == EJsonxNotation::String ); check( Reader->GetLineNumber() == 2 && Reader->GetCharacterNumber() == 17 ); check( Reader->ReadNext( Notation ) && Notation == EJsonxNotation::ArrayStart ); check( Reader->GetLineNumber() == 4 && Reader->GetCharacterNumber() == 2 ); check( Reader->ReadNext( Notation ) && Notation == EJsonxNotation::Number ); check( Reader->GetLineNumber() == 5 && Reader->GetCharacterNumber() == 7 ); check( Reader->ReadNext( Notation ) && Notation == EJsonxNotation::Boolean ); check( Reader->GetLineNumber() == 6 && Reader->GetCharacterNumber() == 
6 ); } // Failure Cases TArray FailureInputs; // Unclosed Object FailureInputs.Add( TEXT("{")); // Values in Object without identifiers FailureInputs.Add( TEXT("{") TEXT( "\"Value1\",") TEXT( "\"Value2\",") TEXT( "43") TEXT("}")); // Unexpected End Of Input Found FailureInputs.Add( TEXT("{") TEXT( "\"Object\":") TEXT( "{") TEXT( "\"NestedValue\":null,")); // Missing first brace FailureInputs.Add( TEXT( "\"Object\":") TEXT( "{") TEXT( "\"NestedValue\":null,") TEXT( "\"NestedObject\":{}") TEXT( "},") TEXT( "\"Value\":true") TEXT("}")); // Missing last character FailureInputs.Add( TEXT("{") TEXT( "\"Object\":") TEXT( "{") TEXT( "\"NestedValue\":null,") TEXT( "\"NestedObject\":{}") TEXT( "},") TEXT( "\"Value\":true")); // Missing curly brace FailureInputs.Add(TEXT("}")); // Missing bracket FailureInputs.Add(TEXT("]")); // Extra last character FailureInputs.Add( TEXT("{") TEXT( "\"Object\":") TEXT( "{") TEXT( "\"NestedValue\":null,") TEXT( "\"NestedObject\":{}") TEXT( "},") TEXT( "\"Value\":true") TEXT("}0")); // Missing comma FailureInputs.Add( TEXT("{") TEXT( "\"Value1\":null,") TEXT( "\"Value2\":\"string\"") TEXT( "\"Value3\":65.3") TEXT("}")); // Extra comma FailureInputs.Add( TEXT("{") TEXT( "\"Value1\":null,") TEXT( "\"Value2\":\"string\",") TEXT( "\"Value3\":65.3,") TEXT("}")); // Badly formed true/false/null FailureInputs.Add(TEXT("{\"Value\":tru}")); FailureInputs.Add(TEXT("{\"Value\":full}")); FailureInputs.Add(TEXT("{\"Value\":nulle}")); FailureInputs.Add(TEXT("{\"Value\":n%ll}")); // Floating Point Failures FailureInputs.Add(TEXT("{\"Value\":65.3e}")); FailureInputs.Add(TEXT("{\"Value\":65.}")); FailureInputs.Add(TEXT("{\"Value\":.7}")); FailureInputs.Add(TEXT("{\"Value\":+6}")); FailureInputs.Add(TEXT("{\"Value\":01}")); FailureInputs.Add(TEXT("{\"Value\":00.56}")); FailureInputs.Add(TEXT("{\"Value\":-1.e+4}")); FailureInputs.Add(TEXT("{\"Value\":2e+}")); // Bad Escape Characters FailureInputs.Add(TEXT("{\"Value\":\"Hello\\xThere\"}")); 
FailureInputs.Add(TEXT("{\"Value\":\"Hello\\u123There\"}")); FailureInputs.Add(TEXT("{\"Value\":\"Hello\\RThere\"}")); for (int32 i = 0; i < FailureInputs.Num(); ++i) { TSharedRef< TJsonxReader<> > Reader = TJsonxReaderFactory<>::Create( FailureInputs[i] ); TSharedPtr Object; check( FJsonxSerializer::Deserialize( Reader, Object ) == false ); check( !Object.IsValid() ); } // TryGetNumber tests { auto JsonxNumberToInt64 = [](double Val, int64& OutVal) -> bool { FJsonxValueNumber JsonxVal(Val); return ((FJsonxValue&)JsonxVal).TryGetNumber(OutVal); }; auto JsonxNumberToInt32 = [](double Val, int32& OutVal) -> bool { FJsonxValueNumber JsonxVal(Val); return ((FJsonxValue&)JsonxVal).TryGetNumber(OutVal); }; auto JsonxNumberToUInt32 = [](double Val, uint32& OutVal) -> bool { FJsonxValueNumber JsonxVal(Val); return ((FJsonxValue&)JsonxVal).TryGetNumber(OutVal); }; // TryGetNumber-Int64 tests { int64 IntVal; bool bOk = JsonxNumberToInt64(9007199254740991.0, IntVal); TestTrue(TEXT("TryGetNumber-Int64 Big Float64 succeeds"), bOk); TestEqual(TEXT("TryGetNumber-Int64 Big Float64"), IntVal, 9007199254740991LL); } { int64 IntVal; bool bOk = JsonxNumberToInt64(-9007199254740991.0, IntVal); TestTrue(TEXT("TryGetNumber-Int64 Small Float64 succeeds"), bOk); TestEqual(TEXT("TryGetNumber-Int64 Small Float64"), IntVal, -9007199254740991LL); } { int64 IntVal; bool bOk = JsonxNumberToInt64(0.4999999999999997, IntVal); TestTrue(TEXT("TryGetNumber-Int64 Lesser than near half succeeds"), bOk); TestEqual(TEXT("TryGetNumber-Int64 Lesser than near half rounds to zero"), IntVal, 0LL); } { int64 IntVal; bool bOk = JsonxNumberToInt64(-0.4999999999999997, IntVal); TestTrue(TEXT("TryGetNumber-Int64 Greater than near negative half succeeds"), bOk); TestEqual(TEXT("TryGetNumber-Int64 Greater than near negative half rounds to zero"), IntVal, 0LL); } { int64 IntVal; bool bOk = JsonxNumberToInt64(0.5, IntVal); TestTrue(TEXT("TryGetNumber-Int64 Half rounds to next integer succeeds"), bOk); 
TestEqual(TEXT("TryGetNumber-Int64 Half rounds to next integer"), IntVal, 1LL); } { int64 IntVal; bool bOk = JsonxNumberToInt64(-0.5, IntVal); TestTrue(TEXT("TryGetNumber-Int64 Negative half rounds to next negative integer succeeds"), bOk); TestEqual(TEXT("TryGetNumber-Int64 Negative half rounds to next negative integer succeeds"), IntVal, -1LL); } // TryGetNumber-Int32 tests { int32 IntVal; bool bOk = JsonxNumberToInt32(2147483647.000001, IntVal); TestFalse(TEXT("TryGetNumber-Int32 Number greater than max Int32 fails"), bOk); } { int32 IntVal; bool bOk = JsonxNumberToInt32(-2147483648.000001, IntVal); TestFalse(TEXT("TryGetNumber-Int32 Number lesser than min Int32 fails"), bOk); } { int32 IntVal; bool bOk = JsonxNumberToInt32(2147483647.0, IntVal); TestTrue(TEXT("TryGetNumber-Int32 Max Int32 succeeds"), bOk); TestEqual(TEXT("TryGetNumber-Int32 Max Int32"), IntVal, INT_MAX); } { int32 IntVal; bool bOk = JsonxNumberToInt32(2147483646.5, IntVal); TestTrue(TEXT("TryGetNumber-Int32 Round up to max Int32 succeeds"), bOk); TestEqual(TEXT("TryGetNumber-Int32 Round up to max Int32"), IntVal, INT_MAX); } { int32 IntVal; bool bOk = JsonxNumberToInt32(-2147483648.0, IntVal); TestTrue(TEXT("TryGetNumber-Int32 Min Int32 succeeds"), bOk); TestEqual(TEXT("TryGetNumber-Int32 Min Int32"), IntVal, INT_MIN); } { int32 IntVal; bool bOk = JsonxNumberToInt32(-2147483647.5, IntVal); TestTrue(TEXT("TryGetNumber-Int32 Round down to min Int32 succeeds"), bOk); TestEqual(TEXT("TryGetNumber-Int32 Round down to min Int32"), IntVal, INT_MIN); } { int32 IntVal; bool bOk = JsonxNumberToInt32(0.4999999999999997, IntVal); TestTrue(TEXT("TryGetNumber-Int32 Lesser than near half succeeds"), bOk); TestEqual(TEXT("TryGetNumber-Int32 Lesser than near half rounds to zero"), IntVal, 0); } { int32 IntVal; bool bOk = JsonxNumberToInt32(-0.4999999999999997, IntVal); TestTrue(TEXT("TryGetNumber-Int32 Greater than near negative half succeeds"), bOk); TestEqual(TEXT("TryGetNumber-Int32 Greater than near 
negative half rounds to zero"), IntVal, 0); } { int32 IntVal; bool bOk = JsonxNumberToInt32(0.5, IntVal); TestTrue(TEXT("TryGetNumber-Int32 Half rounds to next integer succeeds"), bOk); TestEqual(TEXT("TryGetNumber-Int32 Half rounds to next integer"), IntVal, 1); } { int32 IntVal; bool bOk = JsonxNumberToInt32(-0.5, IntVal); TestTrue(TEXT("TryGetNumber-Int32 Negative half rounds to next negative integer succeeds"), bOk); TestEqual(TEXT("TryGetNumber-Int32 Negative half rounds to next negative integer succeeds"), IntVal, -1); } // TryGetNumber-UInt32 tests { uint32 IntVal; bool bOk = JsonxNumberToUInt32(4294967295.000001, IntVal); TestFalse(TEXT("TryGetNumber-UInt32 Number greater than max Uint32 fails"), bOk); } { uint32 IntVal; bool bOk = JsonxNumberToUInt32(-0.000000000000001, IntVal); TestFalse(TEXT("TryGetNumber-UInt32 Negative number fails"), bOk); } { uint32 IntVal; bool bOk = JsonxNumberToUInt32(4294967295.0, IntVal); TestTrue(TEXT("TryGetNumber-UInt32 Max UInt32 succeeds"), bOk); TestEqual(TEXT("TryGetNumber-UInt32 Max UInt32"), IntVal, UINT_MAX); } { uint32 IntVal; bool bOk = JsonxNumberToUInt32(4294967294.5, IntVal); TestTrue(TEXT("TryGetNumber-UInt32 Round up to max UInt32 succeeds"), bOk); TestEqual(TEXT("TryGetNumber-UInt32 Round up to max UInt32"), IntVal, UINT_MAX); } { uint32 IntVal; bool bOk = JsonxNumberToUInt32(0.4999999999999997, IntVal); TestTrue(TEXT("TryGetNumber-UInt32 Lesser than near half succeeds"), bOk); TestEqual(TEXT("TryGetNumber-UInt32 Lesser than near half rounds to zero"), IntVal, 0U); } { uint32 IntVal; bool bOk = JsonxNumberToUInt32(0.5, IntVal); TestTrue(TEXT("TryGetNumber-UInt32 Half rounds to next integer succeeds"), bOk); TestEqual(TEXT("TryGetNumber-UInt32 Half rounds to next integer"), IntVal, 1U); } } return true; } #endif //WITH_DEV_AUTOMATION_TESTS ================================================ FILE: Source/Jsonx/Public/Dom/JsonxObject.h ================================================ // Copyright Epic Games, Inc. 
All Rights Reserved. #pragma once #include "CoreMinimal.h" #include "JsonxGlobals.h" #include "Serialization/JsonxTypes.h" #include "Dom/JsonxValue.h" /** * A Jsonx Object is a structure holding an unordered set of name/value pairs. * In a Jsonx file, it is represented by everything between curly braces {}. */ class JSONX_API FJsonxObject { public: TMap> Values; template TSharedPtr GetField( const FString& FieldName ) const { const TSharedPtr* Field = Values.Find(FieldName); if ( Field != nullptr && Field->IsValid() ) { if (JsonxType == EJsonx::None || (*Field)->Type == JsonxType) { return (*Field); } else { UE_LOG(LogJsonx, Warning, TEXT("Field %s is of the wrong type."), *FieldName); } } else { UE_LOG(LogJsonx, Warning, TEXT("Field %s was not found."), *FieldName); } return MakeShared(); } /** * Attempts to get the field with the specified name. * * @param FieldName The name of the field to get. * @return A pointer to the field, or nullptr if the field doesn't exist. */ TSharedPtr TryGetField( const FString& FieldName ) const { const TSharedPtr* Field = Values.Find(FieldName); return (Field != nullptr && Field->IsValid()) ? *Field : TSharedPtr(); } /** * Checks whether a field with the specified name exists in the object. * * @param FieldName The name of the field to check. * @return true if the field exists, false otherwise. */ bool HasField( const FString& FieldName) const { const TSharedPtr* Field = Values.Find(FieldName); if(Field && Field->IsValid()) { return true; } return false; } /** * Checks whether a field with the specified name and type exists in the object. * * @param JsonxType The type of the field to check. * @param FieldName The name of the field to check. * @return true if the field exists, false otherwise. 
*/ template bool HasTypedField(const FString& FieldName) const { const TSharedPtr* Field = Values.Find(FieldName); if(Field && Field->IsValid() && ((*Field)->Type == JsonxType)) { return true; } return false; } /** * Sets the value of the field with the specified name. * * @param FieldName The name of the field to set. * @param Value The value to set. */ void SetField( const FString& FieldName, const TSharedPtr& Value ); /** * Removes the field with the specified name. * * @param FieldName The name of the field to remove. */ void RemoveField(const FString& FieldName); /** * Gets the field with the specified name as a number. * * Ensures that the field is present and is of type Jsonx number. * * @param FieldName The name of the field to get. * @return The field's value as a number. */ double GetNumberField(const FString& FieldName) const; /** * Gets a numeric field and casts to an int32 */ FORCEINLINE int32 GetIntegerField(const FString& FieldName) const { return (int32)GetNumberField(FieldName); } /** Get the field named FieldName as a number. Returns false if it doesn't exist or cannot be converted. */ bool TryGetNumberField(const FString& FieldName, double& OutNumber) const; /** Get the field named FieldName as a number, and makes sure it's within int32 range. Returns false if it doesn't exist or cannot be converted. */ bool TryGetNumberField(const FString& FieldName, int32& OutNumber) const; /** Get the field named FieldName as a number, and makes sure it's within uint32 range. Returns false if it doesn't exist or cannot be converted. */ bool TryGetNumberField(const FString& FieldName, uint32& OutNumber) const; /** Get the field named FieldName as a number. Returns false if it doesn't exist or cannot be converted. */ bool TryGetNumberField(const FString& FieldName, int64& OutNumber) const; /** Add a field named FieldName with Number as value */ void SetNumberField( const FString& FieldName, double Number ); /** Get the field named FieldName as a string. 
*/ FString GetStringField(const FString& FieldName) const; /** Get the field named FieldName as a string. Returns false if it doesn't exist or cannot be converted. */ bool TryGetStringField(const FString& FieldName, FString& OutString) const; /** Get the field named FieldName as an array of strings. Returns false if it doesn't exist or any member cannot be converted. */ bool TryGetStringArrayField(const FString& FieldName, TArray& OutArray) const; /** Get the field named FieldName as an array of enums. Returns false if it doesn't exist or any member is not a string. */ template bool TryGetEnumArrayField(const FString& FieldName, TArray& OutArray) const { TArray Strings; if (!TryGetStringArrayField(FieldName, Strings)) { return false; } OutArray.Empty(); for (const FString& String : Strings) { TEnum Value; if (LexTryParseString(Value, *String)) { OutArray.Add(Value); } } return true; } /** Add a field named FieldName with value of StringValue */ void SetStringField( const FString& FieldName, const FString& StringValue ); /** * Gets the field with the specified name as a boolean. * * Ensures that the field is present and is of type Jsonx number. * * @param FieldName The name of the field to get. * @return The field's value as a boolean. */ bool GetBoolField(const FString& FieldName) const; /** Get the field named FieldName as a string. Returns false if it doesn't exist or cannot be converted. */ bool TryGetBoolField(const FString& FieldName, bool& OutBool) const; /** Set a boolean field named FieldName and value of InValue */ void SetBoolField( const FString& FieldName, bool InValue ); /** Get the field named FieldName as an array. 
*/ const TArray< TSharedPtr >& GetArrayField(const FString& FieldName) const; /** Try to get the field named FieldName as an array, or return false if it's another type */ bool TryGetArrayField(const FString& FieldName, const TArray< TSharedPtr >*& OutArray) const; /** Set an array field named FieldName and value of Array */ void SetArrayField( const FString& FieldName, const TArray< TSharedPtr >& Array ); /** * Gets the field with the specified name as a Jsonx object. * * Ensures that the field is present and is of type Jsonx object. * * @param FieldName The name of the field to get. * @return The field's value as a Jsonx object. */ const TSharedPtr& GetObjectField(const FString& FieldName) const; /** Try to get the field named FieldName as an object, or return false if it's another type */ bool TryGetObjectField(const FString& FieldName, const TSharedPtr*& OutObject) const; /** Set an ObjectField named FieldName and value of JsonxObject */ void SetObjectField( const FString& FieldName, const TSharedPtr& JsonxObject ); }; ================================================ FILE: Source/Jsonx/Public/Dom/JsonxValue.h ================================================ // Copyright Epic Games, Inc. All Rights Reserved. #pragma once #include "CoreMinimal.h" #include "Serialization/JsonxTypes.h" class FJsonxObject; /** * A Jsonx Value is a structure that can be any of the Jsonx Types. * It should never be used on its, only its derived types should be used. 
*/ class JSONX_API FJsonxValue { public: /** Returns this value as a double, logging an error and returning zero if this is not an Jsonx Number */ double AsNumber() const; /** Returns this value as a string, logging an error and returning an empty string if not possible */ FString AsString() const; /** Returns this value as a boolean, logging an error and returning false if not possible */ bool AsBool() const; /** Returns this value as an array, logging an error and returning an empty array reference if not possible */ const TArray< TSharedPtr >& AsArray() const; /** Returns this value as an object, throwing an error if this is not an Jsonx Object */ virtual const TSharedPtr& AsObject() const; /** Tries to convert this value to a number, returning false if not possible */ virtual bool TryGetNumber(double& OutNumber) const { return false; } /** Tries to convert this value to a number, returning false if not possible */ virtual bool TryGetNumber(float& OutNumber) const; /** Tries to convert this value to a number, returning false if not possible */ virtual bool TryGetNumber(int8& OutNumber) const; /** Tries to convert this value to a number, returning false if not possible */ virtual bool TryGetNumber(int16& OutNumber) const; /** Tries to convert this value to a number, returning false if not possible */ virtual bool TryGetNumber(int32& OutNumber) const; /** Tries to convert this value to a number, returning false if not possible */ virtual bool TryGetNumber(int64& OutNumber) const; /** Tries to convert this value to a number, returning false if not possible */ virtual bool TryGetNumber(uint8& OutNumber) const; /** Tries to convert this value to a number, returning false if not possible */ virtual bool TryGetNumber(uint16& OutNumber) const; /** Tries to convert this value to a number, returning false if not possible */ virtual bool TryGetNumber(uint32& OutNumber) const; /** Tries to convert this value to a number, returning false if not possible */ virtual bool 
TryGetNumber(uint64& OutNumber) const; /** Tries to convert this value to a string, returning false if not possible */ virtual bool TryGetString(FString& OutString) const { return false; } /** Tries to convert this value to a bool, returning false if not possible */ virtual bool TryGetBool(bool& OutBool) const { return false; } /** Tries to convert this value to an array, returning false if not possible */ virtual bool TryGetArray(const TArray< TSharedPtr >*& OutArray) const { return false; } /** Tries to convert this value to an object, returning false if not possible */ virtual bool TryGetObject(const TSharedPtr*& Object) const { return false; } /** Returns true if this value is a 'null' */ bool IsNull() const { return Type == EJsonx::Null || Type == EJsonx::None; } /** Get a field of the same type as the argument */ void AsArgumentType(double & Value) { Value = AsNumber(); } void AsArgumentType(FString & Value) { Value = AsString(); } void AsArgumentType(bool & Value) { Value = AsBool (); } void AsArgumentType(TArray< TSharedPtr >& Value) { Value = AsArray (); } void AsArgumentType(TSharedPtr & Value) { Value = AsObject(); } EJsonx Type; static bool CompareEqual(const FJsonxValue& Lhs, const FJsonxValue& Rhs); protected: FJsonxValue() : Type(EJsonx::None) {} virtual ~FJsonxValue() {} virtual FString GetType() const = 0; void ErrorMessage(const FString& InType) const; }; inline bool operator==(const FJsonxValue& Lhs, const FJsonxValue& Rhs) { return FJsonxValue::CompareEqual(Lhs, Rhs); } inline bool operator!=(const FJsonxValue& Lhs, const FJsonxValue& Rhs) { return !FJsonxValue::CompareEqual(Lhs, Rhs); } /** A Jsonx String Value. 
*/ class JSONX_API FJsonxValueString : public FJsonxValue { public: FJsonxValueString(const FString& InString) : Value(InString) {Type = EJsonx::String;} virtual bool TryGetString(FString& OutString) const override { OutString = Value; return true; } virtual bool TryGetNumber(double& OutDouble) const override { if (Value.IsNumeric()) { OutDouble = FCString::Atod(*Value); return true; } else { return false; } } virtual bool TryGetNumber(int32& OutValue) const override { LexFromString(OutValue, *Value); return true; } virtual bool TryGetNumber(uint32& OutValue) const override { LexFromString(OutValue, *Value); return true; } virtual bool TryGetNumber(int64& OutValue) const override { LexFromString(OutValue, *Value); return true; } virtual bool TryGetNumber(uint64& OutValue) const override { LexFromString(OutValue, *Value); return true; } virtual bool TryGetBool(bool& OutBool) const override { OutBool = Value.ToBool(); return true; } // Way to check if string value is empty without copying the string bool IsEmpty() const { return Value.IsEmpty(); } protected: FString Value; virtual FString GetType() const override {return TEXT("String");} }; /** A Jsonx Number Value. 
*/ class JSONX_API FJsonxValueNumber : public FJsonxValue { public: FJsonxValueNumber(double InNumber) : Value(InNumber) {Type = EJsonx::Number;} virtual bool TryGetNumber(double& OutNumber) const override { OutNumber = Value; return true; } virtual bool TryGetBool(bool& OutBool) const override { OutBool = (Value != 0.0); return true; } virtual bool TryGetString(FString& OutString) const override { OutString = FString::SanitizeFloat(Value, 0); return true; } protected: double Value; virtual FString GetType() const override {return TEXT("Number");} }; /** A Jsonx Number Value, stored internally as a string so as not to lose precision */ class JSONX_API FJsonxValueNumberString : public FJsonxValue { public: FJsonxValueNumberString(const FString& InString) : Value(InString) { Type = EJsonx::Number; } virtual bool TryGetString(FString& OutString) const override { OutString = Value; return true; } virtual bool TryGetNumber(double& OutDouble) const override { return LexTryParseString(OutDouble, *Value); } virtual bool TryGetNumber(float &OutDouble) const override { return LexTryParseString(OutDouble, *Value); } virtual bool TryGetNumber(int8& OutValue) const override { return LexTryParseString(OutValue, *Value); } virtual bool TryGetNumber(int16& OutValue) const override { return LexTryParseString(OutValue, *Value); } virtual bool TryGetNumber(int32& OutValue) const override { return LexTryParseString(OutValue, *Value); } virtual bool TryGetNumber(int64& OutValue) const override { return LexTryParseString(OutValue, *Value); } virtual bool TryGetNumber(uint8& OutValue) const override { return LexTryParseString(OutValue, *Value); } virtual bool TryGetNumber(uint16& OutValue) const override { return LexTryParseString(OutValue, *Value); } virtual bool TryGetNumber(uint32& OutValue) const override { return LexTryParseString(OutValue, *Value); } virtual bool TryGetNumber(uint64& OutValue) const override { return LexTryParseString(OutValue, *Value); } virtual bool 
TryGetBool(bool& OutBool) const override { OutBool = Value.ToBool(); return true; } protected: FString Value; virtual FString GetType() const override { return TEXT("NumberString"); } }; /** A Jsonx Boolean Value. */ class JSONX_API FJsonxValueBoolean : public FJsonxValue { public: FJsonxValueBoolean(bool InBool) : Value(InBool) {Type = EJsonx::Boolean;} virtual bool TryGetNumber(double& OutNumber) const override { OutNumber = Value ? 1 : 0; return true; } virtual bool TryGetBool(bool& OutBool) const override { OutBool = Value; return true; } virtual bool TryGetString(FString& OutString) const override { OutString = Value ? TEXT("true") : TEXT("false"); return true; } protected: bool Value; virtual FString GetType() const override {return TEXT("Boolean");} }; /** A Jsonx Array Value. */ class JSONX_API FJsonxValueArray : public FJsonxValue { public: FJsonxValueArray(const TArray< TSharedPtr >& InArray) : Value(InArray) {Type = EJsonx::Array;} virtual bool TryGetArray(const TArray< TSharedPtr >*& OutArray) const override { OutArray = &Value; return true; } protected: TArray< TSharedPtr > Value; virtual FString GetType() const override {return TEXT("Array");} }; /** A Jsonx Object Value. */ class JSONX_API FJsonxValueObject : public FJsonxValue { public: FJsonxValueObject(TSharedPtr InObject) : Value(InObject) {Type = EJsonx::Object;} virtual bool TryGetObject(const TSharedPtr*& OutObject) const override { OutObject = &Value; return true; } protected: TSharedPtr Value; virtual FString GetType() const override {return TEXT("Object");} }; /** A Jsonx Null Value. */ class JSONX_API FJsonxValueNull : public FJsonxValue { public: FJsonxValueNull() {Type = EJsonx::Null;} protected: virtual FString GetType() const override {return TEXT("Null");} }; ================================================ FILE: Source/Jsonx/Public/Jsonx.h ================================================ // Copyright Epic Games, Inc. All Rights Reserved. 
#pragma once /* Boilerplate *****************************************************************************/ #include "Misc/MonolithicHeaderBoilerplate.h" MONOLITHIC_HEADER_BOILERPLATE() /* Public dependencies *****************************************************************************/ #include "Core.h" /* Public includes *****************************************************************************/ #include "JsonxGlobals.h" #include "Policies/JsonxPrintPolicy.h" #include "Policies/PrettyJsonxPrintPolicy.h" #include "Policies/CondensedJsonxPrintPolicy.h" #include "Serialization/JsonxTypes.h" #include "Dom/JsonxValue.h" #include "Dom/JsonxObject.h" #include "Serialization/JsonxReader.h" #include "Serialization/JsonxWriter.h" #include "Serialization/JsonxSerializer.h" #include "Serialization/JsonxSerializerMacros.h" ================================================ FILE: Source/Jsonx/Public/JsonxGlobals.h ================================================ // Copyright Epic Games, Inc. All Rights Reserved. #pragma once #include "CoreMinimal.h" JSONX_API DECLARE_LOG_CATEGORY_EXTERN(LogJsonx, Log, All); ================================================ FILE: Source/Jsonx/Public/JsonxUtils/JsonxObjectArrayUpdater.h ================================================ // Copyright Epic Games, Inc. All Rights Reserved. #pragma once #include "CoreMinimal.h" #include "Dom/JsonxObject.h" #include "Dom/JsonxValue.h" /** * Utility to update an array of json objects from an array of elements (of arbitrary type). * Elements in the source array and the destination json object array are matched based on an * arbitrary key (provided by the FGetElementKey and FTryGetJsonxObjectKey delegates respectively). * Existing elements get "updated" via the FUpdateJsonxObject delegate. The update scheme is entirely * customizable; for example, it can be non-destructive and leave some json fields unchanged. 
* Elements from the source array that are not in the json array (based on the "key") are added to it. * Elements that are not present in the source array (based on the "key") are removed from the json array. * If the source array is empty the json array field is removed. */ template struct FJsonxObjectArrayUpdater { DECLARE_DELEGATE_RetVal_OneParam(KeyType, FGetElementKey, const ElementType&); DECLARE_DELEGATE_RetVal_TwoParams(bool, FTryGetJsonxObjectKey, const FJsonxObject&, KeyType& /*OutKey*/); DECLARE_DELEGATE_TwoParams(FUpdateJsonxObject, const ElementType&, FJsonxObject&); static void Execute(FJsonxObject& JsonxObject, const FString& ArrayName, const TArray& SourceArray, FGetElementKey GetElementKey, FTryGetJsonxObjectKey TryGetJsonxObjectKey, FUpdateJsonxObject UpdateJsonxObject) { if (SourceArray.Num() > 0) { TArray> NewJsonxValues; { const TArray>* ExistingJsonxValues; if (JsonxObject.TryGetArrayField(ArrayName, ExistingJsonxValues)) { // Build a map of elements for quick access and to keep track of which ones got updated TMap ElementsMap; for (const ElementType& Element : SourceArray) { ElementsMap.Add(GetElementKey.Execute(Element), &Element); } // Update existing json values and discard entries that no longer exist or are invalid for (TSharedPtr ExistingJsonxValue : *ExistingJsonxValues) { const TSharedPtr* ExistingJsonxValueAsObject; if (ExistingJsonxValue->TryGetObject(ExistingJsonxValueAsObject)) { KeyType ElementKey; if (TryGetJsonxObjectKey.Execute(**ExistingJsonxValueAsObject, ElementKey)) { if (const ElementType** ElementPtr = ElementsMap.Find(ElementKey)) { UpdateJsonxObject.Execute(**ElementPtr, **ExistingJsonxValueAsObject); NewJsonxValues.Add(ExistingJsonxValue); ElementsMap.Remove(ElementKey); } } } } // Add new elements for (auto It = ElementsMap.CreateConstIterator(); It; ++It) { TSharedPtr NewJsonxObject = MakeShareable(new FJsonxObject); UpdateJsonxObject.Execute(*It.Value(), *NewJsonxObject.Get()); NewJsonxValues.Add(MakeShareable(new 
FJsonxValueObject(NewJsonxObject))); } } else { // Array doesn't exist in the given JsonxObject, so build a new array for (const ElementType& Element : SourceArray) { TSharedPtr NewJsonxObject = MakeShareable(new FJsonxObject); UpdateJsonxObject.Execute(Element, *NewJsonxObject.Get()); NewJsonxValues.Add(MakeShareable(new FJsonxValueObject(NewJsonxObject))); } } } // Set the new content of the json array JsonxObject.SetArrayField(ArrayName, NewJsonxValues); } else { // Source array is empty so remove the json array JsonxObject.RemoveField(ArrayName); } } }; ================================================ FILE: Source/Jsonx/Public/Policies/CondensedJsonxPrintPolicy.h ================================================ // Copyright Epic Games, Inc. All Rights Reserved. #pragma once #include "CoreMinimal.h" #include "Policies/JsonxPrintPolicy.h" /** * Template for print policies that generate compressed output. * * @param CharType The type of characters to print, i.e. TCHAR or ANSICHAR. */ template struct TCondensedJsonxPrintPolicy : public TJsonxPrintPolicy { static inline void WriteLineTerminator(FArchive* Stream) {} static inline void WriteTabs(FArchive* Stream, int32 Count) {} static inline void WriteSpace(FArchive* Stream) {} }; ================================================ FILE: Source/Jsonx/Public/Policies/JsonxPrintPolicy.h ================================================ // Copyright Epic Games, Inc. All Rights Reserved. #pragma once #include "CoreMinimal.h" /** * Base template for Jsonx print policies. * * @param CharType The type of characters to print, i.e. TCHAR or ANSICHAR. */ template struct TJsonxPrintPolicy { /** * Writes a single character to the output stream. * * @param Stream The stream to write to. * @param Char The character to write. */ static inline void WriteChar( FArchive* Stream, CharType Char ) { Stream->Serialize(&Char, sizeof(CharType)); } /** * Writes a string to the output stream. * * @param Stream The stream to write to. 
* @param String The string to write. */ static inline void WriteString( FArchive* Stream, const FString& String ) { const TCHAR* CharPtr = *String; for (int32 CharIndex = 0; CharIndex < String.Len(); ++CharIndex, ++CharPtr) { WriteChar(Stream, *CharPtr); } } }; /** * Specialization for TCHAR that allows direct copying from FString data. */ template <> inline void TJsonxPrintPolicy::WriteString( FArchive* Stream, const FString& String ) { Stream->Serialize((void*)*String, String.Len() * sizeof(TCHAR)); } #if !PLATFORM_TCHAR_IS_CHAR16 /** * Specialization for UTF16CHAR that writes FString data UTF-16. */ template <> inline void TJsonxPrintPolicy::WriteString(FArchive* Stream, const FString& String) { // Note: This is a no-op on platforms that are using a 16-bit TCHAR FTCHARToUTF16 UTF16String(*String, String.Len()); Stream->Serialize((void*)UTF16String.Get(), UTF16String.Length() * sizeof(UTF16CHAR)); } #endif ================================================ FILE: Source/Jsonx/Public/Policies/PrettyJsonxPrintPolicy.h ================================================ // Copyright Epic Games, Inc. All Rights Reserved. #pragma once #include "CoreMinimal.h" #include "Policies/JsonxPrintPolicy.h" /** * Template for print policies that generate human readable output. * * @param CharType The type of characters to print, i.e. TCHAR or ANSICHAR. 
*/ template struct TPrettyJsonxPrintPolicy : public TJsonxPrintPolicy { static inline void WriteLineTerminator( FArchive* Stream ) { TJsonxPrintPolicy::WriteString(Stream, LINE_TERMINATOR); } static inline void WriteTabs( FArchive* Stream, int32 Count ) { CharType Tab = CharType('\t'); for (int32 i = 0; i < Count; ++i) { TJsonxPrintPolicy::WriteChar(Stream, Tab); } } static inline void WriteSpace( FArchive* Stream ) { TJsonxPrintPolicy::WriteChar(Stream, CharType(' ')); } }; ================================================ FILE: Source/Jsonx/Public/Serialization/JsonxReader.h ================================================ // Copyright Epic Games, Inc. All Rights Reserved. #pragma once #include "CoreMinimal.h" #include "Serialization/JsonxTypes.h" #include "Serialization/BufferReader.h" class Error; #define JSONX_NOTATIONMAP_DEF \ static EJsonxNotation TokenToNotationTablex[] = \ { \ EJsonxNotation::Error, /*EJsonxToken::None*/ \ EJsonxNotation::Error, /*EJsonxToken::Comma*/ \ EJsonxNotation::ObjectStart, /*EJsonxToken::CurlyOpen*/ \ EJsonxNotation::ObjectEnd, /*EJsonxToken::CurlyClose*/ \ EJsonxNotation::ArrayStart, /*EJsonxToken::SquareOpen*/ \ EJsonxNotation::ArrayEnd, /*EJsonxToken::SquareClose*/ \ EJsonxNotation::Error, /*EJsonxToken::Colon*/ \ EJsonxNotation::String, /*EJsonxToken::String*/ \ EJsonxNotation::Number, /*EJsonxToken::Number*/ \ EJsonxNotation::Boolean, /*EJsonxToken::True*/ \ EJsonxNotation::Boolean, /*EJsonxToken::False*/ \ EJsonxNotation::Null, /*EJsonxToken::Null*/ \ }; #ifndef WITH_JSONX_INLINED_NOTATIONMAP #define WITH_JSONX_INLINED_NOTATIONMAP 0 #endif // WITH_JSONX_INLINED_NOTATIONMAP #if !WITH_JSONX_INLINED_NOTATIONMAP JSONX_NOTATIONMAP_DEF; #endif // WITH_JSONX_INLINED_NOTATIONMAP template class TJsonxReader { public: static TSharedRef< TJsonxReader > Create( FArchive* const Stream ) { return MakeShareable( new TJsonxReader( Stream ) ); } public: virtual ~TJsonxReader() {} bool ReadNext( EJsonxNotation& Notation ) { if 
(!ErrorMessage.IsEmpty()) { Notation = EJsonxNotation::Error; return false; } if (Stream == nullptr) { Notation = EJsonxNotation::Error; SetErrorMessage(TEXT("Null Stream")); return true; } const bool AtEndOfStream = Stream->AtEnd(); if (AtEndOfStream && !FinishedReadingRootObject) { Notation = EJsonxNotation::Error; SetErrorMessage(TEXT("Improperly formatted.")); return true; } if (FinishedReadingRootObject && !AtEndOfStream) { Notation = EJsonxNotation::Error; SetErrorMessage(TEXT("Unexpected additional input found.")); return true; } if (AtEndOfStream) { return false; } bool ReadWasSuccess = false; Identifier.Empty(); do { EJsonx CurrentState = EJsonx::None; if (ParseState.Num() > 0) { CurrentState = ParseState.Top(); } switch (CurrentState) { case EJsonx::Array: ReadWasSuccess = ReadNextArrayValue( /*OUT*/ CurrentToken ); break; case EJsonx::Object: ReadWasSuccess = ReadNextObjectValue( /*OUT*/ CurrentToken ); break; default: ReadWasSuccess = ReadStart( /*OUT*/ CurrentToken ); break; } } while (ReadWasSuccess && (CurrentToken == EJsonxToken::None)); #if WITH_JSONX_INLINED_NOTATIONMAP JSONX_NOTATIONMAP_DEF; #endif // WITH_JSONX_INLINED_NOTATIONMAP Notation = TokenToNotationTablex[(int32)CurrentToken]; FinishedReadingRootObject = ParseState.Num() == 0; if (!ReadWasSuccess || (Notation == EJsonxNotation::Error)) { Notation = EJsonxNotation::Error; if (ErrorMessage.IsEmpty()) { SetErrorMessage(TEXT("Unknown Error Occurred")); } return true; } if (FinishedReadingRootObject && !Stream->AtEnd()) { ReadWasSuccess = ParseWhiteSpace(); } return ReadWasSuccess; } bool SkipObject() { return ReadUntilMatching(EJsonxNotation::ObjectEnd); } bool SkipArray() { return ReadUntilMatching(EJsonxNotation::ArrayEnd); } FORCEINLINE virtual const FString& GetIdentifier() const { return Identifier; } FORCEINLINE virtual const FString& GetValueAsString() const { check(CurrentToken == EJsonxToken::String); return StringValue; } FORCEINLINE double GetValueAsNumber() const { 
check(CurrentToken == EJsonxToken::Number); return NumberValue; } FORCEINLINE const FString& GetValueAsNumberString() const { check(CurrentToken == EJsonxToken::Number); return StringValue; } FORCEINLINE bool GetValueAsBoolean() const { check((CurrentToken == EJsonxToken::True) || (CurrentToken == EJsonxToken::False)); return BoolValue; } FORCEINLINE const FString& GetErrorMessage() const { return ErrorMessage; } FORCEINLINE const uint32 GetLineNumber() const { return LineNumber; } FORCEINLINE const uint32 GetCharacterNumber() const { return CharacterNumber; } protected: /** Hidden default constructor. */ TJsonxReader() : ParseState() , CurrentToken( EJsonxToken::None ) , Stream( nullptr ) , Identifier() , ErrorMessage() , StringValue() , NumberValue( 0.0f ) , LineNumber( 1 ) , CharacterNumber( 0 ) , BoolValue( false ) , FinishedReadingRootObject( false ) { } /** * Creates and initializes a new instance with the given input. * * @param InStream An archive containing the input. */ TJsonxReader(FArchive* InStream) : ParseState() , CurrentToken(EJsonxToken::None) , Stream(InStream) , Identifier() , ErrorMessage() , StringValue() , NumberValue(0.0f) , LineNumber(1) , CharacterNumber(0) , BoolValue(false) , FinishedReadingRootObject(false) { } private: void SetErrorMessage( const FString& Message ) { ErrorMessage = Message + FString::Printf(TEXT(" Line: %u Ch: %u"), LineNumber, CharacterNumber); } bool ReadUntilMatching( const EJsonxNotation ExpectedNotation ) { uint32 ScopeCount = 0; EJsonxNotation Notation; while (ReadNext(Notation)) { if ((ScopeCount == 0) && (Notation == ExpectedNotation)) { return true; } switch (Notation) { case EJsonxNotation::ObjectStart: case EJsonxNotation::ArrayStart: ++ScopeCount; break; case EJsonxNotation::ObjectEnd: case EJsonxNotation::ArrayEnd: --ScopeCount; break; case EJsonxNotation::Boolean: case EJsonxNotation::Null: case EJsonxNotation::Number: case EJsonxNotation::String: break; case EJsonxNotation::Error: return false; break; } } 
return !Stream->IsError(); } bool ReadStart( EJsonxToken& Token ) { if (!ParseWhiteSpace()) { return false; } Token = EJsonxToken::None; if (NextToken(Token) == false) { return false; } if ((Token != EJsonxToken::CurlyOpen) && (Token != EJsonxToken::SquareOpen)) { SetErrorMessage(TEXT("Open Curly or Square Brace token expected, but not found.")); return false; } return true; } bool ReadNextObjectValue( EJsonxToken& Token ) { const bool bCommaPrepend = Token != EJsonxToken::CurlyOpen; Token = EJsonxToken::None; if (NextToken(Token) == false) { return false; } if (Token == EJsonxToken::CurlyClose) { return true; } else { if (bCommaPrepend) { if (Token != EJsonxToken::Comma) { SetErrorMessage( TEXT("Comma token expected, but not found.") ); return false; } Token = EJsonxToken::None; if (!NextToken(Token)) { return false; } } if (Token != EJsonxToken::String) { SetErrorMessage( TEXT("String token expected, but not found.") ); return false; } Identifier = StringValue; Token = EJsonxToken::None; if (!NextToken(Token)) { return false; } if (Token != EJsonxToken::Colon) { SetErrorMessage( TEXT("Colon token expected, but not found.") ); return false; } Token = EJsonxToken::None; if (!NextToken(Token)) { return false; } } return true; } bool ReadNextArrayValue( EJsonxToken& Token ) { const bool bCommaPrepend = Token != EJsonxToken::SquareOpen; Token = EJsonxToken::None; if (!NextToken(Token)) { return false; } if (Token == EJsonxToken::SquareClose) { return true; } else { if (bCommaPrepend) { if (Token != EJsonxToken::Comma) { SetErrorMessage( TEXT("Comma token expected, but not found.") ); return false; } Token = EJsonxToken::None; if (!NextToken(Token)) { return false; } } } return true; } bool NextToken( EJsonxToken& OutToken ) { while (!Stream->AtEnd()) { CharType Char; if (!Serialize(&Char, sizeof(CharType))) { return false; } ++CharacterNumber; if (Char == CharType('\0')) { break; } if (IsLineBreak(Char)) { ++LineNumber; CharacterNumber = 0; } if (!IsWhitespace(Char)) 
{ if (IsJsonxNumber(Char)) { if (!ParseNumberToken(Char)) { return false; } OutToken = EJsonxToken::Number; return true; } switch (Char) { case CharType('{'): OutToken = EJsonxToken::CurlyOpen; ParseState.Push( EJsonx::Object ); return true; case CharType('}'): { OutToken = EJsonxToken::CurlyClose; if (ParseState.Num()) { ParseState.Pop(); return true; } else { SetErrorMessage(TEXT("Unknown state reached while parsing Jsonx token.")); return false; } } case CharType('['): OutToken = EJsonxToken::SquareOpen; ParseState.Push( EJsonx::Array ); return true; case CharType(']'): { OutToken = EJsonxToken::SquareClose; if (ParseState.Num()) { ParseState.Pop(); return true; } else { SetErrorMessage(TEXT("Unknown state reached while parsing Jsonx token.")); return false; } } case CharType(':'): OutToken = EJsonxToken::Colon; return true; case CharType(','): OutToken = EJsonxToken::Comma; return true; case CharType('\"'): { if (!ParseStringToken()) { return false; } OutToken = EJsonxToken::String; } return true; case CharType('t'): case CharType('T'): case CharType('f'): case CharType('F'): case CharType('n'): case CharType('N'): { FString Test; Test += Char; while (!Stream->AtEnd()) { if (!Serialize(&Char, sizeof(CharType))) { return false; } if (IsAlphaNumber(Char)) { ++CharacterNumber; Test += Char; } else { // backtrack and break Stream->Seek(Stream->Tell() - sizeof(CharType)); break; } } if (Test == TEXT("False")) { BoolValue = false; OutToken = EJsonxToken::False; return true; } if (Test == TEXT("True")) { BoolValue = true; OutToken = EJsonxToken::True; return true; } if (Test == TEXT("Null")) { OutToken = EJsonxToken::Null; return true; } SetErrorMessage( TEXT("Invalid Jsonx Token. 
Check that your member names have quotes around them!") ); return false; } default: SetErrorMessage( TEXT("Invalid Jsonx Token.") ); return false; } } } SetErrorMessage( TEXT("Invalid Jsonx Token.") ); return false; } bool ParseStringToken() { FString String; while (true) { if (Stream->AtEnd()) { SetErrorMessage( TEXT("String Token Abruptly Ended.") ); return false; } CharType Char; if (!Serialize(&Char, sizeof(CharType))) { return false; } ++CharacterNumber; if (Char == CharType('\"')) { break; } if (Char == CharType('\\')) { if (!Serialize(&Char, sizeof(CharType))) { return false; } ++CharacterNumber; switch (Char) { case CharType('\"'): case CharType('\\'): case CharType('/'): String += Char; break; case CharType('f'): String += CharType('\f'); break; case CharType('r'): String += CharType('\r'); break; case CharType('n'): String += CharType('\n'); break; case CharType('b'): String += CharType('\b'); break; case CharType('t'): String += CharType('\t'); break; case CharType('u'): // 4 hex digits, like \uAB23, which is a 16 bit number that we would usually see as 0xAB23 { int32 HexNum = 0; for (int32 Radix = 3; Radix >= 0; --Radix) { if (Stream->AtEnd()) { SetErrorMessage( TEXT("String Token Abruptly Ended.") ); return false; } if (!Serialize(&Char, sizeof(CharType))) { return false; } ++CharacterNumber; int32 HexDigit = FParse::HexDigit(Char); if ((HexDigit == 0) && (Char != CharType('0'))) { SetErrorMessage( TEXT("Invalid Hexadecimal digit parsed.") ); return false; } //@TODO: FLOATPRECISION: this is gross HexNum += HexDigit * (int32)FMath::Pow(16, (float)Radix); } String += (FString::ElementType)HexNum; } break; default: SetErrorMessage( TEXT("Bad Jsonx escaped char.") ); return false; } } else { String += Char; } } StringValue = MoveTemp(String); // Inline combine any surrogate pairs in the data when loading into a UTF-32 string StringConv::InlineCombineSurrogates(StringValue); return true; } bool ParseNumberToken( CharType FirstChar ) { FString String; int32 
State = 0; bool UseFirstChar = true; bool StateError = false; while (true) { if (Stream->AtEnd()) { SetErrorMessage( TEXT("Number Token Abruptly Ended.") ); return false; } CharType Char; if (UseFirstChar) { Char = FirstChar; UseFirstChar = false; } else { if (!Serialize(&Char, sizeof(CharType))) { return false; } ++CharacterNumber; } // The following code doesn't actually derive the Jsonx Number: that is handled // by the function FCString::Atof below. This code only ensures the Jsonx Number is // EXACTLY to specification if (IsJsonxNumber(Char)) { // ensure number follows Jsonx format before converting // This switch statement is derived from a finite state automata // derived from the Jsonx spec. A table was not used for simplicity. switch (State) { case 0: if (Char == CharType('-')) { State = 1; } else if (Char == CharType('0')) { State = 2; } else if (IsNonZeroDigit(Char)) { State = 3; } else { StateError = true; } break; case 1: if (Char == CharType('0')) { State = 2; } else if (IsNonZeroDigit(Char)) { State = 3; } else { StateError = true; } break; case 2: if (Char == CharType('.')) { State = 4; } else if (Char == CharType('e') || Char == CharType('E')) { State = 5; } else { StateError = true; } break; case 3: if (IsDigit(Char)) { State = 3; } else if (Char == CharType('.')) { State = 4; } else if (Char == CharType('e') || Char == CharType('E')) { State = 5; } else { StateError = true; } break; case 4: if (IsDigit(Char)) { State = 6; } else { StateError = true; } break; case 5: if (Char == CharType('-') ||Char == CharType('+')) { State = 7; } else if (IsDigit(Char)) { State = 8; } else { StateError = true; } break; case 6: if (IsDigit(Char)) { State = 6; } else if (Char == CharType('e') || Char == CharType('E')) { State = 5; } else { StateError = true; } break; case 7: if (IsDigit(Char)) { State = 8; } else { StateError = true; } break; case 8: if (IsDigit(Char)) { State = 8; } else { StateError = true; } break; default: SetErrorMessage( TEXT("Unknown state 
reached in Jsonx Number Token.") ); return false; }
			// Automaton rejected the character: stop tokenizing the number.
			if (StateError)
			{
				break;
			}
			String += Char;
		}
		else
		{
			// backtrack once because we read a non-number character
			Stream->Seek(Stream->Tell() - sizeof(CharType));
			--CharacterNumber;
			// and now the number is fully tokenized
			break;
		}
	}

	// ensure the number has followed valid Jsonx format
	// (States 2, 3, 6 and 8 are the accepting states of the automaton above:
	// bare zero, integer, fraction digits, exponent digits.)
	if (!StateError && ((State == 2) || (State == 3) || (State == 6) || (State == 8)))
	{
		StringValue = String;
		NumberValue = FCString::Atod(*String);
		return true;
	}

	SetErrorMessage( TEXT("Poorly formed Jsonx Number Token.") );
	return false;
}

/**
 * Consumes consecutive whitespace characters from the stream, maintaining the
 * LineNumber / CharacterNumber counters, then backtracks one character so the
 * first non-whitespace character can be re-read by the caller.
 *
 * @return false only on a stream I/O error; reaching end-of-stream returns true.
 */
bool ParseWhiteSpace()
{
	while (!Stream->AtEnd())
	{
		CharType Char;
		if (!Serialize(&Char, sizeof(CharType)))
		{
			return false;
		}
		++CharacterNumber;
		if (IsLineBreak(Char))
		{
			++LineNumber;
			CharacterNumber = 0;
		}
		if (!IsWhitespace(Char))
		{
			// backtrack and break
			Stream->Seek(Stream->Tell() - sizeof(CharType));
			--CharacterNumber;
			break;
		}
	}
	return true;
}

/** Only '\n' counts as a line break for the line/character bookkeeping. */
bool IsLineBreak( const CharType& Char )
{
	return Char == CharType('\n');
}

/** Can't use FChar::IsWhitespace because it is TCHAR specific, and it doesn't handle newlines */
bool IsWhitespace( const CharType& Char )
{
	return Char == CharType(' ') || Char == CharType('\t') || Char == CharType('\n') || Char == CharType('\r');
}

/** Can't use FChar::IsDigit because it is TCHAR specific, and it doesn't handle all the other Jsonx number characters */
bool IsJsonxNumber( const CharType& Char )
{
	return ((Char >= CharType('0') && Char <= CharType('9'))
		|| Char == CharType('-')
		|| Char == CharType('.')
		|| Char == CharType('+')
		|| Char == CharType('e')
		|| Char == CharType('E'));
}

/** Can't use FChar::IsDigit because it is TCHAR specific */
bool IsDigit( const CharType& Char )
{
	return (Char >= CharType('0') && Char <= CharType('9'));
}

/** True for '1'..'9'; the number automaton above uses this to reject leading zeros. */
bool IsNonZeroDigit( const CharType& Char )
{
	return (Char >= CharType('1') && Char <= CharType('9'));
}

/** Can't use FChar::IsAlpha because it is TCHAR specific. 
Also, this only checks A through Z (no underscores or other characters). */ bool IsAlphaNumber( const CharType& Char ) { return (Char >= CharType('a') && Char <= CharType('z')) || (Char >= CharType('A') && Char <= CharType('Z')); } protected: bool Serialize(void* V, int64 Length) { Stream->Serialize(V, Length); if (Stream->IsError()) { SetErrorMessage(TEXT("Stream I/O Error")); return false; } return true; } protected: TArray ParseState; EJsonxToken CurrentToken; FArchive* Stream; FString Identifier; FString ErrorMessage; FString StringValue; double NumberValue; uint32 LineNumber; uint32 CharacterNumber; bool BoolValue; bool FinishedReadingRootObject; }; class FJsonxStringReader : public TJsonxReader { public: static TSharedRef Create(const FString& JsonxString) { return MakeShareable(new FJsonxStringReader(JsonxString)); } static TSharedRef Create(FString&& JsonxString) { return MakeShareable(new FJsonxStringReader(MoveTemp(JsonxString))); } const FString& GetSourceString() const { return Content; } public: virtual ~FJsonxStringReader() = default; protected: /** * Parses a string containing Jsonx information. * * @param JsonxString The Jsonx string to parse. */ FJsonxStringReader(const FString& JsonxString) : Content(JsonxString) , Reader(nullptr) { InitReader(); } /** * Parses a string containing Jsonx information. * * @param JsonxString The Jsonx string to parse. 
*/ FJsonxStringReader(FString&& JsonxString) : Content(MoveTemp(JsonxString)) , Reader(nullptr) { InitReader(); } FORCEINLINE void InitReader() { if (Content.IsEmpty()) { return; } Reader = MakeUnique((void*)*Content, Content.Len() * sizeof(TCHAR), false); check(Reader.IsValid()); Stream = Reader.Get(); } protected: const FString Content; TUniquePtr Reader; }; template class TJsonxReaderFactory { public: static TSharedRef> Create(const FString& JsonxString) { return FJsonxStringReader::Create(JsonxString); } static TSharedRef> Create(FString&& JsonxString) { return FJsonxStringReader::Create(MoveTemp(JsonxString)); } static TSharedRef> Create(FArchive* const Stream) { return TJsonxReader::Create(Stream); } }; ================================================ FILE: Source/Jsonx/Public/Serialization/JsonxSerializer.h ================================================ // Copyright Epic Games, Inc. All Rights Reserved. #pragma once #include "CoreMinimal.h" #include "Serialization/JsonxTypes.h" #include "Serialization/JsonxReader.h" #include "Dom/JsonxValue.h" #include "Dom/JsonxObject.h" #include "Serialization/JsonxWriter.h" class Error; class FJsonxSerializer { public: enum class EFlags { None = 0, StoreNumbersAsStrings = 1, }; template static bool Deserialize(const TSharedRef>& Reader, TArray>& OutArray, EFlags InOptions = EFlags::None) { return Deserialize(*Reader, OutArray, InOptions); } template static bool Deserialize(TJsonxReader& Reader, TArray>& OutArray, EFlags InOptions = EFlags::None) { StackState State; if (!Deserialize(Reader, /*OUT*/State, InOptions)) { return false; } // Empty array is ok. 
if (State.Type != EJsonx::Array) { return false; } OutArray = State.Array; return true; } template static bool Deserialize(const TSharedRef>& Reader, TSharedPtr& OutObject, EFlags InOptions = EFlags::None) { return Deserialize(*Reader, OutObject, InOptions); } template static bool Deserialize(TJsonxReader& Reader, TSharedPtr& OutObject, EFlags InOptions = EFlags::None) { StackState State; if (!Deserialize(Reader, /*OUT*/State, InOptions)) { return false; } if (!State.Object.IsValid()) { return false; } OutObject = State.Object; return true; } template static bool Deserialize(const TSharedRef>& Reader, TSharedPtr& OutValue, EFlags InOptions = EFlags::None) { return Deserialize(*Reader, OutValue, InOptions); } template static bool Deserialize(TJsonxReader& Reader, TSharedPtr& OutValue, EFlags InOptions = EFlags::None) { StackState State; if (!Deserialize(Reader, /*OUT*/State, InOptions)) { return false; } switch (State.Type) { case EJsonx::Object: if (!State.Object.IsValid()) { return false; } OutValue = MakeShared(State.Object); break; case EJsonx::Array: OutValue = MakeShared(State.Array); break; default: // FIXME: would be nice to handle non-composite root values but StackState Deserialize just drops them on the floor return false; } return true; } template static bool Serialize(const TArray>& Array, const TSharedRef>& Writer, bool bCloseWriter = true) { return Serialize(Array, *Writer, bCloseWriter); } template static bool Serialize(const TArray>& Array, TJsonxWriter& Writer, bool bCloseWriter = true ) { const TSharedRef StartingElement = MakeShared(Array); return FJsonxSerializer::Serialize(StartingElement, Writer, bCloseWriter); } template static bool Serialize(const TSharedRef& Object, const TSharedRef>& Writer, bool bCloseWriter = true ) { return Serialize(Object, *Writer, bCloseWriter); } template static bool Serialize(const TSharedRef& Object, TJsonxWriter& Writer, bool bCloseWriter = true) { const TSharedRef StartingElement = MakeShared(Object); return 
FJsonxSerializer::Serialize(StartingElement, Writer, bCloseWriter); } template static bool Serialize(const TSharedPtr& Value, const FString& Identifier, const TSharedRef>& Writer, bool bCloseWriter = true) { return Serialize(Value, Identifier, *Writer, bCloseWriter); } template static bool Serialize(const TSharedPtr& Value, const FString& Identifier, TJsonxWriter& Writer, bool bCloseWriter = true) { const TSharedRef StartingElement = MakeShared(Identifier, Value); return FJsonxSerializer::Serialize(StartingElement, Writer, bCloseWriter); } private: struct StackState { EJsonx Type; FString Identifier; TArray> Array; TSharedPtr Object; }; struct FElement { FElement( const TSharedPtr& InValue ) : Identifier() , Value(InValue) , HasBeenProcessed(false) { } FElement( const TSharedRef& Object ) : Identifier() , Value(MakeShared(Object)) , HasBeenProcessed( false ) { } FElement( const TArray>& Array ) : Identifier() , Value(MakeShared(Array)) , HasBeenProcessed(false) { } FElement( const FString& InIdentifier, const TSharedPtr< FJsonxValue >& InValue ) : Identifier( InIdentifier ) , Value( InValue ) , HasBeenProcessed( false ) { } FString Identifier; TSharedPtr< FJsonxValue > Value; bool HasBeenProcessed; }; private: template static bool Deserialize(TJsonxReader& Reader, StackState& OutStackState, EFlags InOptions) { TArray> ScopeStack; TSharedPtr CurrentState; TSharedPtr NewValue; EJsonxNotation Notation; while (Reader.ReadNext(Notation)) { FString Identifier = Reader.GetIdentifier(); NewValue.Reset(); switch( Notation ) { case EJsonxNotation::ObjectStart: { if (CurrentState.IsValid()) { ScopeStack.Push(CurrentState.ToSharedRef()); } CurrentState = MakeShared(); CurrentState->Type = EJsonx::Object; CurrentState->Identifier = Identifier; CurrentState->Object = MakeShared(); } break; case EJsonxNotation::ObjectEnd: { if (ScopeStack.Num() > 0) { Identifier = CurrentState->Identifier; NewValue = MakeShared(CurrentState->Object); CurrentState = ScopeStack.Pop(); } } break; 
case EJsonxNotation::ArrayStart: { if (CurrentState.IsValid()) { ScopeStack.Push(CurrentState.ToSharedRef()); } CurrentState = MakeShared(); CurrentState->Type = EJsonx::Array; CurrentState->Identifier = Identifier; } break; case EJsonxNotation::ArrayEnd: { if (ScopeStack.Num() > 0) { Identifier = CurrentState->Identifier; NewValue = MakeShared(CurrentState->Array); CurrentState = ScopeStack.Pop(); } } break; case EJsonxNotation::Boolean: NewValue = MakeShared(Reader.GetValueAsBoolean()); break; case EJsonxNotation::String: NewValue = MakeShared(Reader.GetValueAsString()); break; case EJsonxNotation::Number: if (EnumHasAnyFlags(InOptions, EFlags::StoreNumbersAsStrings)) { NewValue = MakeShared(Reader.GetValueAsNumberString()); } else { NewValue = MakeShared(Reader.GetValueAsNumber()); } break; case EJsonxNotation::Null: NewValue = MakeShared(); break; case EJsonxNotation::Error: return false; break; } if (NewValue.IsValid() && CurrentState.IsValid()) { if (CurrentState->Type == EJsonx::Object) { CurrentState->Object->SetField(Identifier, NewValue); } else { CurrentState->Array.Add(NewValue); } } } if (!CurrentState.IsValid() || !Reader.GetErrorMessage().IsEmpty()) { return false; } OutStackState = *CurrentState.Get(); return true; } template static bool Serialize(const TSharedRef& StartingElement, TJsonxWriter& Writer, bool bCloseWriter) { TArray> ElementStack; ElementStack.Push(StartingElement); while (ElementStack.Num() > 0) { TSharedRef Element = ElementStack.Pop(); check(Element->Value->Type != EJsonx::None); switch (Element->Value->Type) { case EJsonx::Number: { if (Element->Identifier.IsEmpty()) { Writer.WriteValue(Element->Value->AsNumber()); } else { Writer.WriteValue(Element->Identifier, Element->Value->AsNumber()); } } break; case EJsonx::Boolean: { if (Element->Identifier.IsEmpty()) { Writer.WriteValue(Element->Value->AsBool()); } else { Writer.WriteValue(Element->Identifier, Element->Value->AsBool()); } } break; case EJsonx::String: { if 
(Element->Identifier.IsEmpty()) { Writer.WriteValue(Element->Value->AsString()); } else { Writer.WriteValue(Element->Identifier, Element->Value->AsString()); } } break; case EJsonx::Null: { if (Element->Identifier.IsEmpty()) { Writer.WriteNull(); } else { Writer.WriteNull(Element->Identifier); } } break; case EJsonx::Array: { if (Element->HasBeenProcessed) { Writer.WriteArrayEnd(); } else { Element->HasBeenProcessed = true; ElementStack.Push(Element); if (Element->Identifier.IsEmpty()) { Writer.WriteArrayStart(); } else { Writer.WriteArrayStart(Element->Identifier); } TArray> Values = Element->Value->AsArray(); for (int Index = Values.Num() - 1; Index >= 0; --Index) { ElementStack.Push(MakeShared(Values[Index])); } } } break; case EJsonx::Object: { if (Element->HasBeenProcessed) { Writer.WriteObjectEnd(); } else { Element->HasBeenProcessed = true; ElementStack.Push(Element); if (Element->Identifier.IsEmpty()) { Writer.WriteObjectStart(); } else { Writer.WriteObjectStart(Element->Identifier); } TArray Keys; TArray> Values; TSharedPtr ElementObject = Element->Value->AsObject(); ElementObject->Values.GenerateKeyArray(Keys); ElementObject->Values.GenerateValueArray(Values); check(Keys.Num() == Values.Num()); for (int Index = Values.Num() - 1; Index >= 0; --Index) { ElementStack.Push(MakeShared(Keys[Index], Values[Index])); } } } break; default: UE_LOG(LogJsonx, Fatal,TEXT("Could not print Jsonx Value, unrecognized type.")); } } if (bCloseWriter) { return Writer.Close(); } else { return true; } } }; ================================================ FILE: Source/Jsonx/Public/Serialization/JsonxSerializerMacros.h ================================================ // Copyright Epic Games, Inc. All Rights Reserved. 
#pragma once #include "CoreMinimal.h" #include "Policies/PrettyJsonxPrintPolicy.h" #include "Policies/CondensedJsonxPrintPolicy.h" #include "Serialization/JsonxTypes.h" #include "Serialization/JsonxReader.h" #include "Serialization/JsonxSerializer.h" /** * Macros used to generate a serialization function for a class derived from FJsonxSerializable */ #define BEGIN_JSONX_SERIALIZER \ virtual void Serialize(FJsonxSerializerBase& Serializer, bool bFlatObject) override \ { \ if (!bFlatObject) { Serializer.StartObject(); } #define END_JSONX_SERIALIZER \ if (!bFlatObject) { Serializer.EndObject(); } \ } #define JSONX_SERIALIZE(JsonxName, JsonxValue) \ Serializer.Serialize(TEXT(JsonxName), JsonxValue) #define JSONX_SERIALIZE_OPTIONAL(JsonxName, OptionalJsonxValue) \ if (Serializer.IsLoading()) \ { \ if (Serializer.GetObject()->HasField(TEXT(JsonxName))) \ { \ Serializer.Serialize(TEXT(JsonxName), OptionalJsonxValue.Emplace()); \ } \ } \ else \ { \ if (OptionalJsonxValue.IsSet()) \ { \ Serializer.Serialize(TEXT(JsonxName), OptionalJsonxValue.GetValue()); \ } \ } #define JSONX_SERIALIZE_ARRAY(JsonxName, JsonxArray) \ Serializer.SerializeArray(TEXT(JsonxName), JsonxArray) #define JSONX_SERIALIZE_MAP(JsonxName, JsonxMap) \ Serializer.SerializeMap(TEXT(JsonxName), JsonxMap) #define JSONX_SERIALIZE_SIMPLECOPY(JsonxMap) \ Serializer.SerializeSimpleMap(JsonxMap) #define JSONX_SERIALIZE_MAP_SAFE(JsonxName, JsonxMap) \ Serializer.SerializeMapSafe(TEXT(JsonxName), JsonxMap) #define JSONX_SERIALIZE_SERIALIZABLE(JsonxName, JsonxValue) \ JsonxValue.Serialize(Serializer, false) #define JSONX_SERIALIZE_RAW_JSONX_STRING(JsonxName, JsonxValue) \ if (Serializer.IsLoading()) \ { \ if (Serializer.GetObject()->HasTypedField(TEXT(JsonxName))) \ { \ TSharedPtr JsonxObject = Serializer.GetObject()->GetObjectField(TEXT(JsonxName)); \ if (JsonxObject.IsValid()) \ { \ auto Writer = TJsonxWriterFactory>::Create(&JsonxValue); \ FJsonxSerializer::Serialize(JsonxObject.ToSharedRef(), Writer); \ } \ } \ 
else \ { \ JsonxValue = FString(); \ } \ } \ else \ { \ if (!JsonxValue.IsEmpty()) \ { \ Serializer.WriteIdentifierPrefix(TEXT(JsonxName)); \ Serializer.WriteRawJSONXValue(*JsonxValue); \ } \ } #define JSONX_SERIALIZE_ARRAY_SERIALIZABLE(JsonxName, JsonxArray, ElementType) \ if (Serializer.IsLoading()) \ { \ if (Serializer.GetObject()->HasTypedField(JsonxName)) \ { \ for (auto It = Serializer.GetObject()->GetArrayField(JsonxName).CreateConstIterator(); It; ++It) \ { \ ElementType* Obj = new(JsonxArray) ElementType(); \ Obj->FromJsonx((*It)->AsObject()); \ } \ } \ } \ else \ { \ Serializer.StartArray(JsonxName); \ for (auto It = JsonxArray.CreateIterator(); It; ++It) \ { \ It->Serialize(Serializer, false); \ } \ Serializer.EndArray(); \ } #define JSONX_SERIALIZE_OPTIONAL_ARRAY_SERIALIZABLE(JsonxName, OptionalJsonxArray, ElementType) \ if (Serializer.IsLoading()) \ { \ if (Serializer.GetObject()->HasTypedField(JsonxName)) \ { \ TArray& JsonxArray = OptionalJsonxArray.Emplace(); \ for (auto It = Serializer.GetObject()->GetArrayField(JsonxName).CreateConstIterator(); It; ++It) \ { \ ElementType* Obj = new(JsonxArray) ElementType(); \ Obj->FromJsonx((*It)->AsObject()); \ } \ } \ } \ else \ { \ if (OptionalJsonxArray.IsSet()) \ { \ Serializer.StartArray(JsonxName); \ for (auto It = OptionalJsonxArray->CreateIterator(); It; ++It) \ { \ It->Serialize(Serializer, false); \ } \ Serializer.EndArray(); \ } \ } #define JSONX_SERIALIZE_MAP_SERIALIZABLE(JsonxName, JsonxMap, ElementType) \ if (Serializer.IsLoading()) \ { \ if (Serializer.GetObject()->HasTypedField(JsonxName)) \ { \ TSharedPtr JsonxObj = Serializer.GetObject()->GetObjectField(JsonxName); \ for (auto MapIt = JsonxObj->Values.CreateConstIterator(); MapIt; ++MapIt) \ { \ ElementType NewEntry; \ NewEntry.FromJsonx(MapIt.Value()->AsObject()); \ JsonxMap.Add(MapIt.Key(), NewEntry); \ } \ } \ } \ else \ { \ Serializer.StartObject(JsonxName); \ for (auto It = JsonxMap.CreateIterator(); It; ++It) \ { \ 
Serializer.StartObject(It.Key()); \
			It.Value().Serialize(Serializer, true); \
			Serializer.EndObject(); \
		} \
		Serializer.EndObject(); \
	}

// Serializes a nested serializable object under the given field name.
#define JSONX_SERIALIZE_OBJECT_SERIALIZABLE(JsonxName, JsonxSerializableObject) \
	/* Process the JsonxName field differently because it is an object */ \
	if (Serializer.IsLoading()) \
	{ \
		/* Read in the value from the JsonxName field */ \
		if (Serializer.GetObject()->HasTypedField(JsonxName)) \
		{ \
			TSharedPtr JsonxObj = Serializer.GetObject()->GetObjectField(JsonxName); \
			if (JsonxObj.IsValid()) \
			{ \
				(JsonxSerializableObject).FromJsonx(JsonxObj); \
			} \
		} \
	} \
	else \
	{ \
		/* Write the value to the Name field */ \
		Serializer.StartObject(JsonxName); \
		(JsonxSerializableObject).Serialize(Serializer, true); \
		Serializer.EndObject(); \
	}

// Optional nested serializable object: on load the optional is assigned a
// default-constructed value and filled via FromJsonx only when the field
// exists; on save it is written only when the optional is set.
#define JSONX_SERIALIZE_OPTIONAL_OBJECT_SERIALIZABLE(JsonxName, JsonxSerializableObject) \
	if (Serializer.IsLoading()) \
	{ \
		using ObjectType = TRemoveReference::Type; \
		if (Serializer.GetObject()->HasTypedField(JsonxName)) \
		{ \
			TSharedPtr JsonxObj = Serializer.GetObject()->GetObjectField(JsonxName); \
			if (JsonxObj.IsValid()) \
			{ \
				JsonxSerializableObject = ObjectType{}; \
				JsonxSerializableObject.GetValue().FromJsonx(JsonxObj); \
			} \
		} \
	} \
	else \
	{ \
		if (JsonxSerializableObject.IsSet()) \
		{ \
			Serializer.StartObject(JsonxName); \
			(JsonxSerializableObject.GetValue()).Serialize(Serializer, true); \
			Serializer.EndObject(); \
		} \
	}

// Serializes an FDateTime as a whole-second Unix timestamp (int64).
#define JSONX_SERIALIZE_DATETIME_UNIX_TIMESTAMP(JsonxName, JsonxDateTime) \
	if (Serializer.IsLoading()) \
	{ \
		int64 UnixTimestampValue; \
		Serializer.Serialize(TEXT(JsonxName), UnixTimestampValue); \
		JsonxDateTime = FDateTime::FromUnixTimestamp(UnixTimestampValue); \
	} \
	else \
	{ \
		int64 UnixTimestampValue = JsonxDateTime.ToUnixTimestamp(); \
		Serializer.Serialize(TEXT(JsonxName), UnixTimestampValue); \
	}

// Serializes an FDateTime as a Unix timestamp in milliseconds. Sub-second
// precision is discarded on load (integer division by 1000) and never
// produced on save (whole seconds multiplied by 1000).
#define JSONX_SERIALIZE_DATETIME_UNIX_TIMESTAMP_MILLISECONDS(JsonxName, JsonxDateTime) \
	if (Serializer.IsLoading()) \
	{ \
		int64 UnixTimestampValueInMilliseconds; \
		Serializer.Serialize(TEXT(JsonxName), UnixTimestampValueInMilliseconds); \
		JsonxDateTime = FDateTime::FromUnixTimestamp(UnixTimestampValueInMilliseconds / 1000); \
	} \
	else \
	{ \
		int64 UnixTimestampValueInMilliseconds = JsonxDateTime.ToUnixTimestamp() * 1000; \
		Serializer.Serialize(TEXT(JsonxName), UnixTimestampValueInMilliseconds); \
	}

// Serializes an enum through its Lex string form (LexToString / LexFromString).
#define JSONX_SERIALIZE_ENUM(JsonxName, JsonxEnum) \
	if (Serializer.IsLoading()) \
	{ \
		FString JsonxTextValue; \
		Serializer.Serialize(TEXT(JsonxName), JsonxTextValue); \
		LexFromString(JsonxEnum, *JsonxTextValue); \
	} \
	else \
	{ \
		FString JsonxTextValue = LexToString(JsonxEnum); \
		Serializer.Serialize(TEXT(JsonxName), JsonxTextValue); \
	}

struct FJsonxSerializerBase;

// NOTE(review): the element/key/value template arguments of the typedefs below
// (e.g. TArray<FString>, TMap<FString, FString>) appear to have been stripped
// by the text extraction — restore them from the original header before compiling.

/** Array of data */
typedef TArray FJsonxSerializableArray;
typedef TArray FJsonxSerializableArrayInt;

/** Maps a key to a value */
typedef TMap FJsonxSerializableKeyValueMap;
typedef TMap FJsonxSerializableKeyValueMapInt;
typedef TMap FJsonxSerializableKeyValueMapInt64;
typedef TMap FJsonxSerializableKeyValueMapFloat;

/**
 * Base interface used to serialize to/from JSONX. 
Hides the fact there are separate read/write classes */ struct FJsonxSerializerBase { virtual bool IsLoading() const = 0; virtual bool IsSaving() const = 0; virtual void StartObject() = 0; virtual void StartObject(const FString& Name) = 0; virtual void EndObject() = 0; virtual void StartArray() = 0; virtual void StartArray(const FString& Name) = 0; virtual void EndArray() = 0; virtual void Serialize(const TCHAR* Name, int32& Value) = 0; virtual void Serialize(const TCHAR* Name, uint32& Value) = 0; virtual void Serialize(const TCHAR* Name, int64& Value) = 0; virtual void Serialize(const TCHAR* Name, bool& Value) = 0; virtual void Serialize(const TCHAR* Name, FString& Value) = 0; virtual void Serialize(const TCHAR* Name, FText& Value) = 0; virtual void Serialize(const TCHAR* Name, float& Value) = 0; virtual void Serialize(const TCHAR* Name, double& Value) = 0; virtual void Serialize(const TCHAR* Name, FDateTime& Value) = 0; virtual void SerializeArray(FJsonxSerializableArray& Array) = 0; virtual void SerializeArray(const TCHAR* Name, FJsonxSerializableArray& Value) = 0; virtual void SerializeArray(const TCHAR* Name, FJsonxSerializableArrayInt& Value) = 0; virtual void SerializeMap(const TCHAR* Name, FJsonxSerializableKeyValueMap& Map) = 0; virtual void SerializeMap(const TCHAR* Name, FJsonxSerializableKeyValueMapInt& Map) = 0; virtual void SerializeMap(const TCHAR* Name, FJsonxSerializableKeyValueMapInt64& Map) = 0; virtual void SerializeMap(const TCHAR* Name, FJsonxSerializableKeyValueMapFloat& Map) = 0; virtual void SerializeSimpleMap(FJsonxSerializableKeyValueMap& Map) = 0; virtual void SerializeMapSafe(const TCHAR* Name, FJsonxSerializableKeyValueMap& Map) = 0; virtual TSharedPtr GetObject() = 0; virtual void WriteIdentifierPrefix(const TCHAR* Name) = 0; virtual void WriteRawJSONXValue(const TCHAR* Value) = 0; }; /** * Implements the abstract serializer interface hiding the underlying writer object */ template > class FJsonxSerializerWriter : public 
FJsonxSerializerBase { /** The object to write the JSONX output to */ TSharedRef > JsonxWriter; public: /** * Initializes the writer object * * @param InJsonxWriter the object to write the JSONX output to */ FJsonxSerializerWriter(TSharedRef > InJsonxWriter) : JsonxWriter(InJsonxWriter) { } virtual ~FJsonxSerializerWriter() { } /** Is the JSONX being read from */ virtual bool IsLoading() const override { return false; } /** Is the JSONX being written to */ virtual bool IsSaving() const override { return true; } /** Access to the root object */ virtual TSharedPtr GetObject() override { return TSharedPtr(); } /** * Starts a new object "{" */ virtual void StartObject() override { JsonxWriter->WriteObjectStart(); } /** * Starts a new object "{" */ virtual void StartObject(const FString& Name) override { JsonxWriter->WriteObjectStart(Name); } /** * Completes the definition of an object "}" */ virtual void EndObject() override { JsonxWriter->WriteObjectEnd(); } virtual void StartArray() override { JsonxWriter->WriteArrayStart(); } virtual void StartArray(const FString& Name) override { JsonxWriter->WriteArrayStart(Name); } virtual void EndArray() override { JsonxWriter->WriteArrayEnd(); } /** * Writes the field name and the corresponding value to the JSONX data * * @param Name the field name to write out * @param Value the value to write out */ virtual void Serialize(const TCHAR* Name, int32& Value) override { JsonxWriter->WriteValue(Name, Value); } /** * Writes the field name and the corresponding value to the JSONX data * * @param Name the field name to write out * @param Value the value to write out */ virtual void Serialize(const TCHAR* Name, uint32& Value) override { JsonxWriter->WriteValue(Name, static_cast(Value)); } /** * Writes the field name and the corresponding value to the JSONX data * * @param Name the field name to write out * @param Value the value to write out */ virtual void Serialize(const TCHAR* Name, int64& Value) override { 
JsonxWriter->WriteValue(Name, Value); } /** * Writes the field name and the corresponding value to the JSONX data * * @param Name the field name to write out * @param Value the value to write out */ virtual void Serialize(const TCHAR* Name, bool& Value) override { JsonxWriter->WriteValue(Name, Value); } /** * Writes the field name and the corresponding value to the JSONX data * * @param Name the field name to write out * @param Value the value to write out */ virtual void Serialize(const TCHAR* Name, FString& Value) override { JsonxWriter->WriteValue(Name, Value); } /** * Writes the field name and the corresponding value to the JSONX data * * @param Name the field name to write out * @param Value the value to write out */ virtual void Serialize(const TCHAR* Name, FText& Value) override { JsonxWriter->WriteValue(Name, Value.ToString()); } /** * Writes the field name and the corresponding value to the JSONX data * * @param Name the field name to write out * @param Value the value to write out */ virtual void Serialize(const TCHAR* Name, float& Value) override { JsonxWriter->WriteValue(Name, Value); } /** * Writes the field name and the corresponding value to the JSONX data * * @param Name the field name to write out * @param Value the value to write out */ virtual void Serialize(const TCHAR* Name, double& Value) override { JsonxWriter->WriteValue(Name, Value); } /** * Writes the field name and the corresponding value to the JSONX data * * @param Name the field name to write out * @param Value the value to write out */ virtual void Serialize(const TCHAR* Name, FDateTime& Value) override { if (Value.GetTicks() > 0) { JsonxWriter->WriteValue(Name, Value.ToIso8601()); } } /** * Serializes an array of values * * @param Name the name of the property to serialize * @param Array the array to serialize */ virtual void SerializeArray(FJsonxSerializableArray& Array) override { JsonxWriter->WriteArrayStart(); // Iterate all of values for (FJsonxSerializableArray::TIterator 
ArrayIt(Array); ArrayIt; ++ArrayIt) { JsonxWriter->WriteValue(*ArrayIt); } JsonxWriter->WriteArrayEnd(); } /** * Serializes an array of values with an identifier * * @param Name the name of the property to serialize * @param Array the array to serialize */ virtual void SerializeArray(const TCHAR* Name, FJsonxSerializableArray& Array) override { JsonxWriter->WriteArrayStart(Name); // Iterate all of values for (FJsonxSerializableArray::ElementType& Item : Array) { JsonxWriter->WriteValue(Item); } JsonxWriter->WriteArrayEnd(); } /** * Serializes an array of values with an identifier * * @param Name the name of the property to serialize * @param Array the array to serialize */ virtual void SerializeArray(const TCHAR* Name, FJsonxSerializableArrayInt& Array) override { JsonxWriter->WriteArrayStart(Name); // Iterate all of values for (FJsonxSerializableArrayInt::ElementType& Item : Array) { JsonxWriter->WriteValue(Item); } JsonxWriter->WriteArrayEnd(); } /** * Serializes the keys & values for map * * @param Name the name of the property to serialize * @param Map the map to serialize */ virtual void SerializeMap(const TCHAR* Name, FJsonxSerializableKeyValueMap& Map) override { JsonxWriter->WriteObjectStart(Name); // Iterate all of the keys and their values for (FJsonxSerializableKeyValueMap::ElementType& Pair : Map) { Serialize(*Pair.Key, Pair.Value); } JsonxWriter->WriteObjectEnd(); } /** * Serializes the keys & values for map * * @param Name the name of the property to serialize * @param Map the map to serialize */ virtual void SerializeMap(const TCHAR* Name, FJsonxSerializableKeyValueMapInt& Map) override { JsonxWriter->WriteObjectStart(Name); // Iterate all of the keys and their values for (FJsonxSerializableKeyValueMapInt::ElementType& Pair : Map) { Serialize(*Pair.Key, Pair.Value); } JsonxWriter->WriteObjectEnd(); } /** * Serializes the keys & values for map * * @param Name the name of the property to serialize * @param Map the map to serialize */ virtual void 
SerializeMap(const TCHAR* Name, FJsonxSerializableKeyValueMapInt64& Map) override { JsonxWriter->WriteObjectStart(Name); // Iterate all of the keys and their values for (FJsonxSerializableKeyValueMapInt64::ElementType& Pair : Map) { Serialize(*Pair.Key, Pair.Value); } JsonxWriter->WriteObjectEnd(); } /** * Serializes the keys & values for map * * @param Name the name of the property to serialize * @param Map the map to serialize */ virtual void SerializeMap(const TCHAR* Name, FJsonxSerializableKeyValueMapFloat& Map) override { JsonxWriter->WriteObjectStart(Name); // Iterate all of the keys and their values for (FJsonxSerializableKeyValueMapFloat::ElementType& Pair : Map) { Serialize(*Pair.Key, Pair.Value); } JsonxWriter->WriteObjectEnd(); } virtual void SerializeSimpleMap(FJsonxSerializableKeyValueMap& Map) override { // writing does nothing here, this is meant to read in all data from a json object // writing is explicitly handled per key/type } /** * Serializes keys and values from an object into a map. 
 * * @param Name Name of property to serialize
 * @param Map The Map to copy String values from
 */
// Writer-side "safe" map serialize is identical to the plain map serialize.
virtual void SerializeMapSafe(const TCHAR* Name, FJsonxSerializableKeyValueMap& Map)
{
	SerializeMap(Name, Map);
}

virtual void WriteIdentifierPrefix(const TCHAR* Name)
{
	JsonxWriter->WriteIdentifierPrefix(Name);
}

virtual void WriteRawJSONXValue(const TCHAR* Value)
{
	JsonxWriter->WriteRawJSONXValue(Value);
}
};

/**
 * Implements the abstract serializer interface hiding the underlying reader object
 *
 * NOTE(review): template argument lists appear to have been stripped from this
 * snapshot (e.g. `TSharedPtr JsonxObject`) — compare against the upstream
 * FJsonSerializerReader in Engine's JsonSerializerMacros.h before building.
 */
class FJsonxSerializerReader : public FJsonxSerializerBase
{
	/** The object that holds the parsed JSONX data */
	TSharedPtr JsonxObject;

public:
	/**
	 * Inits the base JSONX object that is being read from
	 *
	 * @param InJsonxObject the JSONX object to serialize from
	 */
	FJsonxSerializerReader(TSharedPtr InJsonxObject) :
		JsonxObject(InJsonxObject)
	{
	}

	virtual ~FJsonxSerializerReader()
	{
	}

	/** Is the JSONX being read from */
	virtual bool IsLoading() const override { return true; }
	/** Is the JSONX being written to */
	virtual bool IsSaving() const override { return false; }
	/** Access to the root Jsonx object being read */
	virtual TSharedPtr GetObject() override { return JsonxObject; }

	/** Ignored — structure is already present in the parsed object. */
	virtual void StartObject() override
	{
		// Empty on purpose
	}
	/** Ignored */
	virtual void StartObject(const FString& Name) override
	{
		// Empty on purpose
	}
	/** Ignored */
	virtual void EndObject() override
	{
		// Empty on purpose
	}
	/** Ignored */
	virtual void StartArray() override
	{
		// Empty on purpose
	}
	/** Ignored */
	virtual void StartArray(const FString& Name) override
	{
		// Empty on purpose
	}
	/** Ignored */
	virtual void EndArray() override
	{
		// Empty on purpose
	}
	/**
	 * If the underlying json object has the field, it is read into the value
	 *
	 * @param Name the name of the field to read
	 * @param Value the out value to read the data into
	 */
	virtual void Serialize(const TCHAR* Name, int32& Value) override
	{
		if (JsonxObject->HasTypedField(Name))
		{
			JsonxObject->TryGetNumberField(Name, Value);
		}
	}
	/**
	 * If the underlying json object has the field, it is read into the value
	 *
	 * @param Name the name of the field to read
	 * @param Value the out value to read the data into
	 */
	virtual void Serialize(const TCHAR* Name, uint32& Value) override
	{
		if (JsonxObject->HasTypedField(Name))
		{
			JsonxObject->TryGetNumberField(Name, Value);
		}
	}
	/**
	 * If the underlying json object has the field, it is read into the value
	 *
	 * @param Name the name of the field to read
	 * @param Value the out value to read the data into
	 */
	virtual void Serialize(const TCHAR* Name, int64& Value) override
	{
		if (JsonxObject->HasTypedField(Name))
		{
			JsonxObject->TryGetNumberField(Name, Value);
		}
	}
	/**
	 * If the underlying json object has the field, it is read into the value
	 *
	 * @param Name the name of the field to read
	 * @param Value the out value to read the data into
	 */
	virtual void Serialize(const TCHAR* Name, bool& Value) override
	{
		if (JsonxObject->HasTypedField(Name))
		{
			Value = JsonxObject->GetBoolField(Name);
		}
	}
	/**
	 * If the underlying json object has the field, it is read into the value
	 *
	 * @param Name the name of the field to read
	 * @param Value the out value to read the data into
	 */
	virtual void Serialize(const TCHAR* Name, FString& Value) override
	{
		if (JsonxObject->HasTypedField(Name))
		{
			Value = JsonxObject->GetStringField(Name);
		}
	}
	/**
	 * If the underlying json object has the field, it is read into the value
	 * (text is stored as a plain string; no localization table lookup here)
	 *
	 * @param Name the name of the field to read
	 * @param Value the out value to read the data into
	 */
	virtual void Serialize(const TCHAR* Name, FText& Value) override
	{
		if (JsonxObject->HasTypedField(Name))
		{
			Value = FText::FromString(JsonxObject->GetStringField(Name));
		}
	}
	/**
	 * If the underlying json object has the field, it is read into the value
	 *
	 * @param Name the name of the field to read
	 * @param Value the out value to read the data into
	 */
	virtual void Serialize(const TCHAR* Name, float& Value) override
	{
		if (JsonxObject->HasTypedField(Name))
		{
			// narrowing: JSON numbers are parsed as double
			Value = (float)JsonxObject->GetNumberField(Name);
		}
	}
	/**
	 * If the underlying json object has the field, it is read into the value
	 *
	 * @param Name the name of the field to read
	 * @param Value the out value to read the data into
	 */
	virtual void Serialize(const TCHAR* Name, double& Value) override
	{
		if (JsonxObject->HasTypedField(Name))
		{
			Value = JsonxObject->GetNumberField(Name);
		}
	}
	/**
	 * Reads an ISO-8601 date string field into an FDateTime
	 *
	 * @param Name the field name to read
	 * @param Value the value to read into
	 */
	virtual void Serialize(const TCHAR* Name, FDateTime& Value) override
	{
		if (JsonxObject->HasTypedField(Name))
		{
			// Value is left untouched if parsing fails
			FDateTime::ParseIso8601(*JsonxObject->GetStringField(Name), Value);
		}
	}
	/**
	 * Serializes an array of values
	 *
	 * @param Name the name of the property to serialize
	 * @param Array the array to serialize
	 */
	virtual void SerializeArray(FJsonxSerializableArray& Array) override
	{
		// @todo - higher level serialization is expecting a Jsonx Object
		check(0 && TEXT("Not implemented"));
	}
	/**
	 * Serializes an array of values with an identifier
	 *
	 * @param Name the name of the property to serialize
	 * @param Array the array to serialize
	 */
	virtual void SerializeArray(const TCHAR* Name, FJsonxSerializableArray& Array) override
	{
		if (JsonxObject->HasTypedField(Name))
		{
			TArray< TSharedPtr > JsonxArray = JsonxObject->GetArrayField(Name);
			// Iterate all of the keys and their values
			for (TSharedPtr& Value : JsonxArray)
			{
				Array.Add(Value->AsString());
			}
		}
	}
	/**
	 * Serializes an array of values with an identifier
	 *
	 * @param Name the name of the property to serialize
	 * @param Array the array to serialize
	 */
	virtual void SerializeArray(const TCHAR* Name, FJsonxSerializableArrayInt& Array) override
	{
		if (JsonxObject->HasTypedField(Name))
		{
			TArray< TSharedPtr > JsonxArray = JsonxObject->GetArrayField(Name);
			// Iterate all of the keys and their values
			// (AsNumber() yields double; implicit truncation to the int element type)
			for (TSharedPtr& Value : JsonxArray)
			{
				Array.Add(Value->AsNumber());
			}
		}
	}
	/**
	 * Serializes the keys & values for map
	 *
	 * @param Name the name of the property to serialize
	 * @param Map the map to serialize
	 */
	virtual void SerializeMap(const TCHAR* Name, FJsonxSerializableKeyValueMap& Map) override
	{
		if (JsonxObject->HasTypedField(Name))
		{
			TSharedPtr JsonxMap = JsonxObject->GetObjectField(Name);
			// Iterate all of the keys and their values
			for (const TPair>& Pair : JsonxMap->Values)
			{
				Map.Add(Pair.Key, Pair.Value->AsString());
			}
		}
	}
	/**
	 * Serializes the keys & values for map
	 *
	 * @param Name the name of the property to serialize
	 * @param Map the map to serialize
	 */
	virtual void SerializeMap(const TCHAR* Name, FJsonxSerializableKeyValueMapInt& Map) override
	{
		if (JsonxObject->HasTypedField(Name))
		{
			TSharedPtr JsonxMap = JsonxObject->GetObjectField(Name);
			// Iterate all of the keys and their values
			for (const TPair>& Pair : JsonxMap->Values)
			{
				const int32 Value = (int32)Pair.Value->AsNumber();
				Map.Add(Pair.Key, Value);
			}
		}
	}
	/**
	 * Serializes the keys & values for map
	 *
	 * @param Name the name of the property to serialize
	 * @param Map the map to serialize
	 */
	virtual void SerializeMap(const TCHAR* Name, FJsonxSerializableKeyValueMapInt64& Map) override
	{
		if (JsonxObject->HasTypedField(Name))
		{
			TSharedPtr JsonxMap = JsonxObject->GetObjectField(Name);
			// Iterate all of the keys and their values
			for (const TPair>& Pair : JsonxMap->Values)
			{
				const int64 Value = (int64)Pair.Value->AsNumber();
				Map.Add(Pair.Key, Value);
			}
		}
	}
	/**
	 * Serializes the keys & values for map
	 *
	 * @param Name the name of the property to serialize
	 * @param Map the map to serialize
	 */
	virtual void SerializeMap(const TCHAR* Name, FJsonxSerializableKeyValueMapFloat& Map) override
	{
		if (JsonxObject->HasTypedField(Name))
		{
			TSharedPtr JsonxMap = JsonxObject->GetObjectField(Name);
			// Iterate all of the keys and their values
			for (const TPair>& Pair : JsonxMap->Values)
			{
				const float Value = (float)Pair.Value->AsNumber();
				Map.Add(Pair.Key, Value);
			}
		}
	}

	// Copies every top-level field whose value is trivially string-convertible
	// (array/object fields are skipped because TryGetString fails on them).
	virtual void SerializeSimpleMap(FJsonxSerializableKeyValueMap& Map) override
	{
		// Iterate all of the keys and their values, only taking simple types (not array/object), all in string form
		for (auto KeyValueIt = JsonxObject->Values.CreateConstIterator(); KeyValueIt; ++KeyValueIt)
		{
			FString Value;
			if (KeyValueIt.Value()->TryGetString(Value))
			{
				Map.Add(KeyValueIt.Key(), MoveTemp(Value));
			}
		}
	}

	/**
	 * Deserializes keys and values from an object into a map, but only if the value is trivially convertable to string.
	 *
	 * @param Name Name of property to deserialize
	 * @param Map The Map to fill with String values found
	 */
	virtual void SerializeMapSafe(const TCHAR* Name, FJsonxSerializableKeyValueMap& Map) override
	{
		if (JsonxObject->HasTypedField(Name))
		{
			// Iterate all of the keys and their values, only taking simple types (not array/object), all in string form
			TSharedPtr JsonxMap = JsonxObject->GetObjectField(Name);
			for (const TPair>& Pair : JsonxMap->Values)
			{
				FString Value;
				if (Pair.Value->TryGetString(Value))
				{
					Map.Add(Pair.Key, MoveTemp(Value));
				}
			}
		}
	}

	virtual void WriteIdentifierPrefix(const TCHAR* Name)
	{
		// Should never be called on a reader
		check(false);
	}

	virtual void WriteRawJSONXValue(const TCHAR* Value)
	{
		// Should never be called on a reader
		check(false);
	}
};

/**
 * Base class for a JSONX serializable object
 */
struct FJsonxSerializable
{
	/**
	 * Virtualize destructor as we provide overridable functions
	 */
	virtual ~FJsonxSerializable() {}

	/**
	 * Used to allow serialization of a const ref
	 *
	 * @return the corresponding json string
	 */
	inline const FString ToJsonx(bool bPrettyPrint = true) const
	{
		// Strip away const, because we use a single method that can read/write which requires non-const semantics
		// Otherwise, we'd have to have 2 separate macros for declaring const to json and non-const from json
		return ((FJsonxSerializable*)this)->ToJsonx(bPrettyPrint);
	}
	/**
	 * Serializes this object to its JSONX string form
	 *
	 * @param bPrettyPrint - If true, will use the pretty json formatter
	 * @return the corresponding json string
	 */
	virtual const FString ToJsonx(bool bPrettyPrint=true)
	{
		FString JsonxStr;
		if (bPrettyPrint)
		{
			TSharedRef > JsonxWriter = TJsonxWriterFactory<>::Create(&JsonxStr);
			FJsonxSerializerWriter<> Serializer(JsonxWriter);
			Serialize(Serializer, false);
			JsonxWriter->Close();
		}
		else
		{
			// Condensed policy: no whitespace/newlines in the output
			TSharedRef< TJsonxWriter< TCHAR, TCondensedJsonxPrintPolicy< TCHAR > > > JsonxWriter = TJsonxWriterFactory< TCHAR, TCondensedJsonxPrintPolicy< TCHAR > >::Create( &JsonxStr );
			FJsonxSerializerWriter> Serializer(JsonxWriter);
			Serialize(Serializer, false);
			JsonxWriter->Close();
		}
		return JsonxStr;
	}

	// Serialize into an externally-owned pretty-print writer (caller closes the writer).
	virtual void ToJsonx(TSharedRef >& JsonxWriter, bool bFlatObject) const
	{
		FJsonxSerializerWriter<> Serializer(JsonxWriter);
		((FJsonxSerializable*)this)->Serialize(Serializer, bFlatObject);
	}

	// Serialize into an externally-owned condensed writer (caller closes the writer).
	virtual void ToJsonx(TSharedRef< TJsonxWriter< TCHAR, TCondensedJsonxPrintPolicy< TCHAR > > >& JsonxWriter, bool bFlatObject) const
	{
		FJsonxSerializerWriter> Serializer(JsonxWriter);
		((FJsonxSerializable*)this)->Serialize(Serializer, bFlatObject);
	}

	/**
	 * Serializes the contents of a JSONX string into this object
	 *
	 * @param Jsonx the JSONX data to serialize from
	 */
	virtual bool FromJsonx(const FString& Jsonx)
	{
		return FromJsonx(CopyTemp(Jsonx));
	}

	/**
	 * Serializes the contents of a JSONX string into this object
	 *
	 * @param Jsonx the JSONX data to serialize from
	 * @return true if the string parsed to a valid object and was applied
	 */
	virtual bool FromJsonx(FString&& Jsonx)
	{
		TSharedPtr JsonxObject;
		TSharedRef > JsonxReader = TJsonxReaderFactory<>::Create(MoveTemp(Jsonx));
		if (FJsonxSerializer::Deserialize(JsonxReader,JsonxObject) &&
			JsonxObject.IsValid())
		{
			FJsonxSerializerReader Serializer(JsonxObject);
			Serialize(Serializer, false);
			return true;
		}
		return false;
	}

	// Populate this object from an already-parsed JSONX object; false if the pointer is invalid.
	virtual bool FromJsonx(TSharedPtr JsonxObject)
	{
		if (JsonxObject.IsValid())
		{
			FJsonxSerializerReader Serializer(JsonxObject);
			Serialize(Serializer, false);
			return true;
		}
		return false;
	}

	/**
	 * Abstract method that needs to be supplied using the macros
	 *
	 * @param Serializer the object that will perform serialization in/out of JSONX
	 * @param bFlatObject if true then no object wrapper is used
	 */
	virtual void Serialize(FJsonxSerializerBase& Serializer, bool bFlatObject) = 0;
};

/**
 * Useful if you just want access to the underlying FJsonxObject (for cases where the schema is loose or an outer system will do further de/serialization)
 */
struct FJsonxDataBag
	: public FJsonxSerializable
{
	virtual void Serialize(FJsonxSerializerBase& Serializer, bool bFlatObject) override
	{
		if (Serializer.IsLoading())
		{
			// just grab a reference to the underlying JSONX object
			JsonxObject = Serializer.GetObject();
		}
		else
		{
			if (!bFlatObject)
			{
				Serializer.StartObject();
			}
			if (JsonxObject.IsValid())
			{
				for (const auto& It : JsonxObject->Values)
				{
					TSharedPtr JsonxValue = It.Value;
					if (JsonxValue.IsValid())
					{
						// NOTE(review): EJsonx::None/Null values are silently dropped
						// by this switch — confirm that is intended.
						switch (JsonxValue->Type)
						{
							case EJsonx::Boolean:
							{
								auto Value = JsonxValue->AsBool();
								Serializer.Serialize(*It.Key, Value);
								break;
							}
							case EJsonx::Number:
							{
								auto Value = JsonxValue->AsNumber();
								Serializer.Serialize(*It.Key, Value);
								break;
							}
							case EJsonx::String:
							{
								auto Value = JsonxValue->AsString();
								Serializer.Serialize(*It.Key, Value);
								break;
							}
							case EJsonx::Array:
							{
								// if we have an array, serialize to string and write raw
								FString JsonxStr;
								auto Writer = TJsonxWriterFactory>::Create(&JsonxStr);
								FJsonxSerializer::Serialize(JsonxValue->AsArray(), Writer);
								Serializer.WriteIdentifierPrefix(*It.Key);
								Serializer.WriteRawJSONXValue(*JsonxStr);
								break;
							}
							case EJsonx::Object:
							{
								// if we have an object, serialize to string and write raw
								FString JsonxStr;
								auto Writer = TJsonxWriterFactory>::Create(&JsonxStr);
								FJsonxSerializer::Serialize(JsonxValue->AsObject().ToSharedRef(), Writer);
								// too bad there's no JsonxObject serialization method on FJsonxSerializerBase directly :-/
								Serializer.WriteIdentifierPrefix(*It.Key);
								Serializer.WriteRawJSONXValue(*JsonxStr);
								break;
							}
						}
					}
				}
			}
			if (!bFlatObject)
			{
				Serializer.EndObject();
			}
		}
	}

	// Field accessors returning a neutral default when the key is missing.
	double GetDouble(const FString& Key) const
	{
		const auto Jsonx = GetField(Key);
		return Jsonx.IsValid() ? Jsonx->AsNumber() : 0.0;
	}

	FString GetString(const FString& Key) const
	{
		const auto Jsonx = GetField(Key);
		return Jsonx.IsValid() ? Jsonx->AsString() : FString();
	}

	bool GetBool(const FString& Key) const
	{
		const auto Jsonx = GetField(Key);
		return Jsonx.IsValid() ? Jsonx->AsBool() : false;
	}

	TSharedPtr GetField(const FString& Key) const
	{
		if (JsonxObject.IsValid())
		{
			return JsonxObject->TryGetField(Key);
		}
		return TSharedPtr();
	}

	template
	void SetField(const FString& Key, Arg&& Value)
	{
		SetFieldJsonx(Key, MakeShared(MoveTempIfPossible(Value)));
	}

	void SetFieldJsonx(const FString& Key, const TSharedPtr& Value)
	{
		// lazily create the backing object on first write
		if (!JsonxObject.IsValid())
		{
			JsonxObject = MakeShared();
		}
		JsonxObject->SetField(Key, Value);
	}

public:
	TSharedPtr JsonxObject;
};

================================================ FILE: Source/Jsonx/Public/Serialization/JsonxTypes.h ================================================

// Copyright Epic Games, Inc. All Rights Reserved.

#pragma once

#include "CoreMinimal.h"

class Error;

/**
 * Jsonx (JavaScript Object Notation) is a lightweight data-interchange format.
 * Information on how it works can be found here: http://www.json.org/.
 * This code was written from scratch with only the Jsonx spec as a guide.
 *
 * In order to use Jsonx effectively, you need to be familiar with the Object/Value
 * hierarchy, and you should use the FJsonxObject class and FJsonxValue subclasses.
 */

/**
 * Represents all the types a Jsonx Value can be.
*/ enum class EJsonx { None, Null, String, Number, Boolean, Array, Object }; enum class EJsonxToken { None, Comma, CurlyOpen, CurlyClose, SquareOpen, SquareClose, Colon, String, // short values Number, True, False, Null, Identifier }; FORCEINLINE bool EJsonxToken_IsShortValue(EJsonxToken Token) { return Token >= EJsonxToken::Number && Token <= EJsonxToken::Null; } enum class EJsonxNotation { ObjectStart, ObjectEnd, ArrayStart, ArrayEnd, Boolean, String, Number, Null, Error }; ================================================ FILE: Source/Jsonx/Public/Serialization/JsonxWriter.h ================================================ // Copyright Epic Games, Inc. All Rights Reserved. #pragma once #include "CoreMinimal.h" #include "GenericPlatform/GenericPlatformMath.h" #include "Serialization/JsonxTypes.h" #include "Policies/PrettyJsonxPrintPolicy.h" #include "Serialization/MemoryWriter.h" #define JSONX_LOW_PRECISION /** * Takes an input string and escapes it so it can be written as a valid Jsonx string. Also adds the quotes. * Appends to a given string-like object to avoid reallocations. * String-like object must support operator+=(const TCHAR*) and operation+=(TCHAR) * * @param AppendTo the string to append to. * @param StringVal the string to escape * @return the AppendTo string for convenience. 
*/ template inline StringType& AppendEscapeJsonxString(StringType& AppendTo, const FString& StringVal) { AppendTo += TEXT("\""); for (const TCHAR* Char = *StringVal; *Char != TCHAR('\0'); ++Char) { switch (*Char) { case TCHAR('\\'): AppendTo += TEXT("\\\\"); break; case TCHAR('\n'): AppendTo += TEXT("\\n"); break; case TCHAR('\t'): AppendTo += TEXT("\\t"); break; case TCHAR('\b'): AppendTo += TEXT("\\b"); break; case TCHAR('\f'): AppendTo += TEXT("\\f"); break; case TCHAR('\r'): AppendTo += TEXT("\\r"); break; case TCHAR('\"'): AppendTo += TEXT("\\\""); break; default: // Must escape control characters if (*Char >= TCHAR(32)) { AppendTo += *Char; } else { AppendTo.Appendf(TEXT("\\u%04x"), *Char); } } } AppendTo += TEXT("\""); return AppendTo; } /** * Takes an input string and escapes it so it can be written as a valid Jsonx string. Also adds the quotes. * * @param StringVal the string to escape * @return the given string, escaped to produce a valid Jsonx string. */ inline FString EscapeJsonxString(const FString& StringVal) { FString Result; return AppendEscapeJsonxString(Result, StringVal); } /** * Template for Jsonx writers. * * @param CharType The type of characters to print, i.e. TCHAR or ANSICHAR. * @param PrintPolicy The print policy to use when writing the output string (default = TPrettyJsonxPrintPolicy). 
 */
// NOTE(review): template parameter lists are stripped throughout this snapshot
// (upstream: `template <class CharType = TCHAR, class PrintPolicy = TPrettyJsonPrintPolicy<CharType>>`).
template >
class TJsonxWriter
{
public:

	// Factory — writes into the given archive starting at the given indent level.
	static TSharedRef< TJsonxWriter > Create( FArchive* const Stream, int32 InitialIndentLevel = 0 )
	{
		return MakeShareable( new TJsonxWriter< CharType, PrintPolicy >( Stream, InitialIndentLevel ) );
	}

public:

	virtual ~TJsonxWriter() { }

	FORCEINLINE int32 GetIndentLevel() const { return IndentLevel; }

	bool CanWriteObjectStart() const
	{
		return CanWriteObjectWithoutIdentifier();
	}

	// Begin an anonymous object (root, or as an array element).
	void WriteObjectStart()
	{
		check(CanWriteObjectWithoutIdentifier());
		// NOTE(review): the two identical conditions below could be one block;
		// kept split as in the original.
		if (PreviousTokenWritten != EJsonxToken::None )
		{
			WriteCommaIfNeeded();
		}
		if ( PreviousTokenWritten != EJsonxToken::None )
		{
			PrintPolicy::WriteLineTerminator(Stream);
			PrintPolicy::WriteTabs(Stream, IndentLevel);
		}
		PrintPolicy::WriteChar(Stream, CharType('{'));
		++IndentLevel;
		Stack.Push( EJsonx::Object );
		PreviousTokenWritten = EJsonxToken::CurlyOpen;
	}

	// Begin a named object field inside the current object.
	void WriteObjectStart( const FString& Identifier )
	{
		check( Stack.Top() == EJsonx::Object );
		WriteIdentifier( Identifier );

		PrintPolicy::WriteLineTerminator(Stream);
		PrintPolicy::WriteTabs(Stream, IndentLevel);
		PrintPolicy::WriteChar(Stream, CharType('{'));
		++IndentLevel;
		Stack.Push( EJsonx::Object );
		PreviousTokenWritten = EJsonxToken::CurlyOpen;
	}

	void WriteObjectEnd()
	{
		check( Stack.Top() == EJsonx::Object );

		PrintPolicy::WriteLineTerminator(Stream);

		--IndentLevel;
		PrintPolicy::WriteTabs(Stream, IndentLevel);
		PrintPolicy::WriteChar(Stream, CharType('}'));
		Stack.Pop();
		PreviousTokenWritten = EJsonxToken::CurlyClose;
	}

	// Begin an anonymous array (root, or as an array element).
	void WriteArrayStart()
	{
		check(CanWriteValueWithoutIdentifier());
		if ( PreviousTokenWritten != EJsonxToken::None )
		{
			WriteCommaIfNeeded();
		}

		if ( PreviousTokenWritten != EJsonxToken::None )
		{
			PrintPolicy::WriteLineTerminator(Stream);
			PrintPolicy::WriteTabs(Stream, IndentLevel);
		}

		PrintPolicy::WriteChar(Stream, CharType('['));
		++IndentLevel;
		Stack.Push( EJsonx::Array );
		PreviousTokenWritten = EJsonxToken::SquareOpen;
	}

	// Begin a named array field inside the current object.
	void WriteArrayStart( const FString& Identifier )
	{
		check( Stack.Top() == EJsonx::Object );
		WriteIdentifier( Identifier );

		PrintPolicy::WriteSpace( Stream );
		PrintPolicy::WriteChar(Stream, CharType('['));
		++IndentLevel;
		Stack.Push( EJsonx::Array );
		PreviousTokenWritten = EJsonxToken::SquareOpen;
	}

	void WriteArrayEnd()
	{
		check( Stack.Top() == EJsonx::Array );

		--IndentLevel;
		// closing bracket goes on its own line only after a nested close or string
		if ( PreviousTokenWritten == EJsonxToken::SquareClose || PreviousTokenWritten == EJsonxToken::CurlyClose || PreviousTokenWritten == EJsonxToken::String )
		{
			PrintPolicy::WriteLineTerminator(Stream);
			PrintPolicy::WriteTabs(Stream, IndentLevel);
		}
		else if ( PreviousTokenWritten != EJsonxToken::SquareOpen )
		{
			PrintPolicy::WriteSpace( Stream );
		}

		PrintPolicy::WriteChar(Stream, CharType(']'));
		Stack.Pop();
		PreviousTokenWritten = EJsonxToken::SquareClose;
	}

	// Write an anonymous value (array element or root).
	template
	void WriteValue(FValue Value)
	{
		check(CanWriteValueWithoutIdentifier());
		WriteCommaIfNeeded();
		// short values continue on the same line; others start a new line
		if (PreviousTokenWritten == EJsonxToken::SquareOpen || EJsonxToken_IsShortValue(PreviousTokenWritten))
		{
			PrintPolicy::WriteSpace( Stream );
		}
		else
		{
			PrintPolicy::WriteLineTerminator(Stream);
			PrintPolicy::WriteTabs(Stream, IndentLevel);
		}
		PreviousTokenWritten = WriteValueOnly( Value );
	}

	void WriteValue(const FString& Value)
	{
		check(CanWriteValueWithoutIdentifier());
		WriteCommaIfNeeded();
		PrintPolicy::WriteLineTerminator(Stream);
		PrintPolicy::WriteTabs(Stream, IndentLevel);
		PreviousTokenWritten = WriteValueOnly(Value);
	}

	// Write a named value inside the current object.
	template
	void WriteValue(const FString& Identifier, FValue Value)
	{
		check( Stack.Top() == EJsonx::Object );
		WriteIdentifier( Identifier );

		PrintPolicy::WriteSpace(Stream);
		PreviousTokenWritten = WriteValueOnly(MoveTemp(Value));
	}

	// Convenience: write a named array of values element-by-element.
	template
	void WriteValue(const FString& Identifier, const TArray& Array)
	{
		WriteArrayStart(Identifier);
		for (int Idx = 0; Idx < Array.Num(); Idx++)
		{
			WriteValue(Array[Idx]);
		}
		WriteArrayEnd();
	}

	void WriteValue(const FString& Identifier, const TCHAR* Value)
	{
		WriteValue(Identifier, FString(Value));
	}

	// WARNING: THIS IS DANGEROUS. Use this only if you know for a fact that the Value is valid JSONX!
	// Use this to insert the results of a different JSONX Writer in.
	void WriteRawJSONXValue( const FString& Identifier, const FString& Value )
	{
		check( Stack.Top() == EJsonx::Object );
		WriteIdentifier( Identifier );

		PrintPolicy::WriteSpace(Stream);
		PrintPolicy::WriteString(Stream, Value);
		PreviousTokenWritten = EJsonxToken::String;
	}

	void WriteNull( const FString& Identifier )
	{
		WriteValue(Identifier, nullptr);
	}

	void WriteValue( const TCHAR* Value )
	{
		WriteValue(FString(Value));
	}

	// WARNING: THIS IS DANGEROUS. Use this only if you know for a fact that the Value is valid JSONX!
	// Use this to insert the results of a different JSONX Writer in.
	void WriteRawJSONXValue( const FString& Value )
	{
		check(CanWriteValueWithoutIdentifier());
		WriteCommaIfNeeded();

		if ( PreviousTokenWritten != EJsonxToken::True && PreviousTokenWritten != EJsonxToken::False && PreviousTokenWritten != EJsonxToken::SquareOpen )
		{
			PrintPolicy::WriteLineTerminator(Stream);
			PrintPolicy::WriteTabs(Stream, IndentLevel);
		}
		else
		{
			PrintPolicy::WriteSpace( Stream );
		}

		PrintPolicy::WriteString(Stream, Value);
		PreviousTokenWritten = EJsonxToken::String;
	}

	void WriteNull()
	{
		WriteValue(nullptr);
	}

	// True only when the document is complete: all scopes closed (or nothing written).
	virtual bool Close()
	{
		return ( PreviousTokenWritten == EJsonxToken::None ||
				 PreviousTokenWritten == EJsonxToken::CurlyClose ||
				 PreviousTokenWritten == EJsonxToken::SquareClose )
				&& Stack.Num() == 0;
	}

	/**
	 * WriteValue("Foo", Bar) should be equivalent to WriteIdentifierPrefix("Foo"), WriteValue(Bar)
	 */
	void WriteIdentifierPrefix(const FString& Identifier)
	{
		check(Stack.Top() == EJsonx::Object);
		WriteIdentifier(Identifier);
		PrintPolicy::WriteSpace(Stream);
		PreviousTokenWritten = EJsonxToken::Identifier;
	}

protected:

	/**
	 * Creates and initializes a new instance.
	 *
	 * @param InStream An archive containing the input.
	 * @param InitialIndentLevel The initial indentation level.
	 */
	TJsonxWriter( FArchive* const InStream, int32 InitialIndentLevel )
		: Stream( InStream )
		, Stack()
		, PreviousTokenWritten(EJsonxToken::None)
		, IndentLevel(InitialIndentLevel)
	{ }

protected:

	FORCEINLINE bool CanWriteValueWithoutIdentifier() const
	{
		return Stack.Num() <= 0 || Stack.Top() == EJsonx::Array || PreviousTokenWritten == EJsonxToken::Identifier;
	}

	FORCEINLINE bool CanWriteObjectWithoutIdentifier() const
	{
		return Stack.Num() <= 0 || Stack.Top() == EJsonx::Array || PreviousTokenWritten == EJsonxToken::Identifier || PreviousTokenWritten == EJsonxToken::Colon;
	}

	FORCEINLINE void WriteCommaIfNeeded()
	{
		// no comma right after an opening bracket or an identifier prefix
		if ( PreviousTokenWritten != EJsonxToken::CurlyOpen && PreviousTokenWritten != EJsonxToken::SquareOpen && PreviousTokenWritten != EJsonxToken::Identifier)
		{
			PrintPolicy::WriteChar(Stream, CharType(','));
		}
	}

	FORCEINLINE void WriteIdentifier( const FString& Identifier )
	{
		WriteCommaIfNeeded();
		PrintPolicy::WriteLineTerminator(Stream);
		PrintPolicy::WriteTabs(Stream, IndentLevel);
		WriteStringValue( Identifier );
		PrintPolicy::WriteChar(Stream, CharType(':'));
	}

	FORCEINLINE EJsonxToken WriteValueOnly(bool Value)
	{
		PrintPolicy::WriteString(Stream, Value ? TEXT("true") : TEXT("false"));
		return Value ? EJsonxToken::True : EJsonxToken::False;
	}

#ifdef JSONX_LOW_PRECISION
	FORCEINLINE EJsonxToken WriteValueOnly(float Value)
	{
		if (FGenericPlatformMath::IsFinite(Value))
		{
			// 7 significant digits — low-precision mode (see JSONX_LOW_PRECISION)
			PrintPolicy::WriteString(Stream, FString::Printf(TEXT("%.7g"), Value));
		}
		else
		{
			// NOTE(review): bare "Infinity" is not valid strict JSON — consumers
			// must tolerate it (also loses the sign of -inf and maps NaN here too?)
			PrintPolicy::WriteString(Stream, FString::Printf(TEXT("Infinity")));
		}
		//PrintPolicy::WriteString(Stream, FString::Printf(TEXT("%g"), Value));
		return EJsonxToken::Number;
	}

	FORCEINLINE EJsonxToken WriteValueOnly(double Value)
	{
		// Low-precision mode: only 7 significant digits even for doubles, so
		// large integers and high-precision values WILL lose digits here.
		// (The full-precision %.17g variant is in the #else branch below.)
		if (FGenericPlatformMath::IsFinite(Value))
		{
			PrintPolicy::WriteString(Stream, FString::Printf(TEXT("%.7g"), Value));
		}
		else
		{
			// NOTE(review): bare "Infinity" is not valid strict JSON
			PrintPolicy::WriteString(Stream, FString::Printf(TEXT("Infinity")));
		}
		return EJsonxToken::Number;
	}
#else
	FORCEINLINE EJsonxToken WriteValueOnly(float Value)
	{
		PrintPolicy::WriteString(Stream, FString::Printf(TEXT("%g"), Value));
		return EJsonxToken::Number;
	}

	FORCEINLINE EJsonxToken WriteValueOnly(double Value)
	{
		// Specify 17 significant digits, the most that can ever be useful from a double
		// In particular, this ensures large integers are written correctly
		PrintPolicy::WriteString(Stream, FString::Printf(TEXT("%.17g"), Value));
		return EJsonxToken::Number;
	}
#endif

	FORCEINLINE EJsonxToken WriteValueOnly(int32 Value)
	{
		return WriteValueOnly((int64)Value);
	}

	FORCEINLINE EJsonxToken WriteValueOnly(int64 Value)
	{
		PrintPolicy::WriteString(Stream, FString::Printf(TEXT("%lld"), Value));
		return EJsonxToken::Number;
	}

	FORCEINLINE EJsonxToken WriteValueOnly(TYPE_OF_NULLPTR)
	{
		PrintPolicy::WriteString(Stream, TEXT("null"));
		return EJsonxToken::Null;
	}

	FORCEINLINE EJsonxToken WriteValueOnly(const FString& Value)
	{
		WriteStringValue(Value);
		return EJsonxToken::String;
	}

	// Escapes and quotes the string before writing; virtual so subclasses can override quoting.
	virtual void WriteStringValue( const FString& String )
	{
		FString OutString = EscapeJsonxString(String);
		PrintPolicy::WriteString(Stream, OutString);
	}

	FArchive* const Stream;
	TArray Stack;
	EJsonxToken PreviousTokenWritten;
	int32 IndentLevel;
};

// Writer that accumulates into an in-memory byte buffer and copies the result
// into a caller-provided FString on Close().
template >
class TJsonxStringWriter
	: public TJsonxWriter
{
public:

	static TSharedRef Create( FString* const InStream, int32 InitialIndent = 0 )
	{
		return MakeShareable(new TJsonxStringWriter(InStream, InitialIndent));
	}

public:

	virtual ~TJsonxStringWriter()
	{
		check(this->Stream->Close());
		delete this->Stream;
	}

	virtual bool Close() override
	{
		// Reassemble the TCHAR stream from the raw byte buffer.
		// NOTE(review): per-character += is O(n) reallocations in the worst case;
		// upstream preallocates — acceptable unless strings are huge.
		FString Out;
		for (int32 i = 0; i < Bytes.Num(); i+=sizeof(TCHAR))
		{
			TCHAR* Char = static_cast(static_cast(&Bytes[i]));
			Out += *Char;
		}
		*OutString = Out;
		return TJsonxWriter::Close();
	}

protected:

	TJsonxStringWriter( FString* const InOutString, int32 InitialIndent )
		: TJsonxWriter(new FMemoryWriter(Bytes), InitialIndent)
		, Bytes()
		, OutString(InOutString)
	{ }

private:
	// backing byte buffer written to by the FMemoryWriter archive
	TArray Bytes;
	// destination string filled on Close()
	FString* OutString;
};

// Dispatches Create() to the archive- or string-backed writer implementation.
template >
class TJsonxWriterFactory
{
public:

	static TSharedRef> Create( FArchive* const Stream, int32 InitialIndent = 0 )
	{
		return TJsonxWriter< CharType, PrintPolicy >::Create(Stream, InitialIndent);
	}

	static TSharedRef> Create( FString* const Stream, int32 InitialIndent = 0 )
	{
		return StaticCastSharedRef>(TJsonxStringWriter::Create(Stream, InitialIndent));
	}
};

================================================ FILE: Source/JsonxUtilities/JsonxUtilities.Build.cs ================================================

// Copyright Epic Games, Inc. All Rights Reserved.

using UnrealBuildTool;

public class JsonxUtilities : ModuleRules
{
	public JsonxUtilities( ReadOnlyTargetRules Target ) : base(Target)
	{
		PublicDependencyModuleNames.AddRange(
			new string[] {
				"Core",
				"CoreUObject",
				"Jsonx",
			}
		);
	}
}

================================================ FILE: Source/JsonxUtilities/Private/JsonxObjectConverter.cpp ================================================

// Copyright Epic Games, Inc. All Rights Reserved.
#include "JsonxObjectConverter.h"
#include "Internationalization/Culture.h"
#include "UObject/ObjectMacros.h"
#include "UObject/Class.h"
#include "UObject/UnrealType.h"
#include "UObject/EnumProperty.h"
#include "UObject/TextProperty.h"
#include "UObject/PropertyPortFlags.h"
#include "UObject/Package.h"
#include "Policies/CondensedJsonxPrintPolicy.h"
#include "JsonxObjectWrapper.h"

/**
 * Normalizes a UE identifier into the json naming convention used here:
 * leading character lower-cased and "ID" rewritten to "Id".
 *
 * @param StringIn the identifier to normalize (returned unchanged when empty)
 * @return the normalized copy
 */
FString FJsonxObjectConverter::StandardizeCase(const FString &StringIn)
{
	// this probably won't work for all cases, consider downcaseing the string fully
	if (StringIn.IsEmpty())
	{
		// FIX: indexing [0] on an empty FString is out of bounds
		return StringIn;
	}
	FString FixedString = StringIn;
	FixedString[0] = FChar::ToLower(FixedString[0]); // our json classes/variable start lower case
	FixedString.ReplaceInline(TEXT("ID"), TEXT("Id"), ESearchCase::CaseSensitive); // Id is standard instead of ID, some of our fnames use ID
	return FixedString;
}

namespace
{
// Key injected into exported objects to record the concrete UClass of an instanced sub-object.
const FString ObjectClassNameKey = "_ClassName";

/**
 * Convert property to JSONX, assuming either the property is not an array or the value is an individual array element.
 *
 * NOTE(review): template argument lists appear stripped from this snapshot
 * (e.g. `CastField(Property)`, `MakeShared(StringValue)`, mangled for-loop
 * headers) — compare against upstream FJsonObjectConverter before building.
 *
 * @param Property      property describing the value's type
 * @param Value         pointer to the raw property value
 * @param CheckFlags    property flags that must be present (0 = no filter)
 * @param SkipFlags     property flags that cause a property to be skipped
 * @param ExportCb      optional custom export override, consulted first
 * @param OuterProperty enclosing container property, if any (used for instancing checks)
 * @return the converted json value, or an invalid pointer on failure
 */
TSharedPtr ConvertScalarFPropertyToJsonxValue(FProperty* Property, const void* Value, int64 CheckFlags, int64 SkipFlags, const FJsonxObjectConverter::CustomExportCallback* ExportCb, FProperty* OuterProperty)
{
	// See if there's a custom export callback first, so it can override default behavior
	if (ExportCb && ExportCb->IsBound())
	{
		TSharedPtr CustomValue = ExportCb->Execute(Property, Value);
		if (CustomValue.IsValid())
		{
			return CustomValue;
		}
		// fall through to default cases
	}

	if (FEnumProperty* EnumProperty = CastField(Property))
	{
		// export enums as strings
		UEnum* EnumDef = EnumProperty->GetEnum();
		FString StringValue = EnumDef->GetNameStringByValue(EnumProperty->GetUnderlyingProperty()->GetSignedIntPropertyValue(Value));
		return MakeShared(StringValue);
	}
	else if (FNumericProperty *NumericProperty = CastField(Property))
	{
		// see if it's an enum
		UEnum* EnumDef = NumericProperty->GetIntPropertyEnum();
		if (EnumDef != NULL)
		{
			// export enums as strings
			FString StringValue = EnumDef->GetNameStringByValue(NumericProperty->GetSignedIntPropertyValue(Value));
			return MakeShared(StringValue);
		}

		// We want to export numbers as numbers
		if (NumericProperty->IsFloatingPoint())
		{
			return MakeShared(NumericProperty->GetFloatingPointPropertyValue(Value));
		}
		else if (NumericProperty->IsInteger())
		{
			return MakeShared(NumericProperty->GetSignedIntPropertyValue(Value));
		}

		// fall through to default
	}
	else if (FBoolProperty *BoolProperty = CastField(Property))
	{
		// Export bools as bools
		return MakeShared(BoolProperty->GetPropertyValue(Value));
	}
	else if (FStrProperty *StringProperty = CastField(Property))
	{
		return MakeShared(StringProperty->GetPropertyValue(Value));
	}
	else if (FTextProperty *TextProperty = CastField(Property))
	{
		// FText exported as its display string (localization data is lost)
		return MakeShared(TextProperty->GetPropertyValue(Value).ToString());
	}
	else if (FArrayProperty *ArrayProperty = CastField(Property))
	{
		TArray< TSharedPtr > Out;
		FScriptArrayHelper Helper(ArrayProperty, Value);
		// NOTE(review): loop header garbled in this snapshot (upstream: `i<n; ++i) { TSharedPtr<...> Elem`)
		for (int32 i=0, n=Helper.Num(); i Elem = FJsonxObjectConverter::UPropertyToJsonxValue(ArrayProperty->Inner, Helper.GetRawPtr(i), CheckFlags & ( ~CPF_ParmFlags ), SkipFlags, ExportCb, ArrayProperty);
			if ( Elem.IsValid() )
			{
				// add to the array
				Out.Push(Elem);
			}
		}
		return MakeShared(Out);
	}
	else if ( FSetProperty* SetProperty = CastField(Property) )
	{
		TArray< TSharedPtr > Out;
		FScriptSetHelper Helper(SetProperty, Value);
		// sparse container: count down n for each valid element visited
		for ( int32 i=0, n=Helper.Num(); n; ++i )
		{
			if ( Helper.IsValidIndex(i) )
			{
				TSharedPtr Elem = FJsonxObjectConverter::UPropertyToJsonxValue(SetProperty->ElementProp, Helper.GetElementPtr(i), CheckFlags & ( ~CPF_ParmFlags ), SkipFlags, ExportCb, SetProperty);
				if ( Elem.IsValid() )
				{
					// add to the array
					Out.Push(Elem);
				}

				--n;
			}
		}
		return MakeShared(Out);
	}
	else if ( FMapProperty* MapProperty = CastField(Property) )
	{
		TSharedRef Out = MakeShared();
		FScriptMapHelper Helper(MapProperty, Value);
		// sparse container: count down n for each valid pair visited
		for ( int32 i=0, n = Helper.Num(); n; ++i )
		{
			if ( Helper.IsValidIndex(i) )
			{
				TSharedPtr KeyElement = FJsonxObjectConverter::UPropertyToJsonxValue(MapProperty->KeyProp, Helper.GetKeyPtr(i), CheckFlags & ( ~CPF_ParmFlags ), SkipFlags, ExportCb, MapProperty);
				TSharedPtr ValueElement = FJsonxObjectConverter::UPropertyToJsonxValue(MapProperty->ValueProp, Helper.GetValuePtr(i), CheckFlags & ( ~CPF_ParmFlags ), SkipFlags, ExportCb, MapProperty);
				if ( KeyElement.IsValid() && ValueElement.IsValid() )
				{
					// json object keys must be strings; fall back to ExportTextItem for non-string keys
					FString KeyString;
					if (!KeyElement->TryGetString(KeyString))
					{
						MapProperty->KeyProp->ExportTextItem(KeyString, Helper.GetKeyPtr(i), nullptr, nullptr, 0);
						if (KeyString.IsEmpty())
						{
							// FIX: terminated the UE_LOG statement with ';'
							UE_LOG(LogJsonx, Error, TEXT("Unable to convert key to string for property %s."), *MapProperty->GetName());
							KeyString = FString::Printf(TEXT("Unparsed Key %d"), i);
						}
					}

					Out->SetField(KeyString, ValueElement);
				}

				--n;
			}
		}
		return MakeShared(Out);
	}
	else if (FStructProperty *StructProperty = CastField(Property))
	{
		UScriptStruct::ICppStructOps* TheCppStructOps = StructProperty->Struct->GetCppStructOps();
		// Intentionally exclude the JSONX Object wrapper, which specifically needs to export JSONX in an object representation instead of a string
		if (StructProperty->Struct != FJsonxObjectWrapper::StaticStruct() && TheCppStructOps && TheCppStructOps->HasExportTextItem())
		{
			FString OutValueStr;
			TheCppStructOps->ExportTextItem(OutValueStr, Value, nullptr, nullptr, PPF_None, nullptr);
			return MakeShared(OutValueStr);
		}

		TSharedRef Out = MakeShared();
		if (FJsonxObjectConverter::UStructToJsonxObject(StructProperty->Struct, Value, Out, CheckFlags & (~CPF_ParmFlags), SkipFlags, ExportCb))
		{
			return MakeShared(Out);
		}
	}
	else if (FObjectProperty* ObjectProperty = CastField(Property))
	{
		// Instanced properties should be copied by value, while normal UObject* properties should output as asset references
		UObject* Object = ObjectProperty->GetObjectPropertyValue(Value);
		if (Object && (ObjectProperty->HasAnyPropertyFlags(CPF_PersistentInstance) || (OuterProperty && OuterProperty->HasAnyPropertyFlags(CPF_PersistentInstance))))
		{
			TSharedRef Out = MakeShared();
			// record the concrete class so imports can reconstruct the right type
			Out->SetStringField(ObjectClassNameKey, Object->GetClass()->GetFName().ToString());
			if (FJsonxObjectConverter::UStructToJsonxObject(ObjectProperty->GetObjectPropertyValue(Value)->GetClass(), Object, Out, CheckFlags, SkipFlags, ExportCb))
			{
				TSharedRef JsonxObject = MakeShared(Out);
				JsonxObject->Type = EJsonx::Object;
				return JsonxObject;
			}
		}
		else
		{
			// export as an asset reference path string
			FString StringValue;
			Property->ExportTextItem(StringValue, Value, nullptr, nullptr, PPF_None);
			return MakeShared(StringValue);
		}
	}
	else
	{
		// Default to export as string for everything else
		FString StringValue;
		Property->ExportTextItem(StringValue, Value, NULL, NULL, PPF_None);
		return MakeShared(StringValue);
	}

	// invalid
	return TSharedPtr();
}
}

PRAGMA_DISABLE_DEPRECATION_WARNINGS
// Default custom-export callback: recursively exports non-transient UObject
// properties by value instead of as asset-reference strings.
TSharedPtr FJsonxObjectConverter::ObjectJsonxCallback(FProperty* Property, const void* Value)
{
	if (FObjectProperty* ObjectProperty = CastField(Property))
	{
		if (!ObjectProperty->HasAnyFlags(RF_Transient)) // We are taking Transient to mean we don't want to serialize to Jsonx either (could make a new flag if nessasary)
		{
			TSharedRef Out = MakeShared();

			CustomExportCallback CustomCB;
			CustomCB.BindStatic(FJsonxObjectConverter::ObjectJsonxCallback);

			void** PtrToValuePtr = (void**)Value;

			if (FJsonxObjectConverter::UStructToJsonxObject(ObjectProperty->PropertyClass, (*PtrToValuePtr), Out, 0, 0, &CustomCB))
			{
				return MakeShared(Out);
			}
		}
	}

	// invalid
	return TSharedPtr();
}
PRAGMA_ENABLE_DEPRECATION_WARNINGS

/**
 * Converts a property value to json, expanding fixed-size C-array properties
 * (ArrayDim > 1) into a json array of per-element conversions.
 */
TSharedPtr FJsonxObjectConverter::UPropertyToJsonxValue(FProperty* Property, const void* Value, int64 CheckFlags, int64 SkipFlags, const CustomExportCallback* ExportCb, FProperty* OuterProperty)
{
	if (Property->ArrayDim == 1)
	{
		return ConvertScalarFPropertyToJsonxValue(Property, Value, CheckFlags, SkipFlags, ExportCb, OuterProperty);
	}

	TArray< TSharedPtr > Array;
	for (int Index = 0; Index != Property->ArrayDim; ++Index)
	{
		// step by the element size through the fixed-size array storage
		Array.Add(ConvertScalarFPropertyToJsonxValue(Property, (char*)Value + Index * Property->ElementSize, CheckFlags, SkipFlags, ExportCb, OuterProperty));
	}
	return MakeShared(Array);
}

// Thin wrapper: exports struct members directly into the output object's Values map.
bool FJsonxObjectConverter::UStructToJsonxObject(const UStruct* StructDefinition, const void* Struct, TSharedRef OutJsonxObject, int64 CheckFlags, int64 SkipFlags, const CustomExportCallback* ExportCb)
{
	return UStructToJsonxAttributes(StructDefinition, Struct, OutJsonxObject->Values, CheckFlags, SkipFlags, ExportCb);
}

/**
 * Exports each eligible property of the struct into the attribute map, keyed by
 * its StandardizeCase'd name. Returns false on the first unconvertible property.
 */
bool FJsonxObjectConverter::UStructToJsonxAttributes(const UStruct* StructDefinition, const void* Struct, TMap< FString, TSharedPtr >& OutJsonxAttributes, int64 CheckFlags, int64 SkipFlags, const CustomExportCallback* ExportCb)
{
	if (SkipFlags == 0)
	{
		// If we have no specified skip flags, skip deprecated, transient and skip serialization by default when writing
		SkipFlags |= CPF_Deprecated | CPF_Transient;
	}

	if (StructDefinition == FJsonxObjectWrapper::StaticStruct())
	{
		// Just copy it into the object
		const FJsonxObjectWrapper* ProxyObject = (const FJsonxObjectWrapper *)Struct;
		if (ProxyObject->JsonxObject.IsValid())
		{
			OutJsonxAttributes = ProxyObject->JsonxObject->Values;
		}
		return true;
	}

	for (TFieldIterator It(StructDefinition); It; ++It)
	{
		FProperty* Property = *It;

		// Check to see if we should ignore this property
		if (CheckFlags != 0 && !Property->HasAnyPropertyFlags(CheckFlags))
		{
			continue;
		}
		if (Property->HasAnyPropertyFlags(SkipFlags))
		{
			continue;
		}

		FString VariableName = StandardizeCase(Property->GetName());
		const void* Value = Property->ContainerPtrToValuePtr(Struct);

		// convert the property to a FJsonxValue
		TSharedPtr JsonxValue = UPropertyToJsonxValue(Property, Value, CheckFlags, SkipFlags, ExportCb);
		if (!JsonxValue.IsValid())
		{
			FFieldClass* PropClass = Property->GetClass();
			UE_LOG(LogJsonx, Error, TEXT("UStructToJsonxObject - Unhandled property type '%s': %s"), *PropClass->GetName(), *Property->GetPathName());
			return false;
		}

		// set the value on the output object
		OutJsonxAttributes.Add(VariableName, JsonxValue);
	}

	return true;
}
template bool UStructToJsonxObjectStringInternal(const TSharedRef& JsonxObject, FString& OutJsonxString, int32 Indent) { TSharedRef > JsonxWriter = TJsonxWriterFactory::Create(&OutJsonxString, Indent); bool bSuccess = FJsonxSerializer::Serialize(JsonxObject, JsonxWriter); JsonxWriter->Close(); return bSuccess; } bool FJsonxObjectConverter::UStructToJsonxObjectString(const UStruct* StructDefinition, const void* Struct, FString& OutJsonxString, int64 CheckFlags, int64 SkipFlags, int32 Indent, const CustomExportCallback* ExportCb, bool bPrettyPrint) { TSharedRef JsonxObject = MakeShared(); if (UStructToJsonxObject(StructDefinition, Struct, JsonxObject, CheckFlags, SkipFlags, ExportCb)) { bool bSuccess = false; if (bPrettyPrint) { bSuccess = UStructToJsonxObjectStringInternal >(JsonxObject, OutJsonxString, Indent); } else { bSuccess = UStructToJsonxObjectStringInternal >(JsonxObject, OutJsonxString, Indent); } if (bSuccess) { return true; } else { UE_LOG(LogJsonx, Warning, TEXT("UStructToJsonxObjectString - Unable to write out json")); } } return false; } //static bool FJsonxObjectConverter::GetTextFromObject(const TSharedRef& Obj, FText& TextOut) { // get the prioritized culture name list FCultureRef CurrentCulture = FInternationalization::Get().GetCurrentCulture(); TArray CultureList = CurrentCulture->GetPrioritizedParentCultureNames(); // try to follow the fall back chain that the engine uses FString TextString; for (const FString& CultureCode : CultureList) { if (Obj->TryGetStringField(CultureCode, TextString)) { TextOut = FText::FromString(TextString); return true; } } // try again but only search on the locale region (in the localized data). This is a common omission (i.e. en-US source text should be used if no en is defined) for (const FString& LocaleToMatch : CultureList) { int32 SeparatorPos; // only consider base language entries in culture chain (i.e. 
"en") if (!LocaleToMatch.FindChar('-', SeparatorPos)) { for (const auto& Pair : Obj->Values) { // only consider coupled entries now (base ones would have been matched on first path) (i.e. "en-US") if (Pair.Key.FindChar('-', SeparatorPos)) { if (Pair.Key.StartsWith(LocaleToMatch)) { TextOut = FText::FromString(Pair.Value->AsString()); return true; } } } } } // no luck, is this possibly an unrelated json object? return false; } namespace { bool JsonxValueToFPropertyWithContainer(const TSharedPtr& JsonxValue, FProperty* Property, void* OutValue, const UStruct* ContainerStruct, void* Container, int64 CheckFlags, int64 SkipFlags); bool JsonxAttributesToUStructWithContainer(const TMap< FString, TSharedPtr >& JsonxAttributes, const UStruct* StructDefinition, void* OutStruct, const UStruct* ContainerStruct, void* Container, int64 CheckFlags, int64 SkipFlags); /** Convert JSONX to property, assuming either the property is not an array or the value is an individual array element */ bool ConvertScalarJsonxValueToFPropertyWithContainer(const TSharedPtr& JsonxValue, FProperty* Property, void* OutValue, const UStruct* ContainerStruct, void* Container, int64 CheckFlags, int64 SkipFlags) { if (FEnumProperty* EnumProperty = CastField(Property)) { if (JsonxValue->Type == EJsonx::String) { // see if we were passed a string for the enum const UEnum* Enum = EnumProperty->GetEnum(); check(Enum); FString StrValue = JsonxValue->AsString(); int64 IntValue = Enum->GetValueByName(FName(*StrValue)); if (IntValue == INDEX_NONE) { UE_LOG(LogJsonx, Error, TEXT("JsonxValueToUProperty - Unable import enum %s from string value %s for property %s"), *Enum->CppType, *StrValue, *Property->GetNameCPP()); return false; } EnumProperty->GetUnderlyingProperty()->SetIntPropertyValue(OutValue, IntValue); } else { // AsNumber will log an error for completely inappropriate types (then give us a default) EnumProperty->GetUnderlyingProperty()->SetIntPropertyValue(OutValue, (int64)JsonxValue->AsNumber()); } } 
// --- ConvertScalarJsonxValueToFPropertyWithContainer (continued): remaining property-type branches ---
		else if (FNumericProperty *NumericProperty = CastField<FNumericProperty>(Property))
		{
			if (NumericProperty->IsEnum() && JsonxValue->Type == EJsonx::String)
			{
				// see if we were passed a string for the enum
				const UEnum* Enum = NumericProperty->GetIntPropertyEnum();
				check(Enum); // should be assured by IsEnum()
				FString StrValue = JsonxValue->AsString();
				int64 IntValue = Enum->GetValueByName(FName(*StrValue));
				if (IntValue == INDEX_NONE)
				{
					UE_LOG(LogJsonx, Error, TEXT("JsonxValueToUProperty - Unable import enum %s from string value %s for property %s"), *Enum->CppType, *StrValue, *Property->GetNameCPP());
					return false;
				}
				NumericProperty->SetIntPropertyValue(OutValue, IntValue);
			}
			else if (NumericProperty->IsFloatingPoint())
			{
				// AsNumber will log an error for completely inappropriate types (then give us a default)
				NumericProperty->SetFloatingPointPropertyValue(OutValue, JsonxValue->AsNumber());
			}
			else if (NumericProperty->IsInteger())
			{
				if (JsonxValue->Type == EJsonx::String)
				{
					// parse string -> int64 ourselves so we don't lose any precision going through AsNumber (aka double)
					NumericProperty->SetIntPropertyValue(OutValue, FCString::Atoi64(*JsonxValue->AsString()));
				}
				else
				{
					// AsNumber will log an error for completely inappropriate types (then give us a default)
					NumericProperty->SetIntPropertyValue(OutValue, (int64)JsonxValue->AsNumber());
				}
			}
			else
			{
				UE_LOG(LogJsonx, Error, TEXT("JsonxValueToUProperty - Unable to set numeric property type %s for property %s"), *Property->GetClass()->GetName(), *Property->GetNameCPP());
				return false;
			}
		}
		else if (FBoolProperty *BoolProperty = CastField<FBoolProperty>(Property))
		{
			// AsBool will log an error for completely inappropriate types (then give us a default)
			BoolProperty->SetPropertyValue(OutValue, JsonxValue->AsBool());
		}
		else if (FStrProperty *StringProperty = CastField<FStrProperty>(Property))
		{
			// AsString will log an error for completely inappropriate types (then give us a default)
			StringProperty->SetPropertyValue(OutValue, JsonxValue->AsString());
		}
		else if (FArrayProperty *ArrayProperty = CastField<FArrayProperty>(Property))
		{
			if (JsonxValue->Type == EJsonx::Array)
			{
				TArray< TSharedPtr<FJsonxValue> > ArrayValue = JsonxValue->AsArray();
				int32 ArrLen = ArrayValue.Num();

				// make the output array size match
				FScriptArrayHelper Helper(ArrayProperty, OutValue);
				Helper.Resize(ArrLen);

				// set the property values
				for (int32 i = 0; i < ArrLen; ++i)
				{
					const TSharedPtr<FJsonxValue>& ArrayValueItem = ArrayValue[i];
					if (ArrayValueItem.IsValid() && !ArrayValueItem->IsNull())
					{
						if (!JsonxValueToFPropertyWithContainer(ArrayValueItem, ArrayProperty->Inner, Helper.GetRawPtr(i), ContainerStruct, Container, CheckFlags & (~CPF_ParmFlags), SkipFlags))
						{
							UE_LOG(LogJsonx, Error, TEXT("JsonxValueToUProperty - Unable to deserialize array element [%d] for property %s"), i, *Property->GetNameCPP());
							return false;
						}
					}
				}
			}
			else
			{
				UE_LOG(LogJsonx, Error, TEXT("JsonxValueToUProperty - Attempted to import TArray from non-array JSONX key for property %s"), *Property->GetNameCPP());
				return false;
			}
		}
		else if (FMapProperty* MapProperty = CastField<FMapProperty>(Property))
		{
			if (JsonxValue->Type == EJsonx::Object)
			{
				TSharedPtr<FJsonxObject> ObjectValue = JsonxValue->AsObject();

				FScriptMapHelper Helper(MapProperty, OutValue);
				check(ObjectValue);

				int32 MapSize = ObjectValue->Values.Num();
				Helper.EmptyValues(MapSize);

				// set the property values
				for (const auto& Entry : ObjectValue->Values)
				{
					if (Entry.Value.IsValid() && !Entry.Value->IsNull())
					{
						int32 NewIndex = Helper.AddDefaultValue_Invalid_NeedsRehash();

						// JSONX object keys are strings; funnel the key through the normal import path
						TSharedPtr<FJsonxValueString> TempKeyValue = MakeShared<FJsonxValueString>(Entry.Key);

						const bool bKeySuccess = JsonxValueToFPropertyWithContainer(TempKeyValue, MapProperty->KeyProp, Helper.GetKeyPtr(NewIndex), ContainerStruct, Container, CheckFlags & (~CPF_ParmFlags), SkipFlags);
						const bool bValueSuccess = JsonxValueToFPropertyWithContainer(Entry.Value, MapProperty->ValueProp, Helper.GetValuePtr(NewIndex), ContainerStruct, Container, CheckFlags & (~CPF_ParmFlags), SkipFlags);

						if (!(bKeySuccess && bValueSuccess))
						{
							UE_LOG(LogJsonx, Error, TEXT("JsonxValueToUProperty - Unable to deserialize map element [key: %s] for property %s"), *Entry.Key, *Property->GetNameCPP());
							return false;
						}
					}
				}

				Helper.Rehash();
			}
			else
			{
				UE_LOG(LogJsonx, Error, TEXT("JsonxValueToUProperty - Attempted to import TMap from non-object JSONX key for property %s"), *Property->GetNameCPP());
				return false;
			}
		}
		else if (FSetProperty* SetProperty = CastField<FSetProperty>(Property))
		{
			if (JsonxValue->Type == EJsonx::Array)
			{
				TArray< TSharedPtr<FJsonxValue> > ArrayValue = JsonxValue->AsArray();
				int32 ArrLen = ArrayValue.Num();

				FScriptSetHelper Helper(SetProperty, OutValue);

				// set the property values
				for (int32 i = 0; i < ArrLen; ++i)
				{
					const TSharedPtr<FJsonxValue>& ArrayValueItem = ArrayValue[i];
					if (ArrayValueItem.IsValid() && !ArrayValueItem->IsNull())
					{
						int32 NewIndex = Helper.AddDefaultValue_Invalid_NeedsRehash();
						if (!JsonxValueToFPropertyWithContainer(ArrayValueItem, SetProperty->ElementProp, Helper.GetElementPtr(NewIndex), ContainerStruct, Container, CheckFlags & (~CPF_ParmFlags), SkipFlags))
						{
							UE_LOG(LogJsonx, Error, TEXT("JsonxValueToUProperty - Unable to deserialize set element [%d] for property %s"), i, *Property->GetNameCPP());
							return false;
						}
					}
				}

				Helper.Rehash();
			}
			else
			{
				UE_LOG(LogJsonx, Error, TEXT("JsonxValueToUProperty - Attempted to import TSet from non-array JSONX key for property %s"), *Property->GetNameCPP());
				return false;
			}
		}
		else if (FTextProperty* TextProperty = CastField<FTextProperty>(Property))
		{
			if (JsonxValue->Type == EJsonx::String)
			{
				// assume this string is already localized, so import as invariant
				TextProperty->SetPropertyValue(OutValue, FText::FromString(JsonxValue->AsString()));
			}
			else if (JsonxValue->Type == EJsonx::Object)
			{
				TSharedPtr<FJsonxObject> Obj = JsonxValue->AsObject();
				check(Obj.IsValid()); // should not fail if Type == EJsonx::Object

				// import the subvalue as a culture invariant string
				FText Text;
				if (!FJsonxObjectConverter::GetTextFromObject(Obj.ToSharedRef(), Text))
				{
					UE_LOG(LogJsonx, Error, TEXT("JsonxValueToUProperty - Attempted to import FText from JSONX object with invalid keys for property %s"), *Property->GetNameCPP());
					return false;
				}
				TextProperty->SetPropertyValue(OutValue, Text);
			}
			else
			{
				UE_LOG(LogJsonx, Error, TEXT("JsonxValueToUProperty - Attempted to import FText from JSONX that was neither string nor object for property %s"), *Property->GetNameCPP());
				return false;
			}
		}
		else if (FStructProperty *StructProperty = CastField<FStructProperty>(Property))
		{
			static const FName NAME_DateTime(TEXT("DateTime"));
			static const FName NAME_Color(TEXT("Color"));
			static const FName NAME_LinearColor(TEXT("LinearColor"));
			if (JsonxValue->Type == EJsonx::Object)
			{
				TSharedPtr<FJsonxObject> Obj = JsonxValue->AsObject();
				check(Obj.IsValid()); // should not fail if Type == EJsonx::Object
				if (!JsonxAttributesToUStructWithContainer(Obj->Values, StructProperty->Struct, OutValue, ContainerStruct, Container, CheckFlags & (~CPF_ParmFlags), SkipFlags))
				{
					UE_LOG(LogJsonx, Error, TEXT("JsonxValueToUProperty - FJsonxObjectConverter::JsonxObjectToUStruct failed for property %s"), *Property->GetNameCPP());
					return false;
				}
			}
			else if (JsonxValue->Type == EJsonx::String && StructProperty->Struct->GetFName() == NAME_LinearColor)
			{
				// "#RRGGBBAA" hex string -> FLinearColor (via FColor intermediate)
				FLinearColor& ColorOut = *(FLinearColor*)OutValue;
				FString ColorString = JsonxValue->AsString();

				FColor IntermediateColor;
				IntermediateColor = FColor::FromHex(ColorString);

				ColorOut = IntermediateColor;
			}
			else if (JsonxValue->Type == EJsonx::String && StructProperty->Struct->GetFName() == NAME_Color)
			{
				FColor& ColorOut = *(FColor*)OutValue;
				FString ColorString = JsonxValue->AsString();
				ColorOut = FColor::FromHex(ColorString);
			}
			else if (JsonxValue->Type == EJsonx::String && StructProperty->Struct->GetFName() == NAME_DateTime)
			{
				FString DateString = JsonxValue->AsString();
				FDateTime& DateTimeOut = *(FDateTime*)OutValue;
				if (DateString == TEXT("min"))
				{
					// min representable value for our date struct. Actual date may vary by platform (this is used for sorting)
					DateTimeOut = FDateTime::MinValue();
				}
				else if (DateString == TEXT("max"))
				{
					// max representable value for our date struct. Actual date may vary by platform (this is used for sorting)
					DateTimeOut = FDateTime::MaxValue();
				}
				else if (DateString == TEXT("now"))
				{
					// this value's not really meaningful from json serialization (since we don't know timezone) but handle it anyway since we're handling the other keywords
					DateTimeOut = FDateTime::UtcNow();
				}
				else if (FDateTime::ParseIso8601(*DateString, DateTimeOut))
				{
					// ok
				}
				else if (FDateTime::Parse(DateString, DateTimeOut))
				{
					// ok
				}
				else
				{
					UE_LOG(LogJsonx, Error, TEXT("JsonxValueToUProperty - Unable to import FDateTime for property %s"), *Property->GetNameCPP());
					return false;
				}
			}
			else if (JsonxValue->Type == EJsonx::String && StructProperty->Struct->GetCppStructOps() && StructProperty->Struct->GetCppStructOps()->HasImportTextItem())
			{
				UScriptStruct::ICppStructOps* TheCppStructOps = StructProperty->Struct->GetCppStructOps();

				FString ImportTextString = JsonxValue->AsString();
				const TCHAR* ImportTextPtr = *ImportTextString;
				if (!TheCppStructOps->ImportTextItem(ImportTextPtr, OutValue, PPF_None, nullptr, (FOutputDevice*)GWarn))
				{
					// Fall back to trying the tagged property approach if custom ImportTextItem couldn't get it done
					Property->ImportText(ImportTextPtr, OutValue, PPF_None, nullptr);
				}
			}
			else if (JsonxValue->Type == EJsonx::String)
			{
				FString ImportTextString = JsonxValue->AsString();
				const TCHAR* ImportTextPtr = *ImportTextString;
				Property->ImportText(ImportTextPtr, OutValue, PPF_None, nullptr);
			}
			else
			{
				UE_LOG(LogJsonx, Error, TEXT("JsonxValueToUProperty - Attempted to import UStruct from non-object JSONX key for property %s"), *Property->GetNameCPP());
				return false;
			}
		}
		else if (FObjectProperty *ObjectProperty = CastField<FObjectProperty>(Property))
		{
			if (JsonxValue->Type == EJsonx::Object)
			{
				// Outer defaults to the transient package unless the containing struct is itself a UObject
				UObject* Outer = GetTransientPackage();
				if
(ContainerStruct->IsChildOf(UObject::StaticClass())) { Outer = (UObject*)Container; } TSharedPtr Obj = JsonxValue->AsObject(); UClass* PropertyClass = ObjectProperty->PropertyClass; // If a specific subclass was stored in the Jsonx, use that instead of the PropertyClass FString ClassString = Obj->GetStringField(ObjectClassNameKey); Obj->RemoveField(ObjectClassNameKey); if (!ClassString.IsEmpty()) { UClass* FoundClass = FindObject(ANY_PACKAGE, *ClassString); if (FoundClass) { PropertyClass = FoundClass; } } UObject* createdObj = StaticAllocateObject(PropertyClass, Outer, NAME_None, EObjectFlags::RF_NoFlags, EInternalObjectFlags::None, false); (*PropertyClass->ClassConstructor)(FObjectInitializer(createdObj, PropertyClass->ClassDefaultObject, false, false)); ObjectProperty->SetObjectPropertyValue(OutValue, createdObj); check(Obj.IsValid()); // should not fail if Type == EJsonx::Object if (!JsonxAttributesToUStructWithContainer(Obj->Values, PropertyClass, createdObj, PropertyClass, createdObj, CheckFlags & (~CPF_ParmFlags), SkipFlags)) { UE_LOG(LogJsonx, Error, TEXT("JsonxValueToUProperty - FJsonxObjectConverter::JsonxObjectToUStruct failed for property %s"), *Property->GetNameCPP()); return false; } } else if (JsonxValue->Type == EJsonx::String) { // Default to expect a string for everything else if (Property->ImportText(*JsonxValue->AsString(), OutValue, 0, NULL) == NULL) { UE_LOG(LogJsonx, Error, TEXT("JsonxValueToUProperty - Unable import property type %s from string value for property %s"), *Property->GetClass()->GetName(), *Property->GetNameCPP()); return false; } } } else { // Default to expect a string for everything else if (Property->ImportText(*JsonxValue->AsString(), OutValue, 0, NULL) == NULL) { UE_LOG(LogJsonx, Error, TEXT("JsonxValueToUProperty - Unable import property type %s from string value for property %s"), *Property->GetClass()->GetName(), *Property->GetNameCPP()); return false; } } return true; } bool JsonxValueToFPropertyWithContainer(const 
TSharedPtr& JsonxValue, FProperty* Property, void* OutValue, const UStruct* ContainerStruct, void* Container, int64 CheckFlags, int64 SkipFlags) { if (!JsonxValue.IsValid()) { UE_LOG(LogJsonx, Error, TEXT("JsonxValueToUProperty - Invalid value JSONX key")); return false; } bool bArrayOrSetProperty = Property->IsA() || Property->IsA(); bool bJsonxArray = JsonxValue->Type == EJsonx::Array; if (!bJsonxArray) { if (bArrayOrSetProperty) { UE_LOG(LogJsonx, Error, TEXT("JsonxValueToUProperty - Attempted to import TArray from non-array JSONX key")); return false; } if (Property->ArrayDim != 1) { UE_LOG(LogJsonx, Warning, TEXT("Ignoring excess properties when deserializing %s"), *Property->GetName()); } return ConvertScalarJsonxValueToFPropertyWithContainer(JsonxValue, Property, OutValue, ContainerStruct, Container, CheckFlags, SkipFlags); } // In practice, the ArrayDim == 1 check ought to be redundant, since nested arrays of FPropertys are not supported if (bArrayOrSetProperty && Property->ArrayDim == 1) { // Read into TArray return ConvertScalarJsonxValueToFPropertyWithContainer(JsonxValue, Property, OutValue, ContainerStruct, Container, CheckFlags, SkipFlags); } // We're deserializing a JSONX array const auto& ArrayValue = JsonxValue->AsArray(); if (Property->ArrayDim < ArrayValue.Num()) { UE_LOG(LogJsonx, Warning, TEXT("Ignoring excess properties when deserializing %s"), *Property->GetName()); } // Read into native array int ItemsToRead = FMath::Clamp(ArrayValue.Num(), 0, Property->ArrayDim); for (int Index = 0; Index != ItemsToRead; ++Index) { if (!ConvertScalarJsonxValueToFPropertyWithContainer(ArrayValue[Index], Property, (char*)OutValue + Index * Property->ElementSize, ContainerStruct, Container, CheckFlags, SkipFlags)) { return false; } } return true; } bool JsonxAttributesToUStructWithContainer(const TMap< FString, TSharedPtr >& JsonxAttributes, const UStruct* StructDefinition, void* OutStruct, const UStruct* ContainerStruct, void* Container, int64 CheckFlags, 
int64 SkipFlags) { if (StructDefinition == FJsonxObjectWrapper::StaticStruct()) { // Just copy it into the object FJsonxObjectWrapper* ProxyObject = (FJsonxObjectWrapper*)OutStruct; ProxyObject->JsonxObject = MakeShared(); ProxyObject->JsonxObject->Values = JsonxAttributes; return true; } int32 NumUnclaimedProperties = JsonxAttributes.Num(); if (NumUnclaimedProperties <= 0) { return true; } // iterate over the struct properties for (TFieldIterator PropIt(StructDefinition); PropIt; ++PropIt) { FProperty* Property = *PropIt; // Check to see if we should ignore this property if (CheckFlags != 0 && !Property->HasAnyPropertyFlags(CheckFlags)) { continue; } if (Property->HasAnyPropertyFlags(SkipFlags)) { continue; } // find a json value matching this property name const TSharedPtr* JsonxValue = JsonxAttributes.Find(Property->GetName()); if (!JsonxValue) { // we allow values to not be found since this mirrors the typical UObject mantra that all the fields are optional when deserializing continue; } if (JsonxValue->IsValid() && !(*JsonxValue)->IsNull()) { void* Value = Property->ContainerPtrToValuePtr(OutStruct); if (!JsonxValueToFPropertyWithContainer(*JsonxValue, Property, Value, ContainerStruct, Container, CheckFlags, SkipFlags)) { UE_LOG(LogJsonx, Error, TEXT("JsonxObjectToUStruct - Unable to parse %s.%s from JSONX"), *StructDefinition->GetName(), *Property->GetName()); return false; } } if (--NumUnclaimedProperties <= 0) { // If we found all properties that were in the JsonxAttributes map, there is no reason to keep looking for more. 
break; } } return true; } } bool FJsonxObjectConverter::JsonxValueToUProperty(const TSharedPtr& JsonxValue, FProperty* Property, void* OutValue, int64 CheckFlags, int64 SkipFlags) { return JsonxValueToFPropertyWithContainer(JsonxValue, Property, OutValue, nullptr, nullptr, CheckFlags, SkipFlags); } bool FJsonxObjectConverter::JsonxObjectToUStruct(const TSharedRef& JsonxObject, const UStruct* StructDefinition, void* OutStruct, int64 CheckFlags, int64 SkipFlags) { return JsonxAttributesToUStruct(JsonxObject->Values, StructDefinition, OutStruct, CheckFlags, SkipFlags); } bool FJsonxObjectConverter::JsonxAttributesToUStruct(const TMap< FString, TSharedPtr >& JsonxAttributes, const UStruct* StructDefinition, void* OutStruct, int64 CheckFlags, int64 SkipFlags) { return JsonxAttributesToUStructWithContainer(JsonxAttributes, StructDefinition, OutStruct, StructDefinition, OutStruct, CheckFlags, SkipFlags); } //static bool FJsonxObjectConverter::GetTextFromField(const FString& FieldName, const TSharedPtr& FieldValue, FText& TextOut) { if (FieldValue.IsValid()) { switch (FieldValue->Type) { case EJsonx::Number: { // number TextOut = FText::AsNumber(FieldValue->AsNumber()); return true; } case EJsonx::String: { if (FieldName.StartsWith(TEXT("date-"))) { FDateTime Dte; if (FDateTime::ParseIso8601(*FieldValue->AsString(), Dte)) { TextOut = FText::AsDate(Dte); return true; } } else if (FieldName.StartsWith(TEXT("datetime-"))) { FDateTime Dte; if (FDateTime::ParseIso8601(*FieldValue->AsString(), Dte)) { TextOut = FText::AsDateTime(Dte); return true; } } else { // culture invariant string TextOut = FText::FromString(FieldValue->AsString()); return true; } break; } case EJsonx::Object: { // localized string if (FJsonxObjectConverter::GetTextFromObject(FieldValue->AsObject().ToSharedRef(), TextOut)) { return true; } UE_LOG(LogJsonx, Error, TEXT("Unable to apply Jsonx parameter %s (could not parse object)"), *FieldName); break; } default: { UE_LOG(LogJsonx, Error, TEXT("Unable to 
apply Jsonx parameter %s (bad type)"), *FieldName); break; } } } return false; } FFormatNamedArguments FJsonxObjectConverter::ParseTextArgumentsFromJsonx(const TSharedPtr& JsonxObject) { FFormatNamedArguments NamedArgs; if (JsonxObject.IsValid()) { for (const auto& It : JsonxObject->Values) { FText TextValue; if (GetTextFromField(It.Key, It.Value, TextValue)) { NamedArgs.Emplace(It.Key, TextValue); } } } return NamedArgs; } ================================================ FILE: Source/JsonxUtilities/Private/JsonxObjectWrapper.cpp ================================================ // Copyright Epic Games, Inc. All Rights Reserved. #include "JsonxObjectWrapper.h" #include "Policies/CondensedJsonxPrintPolicy.h" #include "Serialization/JsonxReader.h" #include "Serialization/JsonxSerializer.h" bool FJsonxObjectWrapper::ImportTextItem(const TCHAR*& Buffer, int32 PortFlags, UObject* Parent, FOutputDevice* ErrorText) { // read JSONX string from Buffer FString Jsonx; if (*Buffer == TCHAR('"')) { int32 NumCharsRead = 0; if (!FParse::QuotedString(Buffer, Jsonx, &NumCharsRead)) { ErrorText->Logf(ELogVerbosity::Warning, TEXT("FJsonxObjectWrapper::ImportTextItem: Bad quoted string: %s\n"), Buffer); return false; } Buffer += NumCharsRead; } else { // consume the rest of the string (this happens on Paste) Jsonx = Buffer; Buffer += Jsonx.Len(); } // empty string yields empty shared pointer if (Jsonx.IsEmpty()) { JsonxString.Empty(); JsonxObject.Reset(); return true; } // parse the json if (!JsonxObjectFromString(Jsonx)) { if (ErrorText) { ErrorText->Logf(ELogVerbosity::Warning, TEXT("FJsonxObjectWrapper::ImportTextItem - Unable to parse json: %s\n"), *Jsonx); } return false; } JsonxString = Jsonx; return true; } bool FJsonxObjectWrapper::ExportTextItem(FString& ValueStr, FJsonxObjectWrapper const& DefaultValue, UObject* Parent, int32 PortFlags, UObject* ExportRootScope) const { // empty pointer yields empty string if (!JsonxObject.IsValid()) { ValueStr.Empty(); return true; } // 
serialize the json return JsonxObjectToString(ValueStr); } void FJsonxObjectWrapper::PostSerialize(const FArchive& Ar) { if (!JsonxString.IsEmpty()) { // try to parse JsonxString if (!JsonxObjectFromString(JsonxString)) { // do not abide a string that won't parse JsonxString.Empty(); } } } bool FJsonxObjectWrapper::JsonxObjectToString(FString& Str) const { TSharedRef>> JsonxWriter = TJsonxWriterFactory>::Create(&Str, 0); return FJsonxSerializer::Serialize(JsonxObject.ToSharedRef(), JsonxWriter, true); } bool FJsonxObjectWrapper::JsonxObjectFromString(const FString& Str) { TSharedRef> JsonxReader = TJsonxReaderFactory<>::Create(Str); return FJsonxSerializer::Deserialize(JsonxReader, JsonxObject); } ================================================ FILE: Source/JsonxUtilities/Private/JsonxUtilitiesModule.cpp ================================================ // Copyright Epic Games, Inc. All Rights Reserved. #include "CoreMinimal.h" #include "Modules/ModuleManager.h" IMPLEMENT_MODULE( FDefaultModuleImpl, JsonxUtilities ); ================================================ FILE: Source/JsonxUtilities/Public/JsonxDomBuilder.h ================================================ // Copyright Epic Games, Inc. All Rights Reserved. 
#pragma once #include "Dom/JsonxValue.h" #include "Dom/JsonxObject.h" #include "Serialization/JsonxSerializer.h" #include "Templates/IsFloatingPoint.h" #include "Templates/IsIntegral.h" #include "Templates/EnableIf.h" #include "Templates/Invoke.h" /** * Helpers for creating TSharedPtr JSONX trees * * Simple example: * * FJsonxDomBuilder::FArray InnerArray; * InnerArray.Add(7.f, TEXT("Hello"), true); * * FJsonxDomBuilder::FObject Object; * Object.Set(TEXT("Array"), InnerArray); * Object.Set(TEXT("Number"), 13.f); * * Object.AsJsonxValue(); * * produces {"Array": [7., "Hello", true], "Number": 13.} */ class FJsonxDomBuilder { public: class FArray; class FObject { public: FObject() : Object(MakeShared()) { } TSharedRef AsJsonxValue() const { return MakeShared(Object); } TSharedRef AsJsonxObject() const { return Object; } template