Repository: chauncygu/Multi-Agent-Constrained-Policy-Optimisation Branch: main Commit: b80a9f5b4a00 Files: 141 Total size: 802.8 KB Directory structure: gitextract_e69a341i/ ├── LICENSE ├── MACPO/ │ ├── .gitignore │ ├── environment.yaml │ ├── macpo/ │ │ ├── __init__.py │ │ ├── algorithms/ │ │ │ ├── __init__.py │ │ │ ├── r_mappo/ │ │ │ │ ├── __init__.py │ │ │ │ ├── algorithm/ │ │ │ │ │ ├── MACPPOPolicy.py │ │ │ │ │ ├── rMAPPOPolicy.py │ │ │ │ │ └── r_actor_critic.py │ │ │ │ └── r_macpo.py │ │ │ └── utils/ │ │ │ ├── act.py │ │ │ ├── cnn.py │ │ │ ├── distributions.py │ │ │ ├── mlp.py │ │ │ ├── rnn.py │ │ │ └── util.py │ │ ├── config.py │ │ ├── envs/ │ │ │ ├── __init__.py │ │ │ ├── env_wrappers.py │ │ │ └── safety_ma_mujoco/ │ │ │ ├── MUJOCO_LOG.TXT │ │ │ ├── README.md │ │ │ ├── __init__.py │ │ │ ├── safety_multiagent_mujoco/ │ │ │ │ ├── __init__.py │ │ │ │ ├── ant.py │ │ │ │ ├── assets/ │ │ │ │ │ ├── .gitignore │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── ant.xml │ │ │ │ │ ├── coupled_half_cheetah.xml │ │ │ │ │ ├── half_cheetah.xml │ │ │ │ │ ├── hopper.xml │ │ │ │ │ ├── humanoid.xml │ │ │ │ │ ├── manyagent_ant.xml │ │ │ │ │ ├── manyagent_ant.xml.template │ │ │ │ │ ├── manyagent_ant__stage1.xml │ │ │ │ │ ├── manyagent_swimmer.xml.template │ │ │ │ │ ├── manyagent_swimmer__bckp2.xml │ │ │ │ │ └── manyagent_swimmer_bckp.xml │ │ │ │ ├── coupled_half_cheetah.py │ │ │ │ ├── half_cheetah.py │ │ │ │ ├── hopper.py │ │ │ │ ├── humanoid.py │ │ │ │ ├── manyagent_ant.py │ │ │ │ ├── manyagent_swimmer.py │ │ │ │ ├── mujoco_env.py │ │ │ │ ├── mujoco_multi.py │ │ │ │ ├── multiagentenv.py │ │ │ │ └── obsk.py │ │ │ └── test.py │ │ ├── runner/ │ │ │ ├── __init__.py │ │ │ └── separated/ │ │ │ ├── __init__.py │ │ │ ├── base_runner.py │ │ │ ├── base_runner_macpo.py │ │ │ ├── mujoco_runner.py │ │ │ └── mujoco_runner_macpo.py │ │ ├── scripts/ │ │ │ ├── __init__.py │ │ │ ├── train/ │ │ │ │ ├── __init__.py │ │ │ │ └── train_mujoco.py │ │ │ └── train_mujoco.sh │ │ └── utils/ │ │ ├── __init__.py │ │ ├── multi_discrete.py │ │ ├── popart.py │ │ ├── separated_buffer.py │ │ └── util.py │ ├── macpo.egg-info/ │ │ ├── PKG-INFO │ │ ├── SOURCES.txt │ │ ├── dependency_links.txt │ │ └── top_level.txt │ └── setup.py ├── MAPPO-Lagrangian/ │ ├── .gitignore │ ├── environment.yaml │ ├── mappo_lagrangian/ │ │ ├── __init__.py │ │ ├── algorithms/ │ │ │ ├── __init__.py │ │ │ ├── r_mappo/ │ │ │ │ ├── __init__.py │ │ │ │ ├── algorithm/ │ │ │ │ │ ├── MACPPOPolicy.py │ │ │ │ │ ├── rMAPPOPolicy.py │ │ │ │ │ └── r_actor_critic.py │ │ │ │ └── r_mappo_lagr.py │ │ │ └── utils/ │ │ │ ├── act.py │ │ │ ├── cnn.py │ │ │ ├── distributions.py │ │ │ ├── mlp.py │ │ │ ├── rnn.py │ │ │ └── util.py │ │ ├── config.py │ │ ├── envs/ │ │ │ ├── __init__.py │ │ │ ├── env_wrappers.py │ │ │ └── safety_ma_mujoco/ │ │ │ ├── MUJOCO_LOG.TXT │ │ │ ├── README.md │ │ │ ├── __init__.py │ │ │ ├── safety_multiagent_mujoco/ │ │ │ │ ├── __init__.py │ │ │ │ ├── ant.py │ │ │ │ ├── assets/ │ │ │ │ │ ├── .gitignore │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── ant.xml │ │ │ │ │ ├── beifen_hopper.xml │ │ │ │ │ ├── coupled_half_cheetah.xml │ │ │ │ │ ├── half_cheetah.xml │ │ │ │ │ ├── hopper.xml │ │ │ │ │ ├── humanoid.xml │ │ │ │ │ ├── manyagent_ant.xml │ │ │ │ │ ├── manyagent_ant.xml.template │ │ │ │ │ ├── manyagent_ant__stage1.xml │ │ │ │ │ ├── manyagent_swimmer.xml.template │ │ │ │ │ ├── manyagent_swimmer__bckp2.xml │ │ │ │ │ └── manyagent_swimmer_bckp.xml │ │ │ │ ├── coupled_half_cheetah.py │ │ │ │ ├── half_cheetah.py │ │ │ │ ├── hopper.py │ │ │ │ ├── humanoid.py │ │ │ │ ├── manyagent_ant.py │ 
│ │ │ ├── manyagent_swimmer.py │ │ │ │ ├── mujoco_env.py │ │ │ │ ├── mujoco_multi.py │ │ │ │ ├── multiagentenv.py │ │ │ │ └── obsk.py │ │ │ └── test.py │ │ ├── runner/ │ │ │ ├── __init__.py │ │ │ └── separated/ │ │ │ ├── __init__.py │ │ │ ├── base_runner.py │ │ │ ├── base_runner_mappo_lagr.py │ │ │ ├── mujoco_runner.py │ │ │ └── mujoco_runner_mappo_lagr.py │ │ ├── scripts/ │ │ │ ├── __init__.py │ │ │ ├── eval/ │ │ │ │ └── eval_hanabi.py │ │ │ ├── train/ │ │ │ │ ├── __init__.py │ │ │ │ └── train_mujoco.py │ │ │ └── train_mujoco.sh │ │ └── utils/ │ │ ├── __init__.py │ │ ├── multi_discrete.py │ │ ├── popart.py │ │ ├── separated_buffer.py │ │ ├── shared_buffer.py │ │ └── util.py │ ├── mappo_lagrangian.egg-info/ │ │ ├── PKG-INFO │ │ ├── SOURCES.txt │ │ ├── dependency_links.txt │ │ └── top_level.txt │ └── setup.py ├── README.md ├── environment.yaml └── requirements.txt ================================================ FILE CONTENTS ================================================ ================================================ FILE: LICENSE ================================================ MIT License <<<<<<< HEAD Copyright (c) 2021 anybodyany ======= Copyright (c) 2020 Tianshou contributors >>>>>>> upload macpo code Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE <<<<<<< HEAD SOFTWARE. ======= SOFTWARE. 
>>>>>>> upload macpo code ================================================ FILE: MACPO/.gitignore ================================================ /.idea/ */__pycache__/ ================================================ FILE: MACPO/environment.yaml ================================================ name: marl channels: - defaults dependencies: - _libgcc_mutex=0.1=main - _tflow_select=2.1.0=gpu - absl-py=0.9.0=py36_0 - astor=0.8.0=py36_0 - blas=1.0=mkl - c-ares=1.15.0=h7b6447c_1001 - ca-certificates=2020.1.1=0 - certifi=2020.4.5.2=py36_0 - cudatoolkit=10.0.130=0 - cudnn=7.6.5=cuda10.0_0 - cupti=10.0.130=0 - gast=0.2.2=py36_0 - google-pasta=0.2.0=py_0 - grpcio=1.14.1=py36h9ba97e2_0 - h5py=2.10.0=py36h7918eee_0 - hdf5=1.10.4=hb1b8bf9_0 - intel-openmp=2020.1=217 - keras-applications=1.0.8=py_0 - keras-preprocessing=1.1.0=py_1 - libedit=3.1=heed3624_0 - libffi=3.2.1=hd88cf55_4 - libgcc-ng=9.1.0=hdf63c60_0 - libgfortran-ng=7.3.0=hdf63c60_0 - libprotobuf=3.12.3=hd408876_0 - libstdcxx-ng=9.1.0=hdf63c60_0 - markdown=3.1.1=py36_0 - mkl=2020.1=217 - mkl-service=2.3.0=py36he904b0f_0 - mkl_fft=1.1.0=py36h23d657b_0 - mkl_random=1.1.1=py36h0573a6f_0 - ncurses=6.0=h9df7e31_2 - numpy=1.18.1=py36h4f9e942_0 - numpy-base=1.18.1=py36hde5b4d6_1 - openssl=1.0.2u=h7b6447c_0 - opt_einsum=3.1.0=py_0 - pip=20.1.1=py36_1 - protobuf=3.12.3=py36he6710b0_0 - python=3.6.2=hca45abc_19 - readline=7.0=ha6073c6_4 - scipy=1.4.1=py36h0b6359f_0 - setuptools=47.3.0=py36_0 - six=1.15.0=py_0 - sqlite=3.23.1=he433501_0 - tensorboard=2.0.0=pyhb38c66f_1 - tensorflow=2.0.0=gpu_py36h6b29c10_0 - tensorflow-base=2.0.0=gpu_py36h0ec5d1f_0 - tensorflow-estimator=2.0.0=pyh2649769_0 - tensorflow-gpu=2.0.0=h0d30ee6_0 - termcolor=1.1.0=py36_1 - tk=8.6.8=hbc83047_0 - werkzeug=0.16.1=py_0 - wheel=0.34.2=py36_0 - wrapt=1.12.1=py36h7b6447c_1 - xz=5.2.5=h7b6447c_0 - zlib=1.2.11=h7b6447c_3 - pip: - aiohttp==3.6.2 - aioredis==1.3.1 - astunparse==1.6.3 - async-timeout==3.0.1 - atari-py==0.2.6 - atomicwrites==1.2.1 - attrs==18.2.0 - beautifulsoup4==4.9.1 - blessings==1.7 - cachetools==4.1.1 - cffi==1.14.1 - chardet==3.0.4 - click==7.1.2 - cloudpickle==1.3.0 - colorama==0.4.3 - colorful==0.5.4 - configparser==5.0.1 - contextvars==2.4 - cycler==0.10.0 - cython==0.29.21 - deepdiff==4.3.2 - dill==0.3.2 - docker-pycreds==0.4.0 - docopt==0.6.2 - fasteners==0.15 - filelock==3.0.12 - funcsigs==1.0.2 - future==0.16.0 - gin==0.1.6 - gin-config==0.3.0 - gitdb==4.0.5 - gitpython==3.1.9 - glfw==1.12.0 - google==3.0.0 - google-api-core==1.22.1 - google-auth==1.21.0 - google-auth-oauthlib==0.4.1 - googleapis-common-protos==1.52.0 - gpustat==0.6.0 - gql==0.2.0 - graphql-core==1.1 - gym==0.17.2 - hiredis==1.1.0 - idna==2.7 - idna-ssl==1.1.0 - imageio==2.4.1 - immutables==0.14 - importlib-metadata==1.7.0 - joblib==0.16.0 - jsonnet==0.16.0 - jsonpickle==0.9.6 - jsonschema==3.2.0 - kiwisolver==1.0.1 - lockfile==0.12.2 - mappo==0.0.1 - matplotlib==3.0.0 - mock==2.0.0 - monotonic==1.5 - more-itertools==4.3.0 - mpi4py==3.0.3 - mpyq==0.2.5 - msgpack==1.0.0 - mujoco-py==2.0.2.13 - mujoco-worldgen==0.0.0 - multidict==4.7.6 - munch==2.3.2 - nvidia-ml-py3==7.352.0 - oauthlib==3.1.0 - opencensus==0.7.10 - opencensus-context==0.1.1 - opencv-python==4.2.0.34 - ordered-set==4.0.2 - packaging==20.4 - pandas==1.1.1 - pathlib2==2.3.2 - pathtools==0.1.2 - pbr==4.3.0 - pillow==5.3.0 - pluggy==0.7.1 - portpicker==1.2.0 - probscale==0.2.3 - progressbar2==3.53.1 - prometheus-client==0.8.0 - promise==2.3 - psutil==5.7.2 - py==1.6.0 - py-spy==0.3.3 - pyasn1==0.4.8 - pyasn1-modules==0.2.8 - 
pycparser==2.20 - pygame==1.9.4 - pyglet==1.5.0 - pyopengl==3.1.5 - pyopengl-accelerate==3.1.5 - pyparsing==2.2.2 - pyrsistent==0.16.0 - pysc2==3.0.0 - pytest==3.8.2 - python-dateutil==2.7.3 - python-utils==2.4.0 - pytz==2020.1 - pyyaml==3.13 - pyzmq==19.0.2 - ray==0.8.0 - redis==3.4.1 - requests==2.24.0 - requests-oauthlib==1.3.0 - rsa==4.6 - s2clientprotocol==4.10.1.75800.0 - s2protocol==4.11.4.78285.0 - sacred==0.7.2 - seaborn==0.10.1 - sentry-sdk==0.18.0 - shortuuid==1.0.1 - sk-video==1.1.10 - smmap==3.0.4 - snakeviz==1.0.0 - soupsieve==2.0.1 - subprocess32==3.5.4 - tabulate==0.8.7 - tensorboard-logger==0.1.0 - tensorboard-plugin-wit==1.7.0 - tensorboardx==2.0 - torch==1.5.1+cu101 - torchvision==0.6.1+cu101 - tornado==5.1.1 - tqdm==4.48.2 - typing-extensions==3.7.4.3 - urllib3==1.23 - wandb==0.10.5 - watchdog==0.10.3 - websocket-client==0.53.0 - whichcraft==0.5.2 - xmltodict==0.12.0 - yarl==1.5.1 - zipp==3.1.0 - zmq==0.0.0 ================================================ FILE: MACPO/macpo/__init__.py ================================================ from macpo import algorithms, envs, runner, scripts, utils, config __version__ = "0.1.0" __all__ = [ "algorithms", "envs", "runner", "scripts", "utils", "config", ] ================================================ FILE: MACPO/macpo/algorithms/__init__.py ================================================ ================================================ FILE: MACPO/macpo/algorithms/r_mappo/__init__.py ================================================ def cost_trpo_macppo(): return None ================================================ FILE: MACPO/macpo/algorithms/r_mappo/algorithm/MACPPOPolicy.py ================================================ import torch from macpo.algorithms.r_mappo.algorithm.r_actor_critic import R_Actor, R_Critic from macpo.utils.util import update_linear_schedule class MACPPOPolicy: """ MACPO Policy class. Wraps actor and critic networks to compute actions and value function predictions. :param args: (argparse.Namespace) arguments containing relevant model and policy information. :param obs_space: (gym.Space) observation space. :param cent_obs_space: (gym.Space) value function input space (centralized input for MAPPO, decentralized for IPPO). :param action_space: (gym.Space) action space. :param device: (torch.device) specifies the device to run on (cpu/gpu). """ def __init__(self, args, obs_space, cent_obs_space, act_space, device=torch.device("cpu")): self.args = args self.device = device self.lr = args.lr self.critic_lr = args.critic_lr self.opti_eps = args.opti_eps self.weight_decay = args.weight_decay self.obs_space = obs_space self.share_obs_space = cent_obs_space self.act_space = act_space self.actor = R_Actor(args, self.obs_space, self.act_space, self.device) self.critic = R_Critic(args, self.share_obs_space, self.device) self.cost_critic = R_Critic(args, self.share_obs_space, self.device) self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=self.lr, eps=self.opti_eps, weight_decay=self.weight_decay) self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=self.critic_lr, eps=self.opti_eps, weight_decay=self.weight_decay) self.cost_optimizer = torch.optim.Adam(self.cost_critic.parameters(), lr=self.critic_lr, eps=self.opti_eps, weight_decay=self.weight_decay) def lr_decay(self, episode, episodes): """ Decay the actor and critic learning rates. :param episode: (int) current training episode. :param episodes: (int) total number of training episodes. 
""" update_linear_schedule(self.actor_optimizer, episode, episodes, self.lr) update_linear_schedule(self.critic_optimizer, episode, episodes, self.critic_lr) update_linear_schedule(self.cost_optimizer, episode, episodes, self.critic_lr) def get_actions(self, cent_obs, obs, rnn_states_actor, rnn_states_critic, masks, available_actions=None, deterministic=False, rnn_states_cost=None): """ Compute actions and value function predictions for the given inputs. :param cent_obs (np.ndarray): centralized input to the critic. :param obs (np.ndarray): local agent inputs to the actor. :param rnn_states_actor: (np.ndarray) if actor is RNN, RNN states for actor. :param rnn_states_critic: (np.ndarray) if critic is RNN, RNN states for critic. :param masks: (np.ndarray) denotes points at which RNN states should be reset. :param available_actions: (np.ndarray) denotes which actions are available to agent (if None, all actions available) :param deterministic: (bool) whether the action should be mode of distribution or should be sampled. :return values: (torch.Tensor) value function predictions. :return actions: (torch.Tensor) actions to take. :return action_log_probs: (torch.Tensor) log probabilities of chosen actions. :return rnn_states_actor: (torch.Tensor) updated actor network RNN states. :return rnn_states_critic: (torch.Tensor) updated critic network RNN states. """ actions, action_log_probs, rnn_states_actor = self.actor(obs, rnn_states_actor, masks, available_actions, deterministic) values, rnn_states_critic = self.critic(cent_obs, rnn_states_critic, masks) if rnn_states_cost is None: return values, actions, action_log_probs, rnn_states_actor, rnn_states_critic else: cost_preds, rnn_states_cost = self.cost_critic(cent_obs, rnn_states_cost, masks) return values, actions, action_log_probs, rnn_states_actor, rnn_states_critic, cost_preds, rnn_states_cost def get_values(self, cent_obs, rnn_states_critic, masks): """ Get value function predictions. :param cent_obs (np.ndarray): centralized input to the critic. :param rnn_states_critic: (np.ndarray) if critic is RNN, RNN states for critic. :param masks: (np.ndarray) denotes points at which RNN states should be reset. :return values: (torch.Tensor) value function predictions. """ values, _ = self.critic(cent_obs, rnn_states_critic, masks) return values def get_cost_values(self, cent_obs, rnn_states_cost, masks): """ Get constraint cost predictions. :param cent_obs (np.ndarray): centralized input to the critic. :param rnn_states_critic: (np.ndarray) if critic is RNN, RNN states for critic. :param masks: (np.ndarray) denotes points at which RNN states should be reset. :return values: (torch.Tensor) value function predictions. """ cost_preds, _ = self.cost_critic(cent_obs, rnn_states_cost, masks) return cost_preds def evaluate_actions(self, cent_obs, obs, rnn_states_actor, rnn_states_critic, action, masks, available_actions=None, active_masks=None, rnn_states_cost=None): """ Get action logprobs / entropy and value function predictions for actor update. :param cent_obs (np.ndarray): centralized input to the critic. :param obs (np.ndarray): local agent inputs to the actor. :param rnn_states_actor: (np.ndarray) if actor is RNN, RNN states for actor. :param rnn_states_critic: (np.ndarray) if critic is RNN, RNN states for critic. :param action: (np.ndarray) actions whose log probabilites and entropy to compute. :param masks: (np.ndarray) denotes points at which RNN states should be reset. 
:param available_actions: (np.ndarray) denotes which actions are available to agent (if None, all actions available) :param active_masks: (torch.Tensor) denotes whether an agent is active or dead. :return values: (torch.Tensor) value function predictions. :return action_log_probs: (torch.Tensor) log probabilities of the input actions. :return dist_entropy: (torch.Tensor) action distribution entropy for the given inputs. """ if self.args.algorithm_name == "macpo": # todo: for mactrpo action_log_probs, dist_entropy, action_mu, action_std = self.actor.evaluate_actions(obs, rnn_states_actor, action, masks, available_actions, active_masks) values, _ = self.critic(cent_obs, rnn_states_critic, masks) cost_values, _ = self.cost_critic(cent_obs, rnn_states_cost, masks) values, _ = self.critic(cent_obs, rnn_states_critic, masks) return values, action_log_probs, dist_entropy, cost_values, action_mu, action_std else: # todo: for lagrangrian action_log_probs, dist_entropy = self.actor.evaluate_actions(obs, rnn_states_actor, action, masks, available_actions, active_masks) values, _ = self.critic(cent_obs, rnn_states_critic, masks) cost_values, _ = self.cost_critic(cent_obs, rnn_states_cost, masks) return values, action_log_probs, dist_entropy, cost_values def act(self, obs, rnn_states_actor, masks, available_actions=None, deterministic=False): """ Compute actions using the given inputs. :param obs (np.ndarray): local agent inputs to the actor. :param rnn_states_actor: (np.ndarray) if actor is RNN, RNN states for actor. :param masks: (np.ndarray) denotes points at which RNN states should be reset. :param available_actions: (np.ndarray) denotes which actions are available to agent (if None, all actions available) :param deterministic: (bool) whether the action should be mode of distribution or should be sampled. """ actions, _, rnn_states_actor = self.actor(obs, rnn_states_actor, masks, available_actions, deterministic) return actions, rnn_states_actor ================================================ FILE: MACPO/macpo/algorithms/r_mappo/algorithm/rMAPPOPolicy.py ================================================ import torch from macpo.algorithms.r_mappo.algorithm.r_actor_critic import R_Actor, R_Critic from macpo.utils.util import update_linear_schedule class R_MAPPOPolicy: """ MAPPO Policy class. Wraps actor and critic networks to compute actions and value function predictions. :param args: (argparse.Namespace) arguments containing relevant model and policy information. :param obs_space: (gym.Space) observation space. :param cent_obs_space: (gym.Space) value function input space (centralized input for MAPPO, decentralized for IPPO). :param action_space: (gym.Space) action space. :param device: (torch.device) specifies the device to run on (cpu/gpu). 
""" def __init__(self, args, obs_space, cent_obs_space, act_space, device=torch.device("cpu")): self.device = device self.lr = args.lr self.critic_lr = args.critic_lr self.opti_eps = args.opti_eps self.weight_decay = args.weight_decay self.obs_space = obs_space self.share_obs_space = cent_obs_space self.act_space = act_space self.actor = R_Actor(args, self.obs_space, self.act_space, self.device) self.critic = R_Critic(args, self.share_obs_space, self.device) self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=self.lr, eps=self.opti_eps, weight_decay=self.weight_decay) self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=self.critic_lr, eps=self.opti_eps, weight_decay=self.weight_decay) def lr_decay(self, episode, episodes): """ Decay the actor and critic learning rates. :param episode: (int) current training episode. :param episodes: (int) total number of training episodes. """ update_linear_schedule(self.actor_optimizer, episode, episodes, self.lr) update_linear_schedule(self.critic_optimizer, episode, episodes, self.critic_lr) def get_actions(self, cent_obs, obs, rnn_states_actor, rnn_states_critic, masks, available_actions=None, deterministic=False): """ Compute actions and value function predictions for the given inputs. :param cent_obs (np.ndarray): centralized input to the critic. :param obs (np.ndarray): local agent inputs to the actor. :param rnn_states_actor: (np.ndarray) if actor is RNN, RNN states for actor. :param rnn_states_critic: (np.ndarray) if critic is RNN, RNN states for critic. :param masks: (np.ndarray) denotes points at which RNN states should be reset. :param available_actions: (np.ndarray) denotes which actions are available to agent (if None, all actions available) :param deterministic: (bool) whether the action should be mode of distribution or should be sampled. :return values: (torch.Tensor) value function predictions. :return actions: (torch.Tensor) actions to take. :return action_log_probs: (torch.Tensor) log probabilities of chosen actions. :return rnn_states_actor: (torch.Tensor) updated actor network RNN states. :return rnn_states_critic: (torch.Tensor) updated critic network RNN states. """ actions, action_log_probs, rnn_states_actor = self.actor(obs, rnn_states_actor, masks, available_actions, deterministic) values, rnn_states_critic = self.critic(cent_obs, rnn_states_critic, masks) return values, actions, action_log_probs, rnn_states_actor, rnn_states_critic def get_values(self, cent_obs, rnn_states_critic, masks): """ Get value function predictions. :param cent_obs (np.ndarray): centralized input to the critic. :param rnn_states_critic: (np.ndarray) if critic is RNN, RNN states for critic. :param masks: (np.ndarray) denotes points at which RNN states should be reset. :return values: (torch.Tensor) value function predictions. """ values, _ = self.critic(cent_obs, rnn_states_critic, masks) return values def evaluate_actions(self, cent_obs, obs, rnn_states_actor, rnn_states_critic, action, masks, available_actions=None, active_masks=None): """ Get action logprobs / entropy and value function predictions for actor update. :param cent_obs (np.ndarray): centralized input to the critic. :param obs (np.ndarray): local agent inputs to the actor. :param rnn_states_actor: (np.ndarray) if actor is RNN, RNN states for actor. :param rnn_states_critic: (np.ndarray) if critic is RNN, RNN states for critic. :param action: (np.ndarray) actions whose log probabilites and entropy to compute. 
:param masks: (np.ndarray) denotes points at which RNN states should be reset. :param available_actions: (np.ndarray) denotes which actions are available to agent (if None, all actions available) :param active_masks: (torch.Tensor) denotes whether an agent is active or dead. :return values: (torch.Tensor) value function predictions. :return action_log_probs: (torch.Tensor) log probabilities of the input actions. :return dist_entropy: (torch.Tensor) action distribution entropy for the given inputs. """ action_log_probs, dist_entropy = self.actor.evaluate_actions(obs, rnn_states_actor, action, masks, available_actions, active_masks) values, _ = self.critic(cent_obs, rnn_states_critic, masks) return values, action_log_probs, dist_entropy def act(self, obs, rnn_states_actor, masks, available_actions=None, deterministic=False): """ Compute actions using the given inputs. :param obs (np.ndarray): local agent inputs to the actor. :param rnn_states_actor: (np.ndarray) if actor is RNN, RNN states for actor. :param masks: (np.ndarray) denotes points at which RNN states should be reset. :param available_actions: (np.ndarray) denotes which actions are available to agent (if None, all actions available) :param deterministic: (bool) whether the action should be mode of distribution or should be sampled. """ actions, _, rnn_states_actor = self.actor(obs, rnn_states_actor, masks, available_actions, deterministic) return actions, rnn_states_actor ================================================ FILE: MACPO/macpo/algorithms/r_mappo/algorithm/r_actor_critic.py ================================================ import torch import torch.nn as nn from macpo.algorithms.utils.util import init, check from macpo.algorithms.utils.cnn import CNNBase from macpo.algorithms.utils.mlp import MLPBase from macpo.algorithms.utils.rnn import RNNLayer from macpo.algorithms.utils.act import ACTLayer from macpo.utils.util import get_shape_from_obs_space class R_Actor(nn.Module): """ Actor network class for MACPO. Outputs actions given observations. :param args: (argparse.Namespace) arguments containing relevant model information. :param obs_space: (gym.Space) observation space. :param action_space: (gym.Space) action space. :param device: (torch.device) specifies the device to run on (cpu/gpu). """ def __init__(self, args, obs_space, action_space, device=torch.device("cpu")): super(R_Actor, self).__init__() self.args = args self.hidden_size = args.hidden_size self._gain = args.gain self._use_orthogonal = args.use_orthogonal self._use_policy_active_masks = args.use_policy_active_masks self._use_naive_recurrent_policy = args.use_naive_recurrent_policy self._use_recurrent_policy = args.use_recurrent_policy self._recurrent_N = args.recurrent_N self.tpdv = dict(dtype=torch.float32, device=device) obs_shape = get_shape_from_obs_space(obs_space) base = CNNBase if len(obs_shape) == 3 else MLPBase self.base = base(args, obs_shape) if self._use_naive_recurrent_policy or self._use_recurrent_policy: self.rnn = RNNLayer(self.hidden_size, self.hidden_size, self._recurrent_N, self._use_orthogonal) self.act = ACTLayer(action_space, self.hidden_size, self._use_orthogonal, self._gain, args) self.to(device) def forward(self, obs, rnn_states, masks, available_actions=None, deterministic=False): """ Compute actions from the given inputs. :param obs: (np.ndarray / torch.Tensor) observation inputs into network. :param rnn_states: (np.ndarray / torch.Tensor) if RNN network, hidden states for RNN. 
:param masks: (np.ndarray / torch.Tensor) mask tensor denoting if hidden states should be reinitialized to zeros. :param available_actions: (np.ndarray / torch.Tensor) denotes which actions are available to agent (if None, all actions available) :param deterministic: (bool) whether to sample from action distribution or return the mode. :return actions: (torch.Tensor) actions to take. :return action_log_probs: (torch.Tensor) log probabilities of taken actions. :return rnn_states: (torch.Tensor) updated RNN hidden states. """ obs = check(obs).to(**self.tpdv) rnn_states = check(rnn_states).to(**self.tpdv) masks = check(masks).to(**self.tpdv) if available_actions is not None: available_actions = check(available_actions).to(**self.tpdv) actor_features = self.base(obs) if self._use_naive_recurrent_policy or self._use_recurrent_policy: actor_features, rnn_states = self.rnn(actor_features, rnn_states, masks) actions, action_log_probs = self.act(actor_features, available_actions, deterministic) return actions, action_log_probs, rnn_states def evaluate_actions(self, obs, rnn_states, action, masks, available_actions=None, active_masks=None): """ Compute log probability and entropy of given actions. :param obs: (torch.Tensor) observation inputs into network. :param action: (torch.Tensor) actions whose entropy and log probability to evaluate. :param rnn_states: (torch.Tensor) if RNN network, hidden states for RNN. :param masks: (torch.Tensor) mask tensor denoting if hidden states should be reinitialized to zeros. :param available_actions: (torch.Tensor) denotes which actions are available to agent (if None, all actions available) :param active_masks: (torch.Tensor) denotes whether an agent is active or dead. :return action_log_probs: (torch.Tensor) log probabilities of the input actions. :return dist_entropy: (torch.Tensor) action distribution entropy for the given inputs. """ obs = check(obs).to(**self.tpdv) rnn_states = check(rnn_states).to(**self.tpdv) action = check(action).to(**self.tpdv) masks = check(masks).to(**self.tpdv) if available_actions is not None: available_actions = check(available_actions).to(**self.tpdv) if active_masks is not None: active_masks = check(active_masks).to(**self.tpdv) actor_features = self.base(obs) if self._use_naive_recurrent_policy or self._use_recurrent_policy: actor_features, rnn_states = self.rnn(actor_features, rnn_states, masks) if self.args.algorithm_name == "macpo": action_log_probs, dist_entropy, action_mu, action_std = self.act.evaluate_actions_trpo(actor_features, action, available_actions, active_masks= active_masks if self._use_policy_active_masks else None) # print("action_log_probs", action_log_probs) # print("action_std", action_std) return action_log_probs, dist_entropy, action_mu, action_std else: action_log_probs, dist_entropy = self.act.evaluate_actions(actor_features, action, available_actions, active_masks= active_masks if self._use_policy_active_masks else None) return action_log_probs, dist_entropy class R_Critic(nn.Module): """ Critic network class for MAPPO. Outputs value function predictions given centralized input (MAPPO) or local observations (IPPO). :param args: (argparse.Namespace) arguments containing relevant model information. :param cent_obs_space: (gym.Space) (centralized) observation space. :param device: (torch.device) specifies the device to run on (cpu/gpu). 
""" def __init__(self, args, cent_obs_space, device=torch.device("cpu")): super(R_Critic, self).__init__() self.hidden_size = args.hidden_size self._use_orthogonal = args.use_orthogonal self._use_naive_recurrent_policy = args.use_naive_recurrent_policy self._use_recurrent_policy = args.use_recurrent_policy self._recurrent_N = args.recurrent_N self.tpdv = dict(dtype=torch.float32, device=device) init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][self._use_orthogonal] cent_obs_shape = get_shape_from_obs_space(cent_obs_space) base = CNNBase if len(cent_obs_shape) == 3 else MLPBase self.base = base(args, cent_obs_shape) if self._use_naive_recurrent_policy or self._use_recurrent_policy: self.rnn = RNNLayer(self.hidden_size, self.hidden_size, self._recurrent_N, self._use_orthogonal) def init_(m): return init(m, init_method, lambda x: nn.init.constant_(x, 0)) self.v_out = init_(nn.Linear(self.hidden_size, 1)) self.to(device) def forward(self, cent_obs, rnn_states, masks): """ Compute actions from the given inputs. :param cent_obs: (np.ndarray / torch.Tensor) observation inputs into network. :param rnn_states: (np.ndarray / torch.Tensor) if RNN network, hidden states for RNN. :param masks: (np.ndarray / torch.Tensor) mask tensor denoting if RNN states should be reinitialized to zeros. :return values: (torch.Tensor) value function predictions. :return rnn_states: (torch.Tensor) updated RNN hidden states. """ cent_obs = check(cent_obs).to(**self.tpdv) rnn_states = check(rnn_states).to(**self.tpdv) masks = check(masks).to(**self.tpdv) critic_features = self.base(cent_obs) if self._use_naive_recurrent_policy or self._use_recurrent_policy: critic_features, rnn_states = self.rnn(critic_features, rnn_states, masks) values = self.v_out(critic_features) return values, rnn_states ================================================ FILE: MACPO/macpo/algorithms/r_mappo/r_macpo.py ================================================ import numpy as np import torch import torch.nn as nn from macpo.utils.util import get_gard_norm, huber_loss, mse_loss from macpo.utils.popart import PopArt from macpo.algorithms.utils.util import check from macpo.algorithms.r_mappo.algorithm.r_actor_critic import R_Actor from torch.nn.utils import clip_grad_norm import copy # EPS = 1e-8 class R_MACTRPO_CPO(): """ Trainer class for MACPO to update policies. :param args: (argparse.Namespace) arguments containing relevant model, policy, and env information. :param policy: (R_MAPPO_Policy) policy to update. :param device: (torch.device) specifies the device to run on (cpu/gpu). 
""" def __init__(self, args, policy, attempt_feasible_recovery=False, attempt_infeasible_recovery=False, revert_to_last_safe_point=False, delta_bound=0.011, safety_bound=0.1, _backtrack_ratio=0.8, _max_backtracks=15, _constraint_name_1="trust_region", _constraint_name_2="safety_region", linesearch_infeasible_recovery=True, accept_violation=False, learn_margin=False, device=torch.device("cpu")): self.device = device self.tpdv = dict(dtype=torch.float32, device=device) self.policy = policy self.clip_param = args.clip_param self.ppo_epoch = args.ppo_epoch self.num_mini_batch = args.num_mini_batch self.data_chunk_length = args.data_chunk_length self.value_loss_coef = args.value_loss_coef self.entropy_coef = args.entropy_coef self.max_grad_norm = args.max_grad_norm self.huber_delta = args.huber_delta self.episode_length = args.episode_length self.kl_threshold = args.kl_threshold self.safety_bound = args.safety_bound self.ls_step = args.ls_step self.accept_ratio = args.accept_ratio self.EPS = args.EPS self.gamma = args.gamma self.safety_gamma = args.safety_gamma self.line_search_fraction = args.line_search_fraction self.g_step_dir_coef = args.g_step_dir_coef self.b_step_dir_coef = args.b_step_dir_coef self.fraction_coef = args.fraction_coef self._use_recurrent_policy = args.use_recurrent_policy self._use_naive_recurrent = args.use_naive_recurrent_policy self._use_max_grad_norm = args.use_max_grad_norm self._use_clipped_value_loss = args.use_clipped_value_loss self._use_huber_loss = args.use_huber_loss self._use_popart = args.use_popart self._use_value_active_masks = args.use_value_active_masks self._use_policy_active_masks = args.use_policy_active_masks # todo: my args-start self.args = args self.device = device self.tpdv = dict(dtype=torch.float32, device=device) self.policy = policy self._damping = 0.0001 self._delta = 0.01 self._max_backtracks = 10 self._backtrack_coeff = 0.5 self.clip_param = args.clip_param self.ppo_epoch = args.ppo_epoch self.num_mini_batch = args.num_mini_batch self.data_chunk_length = args.data_chunk_length self.value_loss_coef = args.value_loss_coef self.entropy_coef = args.entropy_coef self.max_grad_norm = args.max_grad_norm self.huber_delta = args.huber_delta self._use_recurrent_policy = args.use_recurrent_policy self._use_naive_recurrent = args.use_naive_recurrent_policy self._use_max_grad_norm = args.use_max_grad_norm self._use_clipped_value_loss = args.use_clipped_value_loss self._use_huber_loss = args.use_huber_loss self._use_popart = args.use_popart self._use_value_active_masks = args.use_value_active_masks self._use_policy_active_masks = args.use_policy_active_masks self.attempt_feasible_recovery = attempt_feasible_recovery self.attempt_infeasible_recovery = attempt_infeasible_recovery self.revert_to_last_safe_point = revert_to_last_safe_point self._max_quad_constraint_val = args.kl_threshold # delta_bound self._max_lin_constraint_val = args.safety_bound self._backtrack_ratio = _backtrack_ratio self._max_backtracks = _max_backtracks self._constraint_name_1 = _constraint_name_1 self._constraint_name_2 = _constraint_name_2 self._linesearch_infeasible_recovery = linesearch_infeasible_recovery self._accept_violation = accept_violation hvp_approach = None num_slices = 1 self.lamda_coef = 0 self.lamda_coef_a_star = 0 self.lamda_coef_b_star = 0 self.margin = 0 self.margin_lr = 0.05 self.learn_margin = learn_margin self.n_rollout_threads = args.n_rollout_threads if self._use_popart: self.value_normalizer = PopArt(1, device=self.device) else: self.value_normalizer = None 
def cal_value_loss(self, values, value_preds_batch, return_batch, active_masks_batch): """ Calculate value function loss. :param values: (torch.Tensor) value function predictions. :param value_preds_batch: (torch.Tensor) "old" value predictions from data batch (used for value clip loss) :param return_batch: (torch.Tensor) reward to go returns. :param active_masks_batch: (torch.Tensor) denotes if agent is active or dead at a given timesep. :return value_loss: (torch.Tensor) value function loss. """ if self._use_popart: value_pred_clipped = value_preds_batch + (values - value_preds_batch).clamp(-self.clip_param, self.clip_param) error_clipped = self.value_normalizer(return_batch) - value_pred_clipped error_original = self.value_normalizer(return_batch) - values else: value_pred_clipped = value_preds_batch + (values - value_preds_batch).clamp(-self.clip_param, self.clip_param) error_clipped = return_batch - value_pred_clipped error_original = return_batch - values if self._use_huber_loss: value_loss_clipped = huber_loss(error_clipped, self.huber_delta) value_loss_original = huber_loss(error_original, self.huber_delta) else: value_loss_clipped = mse_loss(error_clipped) value_loss_original = mse_loss(error_original) if self._use_clipped_value_loss: value_loss = torch.max(value_loss_original, value_loss_clipped) else: value_loss = value_loss_original if self._use_value_active_masks: value_loss = (value_loss * active_masks_batch).sum() / active_masks_batch.sum() else: value_loss = value_loss.mean() return value_loss def flat_grad(self, grads): grad_flatten = [] for grad in grads: if grad is None: continue grad_flatten.append(grad.view(-1)) grad_flatten = torch.cat(grad_flatten) return grad_flatten def flat_hessian(self, hessians): hessians_flatten = [] for hessian in hessians: if hessian is None: continue hessians_flatten.append(hessian.contiguous().view(-1)) hessians_flatten = torch.cat(hessians_flatten).data return hessians_flatten def flat_params(self, model): params = [] for param in model.parameters(): params.append(param.data.view(-1)) params_flatten = torch.cat(params) return params_flatten def update_model(self, model, new_params): index = 0 for params in model.parameters(): params_length = len(params.view(-1)) new_param = new_params[index: index + params_length] new_param = new_param.view(params.size()) params.data.copy_(new_param) index += params_length def kl_divergence(self, obs, rnn_states, action, masks, available_actions, active_masks, new_actor, old_actor): _, _, mu, std = new_actor.evaluate_actions(obs, rnn_states, action, masks, available_actions, active_masks) _, _, mu_old, std_old = old_actor.evaluate_actions(obs, rnn_states, action, masks, available_actions, active_masks) logstd = torch.log(std) mu_old = mu_old.detach() std_old = std_old.detach() logstd_old = torch.log(std_old) # kl divergence between old policy and new policy : D( pi_old || pi_new ) # pi_old -> mu0, logstd0, std0 / pi_new -> mu, logstd, std # be careful of calculating KL-divergence. 
It is not symmetric metric kl = logstd_old - logstd + (std_old.pow(2) + (mu_old - mu).pow(2)) / \ (self.EPS + 2.0 * std.pow(2)) - 0.5 return kl.sum(1, keepdim=True) # from openai baseline code # https://github.com/openai/baselines/blob/master/baselines/common/cg.py def conjugate_gradient(self, actor, obs, rnn_states, action, masks, available_actions, active_masks, b, nsteps, residual_tol=1e-10): x = torch.zeros(b.size()).to(device=self.device) r = b.clone() p = b.clone() rdotr = torch.dot(r, r) for i in range(nsteps): _Avp = self.fisher_vector_product(actor, obs, rnn_states, action, masks, available_actions, active_masks, p) alpha = rdotr / torch.dot(p, _Avp) x += alpha * p r -= alpha * _Avp new_rdotr = torch.dot(r, r) betta = new_rdotr / rdotr p = r + betta * p rdotr = new_rdotr if rdotr < residual_tol: break return x def fisher_vector_product(self, actor, obs, rnn_states, action, masks, available_actions, active_masks, p): p.detach() kl = self.kl_divergence(obs, rnn_states, action, masks, available_actions, active_masks, new_actor=actor, old_actor=actor) kl = kl.mean() kl_grad = torch.autograd.grad(kl, actor.parameters(), create_graph=True, allow_unused=True) kl_grad = self.flat_grad(kl_grad) # check kl_grad == 0 kl_grad_p = (kl_grad * p).sum() kl_hessian_p = torch.autograd.grad(kl_grad_p, actor.parameters(), allow_unused=True) kl_hessian_p = self.flat_hessian(kl_hessian_p) return kl_hessian_p + 0.1 * p def _get_flat_grad(self, y, model, retain_graph=None, create_graph=False): grads = torch.autograd.grad(y, model.parameters(), retain_graph=retain_graph, create_graph=create_graph, allow_unused=True) _grads = [] for val, p in zip(grads, model.parameters()): if val is not None: _grads.append(val) else: _grads.append(torch.zeros_like(p.data, requires_grad=create_graph)) return torch.cat([grad.reshape(-1) for grad in _grads]) def _flat_grad_(self, f, model, retain_graph=None, create_graph=False): return self.flat_grad(torch.autograd.grad(f, model.parameters(), retain_graph=retain_graph, create_graph=create_graph, allow_unused=True)) def hessian_vector_product(self, f, model): # for H = grad**2 f, compute Hx g = self._flat_grad_(f, model) # g = self._get_flat_grad(f, model) # x = torch.placeholder(torch.float32, shape=g.shape) x = torch.FloatTensor(g.shape) return x, self._flat_grad_(torch.sum(g * x), model) def cg(self, Ax, b, cg_iters=10): x = np.zeros_like(b) r = b.clone() # Note: should be 'b - Ax(x)', but for x=0, Ax(x)=0. Change if doing warm start. p = r.clone() r_dot_old = torch.dot(r, r) for _ in range(cg_iters): z = Ax(p) alpha = r_dot_old / (torch.dot(p, z) + self.EPS) x += alpha * p r -= alpha * z r_dot_new = torch.dot(r, r) p = r + (r_dot_new / r_dot_old) * p r_dot_old = r_dot_new return x def trpo_update(self, sample, update_actor=True): """ Update actor and critic networks. :param sample: (Tuple) contains data batch with which to update networks. :update_actor: (bool) whether to update actor network. :return value_loss: (torch.Tensor) value function loss. :return critic_grad_norm: (torch.Tensor) gradient norm from critic update. ;return policy_loss: (torch.Tensor) actor(policy) loss value. :return dist_entropy: (torch.Tensor) action entropies. :return actor_grad_norm: (torch.Tensor) gradient norm from actor update. :return imp_weights: (torch.Tensor) importance sampling weights. 
""" share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, \ value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, \ adv_targ, available_actions_batch, factor_batch, cost_preds_batch, cost_returns_barch, rnn_states_cost_batch, \ cost_adv_targ, aver_episode_costs = sample old_action_log_probs_batch = check(old_action_log_probs_batch).to(**self.tpdv) adv_targ = check(adv_targ).to(**self.tpdv) cost_adv_targ = check(cost_adv_targ).to(**self.tpdv) value_preds_batch = check(value_preds_batch).to(**self.tpdv) return_batch = check(return_batch).to(**self.tpdv) active_masks_batch = check(active_masks_batch).to(**self.tpdv) factor_batch = check(factor_batch).to(**self.tpdv) cost_returns_barch = check(cost_returns_barch).to(**self.tpdv) cost_preds_batch = check(cost_preds_batch).to(**self.tpdv) # Reshape to do in a single forward pass for all steps # values, action_log_probs, dist_entropy, cost_values, action_mu, action_std values, action_log_probs, dist_entropy, cost_values, action_mu, action_std = self.policy.evaluate_actions( share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, masks_batch, available_actions_batch, active_masks_batch, rnn_states_cost_batch) # todo: reward critic update value_loss = self.cal_value_loss(values, value_preds_batch, return_batch, active_masks_batch) self.policy.critic_optimizer.zero_grad() (value_loss * self.value_loss_coef).backward() if self._use_max_grad_norm: critic_grad_norm = nn.utils.clip_grad_norm_(self.policy.critic.parameters(), self.max_grad_norm) else: critic_grad_norm = get_gard_norm(self.policy.critic.parameters()) self.policy.critic_optimizer.step() # todo: cost critic update cost_loss = self.cal_value_loss(cost_values, cost_preds_batch, cost_returns_barch, active_masks_batch) self.policy.cost_optimizer.zero_grad() (cost_loss * self.value_loss_coef).backward() if self._use_max_grad_norm: cost_grad_norm = nn.utils.clip_grad_norm_(self.policy.cost_critic.parameters(), self.max_grad_norm) else: cost_grad_norm = get_gard_norm(self.policy.cost_critic.parameters()) self.policy.cost_optimizer.step() # todo: actor update rescale_constraint_val = (aver_episode_costs.mean() - self._max_lin_constraint_val) * (1 - self.gamma) if rescale_constraint_val == 0: rescale_constraint_val = self.EPS # todo:reward-g ratio = torch.exp(action_log_probs - old_action_log_probs_batch) if self._use_policy_active_masks: reward_loss = (torch.sum(ratio * factor_batch * adv_targ, dim=-1, keepdim=True) * active_masks_batch).sum() / active_masks_batch.sum() else: reward_loss = torch.sum(ratio * factor_batch * adv_targ, dim=-1, keepdim=True).mean() reward_loss = - reward_loss # todo: reward_loss_grad = torch.autograd.grad(reward_loss, self.policy.actor.parameters(), retain_graph=True, allow_unused=True) reward_loss_grad = self.flat_grad(reward_loss_grad) # todo:cost-b if self._use_policy_active_masks: cost_loss = (torch.sum(ratio * factor_batch * (cost_adv_targ), dim=-1, keepdim=True) * active_masks_batch).sum() / active_masks_batch.sum() else: cost_loss = torch.sum(ratio * factor_batch * (cost_adv_targ), dim=-1, keepdim=True).mean() cost_loss_grad = torch.autograd.grad(cost_loss, self.policy.actor.parameters(), retain_graph=True, allow_unused=True) cost_loss_grad = self.flat_grad(cost_loss_grad) B_cost_loss_grad = cost_loss_grad.unsqueeze(0) B_cost_loss_grad = self.flat_grad(B_cost_loss_grad) # todo: compute lamda_coef and v_coef g_step_dir = self.conjugate_gradient(self.policy.actor, 
obs_batch, rnn_states_batch, actions_batch, masks_batch, available_actions_batch, active_masks_batch, reward_loss_grad.data, nsteps=10) # todo: compute H^{-1} g b_step_dir = self.conjugate_gradient(self.policy.actor, obs_batch, rnn_states_batch, actions_batch, masks_batch, available_actions_batch, active_masks_batch, B_cost_loss_grad.data, nsteps=10) # todo: compute H^{-1} b q_coef = (reward_loss_grad * g_step_dir).sum(0, keepdim=True) # todo: compute q_coef: = g^T H^{-1} g r_coef = (reward_loss_grad * b_step_dir).sum(0, keepdim=True) # todo: compute r_coef: = g^T H^{-1} b s_coef = (cost_loss_grad * b_step_dir).sum(0, keepdim=True) # todo: compute s_coef: = b^T H^{-1} b fraction = self.line_search_fraction #0.5 # 0.5 # line search step size loss_improve = 0 # initialization """self._max_lin_constraint_val = c, B_cost_loss_grad = c in cpo""" B_cost_loss_grad_dot = torch.dot(B_cost_loss_grad, B_cost_loss_grad) # torch.dot(B_cost_loss_grad, B_cost_loss_grad) # B_cost_loss_grad.mean() * B_cost_loss_grad.mean() if (torch.dot(B_cost_loss_grad, B_cost_loss_grad)) <= self.EPS and rescale_constraint_val < 0: # feasible and cost grad is zero---shortcut to pure TRPO update! # w, r, s, A, B = 0, 0, 0, 0, 0 # g_step_dir = torch.tensor(0) b_step_dir = torch.tensor(0) r_coef = torch.tensor(0) s_coef = torch.tensor(0) positive_Cauchy_value = torch.tensor(0) whether_recover_policy_value = torch.tensor(0) optim_case = 4 else: # cost grad is nonzero: CPO update! r_coef = (reward_loss_grad * b_step_dir).sum(0, keepdim=True) # todo: compute r_coef: = g^T H^{-1} b s_coef = (cost_loss_grad * b_step_dir).sum(0, keepdim=True) # todo: compute s_coef: = b^T H^{-1} b if r_coef == 0: r_coef = self.EPS if s_coef == 0: s_coef = self.EPS positive_Cauchy_value = ( q_coef - (r_coef ** 2) / (self.EPS + s_coef)) # should be always positive (Cauchy-Shwarz) whether_recover_policy_value = 2 * self._max_quad_constraint_val - ( rescale_constraint_val ** 2) / ( self.EPS + s_coef) # does safety boundary intersect trust region? 
(positive = yes) if rescale_constraint_val < 0 and whether_recover_policy_value < 0: # point in trust region is feasible and safety boundary doesn't intersect # ==> entire trust region is feasible optim_case = 3 elif rescale_constraint_val < 0 and whether_recover_policy_value >= 0: # x = 0 is feasible and safety boundary intersects # ==> most of trust region is feasible optim_case = 2 elif rescale_constraint_val >= 0 and whether_recover_policy_value >= 0: # x = 0 is infeasible and safety boundary intersects # ==> part of trust region is feasible, recovery possible optim_case = 1 else: # x = 0 infeasible, and safety halfspace is outside trust region # ==> whole trust region is infeasible, try to fail gracefully optim_case = 0 if whether_recover_policy_value == 0: whether_recover_policy_value = self.EPS if optim_case in [3, 4]: lam = torch.sqrt( (q_coef / (2 * self._max_quad_constraint_val))) # self.lamda_coef = lam = np.sqrt(q / (2 * target_kl)) nu = torch.tensor(0) # v_coef = 0 elif optim_case in [1, 2]: LA, LB = [0, r_coef / rescale_constraint_val], [r_coef / rescale_constraint_val, np.inf] LA, LB = (LA, LB) if rescale_constraint_val < 0 else (LB, LA) proj = lambda x, L: max(L[0], min(L[1], x)) lam_a = proj(torch.sqrt(positive_Cauchy_value / whether_recover_policy_value), LA) lam_b = proj(torch.sqrt(q_coef / (torch.tensor(2 * self._max_quad_constraint_val))), LB) f_a = lambda lam: -0.5 * (positive_Cauchy_value / ( self.EPS + lam) + whether_recover_policy_value * lam) - r_coef * rescale_constraint_val / ( self.EPS + s_coef) f_b = lambda lam: -0.5 * (q_coef / (self.EPS + lam) + 2 * self._max_quad_constraint_val * lam) lam = lam_a if f_a(lam_a) >= f_b(lam_b) else lam_b nu = max(0, lam * rescale_constraint_val - r_coef) / (self.EPS + s_coef) else: lam = torch.tensor(0) nu = torch.sqrt(torch.tensor(2 * self._max_quad_constraint_val) / (self.EPS + s_coef)) x_a = (1. 
/ (lam + self.EPS)) * (g_step_dir + nu * b_step_dir) x_b = (nu * b_step_dir) x = x_a if optim_case > 0 else x_b # todo: update actor and learning reward_loss = reward_loss.data.cpu().numpy() cost_loss = cost_loss.data.cpu().numpy() params = self.flat_params(self.policy.actor) old_actor = R_Actor(self.policy.args, self.policy.obs_space, self.policy.act_space, self.device) self.update_model(old_actor, params) expected_improve = -torch.dot(x, reward_loss_grad).sum(0, keepdim=True) expected_improve = expected_improve.data.cpu().numpy() # line search flag = False fraction_coef = self.fraction_coef # print("fraction_coef", fraction_coef) for i in range(self.ls_step): x_norm = torch.norm(x) if x_norm > 0.5: x = x * 0.5 / x_norm new_params = params - fraction_coef * (fraction**i) * x self.update_model(self.policy.actor, new_params) values, action_log_probs, dist_entropy, new_cost_values, action_mu, action_std = self.policy.evaluate_actions( share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, masks_batch, available_actions_batch, active_masks_batch, rnn_states_cost_batch) ratio = torch.exp(action_log_probs - old_action_log_probs_batch) if self._use_policy_active_masks: new_reward_loss = (torch.sum(ratio * factor_batch * adv_targ, dim=-1, keepdim=True) * active_masks_batch).sum() / active_masks_batch.sum() else: new_reward_loss = torch.sum(ratio * factor_batch * adv_targ, dim=-1, keepdim=True).mean() if self._use_policy_active_masks: new_cost_loss = (torch.sum(ratio * factor_batch * cost_adv_targ, dim=-1, keepdim=True) * active_masks_batch).sum() / active_masks_batch.sum() else: new_cost_loss = torch.sum(ratio * factor_batch * cost_adv_targ, dim=-1, keepdim=True).mean() new_reward_loss = new_reward_loss.data.cpu().numpy() new_reward_loss = -new_reward_loss new_cost_loss = new_cost_loss.data.cpu().numpy() loss_improve = new_reward_loss - reward_loss kl = self.kl_divergence(obs_batch, rnn_states_batch, actions_batch, masks_batch, available_actions_batch, active_masks_batch, new_actor=self.policy.actor, old_actor=old_actor) kl = kl.mean() # see https: // en.wikipedia.org / wiki / Backtracking_line_search if ((kl < self.kl_threshold) and (loss_improve < 0 if optim_case > 1 else True) and (new_cost_loss.mean() - cost_loss.mean() <= max(-rescale_constraint_val, 0))): flag = True # print("line search successful") break expected_improve *= fraction if not flag: # line search failed print("line search failed") params = self.flat_params(old_actor) self.update_model(self.policy.actor, params) return value_loss, critic_grad_norm, kl, loss_improve, expected_improve, dist_entropy, ratio, cost_loss, cost_grad_norm, whether_recover_policy_value, cost_preds_batch, cost_returns_barch, B_cost_loss_grad, lam, nu, g_step_dir, b_step_dir, x, action_mu, action_std, B_cost_loss_grad_dot def train(self, buffer, shared_buffer=None, update_actor=True): """ Perform a training update using minibatch GD. :param buffer: (SharedReplayBuffer) buffer containing training data. :param update_actor: (bool) whether to update actor network. :return train_info: (dict) contains information regarding training update (e.g. loss, grad norms, etc). 
""" if self._use_popart: advantages = buffer.returns[:-1] - self.value_normalizer.denormalize(buffer.value_preds[:-1]) else: advantages = buffer.returns[:-1] - buffer.value_preds[:-1] advantages_copy = advantages.copy() advantages_copy[buffer.active_masks[:-1] == 0.0] = np.nan mean_advantages = np.nanmean(advantages_copy) std_advantages = np.nanstd(advantages_copy) advantages = (advantages - mean_advantages) / (std_advantages + 1e-5) if self._use_popart: cost_adv = buffer.cost_returns[:-1] - self.value_normalizer.denormalize(buffer.cost_preds[:-1]) else: cost_adv = buffer.cost_returns[:-1] - buffer.cost_preds[:-1] cost_adv_copy = cost_adv.copy() cost_adv_copy[buffer.active_masks[:-1] == 0.0] = np.nan mean_cost_adv = np.nanmean(cost_adv_copy) std_cost_adv = np.nanstd(cost_adv_copy) cost_adv = (cost_adv - mean_cost_adv) / (std_cost_adv + 1e-5) train_info = {} train_info['value_loss'] = 0 train_info['kl'] = 0 train_info['dist_entropy'] = 0 train_info['loss_improve'] = 0 train_info['expected_improve'] = 0 train_info['critic_grad_norm'] = 0 train_info['ratio'] = 0 train_info['cost_loss'] = 0 train_info['cost_grad_norm'] = 0 train_info['whether_recover_policy_value'] = 0 train_info['cost_preds_batch'] = 0 train_info['cost_returns_barch'] = 0 train_info['B_cost_loss_grad'] = 0 train_info['lam'] = 0 train_info['nu'] = 0 train_info['g_step_dir'] = 0 train_info['b_step_dir'] = 0 train_info['x'] = 0 train_info['action_mu'] = 0 train_info['action_std'] = 0 train_info['B_cost_loss_grad_dot'] = 0 if self._use_recurrent_policy: data_generator = buffer.recurrent_generator(advantages, self.num_mini_batch, self.data_chunk_length, cost_adv=cost_adv) elif self._use_naive_recurrent: data_generator = buffer.naive_recurrent_generator(advantages, self.num_mini_batch, cost_adv=cost_adv) else: data_generator = buffer.feed_forward_generator(advantages, self.num_mini_batch, cost_adv=cost_adv) # old_actor = copy.deepcopy(self.policy.actor) for sample in data_generator: value_loss, critic_grad_norm, kl, loss_improve, expected_improve, dist_entropy, imp_weights, cost_loss, cost_grad_norm, whether_recover_policy_value, cost_preds_batch, cost_returns_barch, B_cost_loss_grad, lam, nu, g_step_dir, b_step_dir, x, action_mu, action_std, B_cost_loss_grad_dot \ = self.trpo_update(sample, update_actor) train_info['value_loss'] += value_loss.item() train_info['kl'] += kl train_info['loss_improve'] += loss_improve train_info['expected_improve'] += expected_improve train_info['dist_entropy'] += dist_entropy.item() train_info['critic_grad_norm'] += critic_grad_norm train_info['ratio'] += imp_weights.mean() train_info['cost_loss'] += value_loss.item() train_info['cost_grad_norm'] += cost_grad_norm train_info['whether_recover_policy_value'] += whether_recover_policy_value train_info['cost_preds_batch'] += cost_preds_batch.mean() train_info['cost_returns_barch'] += cost_returns_barch.mean() train_info['B_cost_loss_grad'] += B_cost_loss_grad.mean() train_info['g_step_dir'] += g_step_dir.float().mean() train_info['b_step_dir'] += b_step_dir.float().mean() train_info['x'] = x.float().mean() train_info['action_mu'] += action_mu.float().mean() train_info['action_std'] += action_std.float().mean() train_info['B_cost_loss_grad_dot'] += B_cost_loss_grad_dot.item() num_updates = self.ppo_epoch * self.num_mini_batch for k in train_info.keys(): train_info[k] /= num_updates return train_info def prep_training(self): self.policy.actor.train() self.policy.critic.train() def prep_rollout(self): self.policy.actor.eval() self.policy.critic.eval() 
================================================ FILE: MACPO/macpo/algorithms/utils/act.py ================================================ from .distributions import Bernoulli, Categorical, DiagGaussian import torch import torch.nn as nn class ACTLayer(nn.Module): """ MLP Module to compute actions. :param action_space: (gym.Space) action space. :param inputs_dim: (int) dimension of network input. :param use_orthogonal: (bool) whether to use orthogonal initialization. :param gain: (float) gain of the output layer of the network. """ def __init__(self, action_space, inputs_dim, use_orthogonal, gain, args=None): super(ACTLayer, self).__init__() self.mixed_action = False self.multi_discrete = False # print("action_space.__class__.__name__", action_space.__class__.__name__) if action_space.__class__.__name__ == "Discrete": action_dim = action_space.n self.action_out = Categorical(inputs_dim, action_dim, use_orthogonal, gain) elif action_space.__class__.__name__ == "Box": action_dim = action_space.shape[0] self.action_out = DiagGaussian(inputs_dim, action_dim, use_orthogonal, gain, args) elif action_space.__class__.__name__ == "MultiBinary": action_dim = action_space.shape[0] self.action_out = Bernoulli(inputs_dim, action_dim, use_orthogonal, gain) elif action_space.__class__.__name__ == "MultiDiscrete": self.multi_discrete = True action_dims = action_space.high - action_space.low + 1 self.action_outs = [] for action_dim in action_dims: self.action_outs.append(Categorical(inputs_dim, action_dim, use_orthogonal, gain)) self.action_outs = nn.ModuleList(self.action_outs) else: # discrete + continous self.mixed_action = True continous_dim = action_space[0].shape[0] discrete_dim = action_space[1].n self.action_outs = nn.ModuleList([DiagGaussian(inputs_dim, continous_dim, use_orthogonal, gain, args), Categorical(inputs_dim, discrete_dim, use_orthogonal, gain)]) def forward(self, x, available_actions=None, deterministic=False): """ Compute actions and action logprobs from given input. :param x: (torch.Tensor) input to network. :param available_actions: (torch.Tensor) denotes which actions are available to agent (if None, all actions available) :param deterministic: (bool) whether to sample from action distribution or return the mode. :return actions: (torch.Tensor) actions to take. :return action_log_probs: (torch.Tensor) log probabilities of taken actions. """ if self.mixed_action : actions = [] action_log_probs = [] for action_out in self.action_outs: action_logit = action_out(x) action = action_logit.mode() if deterministic else action_logit.sample() action_log_prob = action_logit.log_probs(action) actions.append(action.float()) action_log_probs.append(action_log_prob) actions = torch.cat(actions, -1) action_log_probs = torch.sum(torch.cat(action_log_probs, -1), -1, keepdim=True) elif self.multi_discrete: actions = [] action_log_probs = [] for action_out in self.action_outs: action_logit = action_out(x) action = action_logit.mode() if deterministic else action_logit.sample() action_log_prob = action_logit.log_probs(action) actions.append(action) action_log_probs.append(action_log_prob) actions = torch.cat(actions, -1) action_log_probs = torch.cat(action_log_probs, -1) else: action_logits = self.action_out(x, available_actions) actions = action_logits.mode() if deterministic else action_logits.sample() action_log_probs = action_logits.log_probs(actions) return actions, action_log_probs def get_probs(self, x, available_actions=None): """ Compute action probabilities from inputs. 
:param x: (torch.Tensor) input to network. :param available_actions: (torch.Tensor) denotes which actions are available to agent (if None, all actions available) :return action_probs: (torch.Tensor) """ if self.mixed_action or self.multi_discrete: action_probs = [] for action_out in self.action_outs: action_logit = action_out(x) action_prob = action_logit.probs action_probs.append(action_prob) action_probs = torch.cat(action_probs, -1) else: action_logits = self.action_out(x, available_actions) action_probs = action_logits.probs return action_probs def evaluate_actions(self, x, action, available_actions=None, active_masks=None): """ Compute log probability and entropy of given actions. :param x: (torch.Tensor) input to network. :param action: (torch.Tensor) actions whose entropy and log probability to evaluate. :param available_actions: (torch.Tensor) denotes which actions are available to agent (if None, all actions available) :param active_masks: (torch.Tensor) denotes whether an agent is active or dead. :return action_log_probs: (torch.Tensor) log probabilities of the input actions. :return dist_entropy: (torch.Tensor) action distribution entropy for the given inputs. """ if self.mixed_action: a, b = action.split((2, 1), -1) b = b.long() action = [a, b] action_log_probs = [] dist_entropy = [] for action_out, act in zip(self.action_outs, action): action_logit = action_out(x) action_log_probs.append(action_logit.log_probs(act)) if active_masks is not None: if len(action_logit.entropy().shape) == len(active_masks.shape): dist_entropy.append((action_logit.entropy() * active_masks).sum()/active_masks.sum()) else: dist_entropy.append((action_logit.entropy() * active_masks.squeeze(-1)).sum()/active_masks.sum()) else: dist_entropy.append(action_logit.entropy().mean()) action_log_probs = torch.sum(torch.cat(action_log_probs, -1), -1, keepdim=True) dist_entropy = dist_entropy[0] / 2.0 + dist_entropy[1] / 0.98 #! dosen't make sense elif self.multi_discrete: action = torch.transpose(action, 0, 1) action_log_probs = [] dist_entropy = [] for action_out, act in zip(self.action_outs, action): action_logit = action_out(x) action_log_probs.append(action_logit.log_probs(act)) if active_masks is not None: dist_entropy.append((action_logit.entropy()*active_masks.squeeze(-1)).sum()/active_masks.sum()) else: dist_entropy.append(action_logit.entropy().mean()) action_log_probs = torch.cat(action_log_probs, -1) # ! could be wrong dist_entropy = torch.tensor(dist_entropy).mean() else: action_logits = self.action_out(x, available_actions) action_log_probs = action_logits.log_probs(action) if active_masks is not None: dist_entropy = (action_logits.entropy()*active_masks).sum()/active_masks.sum() # dist_entropy = (action_logits.entropy()*active_masks.squeeze(-1)).sum()/active_masks.sum() else: dist_entropy = action_logits.entropy().mean() return action_log_probs, dist_entropy def evaluate_actions_trpo(self, x, action, available_actions=None, active_masks=None): """ Compute log probability and entropy of given actions. :param x: (torch.Tensor) input to network. :param action: (torch.Tensor) actions whose entropy and log probability to evaluate. :param available_actions: (torch.Tensor) denotes which actions are available to agent (if None, all actions available) :param active_masks: (torch.Tensor) denotes whether an agent is active or dead. :return action_log_probs: (torch.Tensor) log probabilities of the input actions. :return dist_entropy: (torch.Tensor) action distribution entropy for the given inputs. 
""" if self.mixed_action: a, b = action.split((2, 1), -1) b = b.long() action = [a, b] action_log_probs = [] dist_entropy = [] for action_out, act in zip(self.action_outs, action): action_logit = action_out(x) action_log_probs.append(action_logit.log_probs(act)) if active_masks is not None: if len(action_logit.entropy().shape) == len(active_masks.shape): dist_entropy.append((action_logit.entropy() * active_masks).sum() / active_masks.sum()) else: dist_entropy.append( (action_logit.entropy() * active_masks.squeeze(-1)).sum() / active_masks.sum()) else: dist_entropy.append(action_logit.entropy().mean()) action_log_probs = torch.sum(torch.cat(action_log_probs, -1), -1, keepdim=True) dist_entropy = dist_entropy[0] / 2.0 + dist_entropy[1] / 0.98 # ! dosen't make sense elif self.multi_discrete: action = torch.transpose(action, 0, 1) action_log_probs = [] dist_entropy = [] for action_out, act in zip(self.action_outs, action): action_logit = action_out(x) action_log_probs.append(action_logit.log_probs(act)) if active_masks is not None: dist_entropy.append((action_logit.entropy() * active_masks.squeeze(-1)).sum() / active_masks.sum()) else: dist_entropy.append(action_logit.entropy().mean()) action_log_probs = torch.cat(action_log_probs, -1) # ! could be wrong dist_entropy = torch.tensor(dist_entropy).mean() else: action_logits = self.action_out(x, available_actions) # print("action_logits.mean-macppo-act.py", action_logits.mean) action_mu = action_logits.mean action_std = action_logits.stddev action_log_probs = action_logits.log_probs(action) # print("action_log_probs-act.py", action_log_probs) if active_masks is not None: dist_entropy = (action_logits.entropy() * active_masks).sum() / active_masks.sum() # dist_entropy = (action_logits.entropy()*active_masks.squeeze(-1)).sum()/active_masks.sum() else: dist_entropy = action_logits.entropy().mean() # print("action_logits-act.py", action_logits) # print("action_mu-act.py", action_mu) return action_log_probs, dist_entropy, action_mu, action_std ================================================ FILE: MACPO/macpo/algorithms/utils/cnn.py ================================================ import torch.nn as nn from .util import init """CNN Modules and utils.""" class Flatten(nn.Module): def forward(self, x): return x.view(x.size(0), -1) class CNNLayer(nn.Module): def __init__(self, obs_shape, hidden_size, use_orthogonal, use_ReLU, kernel_size=3, stride=1): super(CNNLayer, self).__init__() active_func = [nn.Tanh(), nn.ReLU()][use_ReLU] init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal] gain = nn.init.calculate_gain(['tanh', 'relu'][use_ReLU]) def init_(m): return init(m, init_method, lambda x: nn.init.constant_(x, 0), gain=gain) input_channel = obs_shape[0] input_width = obs_shape[1] input_height = obs_shape[2] self.cnn = nn.Sequential( init_(nn.Conv2d(in_channels=input_channel, out_channels=hidden_size // 2, kernel_size=kernel_size, stride=stride) ), active_func, Flatten(), init_(nn.Linear(hidden_size // 2 * (input_width - kernel_size + stride) * (input_height - kernel_size + stride), hidden_size) ), active_func, init_(nn.Linear(hidden_size, hidden_size)), active_func) def forward(self, x): x = x / 255.0 x = self.cnn(x) return x class CNNBase(nn.Module): def __init__(self, args, obs_shape): super(CNNBase, self).__init__() self._use_orthogonal = args.use_orthogonal self._use_ReLU = args.use_ReLU self.hidden_size = args.hidden_size self.cnn = CNNLayer(obs_shape, self.hidden_size, self._use_orthogonal, self._use_ReLU) def forward(self, 
x): x = self.cnn(x) return x ================================================ FILE: MACPO/macpo/algorithms/utils/distributions.py ================================================ import torch import torch.nn as nn from .util import init """ Modify standard PyTorch distributions to make them compatible with this codebase. """ # # Standardize distribution interfaces # # Categorical class FixedCategorical(torch.distributions.Categorical): def sample(self): return super().sample().unsqueeze(-1) def log_probs(self, actions): return ( super() .log_prob(actions.squeeze(-1)) .view(actions.size(0), -1) .sum(-1) .unsqueeze(-1) ) def mode(self): return self.probs.argmax(dim=-1, keepdim=True) # Normal class FixedNormal(torch.distributions.Normal): def log_probs(self, actions): return super().log_prob(actions) # return super().log_prob(actions).sum(-1, keepdim=True) def entrop(self): return super().entropy().sum(-1) def mode(self): return self.mean # Bernoulli class FixedBernoulli(torch.distributions.Bernoulli): def log_probs(self, actions): return super().log_prob(actions).view(actions.size(0), -1).sum(-1).unsqueeze(-1) def entropy(self): return super().entropy().sum(-1) def mode(self): return torch.gt(self.probs, 0.5).float() class Categorical(nn.Module): def __init__(self, num_inputs, num_outputs, use_orthogonal=True, gain=0.01): super(Categorical, self).__init__() init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal] def init_(m): return init(m, init_method, lambda x: nn.init.constant_(x, 0), gain) self.linear = init_(nn.Linear(num_inputs, num_outputs)) def forward(self, x, available_actions=None): x = self.linear(x) if available_actions is not None: x[available_actions == 0] = -1e10 return FixedCategorical(logits=x) # class DiagGaussian(nn.Module): # def __init__(self, num_inputs, num_outputs, use_orthogonal=True, gain=0.01): # super(DiagGaussian, self).__init__() # # init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal] # def init_(m): # return init(m, init_method, lambda x: nn.init.constant_(x, 0), gain) # # self.fc_mean = init_(nn.Linear(num_inputs, num_outputs)) # self.logstd = AddBias(torch.zeros(num_outputs)) # # def forward(self, x, available_actions=None): # action_mean = self.fc_mean(x) # # # An ugly hack for my KFAC implementation. # zeros = torch.zeros(action_mean.size()) # if x.is_cuda: # zeros = zeros.cuda() # # action_logstd = self.logstd(zeros) # return FixedNormal(action_mean, action_logstd.exp()) class DiagGaussian(nn.Module): def __init__(self, num_inputs, num_outputs, use_orthogonal=True, gain=0.01, args=None): super(DiagGaussian, self).__init__() init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal] def init_(m): return init(m, init_method, lambda x: nn.init.constant_(x, 0), gain) if args is not None: self.std_x_coef = args.std_x_coef self.std_y_coef = args.std_y_coef else: self.std_x_coef = 1.
self.std_y_coef = 0.5 self.fc_mean = init_(nn.Linear(num_inputs, num_outputs)) log_std = torch.ones(num_outputs) * self.std_x_coef self.log_std = torch.nn.Parameter(log_std) def forward(self, x, available_actions=None): action_mean = self.fc_mean(x) action_std = torch.sigmoid(self.log_std / self.std_x_coef) * self.std_y_coef # print("self.log_std", self.log_std) # print("action_mean", action_mean) # print("_action_std", action_std) # action_std = torch.zeros_like(_action_std) # print("action_std", action_std) # action_std = torch.where(torch.isnan(action_std), torch.full_like(action_std, 1e-8), action_std) # torch.where((action_std == torch.tensor(0)), torch.tensor(1e-8), action_std) return FixedNormal(action_mean, action_std) class Bernoulli(nn.Module): def __init__(self, num_inputs, num_outputs, use_orthogonal=True, gain=0.01): super(Bernoulli, self).__init__() init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal] def init_(m): return init(m, init_method, lambda x: nn.init.constant_(x, 0), gain) self.linear = init_(nn.Linear(num_inputs, num_outputs)) def forward(self, x): x = self.linear(x) return FixedBernoulli(logits=x) class AddBias(nn.Module): def __init__(self, bias): super(AddBias, self).__init__() self._bias = nn.Parameter(bias.unsqueeze(1)) def forward(self, x): if x.dim() == 2: bias = self._bias.t().view(1, -1) else: bias = self._bias.t().view(1, -1, 1, 1) return x + bias ================================================ FILE: MACPO/macpo/algorithms/utils/mlp.py ================================================ import torch.nn as nn from .util import init, get_clones """MLP modules.""" class MLPLayer(nn.Module): def __init__(self, input_dim, hidden_size, layer_N, use_orthogonal, use_ReLU): super(MLPLayer, self).__init__() self._layer_N = layer_N active_func = [nn.Tanh(), nn.ReLU()][use_ReLU] init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal] gain = nn.init.calculate_gain(['tanh', 'relu'][use_ReLU]) def init_(m): return init(m, init_method, lambda x: nn.init.constant_(x, 0), gain=gain) self.fc1 = nn.Sequential( init_(nn.Linear(input_dim, hidden_size)), active_func, nn.LayerNorm(hidden_size)) # self.fc_h = nn.Sequential(init_( # nn.Linear(hidden_size, hidden_size)), active_func, nn.LayerNorm(hidden_size)) # self.fc2 = get_clones(self.fc_h, self._layer_N) self.fc2 = nn.ModuleList([nn.Sequential(init_( nn.Linear(hidden_size, hidden_size)), active_func, nn.LayerNorm(hidden_size)) for i in range(self._layer_N)]) def forward(self, x): x = self.fc1(x) for i in range(self._layer_N): x = self.fc2[i](x) return x class MLPBase(nn.Module): def __init__(self, args, obs_shape, cat_self=True, attn_internal=False): super(MLPBase, self).__init__() self._use_feature_normalization = args.use_feature_normalization self._use_orthogonal = args.use_orthogonal self._use_ReLU = args.use_ReLU self._stacked_frames = args.stacked_frames self._layer_N = args.layer_N self.hidden_size = args.hidden_size obs_dim = obs_shape[0] if self._use_feature_normalization: self.feature_norm = nn.LayerNorm(obs_dim) self.mlp = MLPLayer(obs_dim, self.hidden_size, self._layer_N, self._use_orthogonal, self._use_ReLU) def forward(self, x): if self._use_feature_normalization: x = self.feature_norm(x) x = self.mlp(x) return x ================================================ FILE: MACPO/macpo/algorithms/utils/rnn.py ================================================ import torch import torch.nn as nn """RNN modules.""" class RNNLayer(nn.Module): def __init__(self, inputs_dim, 
outputs_dim, recurrent_N, use_orthogonal): super(RNNLayer, self).__init__() self._recurrent_N = recurrent_N self._use_orthogonal = use_orthogonal self.rnn = nn.GRU(inputs_dim, outputs_dim, num_layers=self._recurrent_N) for name, param in self.rnn.named_parameters(): if 'bias' in name: nn.init.constant_(param, 0) elif 'weight' in name: if self._use_orthogonal: nn.init.orthogonal_(param) else: nn.init.xavier_uniform_(param) self.norm = nn.LayerNorm(outputs_dim) def forward(self, x, hxs, masks): if x.size(0) == hxs.size(0): x, hxs = self.rnn(x.unsqueeze(0), (hxs * masks.repeat(1, self._recurrent_N).unsqueeze(-1)).transpose(0, 1).contiguous()) x = x.squeeze(0) hxs = hxs.transpose(0, 1) else: # x is a (T, N, -1) tensor that has been flatten to (T * N, -1) N = hxs.size(0) T = int(x.size(0) / N) # unflatten x = x.view(T, N, x.size(1)) # Same deal with masks masks = masks.view(T, N) # Let's figure out which steps in the sequence have a zero for any agent # We will always assume t=0 has a zero in it as that makes the logic cleaner has_zeros = ((masks[1:] == 0.0) .any(dim=-1) .nonzero() .squeeze() .cpu()) # +1 to correct the masks[1:] if has_zeros.dim() == 0: # Deal with scalar has_zeros = [has_zeros.item() + 1] else: has_zeros = (has_zeros + 1).numpy().tolist() # add t=0 and t=T to the list has_zeros = [0] + has_zeros + [T] hxs = hxs.transpose(0, 1) outputs = [] for i in range(len(has_zeros) - 1): # We can now process steps that don't have any zeros in masks together! # This is much faster start_idx = has_zeros[i] end_idx = has_zeros[i + 1] temp = (hxs * masks[start_idx].view(1, -1, 1).repeat(self._recurrent_N, 1, 1)).contiguous() rnn_scores, hxs = self.rnn(x[start_idx:end_idx], temp) outputs.append(rnn_scores) # assert len(outputs) == T # x is a (T, N, -1) tensor x = torch.cat(outputs, dim=0) # flatten x = x.reshape(T * N, -1) hxs = hxs.transpose(0, 1) x = self.norm(x) return x, hxs ================================================ FILE: MACPO/macpo/algorithms/utils/util.py ================================================ import copy import numpy as np import torch import torch.nn as nn def init(module, weight_init, bias_init, gain=1): weight_init(module.weight.data, gain=gain) bias_init(module.bias.data) return module def get_clones(module, N): return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) def check(input): output = torch.from_numpy(input) if type(input) == np.ndarray else input return output ================================================ FILE: MACPO/macpo/config.py ================================================ import argparse def get_config(): """ The configuration parser for common hyperparameters of all environment. Please reach each `scripts/train/_runner.py` file to find private hyperparameters only used in . Prepare parameters: --algorithm_name specifiy the algorithm, including `["rmappo", "mappo", "rmappg", "mappg", "trpo"]` --experiment_name an identifier to distinguish different experiment. --seed set seed for numpy and torch --cuda by default True, will use GPU to train; or else will use CPU; --cuda_deterministic by default, make sure random seed effective. if set, bypass such function. --n_training_threads number of training threads working in parallel. by default 1 --n_rollout_threads number of parallel envs for training rollout. by default 32 --n_eval_rollout_threads number of parallel envs for evaluating rollout. by default 1 --n_render_rollout_threads number of parallel envs for rendering, could only be set as 1 for some environments. 
--num_env_steps number of env steps to train (default: 10e6) --user_name [for wandb usage], to specify user's name for simply collecting training data. --use_wandb [for wandb usage], by default True, will log date to wandb server. or else will use tensorboard to log data. Env parameters: --env_name specify the name of environment --use_obs_instead_of_state [only for some env] by default False, will use global state; or else will use concatenated local obs. Replay Buffer parameters: --episode_length the max length of episode in the buffer. Network parameters: --share_policy by default True, all agents will share the same network; set to make training agents use different policies. --use_centralized_V by default True, use centralized training mode; or else will decentralized training mode. --stacked_frames Number of input frames which should be stack together. --hidden_size Dimension of hidden layers for actor/critic networks --layer_N Number of layers for actor/critic networks --use_ReLU by default True, will use ReLU. or else will use Tanh. --use_popart by default True, use running mean and std to normalize rewards. --use_feature_normalization by default True, apply layernorm to normalize inputs. --use_orthogonal by default True, use Orthogonal initialization for weights and 0 initialization for biases. or else, will use xavier uniform inilialization. --gain by default 0.01, use the gain # of last action layer --use_naive_recurrent_policy by default False, use the whole trajectory to calculate hidden states. --use_recurrent_policy by default, use Recurrent Policy. If set, do not use. --recurrent_N The number of recurrent layers ( default 1). --data_chunk_length Time length of chunks used to train a recurrent_policy, default 10. Optimizer parameters: --lr learning rate parameter, (default: 5e-4, fixed). --critic_lr learning rate of critic (default: 5e-4, fixed) --opti_eps RMSprop optimizer epsilon (default: 1e-5) --weight_decay coefficience of weight decay (default: 0) PPO parameters: --ppo_epoch number of ppo epochs (default: 15) --use_clipped_value_loss by default, clip loss value. If set, do not clip loss value. --clip_param ppo clip parameter (default: 0.2) --num_mini_batch number of batches for ppo (default: 1) --entropy_coef entropy term coefficient (default: 0.01) --use_max_grad_norm by default, use max norm of gradients. If set, do not use. --max_grad_norm max norm of gradients (default: 0.5) --use_gae by default, use generalized advantage estimation. If set, do not use gae. --gamma discount factor for rewards (default: 0.99) --gae_lambda gae lambda parameter (default: 0.95) --use_proper_time_limits by default, the return value does consider limits of time. If set, compute returns with considering time limits factor. --use_huber_loss by default, use huber loss. If set, do not use huber loss. --use_value_active_masks by default True, whether to mask useless data in value loss. --huber_delta coefficient of huber loss. PPG parameters: --aux_epoch number of auxiliary epochs. (default: 4) --clone_coef clone term coefficient (default: 0.01) Run parameters: --use_linear_lr_decay by default, do not apply linear decay to learning rate. If set, use a linear schedule on the learning rate Save & Log parameters: --save_interval time duration between contiunous twice models saving. --log_interval time duration between contiunous twice log printing. Eval parameters: --use_eval by default, do not start evaluation. If set`, start evaluation alongside with training. 
--eval_interval time duration between contiunous twice evaluation progress. --eval_episodes number of episodes of a single evaluation. Render parameters: --save_gifs by default, do not save render video. If set, save video. --use_render by default, do not render the env during training. If set, start render. Note: something, the environment has internal render process which is not controlled by this hyperparam. --render_episodes the number of episodes to render a given env --ifi the play interval of each rendered image in saved video. Pretrained parameters: --model_dir by default None. set the path to pretrained model. """ parser = argparse.ArgumentParser( description='macpo', formatter_class=argparse.RawDescriptionHelpFormatter) # prepare parameters parser.add_argument("--algorithm_name", type=str, default=' ', choices=["macpo"]) parser.add_argument("--experiment_name", type=str, default="check", help="an identifier to distinguish different experiment.") parser.add_argument("--seed", type=int, default=1, help="Random seed for numpy/torch") parser.add_argument("--cuda", action='store_false', default=False, help="by default True, will use GPU to train; or else will use CPU;") parser.add_argument("--cuda_deterministic", action='store_false', default=True, help="by default, make sure random seed effective. if set, bypass such function.") parser.add_argument("--n_training_threads", type=int, default=1, help="Number of torch threads for training") parser.add_argument("--n_rollout_threads", type=int, default=32, help="Number of parallel envs for training rollouts") parser.add_argument("--n_eval_rollout_threads", type=int, default=1, help="Number of parallel envs for evaluating rollouts") parser.add_argument("--n_render_rollout_threads", type=int, default=1, help="Number of parallel envs for rendering rollouts") parser.add_argument("--num_env_steps", type=int, default=10e6, help='Number of environment steps to train (default: 10e6)') parser.add_argument("--user_name", type=str, default='marl',help="[for wandb usage], to specify user's name for simply collecting training data.") parser.add_argument("--use_wandb", action='store_false', default=False, help="[for wandb usage], by default True, will log date to wandb server. 
or else will use tensorboard to log data.") # env parameters parser.add_argument("--env_name", type=str, default='StarCraft2', help="specify the name of environment") parser.add_argument("--use_obs_instead_of_state", action='store_true', default=False, help="Whether to use global state or concatenated obs") # replay buffer parameters parser.add_argument("--episode_length", type=int, default=200, help="Max length for any episode") # network parameters parser.add_argument("--share_policy", action='store_false', default=True, help='Whether agent share the same policy') parser.add_argument("--use_centralized_V", action='store_false', default=True, help="Whether to use centralized V function") parser.add_argument("--stacked_frames", type=int, default=1, help="Dimension of hidden layers for actor/critic networks") parser.add_argument("--use_stacked_frames", action='store_true', default=False, help="Whether to use stacked_frames") parser.add_argument("--hidden_size", type=int, default=64, help="Dimension of hidden layers for actor/critic networks") parser.add_argument("--layer_N", type=int, default=1, help="Number of layers for actor/critic networks") parser.add_argument("--use_ReLU", action='store_false', default=True, help="Whether to use ReLU") parser.add_argument("--use_popart", action='store_false', default=True, help="by default True, use running mean and std to normalize rewards.") parser.add_argument("--use_valuenorm", action='store_false', default=True, help="by default True, use running mean and std to normalize rewards.") parser.add_argument("--use_feature_normalization", action='store_false', default=True, help="Whether to apply layernorm to the inputs") parser.add_argument("--use_orthogonal", action='store_false', default=True, help="Whether to use Orthogonal initialization for weights and 0 initialization for biases") parser.add_argument("--gain", type=float, default=0.01, help="The gain # of last action layer") # recurrent parameters parser.add_argument("--use_naive_recurrent_policy", action='store_true', default=False, help='Whether to use a naive recurrent policy') parser.add_argument("--use_recurrent_policy", action='store_true', default=False, help='use a recurrent policy') parser.add_argument("--recurrent_N", type=int, default=1, help="The number of recurrent layers.") parser.add_argument("--data_chunk_length", type=int, default=10, help="Time length of chunks used to train a recurrent_policy") # optimizer parameters parser.add_argument("--lr", type=float, default=5e-4, help='learning rate (default: 5e-4)') parser.add_argument("--critic_lr", type=float, default=5e-4, help='critic learning rate (default: 5e-4)') parser.add_argument("--opti_eps", type=float, default=1e-5, help='RMSprop optimizer epsilon (default: 1e-5)') parser.add_argument("--weight_decay", type=float, default=0) parser.add_argument("--std_x_coef", type=float, default=1) parser.add_argument("--std_y_coef", type=float, default=0.5) # trpo parameters parser.add_argument("--kl_threshold", type=float, default=0.01, help='the threshold of kl-divergence (default: 0.01)') parser.add_argument("--safety_bound", type=float, default=0.1, help='safety') parser.add_argument("--ls_step", type=int, default=10, help='number of line search (default: 10)') parser.add_argument("--accept_ratio", type=float, default=0.5, help='accept ratio of loss improve (default: 0.5)') parser.add_argument("--EPS", type=float, default=1e-8, help='hyper parameter, close to zero') # ppo parameters parser.add_argument("--ppo_epoch", type=int, 
default=15, help='number of ppo epochs (default: 15)') parser.add_argument("--use_clipped_value_loss", action='store_false', default=True, help="by default, clip loss value. If set, do not clip loss value.") parser.add_argument("--clip_param", type=float, default=0.2, help='ppo clip parameter (default: 0.2)') parser.add_argument("--num_mini_batch", type=int, default=1, help='number of batches for ppo (default: 1)') parser.add_argument("--entropy_coef", type=float, default=0.01, help='entropy term coefficient (default: 0.01)') # todo: lagrangian_coef is the lagrangian coefficient for mappo_lagrangian parser.add_argument("--lagrangian_coef", type=float, default=0.01, help='entropy term coefficient (default: 0.01)') parser.add_argument("--value_loss_coef", type=float, default=1, help='value loss coefficient (default: 0.5)') parser.add_argument("--use_max_grad_norm", action='store_false', default=True, help="by default, use max norm of gradients. If set, do not use.") parser.add_argument("--max_grad_norm", type=float, default=10.0, help='max norm of gradients (default: 0.5)') parser.add_argument("--use_gae", action='store_false', default=True, help='use generalized advantage estimation') parser.add_argument("--gamma", type=float, default=0.99, help='discount factor for rewards (default: 0.99)') parser.add_argument("--safety_gamma", type=float, default=0.2, help='discount factor for rewards (default: 0.2)') parser.add_argument("--gae_lambda", type=float, default=0.95, help='gae lambda parameter (default: 0.95)') parser.add_argument("--use_proper_time_limits", action='store_true', default=False, help='compute returns taking into account time limits') parser.add_argument("--use_huber_loss", action='store_false', default=True, help="by default, use huber loss. If set, do not use huber loss.") parser.add_argument("--use_value_active_masks", action='store_false', default=True, help="by default True, whether to mask useless data in value loss.") parser.add_argument("--use_policy_active_masks", action='store_false', default=True, help="by default True, whether to mask useless data in policy loss.") parser.add_argument("--huber_delta", type=float, default=10.0, help=" coefficience of huber loss.") # run parameters parser.add_argument("--use_linear_lr_decay", action='store_true', default=False, help='use a linear schedule on the learning rate') # save parameters parser.add_argument("--save_interval", type=int, default=1, help="time duration between contiunous twice models saving.") # log parameters parser.add_argument("--log_interval", type=int, default=5, help="time duration between contiunous twice log printing.") # eval parameters parser.add_argument("--use_eval", action='store_true', default=False, help="by default, do not start evaluation. If set`, start evaluation alongside with training.") parser.add_argument("--eval_interval", type=int, default=25, help="time duration between contiunous twice evaluation progress.") parser.add_argument("--eval_episodes", type=int, default=32, help="number of episodes of a single evaluation.") # render parameters parser.add_argument("--save_gifs", action='store_true', default=False, help="by default, do not save render video. If set, save video.") parser.add_argument("--use_render", action='store_true', default=False, help="by default, do not render the env during training. If set, start render. 
Note: something, the environment has internal render process which is not controlled by this hyperparam.") parser.add_argument("--render_episodes", type=int, default=5, help="the number of episodes to render a given env") parser.add_argument("--ifi", type=float, default=0.1, help="the play interval of each rendered image in saved video.") # pretrained parameters parser.add_argument("--model_dir", type=str, default=None, help="by default None. set the path to pretrained model.") # safe parameters fraction parser.add_argument("--safty_bound", type=float, default=0.1, help=" ") parser.add_argument("--line_search_fraction", type=float, default=0.5, help="line search step size") parser.add_argument("--g_step_dir_coef", type=float, default=0.1, help="rescale g") parser.add_argument("--b_step_dir_coef", type=float, default=0.1, help="rescale b") parser.add_argument("--fraction_coef", type=float, default=0.1, help="the coef of line search step size") return parser ================================================ FILE: MACPO/macpo/envs/__init__.py ================================================ import socket from absl import flags FLAGS = flags.FLAGS FLAGS(['train_sc.py']) ================================================ FILE: MACPO/macpo/envs/env_wrappers.py ================================================ """ Modified from OpenAI Baselines code to work with multi-agent envs """ import numpy as np import torch from multiprocessing import Process, Pipe from abc import ABC, abstractmethod from macpo.utils.util import tile_images class CloudpickleWrapper(object): """ Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle) """ def __init__(self, x): self.x = x def __getstate__(self): import cloudpickle return cloudpickle.dumps(self.x) def __setstate__(self, ob): import pickle self.x = pickle.loads(ob) class ShareVecEnv(ABC): """ An abstract asynchronous, vectorized environment. Used to batch data from multiple copies of an environment, so that each observation becomes an batch of observations, and expected action is a batch of actions to be applied per-environment. """ closed = False viewer = None metadata = { 'render.modes': ['human', 'rgb_array'] } def __init__(self, num_envs, observation_space, share_observation_space, action_space): self.num_envs = num_envs self.observation_space = observation_space self.share_observation_space = share_observation_space self.action_space = action_space @abstractmethod def reset(self): """ Reset all the environments and return an array of observations, or a dict of observation arrays. If step_async is still doing work, that work will be cancelled and step_wait() should not be called until step_async() is invoked again. """ pass @abstractmethod def step_async(self, actions): """ Tell all the environments to start taking a step with the given actions. Call step_wait() to get the results of the step. You should not call this if a step_async run is already pending. """ pass @abstractmethod def step_wait(self): """ Wait for the step taken with step_async(). Returns (obs, rews, cos, dones, infos): - obs: an array of observations, or a dict of arrays of observations. - rews: an array of rewards - cos: an array of costs - dones: an array of "episode done" booleans - infos: a sequence of info objects """ pass def close_extras(self): """ Clean up the extra resources, beyond what's in this base class. Only runs when not self.closed. 
""" pass def close(self): if self.closed: return if self.viewer is not None: self.viewer.close() self.close_extras() self.closed = True def step(self, actions): """ Step the environments synchronously. This is available for backwards compatibility. """ self.step_async(actions) return self.step_wait() def render(self, mode='human'): imgs = self.get_images() bigimg = tile_images(imgs) if mode == 'human': self.get_viewer().imshow(bigimg) return self.get_viewer().isopen elif mode == 'rgb_array': return bigimg else: raise NotImplementedError def get_images(self): """ Return RGB images from each environment """ raise NotImplementedError @property def unwrapped(self): if isinstance(self, VecEnvWrapper): return self.venv.unwrapped else: return self def get_viewer(self): if self.viewer is None: from gym.envs.classic_control import rendering self.viewer = rendering.SimpleImageViewer() return self.viewer def worker(remote, parent_remote, env_fn_wrapper): parent_remote.close() env = env_fn_wrapper.x() while True: cmd, data = remote.recv() if cmd == 'step': ob, reward, done, info = env.step(data) if 'bool' in done.__class__.__name__: if done: ob = env.reset() else: if np.all(done): ob = env.reset() remote.send((ob, reward, info["cost"], done, info)) elif cmd == 'reset': ob = env.reset() remote.send((ob)) elif cmd == 'render': if data == "rgb_array": fr = env.render(mode=data) remote.send(fr) elif data == "human": env.render(mode=data) elif cmd == 'reset_task': ob = env.reset_task() remote.send(ob) elif cmd == 'close': env.close() remote.close() break elif cmd == 'get_spaces': remote.send((env.observation_space, env.share_observation_space, env.action_space)) else: raise NotImplementedError class GuardSubprocVecEnv(ShareVecEnv): def __init__(self, env_fns, spaces=None): """ envs: list of gym environments to run in subprocesses """ self.waiting = False self.closed = False nenvs = len(env_fns) self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)]) self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn))) for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)] for p in self.ps: p.daemon = False # could cause zombie process p.start() for remote in self.work_remotes: remote.close() self.remotes[0].send(('get_spaces', None)) observation_space, share_observation_space, action_space = self.remotes[0].recv() ShareVecEnv.__init__(self, len(env_fns), observation_space, share_observation_space, action_space) def step_async(self, actions): for remote, action in zip(self.remotes, actions): remote.send(('step', action)) self.waiting = True def step_wait(self): results = [remote.recv() for remote in self.remotes] self.waiting = False obs, rews, cos, dones, infos = zip(*results) return np.stack(obs), np.stack(rews), np.stack(cos), np.stack(dones), infos def reset(self): for remote in self.remotes: remote.send(('reset', None)) obs = [remote.recv() for remote in self.remotes] return np.stack(obs) def reset_task(self): for remote in self.remotes: remote.send(('reset_task', None)) return np.stack([remote.recv() for remote in self.remotes]) def close(self): if self.closed: return if self.waiting: for remote in self.remotes: remote.recv() for remote in self.remotes: remote.send(('close', None)) for p in self.ps: p.join() self.closed = True class SubprocVecEnv(ShareVecEnv): def __init__(self, env_fns, spaces=None): """ envs: list of gym environments to run in subprocesses """ self.waiting = False self.closed = False nenvs = len(env_fns) self.remotes, 
self.work_remotes = zip(*[Pipe() for _ in range(nenvs)]) self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn))) for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)] for p in self.ps: p.daemon = True # if the main process crashes, we should not cause things to hang p.start() for remote in self.work_remotes: remote.close() self.remotes[0].send(('get_spaces', None)) observation_space, share_observation_space, action_space = self.remotes[0].recv() ShareVecEnv.__init__(self, len(env_fns), observation_space, share_observation_space, action_space) def step_async(self, actions): for remote, action in zip(self.remotes, actions): remote.send(('step', action)) self.waiting = True def step_wait(self): results = [remote.recv() for remote in self.remotes] self.waiting = False obs, rews, cos, dones, infos = zip(*results) return np.stack(obs), np.stack(rews), np.stack(cos), np.stack(dones), infos def reset(self): for remote in self.remotes: remote.send(('reset', None)) obs = [remote.recv() for remote in self.remotes] return np.stack(obs) def reset_task(self): for remote in self.remotes: remote.send(('reset_task', None)) return np.stack([remote.recv() for remote in self.remotes]) def close(self): if self.closed: return if self.waiting: for remote in self.remotes: remote.recv() for remote in self.remotes: remote.send(('close', None)) for p in self.ps: p.join() self.closed = True def render(self, mode="rgb_array"): for remote in self.remotes: remote.send(('render', mode)) if mode == "rgb_array": frame = [remote.recv() for remote in self.remotes] return np.stack(frame) def shareworker(remote, parent_remote, env_fn_wrapper): parent_remote.close() env = env_fn_wrapper.x() while True: cmd, data = remote.recv() if cmd == 'step': ob, s_ob, reward, done, info, available_actions = env.step(data) if 'bool' in done.__class__.__name__: if done: ob, s_ob, available_actions = env.reset() else: if np.all(done): ob, s_ob, available_actions = env.reset() remote.send((ob, s_ob, reward, done, info, available_actions)) elif cmd == 'reset': ob, s_ob, available_actions = env.reset() remote.send((ob, s_ob, available_actions)) elif cmd == 'reset_task': ob = env.reset_task() remote.send(ob) elif cmd == 'render': if data == "rgb_array": fr = env.render(mode=data) remote.send(fr) elif data == "human": env.render(mode=data) elif cmd == 'close': env.close() remote.close() break elif cmd == 'get_spaces': remote.send( (env.observation_space, env.share_observation_space, env.action_space)) elif cmd == 'render_vulnerability': fr = env.render_vulnerability(data) remote.send((fr)) elif cmd == 'get_num_agents': remote.send((env.n_agents)) else: raise NotImplementedError class ShareSubprocVecEnv(ShareVecEnv): def __init__(self, env_fns, spaces=None): """ envs: list of gym environments to run in subprocesses """ self.waiting = False self.closed = False nenvs = len(env_fns) self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)]) self.ps = [Process(target=shareworker, args=(work_remote, remote, CloudpickleWrapper(env_fn))) for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)] for p in self.ps: p.daemon = True # if the main process crashes, we should not cause things to hang p.start() for remote in self.work_remotes: remote.close() self.remotes[0].send(('get_num_agents', None)) self.n_agents = self.remotes[0].recv() self.remotes[0].send(('get_spaces', None)) observation_space, share_observation_space, action_space = self.remotes[0].recv( 
) # print("wrapper:", share_observation_space) ShareVecEnv.__init__(self, len(env_fns), observation_space, share_observation_space, action_space) def step_async(self, actions): for remote, action in zip(self.remotes, actions): remote.send(('step', action)) self.waiting = True def step_wait(self): results = [remote.recv() for remote in self.remotes] self.waiting = False obs, share_obs, rews, dones, infos, available_actions = zip(*results) cost_x= np.array([item[0]['cost'] for item in infos]) # print("=====cost_x=====: ", cost_x.sum()) # print("=====np.stack(dones)=====: ", np.stack(dones)) return np.stack(obs), np.stack(share_obs), np.stack(rews), np.stack(cost_x), np.stack(dones), infos, np.stack(available_actions) def reset(self): for remote in self.remotes: remote.send(('reset', None)) results = [remote.recv() for remote in self.remotes] obs, share_obs, available_actions = zip(*results) return np.stack(obs), np.stack(share_obs), np.stack(available_actions) def reset_task(self): for remote in self.remotes: remote.send(('reset_task', None)) return np.stack([remote.recv() for remote in self.remotes]) def close(self): if self.closed: return if self.waiting: for remote in self.remotes: remote.recv() for remote in self.remotes: remote.send(('close', None)) for p in self.ps: p.join() self.closed = True def choosesimpleworker(remote, parent_remote, env_fn_wrapper): parent_remote.close() env = env_fn_wrapper.x() while True: cmd, data = remote.recv() if cmd == 'step': ob, reward, done, info = env.step(data) remote.send((ob, reward, info["cost"], done, info)) elif cmd == 'reset': ob = env.reset(data) remote.send((ob)) elif cmd == 'reset_task': ob = env.reset_task() remote.send(ob) elif cmd == 'close': env.close() remote.close() break elif cmd == 'render': if data == "rgb_array": fr = env.render(mode=data) remote.send(fr) elif data == "human": env.render(mode=data) elif cmd == 'get_spaces': remote.send( (env.observation_space, env.share_observation_space, env.action_space)) else: raise NotImplementedError class ChooseSimpleSubprocVecEnv(ShareVecEnv): def __init__(self, env_fns, spaces=None): """ envs: list of gym environments to run in subprocesses """ self.waiting = False self.closed = False nenvs = len(env_fns) self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)]) self.ps = [Process(target=choosesimpleworker, args=(work_remote, remote, CloudpickleWrapper(env_fn))) for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)] for p in self.ps: p.daemon = True # if the main process crashes, we should not cause things to hang p.start() for remote in self.work_remotes: remote.close() self.remotes[0].send(('get_spaces', None)) observation_space, share_observation_space, action_space = self.remotes[0].recv() ShareVecEnv.__init__(self, len(env_fns), observation_space, share_observation_space, action_space) def step_async(self, actions): for remote, action in zip(self.remotes, actions): remote.send(('step', action)) self.waiting = True def step_wait(self): results = [remote.recv() for remote in self.remotes] self.waiting = False obs, rews, cos, dones, infos = zip(*results) return np.stack(obs), np.stack(rews), np.stack(cos), np.stack(dones), infos def reset(self, reset_choose): for remote, choose in zip(self.remotes, reset_choose): remote.send(('reset', choose)) obs = [remote.recv() for remote in self.remotes] return np.stack(obs) def render(self, mode="rgb_array"): for remote in self.remotes: remote.send(('render', mode)) if mode == "rgb_array": frame = 
[remote.recv() for remote in self.remotes] return np.stack(frame) def reset_task(self): for remote in self.remotes: remote.send(('reset_task', None)) return np.stack([remote.recv() for remote in self.remotes]) def close(self): if self.closed: return if self.waiting: for remote in self.remotes: remote.recv() for remote in self.remotes: remote.send(('close', None)) for p in self.ps: p.join() self.closed = True def chooseworker(remote, parent_remote, env_fn_wrapper): parent_remote.close() env = env_fn_wrapper.x() while True: cmd, data = remote.recv() if cmd == 'step': ob, s_ob, reward, done, info, available_actions = env.step(data) remote.send((ob, s_ob, reward, info["cost"], done, info, available_actions)) elif cmd == 'reset': ob, s_ob, available_actions = env.reset(data) remote.send((ob, s_ob, available_actions)) elif cmd == 'reset_task': ob = env.reset_task() remote.send(ob) elif cmd == 'close': env.close() remote.close() break elif cmd == 'render': remote.send(env.render(mode='rgb_array')) elif cmd == 'get_spaces': remote.send( (env.observation_space, env.share_observation_space, env.action_space)) else: raise NotImplementedError class ChooseSubprocVecEnv(ShareVecEnv): def __init__(self, env_fns, spaces=None): """ envs: list of gym environments to run in subprocesses """ self.waiting = False self.closed = False nenvs = len(env_fns) self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)]) self.ps = [Process(target=chooseworker, args=(work_remote, remote, CloudpickleWrapper(env_fn))) for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)] for p in self.ps: p.daemon = True # if the main process crashes, we should not cause things to hang p.start() for remote in self.work_remotes: remote.close() self.remotes[0].send(('get_spaces', None)) observation_space, share_observation_space, action_space = self.remotes[0].recv( ) ShareVecEnv.__init__(self, len(env_fns), observation_space, share_observation_space, action_space) def step_async(self, actions): for remote, action in zip(self.remotes, actions): remote.send(('step', action)) self.waiting = True def step_wait(self): results = [remote.recv() for remote in self.remotes] self.waiting = False obs, share_obs, rews, cos, dones, infos, available_actions = zip(*results) return np.stack(obs), np.stack(share_obs), np.stack(rews), np.stack(cos), np.stack(dones), infos, np.stack(available_actions) def reset(self, reset_choose): for remote, choose in zip(self.remotes, reset_choose): remote.send(('reset', choose)) results = [remote.recv() for remote in self.remotes] obs, share_obs, available_actions = zip(*results) return np.stack(obs), np.stack(share_obs), np.stack(available_actions) def reset_task(self): for remote in self.remotes: remote.send(('reset_task', None)) return np.stack([remote.recv() for remote in self.remotes]) def close(self): if self.closed: return if self.waiting: for remote in self.remotes: remote.recv() for remote in self.remotes: remote.send(('close', None)) for p in self.ps: p.join() self.closed = True def chooseguardworker(remote, parent_remote, env_fn_wrapper): parent_remote.close() env = env_fn_wrapper.x() while True: cmd, data = remote.recv() if cmd == 'step': ob, reward, done, info = env.step(data) remote.send((ob, reward, info["cost"], done, info)) elif cmd == 'reset': ob = env.reset(data) remote.send((ob)) elif cmd == 'reset_task': ob = env.reset_task() remote.send(ob) elif cmd == 'close': env.close() remote.close() break elif cmd == 'get_spaces': remote.send( (env.observation_space, 
env.share_observation_space, env.action_space)) else: raise NotImplementedError class ChooseGuardSubprocVecEnv(ShareVecEnv): def __init__(self, env_fns, spaces=None): """ envs: list of gym environments to run in subprocesses """ self.waiting = False self.closed = False nenvs = len(env_fns) self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)]) self.ps = [Process(target=chooseguardworker, args=(work_remote, remote, CloudpickleWrapper(env_fn))) for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)] for p in self.ps: p.daemon = False # if the main process crashes, we should not cause things to hang p.start() for remote in self.work_remotes: remote.close() self.remotes[0].send(('get_spaces', None)) observation_space, share_observation_space, action_space = self.remotes[0].recv( ) ShareVecEnv.__init__(self, len(env_fns), observation_space, share_observation_space, action_space) def step_async(self, actions): for remote, action in zip(self.remotes, actions): remote.send(('step', action)) self.waiting = True def step_wait(self): results = [remote.recv() for remote in self.remotes] self.waiting = False obs, rews, cos, dones, infos = zip(*results) return np.stack(obs), np.stack(rews), np.stack(cos), np.stack(dones), infos def reset(self, reset_choose): for remote, choose in zip(self.remotes, reset_choose): remote.send(('reset', choose)) obs = [remote.recv() for remote in self.remotes] return np.stack(obs) def reset_task(self): for remote in self.remotes: remote.send(('reset_task', None)) return np.stack([remote.recv() for remote in self.remotes]) def close(self): if self.closed: return if self.waiting: for remote in self.remotes: remote.recv() for remote in self.remotes: remote.send(('close', None)) for p in self.ps: p.join() self.closed = True # single env class DummyVecEnv(ShareVecEnv): def __init__(self, env_fns): self.envs = [fn() for fn in env_fns] env = self.envs[0] ShareVecEnv.__init__(self, len( env_fns), env.observation_space, env.share_observation_space, env.action_space) self.actions = None def step_async(self, actions): self.actions = actions def step_wait(self): results = [env.step(a) for (a, env) in zip(self.actions, self.envs)] obs, rews, cos, dones, infos = map(np.array, zip(*results)) for (i, done) in enumerate(dones): if 'bool' in done.__class__.__name__: if done: obs[i] = self.envs[i].reset() else: if np.all(done): obs[i] = self.envs[i].reset() self.actions = None return obs, rews, cos, dones, infos def reset(self): obs = [env.reset() for env in self.envs] return np.array(obs) def close(self): for env in self.envs: env.close() def render(self, mode="human"): if mode == "rgb_array": return np.array([env.render(mode=mode) for env in self.envs]) elif mode == "human": for env in self.envs: env.render(mode=mode) else: raise NotImplementedError class ShareDummyVecEnv(ShareVecEnv): def __init__(self, env_fns): self.envs = [fn() for fn in env_fns] env = self.envs[0] ShareVecEnv.__init__(self, len( env_fns), env.observation_space, env.share_observation_space, env.action_space) self.actions = None def step_async(self, actions): self.actions = actions def step_wait(self): results = [env.step(a) for (a, env) in zip(self.actions, self.envs)] obs, share_obs, rews, cos, dones, infos, available_actions = map( np.array, zip(*results)) for (i, done) in enumerate(dones): if 'bool' in done.__class__.__name__: if done: obs[i], share_obs[i], available_actions[i] = self.envs[i].reset() else: if np.all(done): obs[i], share_obs[i], available_actions[i] = 
self.envs[i].reset() self.actions = None return obs, share_obs, rews, cos, dones, infos, available_actions def reset(self): results = [env.reset() for env in self.envs] obs, share_obs, available_actions = map(np.array, zip(*results)) return obs, share_obs, available_actions def close(self): for env in self.envs: env.close() def render(self, mode="human"): if mode == "rgb_array": return np.array([env.render(mode=mode) for env in self.envs]) elif mode == "human": for env in self.envs: env.render(mode=mode) else: raise NotImplementedError class ChooseDummyVecEnv(ShareVecEnv): def __init__(self, env_fns): self.envs = [fn() for fn in env_fns] env = self.envs[0] ShareVecEnv.__init__(self, len( env_fns), env.observation_space, env.share_observation_space, env.action_space) self.actions = None def step_async(self, actions): self.actions = actions def step_wait(self): results = [env.step(a) for (a, env) in zip(self.actions, self.envs)] obs, share_obs, rews, cos, dones, infos, available_actions = map( np.array, zip(*results)) self.actions = None return obs, share_obs, rews, cos, dones, infos, available_actions def reset(self, reset_choose): results = [env.reset(choose) for (env, choose) in zip(self.envs, reset_choose)] obs, share_obs, available_actions = map(np.array, zip(*results)) return obs, share_obs, available_actions def close(self): for env in self.envs: env.close() def render(self, mode="human"): if mode == "rgb_array": return np.array([env.render(mode=mode) for env in self.envs]) elif mode == "human": for env in self.envs: env.render(mode=mode) else: raise NotImplementedError class ChooseSimpleDummyVecEnv(ShareVecEnv): def __init__(self, env_fns): self.envs = [fn() for fn in env_fns] env = self.envs[0] ShareVecEnv.__init__(self, len( env_fns), env.observation_space, env.share_observation_space, env.action_space) self.actions = None def step_async(self, actions): self.actions = actions def step_wait(self): results = [env.step(a) for (a, env) in zip(self.actions, self.envs)] obs, rews, cos, dones, infos = map(np.array, zip(*results)) self.actions = None return obs, rews, cos, dones, infos def reset(self, reset_choose): obs = [env.reset(choose) for (env, choose) in zip(self.envs, reset_choose)] return np.array(obs) def close(self): for env in self.envs: env.close() def render(self, mode="human"): if mode == "rgb_array": return np.array([env.render(mode=mode) for env in self.envs]) elif mode == "human": for env in self.envs: env.render(mode=mode) else: raise NotImplementedError ================================================ FILE: MACPO/macpo/envs/safety_ma_mujoco/MUJOCO_LOG.TXT ================================================ Sun Aug 29 11:16:41 2021 ERROR: Expired activation key ================================================ FILE: MACPO/macpo/envs/safety_ma_mujoco/README.md ================================================ #### Safety Multi-agent Mujoco ## 1. 
Safe Many Agent Ant
Following Zanger et al. [1], the reward function is the same as in the standard Ant-v2 environment: it comprises the torso velocity in the global x-direction, a negative control reward on exerted torque, a negative contact reward, and a constant positive reward for survival, which results in ```python xposafter = self.get_body_com("torso_0")[0] forward_reward = (xposafter - xposbefore)/self.dt ctrl_cost = .5 * np.square(a).sum() contact_cost = 0.5 * 1e-3 * np.sum(np.square(np.clip(self.sim.data.cfrc_ext, -1, 1))) survive_reward = 1.0 reward = forward_reward - ctrl_cost - contact_cost + survive_reward ``` The cost is given by ```python yposafter = self.get_body_com("torso_0")[1] ywall = np.array([-5, 5]) if xposafter < 20: y_walldist = yposafter - xposafter * np.tan(30 / 360 * 2 * np.pi) + ywall elif xposafter>20 and xposafter<60: y_walldist = yposafter + (xposafter-40)*np.tan(30/360*2*np.pi) - ywall elif xposafter>60 and xposafter<100: y_walldist = yposafter - (xposafter-80)*np.tan(30/360*2*np.pi) + ywall else: y_walldist = yposafter - 20*np.tan(30/360*2*np.pi) + ywall obj_cost = (abs(y_walldist) < 1.8).any() * 1.0 body_quat = self.data.get_body_xquat('torso_0') z_rot = 1-2*(body_quat[1]**2+body_quat[2]**2) ### normally xx-rotation, not sure what axes mujoco uses state = self.state_vector() notdone = np.isfinite(state).all() \ and state[2] >= 0.2 and state[2] <= 1.0\ and z_rot>=-0.7 #ADDED done = not notdone done_cost = done * 1.0 cost = np.clip(obj_cost + done_cost, 0, 1) ``` [1] Zanger, Moritz A., Karam Daaboul, and J. Marius Zöllner. 2021. “Safe Continuous Control with Constrained Model-Based Policy Optimization.” arXiv [cs.LG]. arXiv. http://arxiv.org/abs/2104.06922. ================================================ FILE: MACPO/macpo/envs/safety_ma_mujoco/__init__.py ================================================ ================================================ FILE: MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/__init__.py ================================================ from .mujoco_multi import MujocoMulti from .coupled_half_cheetah import CoupledHalfCheetah from .manyagent_swimmer import ManyAgentSwimmerEnv from .manyagent_ant import ManyAgentAntEnv ================================================ FILE: MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/ant.py ================================================ import numpy as np # from mujoco_safety_gym.envs import mujoco_env from macpo.envs.safety_ma_mujoco.safety_multiagent_mujoco import mujoco_env from gym import utils import mujoco_py as mjp class AntEnv(mujoco_env.MujocoEnv, utils.EzPickle): def __init__(self, **kwargs): mujoco_env.MujocoEnv.__init__(self, 'ant.xml', 5) utils.EzPickle.__init__(self) def step(self, a): xposbefore = self.get_body_com("torso")[0] self.do_simulation(a, self.frame_skip) mjp.functions.mj_rnePostConstraint(self.sim.model, self.sim.data) #### calc contacts, this is a mujoco py version mismatch issue with mujoco200 xposafter = self.get_body_com("torso")[0] forward_reward = (xposafter - xposbefore) / self.dt ctrl_cost = .5 * np.square(a).sum() contact_cost = 0.5 * 1e-3 * np.sum( np.square(np.clip(self.sim.data.cfrc_ext, -1, 1))) survive_reward = 1.0 ### safety stuff yposafter = self.get_body_com("torso")[1] ywall = np.array([-5, 5]) if xposafter < 20: y_walldist = yposafter - xposafter * np.tan(30 / 360 * 2 * np.pi) + ywall elif xposafter > 20 and xposafter < 60: y_walldist = yposafter + (xposafter - 40) * np.tan(30 / 360 * 2 * np.pi) - ywall elif xposafter > 60 and
xposafter < 100: y_walldist = yposafter - (xposafter - 80) * np.tan(30 / 360 * 2 * np.pi) + ywall else: y_walldist = yposafter - 20 * np.tan(30 / 360 * 2 * np.pi) + ywall obj_cost = (abs(y_walldist) < 1.8).any() * 1.0 reward = forward_reward - ctrl_cost - contact_cost + survive_reward body_quat = self.data.get_body_xquat('torso') z_rot = 1 - 2 * ( body_quat[1] ** 2 + body_quat[2] ** 2) ### normally xx-rotation, not sure what axes mujoco uses state = self.state_vector() notdone = np.isfinite(state).all() \ and state[2] >= 0.2 and state[2] <= 1.0 \ and z_rot >= -0.7 done = not notdone done_cost = done * 1.0 cost = np.clip(obj_cost + done_cost, 0, 1) ob = self._get_obs() return ob, reward, done, dict( reward_forward=forward_reward, reward_ctrl=-ctrl_cost, reward_contact=-contact_cost, reward_survive=survive_reward, cost_obj=obj_cost, cost_done=done_cost, cost=cost, ) def _get_obs(self): x = self.sim.data.qpos.flat[0] y = self.sim.data.qpos.flat[1] if x < 20: y_off = y - x * np.tan(30 / 360 * 2 * np.pi) elif x > 20 and x < 60: y_off = y + (x - 40) * np.tan(30 / 360 * 2 * np.pi) elif x > 60 and x < 100: y_off = y - (x - 80) * np.tan(30 / 360 * 2 * np.pi) else: y_off = y - 20 * np.tan(30 / 360 * 2 * np.pi) return np.concatenate([ self.sim.data.qpos.flat[2:-42], self.sim.data.qvel.flat[:-36], [x / 5], [y_off], # np.clip(self.sim.data.cfrc_ext, -1, 1).flat, ]) def reset_model(self): qpos = self.init_qpos + self.np_random.uniform(size=self.model.nq, low=-.1, high=.1) qpos[-42:] = self.init_qpos[-42:] qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1 qvel[-36:] = self.init_qvel[-36:] self.set_state(qpos, qvel) return self._get_obs() def viewer_setup(self): self.viewer.cam.distance = self.model.stat.extent * 0.5 ================================================ FILE: MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/.gitignore ================================================ *.auto.xml ================================================ FILE: MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/__init__.py ================================================ ================================================ FILE: MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/ant.xml ================================================ ================================================ FILE: MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/coupled_half_cheetah.xml ================================================ ================================================ FILE: MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/half_cheetah.xml ================================================ ================================================ FILE: MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/hopper.xml ================================================ ================================================ FILE: MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/humanoid.xml ================================================ ================================================ FILE: MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/manyagent_ant.xml ================================================ ================================================ FILE: MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/manyagent_ant.xml.template ================================================ ================================================ FILE: 
MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/manyagent_ant__stage1.xml ================================================ ================================================ FILE: MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/manyagent_swimmer.xml.template ================================================ ================================================ FILE: MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/manyagent_swimmer__bckp2.xml ================================================ ================================================ FILE: MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/manyagent_swimmer_bckp.xml ================================================ ================================================ FILE: MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/coupled_half_cheetah.py ================================================ import numpy as np from gym import utils from gym.envs.mujoco import mujoco_env from macpo.envs.safety_ma_mujoco.safety_multiagent_mujoco import mujoco_env import os import mujoco_py as mjp from gym import error, spaces class CoupledHalfCheetah(mujoco_env.MujocoEnv, utils.EzPickle): def __init__(self, **kwargs): mujoco_env.MujocoEnv.__init__(self, os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets', 'coupled_half_cheetah.xml'), 5) utils.EzPickle.__init__(self) def step(self, action): #ADDED # xposbefore = self.sim.data.qpos[1] # t = self.data.time # wall_act = .02 * np.sin(t / 3) ** 2 - .004 # mjp.functions.mj_rnePostConstraint(self.sim.model, # self.sim.data) #### calc contacts, this is a mujoco py version mismatch issue with mujoco200 # action_p_wall = np.concatenate((np.squeeze(action), [wall_act])) # self.do_simulation(action_p_wall, self.frame_skip) # xposafter = self.sim.data.qpos[1] # wallpos = self.data.get_geom_xpos("obj_geom")[0] # wallvel = self.data.get_body_xvelp("obj1")[0] # xdist = wallpos - xposafter # obj_cost = int(np.abs(xdist) < 2) # if obj_cost > 0: # self.model.geom_rgba[9] = [1.0, 0, 0, 1.0] # else: # self.model.geom_rgba[9] = [1.0, 0.5, 0.5, .8] # ob = self._get_obs() # reward_ctrl = - 0.1 * np.square(action).sum() # reward_run = (xposafter - xposbefore) / self.dt # reward = reward_ctrl + reward_run # done = False # xposbefore1 = self.sim.data.qpos[0] # xposbefore2 = self.sim.data.qpos[len(self.sim.data.qpos) // 2] # print("self.sim.data.qpos", self.sim.data.qpos) xposbefore1 = self.get_body_com("torso")[0] xposbefore2 = self.get_body_com("torso2")[0] yposbefore1 = self.get_body_com("torso")[1] yposbefore2 = self.get_body_com("torso2")[1] # ADDED t = self.data.time wall_act = .02 * np.sin(t / 3) ** 2 - .004 mjp.functions.mj_rnePostConstraint(self.sim.model, self.sim.data) #### calc contacts, this is a mujoco py version mismatch issue with mujoco200 action_p_wall = np.concatenate((np.squeeze(action), [wall_act])) # print("action_p_wall", np.array(action_p_wall).shape) # print("action", np.array(action).shape) # print("self.frame_skip", self.frame_skip) self.do_simulation(action_p_wall, self.frame_skip) # self.do_simulation(action, self.frame_skip) # xposafter1 = self.sim.data.qpos[0] # xposafter2 = self.sim.data.qpos[len(self.sim.data.qpos)//2] xposafter1 = self.get_body_com("torso")[0] xposafter2 = self.get_body_com("torso2")[0] yposafter1 = self.get_body_com("torso")[1] yposafter2 = self.get_body_com("torso2")[1] # ADDED wallpos = self.data.get_geom_xpos("obj_geom")[0] # wallpos1 = self.data.get_geom_xpos("obj_geom1")[0] y_wallpos1 = 
self.data.get_geom_xpos("wall1")[1] y_wallpos2 = self.data.get_geom_xpos("wall2")[1] # print("x_wallpos1 = self.data.get_geom_xpos", x_wallpos1) # print("x_wallpos2 = self.data.get_geom_xpos", x_wallpos2) wallvel = self.data.get_body_xvelp("obj1")[0] xdist = np.abs(wallpos - xposafter1) #+ np.abs(wallpos - xposafter2) #+ (wallpos1 - xposafter1) + (wallpos1 - xposafter2) obj_cost = 0 # or int(np.abs(wallpos1 - xposafter2) < 5) or int(np.abs(wallpos1 - xposafter2) < 5)\ # if int(np.abs(wallpos - xposafter1) < 5) or int(np.abs(wallpos - xposafter2) < 5) \ or int(np.abs(y_wallpos1 - yposafter1) < 5) or int(np.abs(y_wallpos2 - yposafter2) < 5): obj_cost = 1 # obj_cost = int(np.abs(xdist) < 5) # print("xposbefore1", xposbefore1) # print("xposbefore2", xposbefore2) # print("yposafter1", yposafter1) # print("yposafter2", yposafter2) # print("np.abs(x_wallpos1 - yposafter1)", np.abs(x_wallpos1 - yposafter1)) # print("xposafter1", xposafter1) # print("xposafter2", xposafter2) # print("wallpos", wallpos) # print("wallpos1", wallpos1) # print("xdist", xdist) # print("(wallpos1 - xposafter2)", (wallpos1 - xposafter2)) # print("(wallpos - xposafter1)", (wallpos - xposafter1)) # print("(wallpos - xposafter2)", (wallpos - xposafter2)) if obj_cost > 0: self.model.geom_rgba[9] = [1.0, 0, 0, 1.0] else: self.model.geom_rgba[9] = [1.0, 0.5, 0.5, .8] ob = self._get_obs() ob = self._get_obs() reward_ctrl1 = - 0.1 * np.square(action[0:len(action)//2]).sum() reward_ctrl2 = - 0.1 * np.square(action[len(action)//2:]).sum() reward_run1 = (xposafter1 - xposbefore1)/self.dt reward_run2 = (xposafter2 - xposbefore2) / self.dt reward = (reward_ctrl1 + reward_ctrl2)/2.0 + (reward_run1 + reward_run2)/2.0 done = False return ob, reward, done, dict(cost=obj_cost, reward_run1=reward_run1, reward_ctrl1=reward_ctrl1, reward_run2=reward_run2, reward_ctrl2=reward_ctrl2) def _get_obs(self): #AADED wallvel = self.data.get_body_xvelp("obj1")[0] wall_f = .02 * np.sin(self.data.time / 3) ** 2 - .004 xdist = (self.data.get_geom_xpos("obj_geom")[0] - self.sim.data.qpos[1]) / 10 return np.concatenate([ self.sim.data.qpos.flat[2:], self.sim.data.qvel.flat[1:], [wallvel], [wall_f], np.clip([xdist], -5, 5), ]) # return np.concatenate([ # self.sim.data.qpos.flat[1:], # self.sim.data.qvel.flat, # ]) def reset_model(self): qpos = self.init_qpos + self.np_random.uniform(low=-.1, high=.1, size=self.model.nq) qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1 self.set_state(qpos, qvel) return self._get_obs() def viewer_setup(self): self.viewer.cam.distance = self.model.stat.extent * 0.5 def get_env_info(self): return {"episode_limit": self.episode_limit} def _set_action_space(self): bounds = self.model.actuator_ctrlrange.copy().astype(np.float32) low, high = bounds.T low, high = low[:-1], high[:-1] self.action_space = spaces.Box(low=low, high=high, dtype=np.float32) return self.action_space ================================================ FILE: MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/half_cheetah.py ================================================ import numpy as np from gym import utils # from mujoco_safety_gym.envs import mujoco_env # from gym.envs.mujoco import mujoco_env from macpo.envs.safety_ma_mujoco.safety_multiagent_mujoco import mujoco_env import mujoco_py as mjp from gym import error, spaces class HalfCheetahEnv(mujoco_env.MujocoEnv, utils.EzPickle): def __init__(self, **kwargs): # print("half_aaaa") mujoco_env.MujocoEnv.__init__(self, 'half_cheetah.xml', 5) utils.EzPickle.__init__(self) def step(self, 
action): xposbefore = self.sim.data.qpos[1] t = self.data.time wall_act = .02 * np.sin(t / 3) ** 2 - .004 mjp.functions.mj_rnePostConstraint(self.sim.model, self.sim.data) #### calc contacts, this is a mujoco py version mismatch issue with mujoco200 action_p_wall = np.concatenate((np.squeeze(action), [wall_act])) self.do_simulation(action_p_wall, self.frame_skip) xposafter = self.sim.data.qpos[1] wallpos = self.data.get_geom_xpos("obj_geom")[0] wallvel = self.data.get_body_xvelp("obj1")[0] xdist = wallpos - xposafter # print("wallpos", wallpos) # print("xposafter", xposafter) # print("xdist", xdist) obj_cost = int(np.abs(xdist) < 9) if obj_cost > 0: self.model.geom_rgba[9] = [1.0, 0, 0, 1.0] else: self.model.geom_rgba[9] = [1.0, 0.5, 0.5, .8] ob = self._get_obs() reward_ctrl = - 0.1 * np.square(action).sum() reward_run = (xposafter - xposbefore) / self.dt reward = reward_ctrl + reward_run cost = obj_cost # print("cost1", cost) done = False return ob, reward, done, dict(cost=cost, reward_run=reward_run, reward_ctrl=reward_ctrl) def _get_obs(self): wallvel = self.data.get_body_xvelp("obj1")[0] wall_f = .02 * np.sin(self.data.time / 3) ** 2 - .004 xdist = (self.data.get_geom_xpos("obj_geom")[0] - self.sim.data.qpos[1]) / 10 return np.concatenate([ self.sim.data.qpos.flat[2:], self.sim.data.qvel.flat[1:], [wallvel], [wall_f], np.clip([xdist], -5, 5), ]) def reset_model(self): qpos = self.init_qpos + self.np_random.uniform(low=-.1, high=.1, size=self.model.nq) qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1 self.set_state(qpos, qvel) return self._get_obs() def viewer_setup(self): self.viewer.cam.distance = self.model.stat.extent * 0.5 def _set_action_space(self): bounds = self.model.actuator_ctrlrange.copy().astype(np.float32) low, high = bounds.T low, high = low[:-1], high[:-1] self.action_space = spaces.Box(low=low, high=high, dtype=np.float32) return self.action_space ================================================ FILE: MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/hopper.py ================================================ import numpy as np from macpo.envs.safety_ma_mujoco.safety_multiagent_mujoco import mujoco_env from gym import utils import mujoco_py as mjp class HopperEnv(mujoco_env.MujocoEnv, utils.EzPickle): def __init__(self, **kwargs): mujoco_env.MujocoEnv.__init__(self, 'hopper.xml', 4) utils.EzPickle.__init__(self) self.last_mocx = 5 #### vel readings are super noisy for mocap weld def step(self, a): posbefore = self.sim.data.qpos[3] t = self.data.time pos = (t + np.sin(t)) + 3 self.data.set_mocap_pos('mocap1', [pos, 0, 0.5]) mjp.functions.mj_rnePostConstraint(self.sim.model, self.sim.data) #### calc contacts, this is a mujoco py version mismatch issue with mujoco200 self.do_simulation(a, self.frame_skip) posafter, height, ang = self.sim.data.qpos[3:6] alive_bonus = 1.0 mocapx = self.sim.data.qpos[0] xdist = mocapx - posafter cost = int(np.abs(xdist) < 1) reward = (posafter - posbefore) / self.dt reward += alive_bonus reward -= 1e-3 * np.square(a).sum() s = self.state_vector() # done = not (np.isfinite(s).all() and (np.abs(s[5:]) < 100).all() and # (height > .7) and (abs(ang) < .2)) done = not ( np.isfinite(s).all() and (np.abs(s[2:]) < 100).all() and (height > 0.7) and (abs(ang) < 0.2) ) print("np.isfinite(s).all()", np.isfinite(s).all()) print("np.abs(s[5:])", (np.abs(s[2:]) < 100).all()) print("height", (height > 0.7)) print("abs(ang) ", (abs(ang) < 0.2)) ob = self._get_obs() return ob, reward, done, dict(cost=cost) def _get_obs(self): x = 
self.sim.data.qpos[3] mocapx = self.sim.data.qpos[0] mocvel = 1 + np.cos(self.data.time) mocacc = -np.sin(self.data.time) return np.concatenate([ self.sim.data.qpos.flat[4:], np.clip(self.sim.data.qvel[3:].flat, -10, 10), [mocvel], [mocacc], [mocapx - x], ]) def reset_model(self): qpos = self.init_qpos + self.np_random.uniform(low=-.005, high=.005, size=self.model.nq) qvel = self.init_qvel + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv) self.set_state(qpos, qvel) return self._get_obs() def last_mocap_x(self): return self.last_mocx def viewer_setup(self): self.viewer.cam.trackbodyid = 2 self.viewer.cam.distance = self.model.stat.extent * 0.75 self.viewer.cam.lookat[2] = 1.15 self.viewer.cam.elevation = -20 ================================================ FILE: MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/humanoid.py ================================================ import numpy as np # from mujoco_safety_gym.envs import mujoco_env from macpo.envs.safety_ma_mujoco.safety_multiagent_mujoco import mujoco_env from gym import utils import mujoco_py as mjp def mass_center(model, sim): mass = np.expand_dims(model.body_mass, 1) xpos = sim.data.xipos return (np.sum(mass * xpos, 0) / np.sum(mass))[0] class HumanoidEnv(mujoco_env.MujocoEnv, utils.EzPickle): def __init__(self, **kwargs): mujoco_env.MujocoEnv.__init__(self, 'humanoid.xml', 5) utils.EzPickle.__init__(self) def _get_obs(self): data = self.sim.data x = data.qpos.flat[0] y = data.qpos.flat[1] if x < 20: y_off = y - x * np.tan(30 / 360 * 2 * np.pi) elif x > 20 and x < 60: y_off = y + (x - 40) * np.tan(30 / 360 * 2 * np.pi) elif x > 60 and x < 100: y_off = y - (x - 80) * np.tan(30 / 360 * 2 * np.pi) else: y_off = y - 20 * np.tan(30 / 360 * 2 * np.pi) return np.concatenate([data.qpos.flat[2:-42], data.qvel.flat[:-36], [x / 5], [y_off]]) # return np.concatenate([data.qpos.flat[2:], # data.qvel.flat, # data.cinert.flat, # data.cvel.flat, # data.qfrc_actuator.flat, # data.cfrc_ext.flat]) def step(self, a): pos_before = mass_center(self.model, self.sim) self.do_simulation(a, self.frame_skip) mjp.functions.mj_rnePostConstraint(self.sim.model, self.sim.data) #### calc contacts, this is a mujoco py version mismatch issue with mujoco200 pos_after = mass_center(self.model, self.sim) alive_bonus = 5.0 data = self.sim.data lin_vel_cost = 1.25 * (pos_after - pos_before) / self.dt quad_ctrl_cost = 0.1 * np.square(data.ctrl).sum() quad_impact_cost = .5e-6 * np.square(data.cfrc_ext).sum() quad_impact_cost = min(quad_impact_cost, 10) reward = lin_vel_cost - quad_ctrl_cost - quad_impact_cost + alive_bonus yposafter = self.get_body_com("torso")[1] ywall = np.array([-2.3, 2.3]) if pos_after < 20: y_walldist = yposafter - pos_after * np.tan(30 / 360 * 2 * np.pi) + ywall elif pos_after > 20 and pos_after < 60: y_walldist = yposafter + (pos_after - 40) * np.tan(30 / 360 * 2 * np.pi) - ywall elif pos_after > 60 and pos_after < 100: y_walldist = yposafter - (pos_after - 80) * np.tan(30 / 360 * 2 * np.pi) + ywall else: y_walldist = yposafter - 20 * np.tan(30 / 360 * 2 * np.pi) + ywall obj_cost = (abs(y_walldist) < 1.8).any() * 1.0 qpos = self.sim.data.qpos done = bool((qpos[2] < 1.0) or (qpos[2] > 2.0)) done_cost = done * 1.0 cost = np.clip(obj_cost + done_cost, 0, 1) return self._get_obs(), reward, done, dict(reward_linvel=lin_vel_cost, reward_quadctrl=-quad_ctrl_cost, reward_alive=alive_bonus, reward_impact=-quad_impact_cost, cost_obj=obj_cost, cost_done=done_cost, cost=cost, ) def reset_model(self): c = 0.01 # self.set_state( # 
self.init_qpos + self.np_random.uniform(low=-c, high=c, size=self.model.nq), # self.init_qvel + self.np_random.uniform(low=-c, high=c, size=self.model.nv,) # ) # return self._get_obs() qpos = self.init_qpos + self.np_random.uniform(low=-c, high=c, size=self.model.nq) qpos[-42:] = self.init_qpos[-42:] qvel = self.init_qvel + self.np_random.uniform(low=-c, high=c, size=self.model.nv, ) qvel[-36:] = self.init_qvel[-36:] self.set_state(qpos, qvel) return self._get_obs() def viewer_setup(self): self.viewer.cam.trackbodyid = 1 self.viewer.cam.distance = self.model.stat.extent * 1.0 self.viewer.cam.lookat[2] = 2.0 self.viewer.cam.elevation = -20 ================================================ FILE: MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/manyagent_ant.py ================================================ import numpy as np from gym import utils from gym.envs.mujoco import mujoco_env from jinja2 import Template import mujoco_py as mjp import os class ManyAgentAntEnv(mujoco_env.MujocoEnv, utils.EzPickle): def __init__(self, **kwargs): # Return Flag: Distinguish the mujoco and Wrapper env. self.rflag = 0 agent_conf = kwargs.get("agent_conf") n_agents = int(agent_conf.split("x")[0]) n_segs_per_agents = int(agent_conf.split("x")[1]) n_segs = n_agents * n_segs_per_agents # Check whether asset file exists already, otherwise create it asset_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets', 'manyagent_ant_{}_agents_each_{}_segments.auto.xml'.format(n_agents, n_segs_per_agents)) # if not os.path.exists(asset_path): # print("Auto-Generating Manyagent Ant asset with {} segments at {}.".format(n_segs, asset_path)) self._generate_asset(n_segs=n_segs, asset_path=asset_path) #asset_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets',git p # 'manyagent_swimmer.xml') mujoco_env.MujocoEnv.__init__(self, asset_path, 4) utils.EzPickle.__init__(self) def _generate_asset(self, n_segs, asset_path): template_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets', 'manyagent_ant.xml.template') with open(template_path, "r") as f: t = Template(f.read()) body_str_template = """ """ body_close_str_template ="\n" actuator_str_template = """\t \n""" body_str = "" for i in range(1,n_segs): body_str += body_str_template.format(*([i]*16)) body_str += body_close_str_template*(n_segs-1) actuator_str = "" for i in range(n_segs): actuator_str += actuator_str_template.format(*([i]*8)) rt = t.render(body=body_str, actuators=actuator_str) with open(asset_path, "w") as f: f.write(rt) pass def step(self, a): xposbefore = self.get_body_com("torso_0")[0] self.do_simulation(a, self.frame_skip) #ADDED mjp.functions.mj_rnePostConstraint(self.sim.model, self.sim.data) #### calc contacts, this is a mujoco py version mismatch issue with mujoco200 xposafter = self.get_body_com("torso_0")[0] forward_reward = (xposafter - xposbefore)/self.dt ctrl_cost = .5 * np.square(a).sum() contact_cost = 0.5 * 1e-3 * np.sum( np.square(np.clip(self.sim.data.cfrc_ext, -1, 1))) survive_reward = 1.0 ### ADDED safety stuff yposafter = self.get_body_com("torso_0")[1] ywall = np.array([-4.5, 4.5]) if xposafter < 20: y_walldist = yposafter - xposafter * np.tan(30 / 360 * 2 * np.pi) + ywall elif xposafter>20 and xposafter<60: y_walldist = yposafter + (xposafter-40)*np.tan(30/360*2*np.pi) - ywall elif xposafter>60 and xposafter<100: y_walldist = yposafter - (xposafter-80)*np.tan(30/360*2*np.pi) + ywall else: y_walldist = yposafter - 20*np.tan(30/360*2*np.pi) + ywall obj_cost = (abs(y_walldist) 
< 1.8).any() * 1.0 reward = forward_reward - ctrl_cost - contact_cost + survive_reward #### ADDED body_quat = self.data.get_body_xquat('torso_0') z_rot = 1-2*(body_quat[1]**2+body_quat[2]**2) ### normally xx-rotation, not sure what axes mujoco uses state = self.state_vector() notdone = np.isfinite(state).all() \ and state[2] >= 0.2 and state[2] <= 1.0\ and z_rot>=-0.7 #ADDED done = not notdone # print("done", done) #ADDED done_cost = done * 1.0 cost = np.clip(obj_cost + done_cost, 0, 1) # print("reward", reward) # print("cost-manyagent_ant.py",cost) ob = self._get_obs() if self.rflag == 0: self.rflag += 1 return ob, reward, done, dict( cost=cost, reward_forward=forward_reward, # reward_ctrl=-ctrl_cost, reward_contact=-contact_cost, reward_survive=survive_reward, cost_obj=obj_cost, # ADDED cost_done=done_cost, # ADDED ) else: return ob, reward, done, dict( cost=cost, reward_forward=forward_reward, # cost = cost, reward_ctrl=-ctrl_cost, reward_contact=-contact_cost, reward_survive=survive_reward, cost_obj=obj_cost, #ADDED cost_done=done_cost, #ADDED ) def _get_obs(self): x = self.sim.data.qpos.flat[0] #ADDED y = self.sim.data.qpos.flat[1] #ADDED #ADDED if x<20: y_off = y - x*np.tan(30/360*2*np.pi) elif x>20 and x<60: y_off = y + (x-40)*np.tan(30/360*2*np.pi) elif x>60 and x<100: y_off = y - (x-80)*np.tan(30/360*2*np.pi) else: y_off = y - 20*np.tan(30/360*2*np.pi) # return np.concatenate([ # self.sim.data.qpos.flat[2:], # self.sim.data.qvel.flat, # # np.clip(self.sim.data.cfrc_ext, -1, 1).flat, # ]) return np.concatenate([ self.sim.data.qpos.flat[2:-42], # size = 3 self.sim.data.qvel.flat[:-36], # size = 6 [x/5], [y_off], # np.clip(self.sim.data.cfrc_ext, -1, 1).flat, ]) # def reset_model(self): # qpos = self.init_qpos + self.np_random.uniform(size=self.model.nq, low=-.1, high=.1) # qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1 # self.set_state(qpos, qvel) # return self._get_obs() def reset_model(self): qpos = self.init_qpos + self.np_random.uniform(size=self.model.nq, low=-.1, high=.1) qpos[-42:] = self.init_qpos[-42:] qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1 qvel[-36:] = self.init_qvel[-36:] self.set_state(qpos, qvel) return self._get_obs() def viewer_setup(self): self.viewer.cam.distance = self.model.stat.extent * 0.5 ================================================ FILE: MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/manyagent_swimmer.py ================================================ import numpy as np from gym import utils from gym.envs.mujoco import mujoco_env import os from jinja2 import Template import mujoco_py as mjp class ManyAgentSwimmerEnv(mujoco_env.MujocoEnv, utils.EzPickle): def __init__(self, **kwargs): agent_conf = kwargs.get("agent_conf") n_agents = int(agent_conf.split("x")[0]) n_segs_per_agents = int(agent_conf.split("x")[1]) n_segs = n_agents * n_segs_per_agents # Check whether asset file exists already, otherwise create it asset_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets', 'manyagent_swimmer_{}_agents_each_{}_segments.auto.xml'.format(n_agents, n_segs_per_agents)) # if not os.path.exists(asset_path): print("Auto-Generating Manyagent Swimmer asset with {} segments at {}.".format(n_segs, asset_path)) self._generate_asset(n_segs=n_segs, asset_path=asset_path) #asset_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets',git p # 'manyagent_swimmer.xml') mujoco_env.MujocoEnv.__init__(self, asset_path, 4) utils.EzPickle.__init__(self) def _generate_asset(self, n_segs, 
asset_path): template_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets', 'manyagent_swimmer.xml.template') with open(template_path, "r") as f: t = Template(f.read()) body_str_template = """ """ body_end_str_template = """ """ body_close_str_template ="\n" actuator_str_template = """\t \n""" body_str = "" for i in range(1,n_segs-1): body_str += body_str_template.format(i, (-1)**(i+1), i) body_str += body_end_str_template.format(n_segs-1) body_str += body_close_str_template*(n_segs-2) actuator_str = "" for i in range(n_segs): actuator_str += actuator_str_template.format(i) rt = t.render(body=body_str, actuators=actuator_str) with open(asset_path, "w") as f: f.write(rt) pass def step(self, a): # ctrl_cost_coeff = 0.0001 # xposbefore = self.sim.data.qpos[0] # self.do_simulation(a, self.frame_skip) # xposafter = self.sim.data.qpos[0] # reward_fwd = (xposafter - xposbefore) / self.dt # reward_ctrl = -ctrl_cost_coeff * np.square(a).sum() # reward = reward_fwd + reward_ctrl ctrl_cost_coeff = 0.0001 xposbefore = self.sim.data.qpos[0] # yposbefore = self.sim.data.qpos[1] self.do_simulation(a, self.frame_skip) # ADDED mjp.functions.mj_rnePostConstraint(self.sim.model, self.sim.data) xposafter = self.sim.data.qpos[0] # yposbefore = self.sim.data.qpos[1] y_wallpos1 = self.data.get_geom_xpos("wall1")[1] y_wallpos2 = self.data.get_geom_xpos("wall2")[1] reward_fwd = (xposafter - xposbefore) / self.dt reward_ctrl = - ctrl_cost_coeff * np.square(a).sum() reward = reward_fwd + reward_ctrl ### ADDED safety stuff yposafter = self.get_body_com("torso")[1] ywall = np.array([-2.3, 2.3]) if xposafter < 20: y_walldist = yposafter - xposafter * np.tan(30 / 360 * 2 * np.pi) + ywall elif xposafter > 20 and xposafter < 60: y_walldist = yposafter + (xposafter - 40) * np.tan(30 / 360 * 2 * np.pi) - ywall elif xposafter > 60 and xposafter < 100: y_walldist = yposafter - (xposafter - 80) * np.tan(30 / 360 * 2 * np.pi) + ywall else: y_walldist = yposafter - 20 * np.tan(30 / 360 * 2 * np.pi) + ywall obj_cost = (abs(y_walldist) < 1.8).any() * 1.0 # print("y_wallpos1-yposafter", y_wallpos1-yposafter) # print("y_wallpos2-yposafter", y_wallpos2-yposafter) #### ADDED # body_quat = self.data.get_body_xquat('torso') # z_rot = 1 - 2 * ( # body_quat[1] ** 2 + body_quat[2] ** 2) ### normally xx-rotation, not sure what axes mujoco uses # # state = self.state_vector() done = False # ADDED # print("y_walldist", y_walldist) # print("obj_cost", obj_cost) # print("done_cost", done_cost) cost = np.clip(obj_cost, 0, 1) #cost = obj_cost ob = self._get_obs() return ob, reward, done, dict(cost=cost, reward_fwd=reward_fwd, reward_ctrl=reward_ctrl) def _get_obs(self): qpos = self.sim.data.qpos qvel = self.sim.data.qvel #ADDED x = self.sim.data.qpos.flat[0] # ADDED y = self.sim.data.qpos.flat[1] # ADDED # ADDED if x < 20: y_off = y - x * np.tan(30 / 360 * 2 * np.pi) elif x > 20 and x < 60: y_off = y + (x - 40) * np.tan(30 / 360 * 2 * np.pi) elif x > 60 and x < 100: y_off = y - (x - 80) * np.tan(30 / 360 * 2 * np.pi) else: y_off = y - 20 * np.tan(30 / 360 * 2 * np.pi) return np.concatenate([qpos.flat[2:], qvel.flat, [x/5], [y_off]]) def reset_model(self): self.set_state( self.init_qpos + self.np_random.uniform(low=-.1, high=.1, size=self.model.nq), self.init_qvel + self.np_random.uniform(low=-.1, high=.1, size=self.model.nv) ) return self._get_obs() ================================================ FILE: MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/mujoco_env.py ================================================ 
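mujoco_env.py below defines the local MujocoEnv base class used by the single-agent safety environments above; every environment's step() returns the usual (obs, reward, done, info) tuple, with the per-step safety signal exposed under info["cost"]. A minimal rollout sketch (illustrative only, assuming a working mujoco_py installation and the bundled XML assets; the random policy is just a placeholder):

```python
# Illustrative sketch: roll out one of the single-agent safety envs and
# accumulate the constraint signal reported in info["cost"].
from macpo.envs.safety_ma_mujoco.safety_multiagent_mujoco.ant import AntEnv

env = AntEnv()
obs = env.reset()
episode_reward, episode_cost = 0.0, 0.0
for _ in range(1000):
    action = env.action_space.sample()      # placeholder random policy
    obs, reward, done, info = env.step(action)
    episode_reward += reward
    episode_cost += info["cost"]            # wall-proximity cost + fall cost, clipped to [0, 1]
    if done:
        obs = env.reset()
env.close()
```

In the multi-agent wrapper further below (mujoco_multi.py), this scalar cost is replicated per agent alongside the per-agent rewards before being handed to the runner.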
from collections import OrderedDict import os from gym import error, spaces from gym.utils import seeding import numpy as np from os import path import gym try: import mujoco_py except ImportError as e: raise error.DependencyNotInstalled("{}. (HINT: you need to install mujoco_py, and also perform the setup instructions here: https://github.com/openai/mujoco-py/.)".format(e)) DEFAULT_SIZE = 500 def convert_observation_to_space(observation): if isinstance(observation, dict): space = spaces.Dict(OrderedDict([ (key, convert_observation_to_space(value)) for key, value in observation.items() ])) elif isinstance(observation, np.ndarray): low = np.full(observation.shape, -float('inf'), dtype=np.float32) high = np.full(observation.shape, float('inf'), dtype=np.float32) space = spaces.Box(low, high, dtype=observation.dtype) else: raise NotImplementedError(type(observation), observation) return space class MujocoEnv(gym.Env): """Superclass for all MuJoCo environments. """ def __init__(self, model_path, frame_skip): if model_path.startswith("/"): fullpath = model_path else: fullpath = os.path.join(os.path.dirname(__file__), "./assets", model_path) if not path.exists(fullpath): raise IOError("File %s does not exist" % fullpath) self.frame_skip = frame_skip self.model = mujoco_py.load_model_from_path(fullpath) self.sim = mujoco_py.MjSim(self.model) self.data = self.sim.data self.viewer = None self._viewers = {} self.metadata = { 'render.modes': ['human', 'rgb_array', 'depth_array'], 'video.frames_per_second': int(np.round(1.0 / self.dt)) } self.init_qpos = self.sim.data.qpos.ravel().copy() self.init_qvel = self.sim.data.qvel.ravel().copy() self._set_action_space() action = self.action_space.sample() observation, _reward, done, _info = self.step(action) # assert not done self._set_observation_space(observation) self.seed() def _set_action_space(self): bounds = self.model.actuator_ctrlrange.copy().astype(np.float32) low, high = bounds.T self.action_space = spaces.Box(low=low, high=high, dtype=np.float32) return self.action_space def _set_observation_space(self, observation): self.observation_space = convert_observation_to_space(observation) return self.observation_space def seed(self, seed=None): self.np_random, seed = seeding.np_random(seed) return [seed] # methods to override: # ---------------------------- def reset_model(self): """ Reset the robot degrees of freedom (qpos and qvel). Implement this in each subclass. """ raise NotImplementedError def viewer_setup(self): """ This method is called when the viewer is initialized. Optionally implement this method, if you need to tinker with camera position and so forth. 
""" pass # ----------------------------- def reset(self): self.sim.reset() ob = self.reset_model() return ob def set_state(self, qpos, qvel): assert qpos.shape == (self.model.nq,) and qvel.shape == (self.model.nv,) old_state = self.sim.get_state() new_state = mujoco_py.MjSimState(old_state.time, qpos, qvel, old_state.act, old_state.udd_state) self.sim.set_state(new_state) self.sim.forward() @property def dt(self): return self.model.opt.timestep * self.frame_skip def do_simulation(self, ctrl, n_frames): self.sim.data.ctrl[:] = ctrl for _ in range(n_frames): self.sim.step() def render(self, mode='human', width=DEFAULT_SIZE, height=DEFAULT_SIZE, camera_id=None, camera_name=None): if mode == 'rgb_array': if camera_id is not None and camera_name is not None: raise ValueError("Both `camera_id` and `camera_name` cannot be" " specified at the same time.") no_camera_specified = camera_name is None and camera_id is None if no_camera_specified: camera_name = 'track' if camera_id is None and camera_name in self.model._camera_name2id: camera_id = self.model.camera_name2id(camera_name) self._get_viewer(mode).render(width, height, camera_id=camera_id) # window size used for old mujoco-py: data = self._get_viewer(mode).read_pixels(width, height, depth=False) # original image is upside-down, so flip it return data[::-1, :, :] elif mode == 'depth_array': self._get_viewer(mode).render(width, height) # window size used for old mujoco-py: # Extract depth part of the read_pixels() tuple data = self._get_viewer(mode).read_pixels(width, height, depth=True)[1] # original image is upside-down, so flip it return data[::-1, :] elif mode == 'human': self._get_viewer(mode).render() def close(self): if self.viewer is not None: # self.viewer.finish() self.viewer = None self._viewers = {} def _get_viewer(self, mode): self.viewer = self._viewers.get(mode) if self.viewer is None: if mode == 'human': self.viewer = mujoco_py.MjViewer(self.sim) elif mode == 'rgb_array' or mode == 'depth_array': self.viewer = mujoco_py.MjRenderContextOffscreen(self.sim, -1) self.viewer_setup() self._viewers[mode] = self.viewer return self.viewer def get_body_com(self, body_name): return self.data.get_body_xpos(body_name) def state_vector(self): return np.concatenate([ self.sim.data.qpos.flat, self.sim.data.qvel.flat ]) def place_random_objects(self): for i in range(9): random_color_array = np.append(np.random.uniform(0, 1, size=3), 1) random_pos_array = np.append(np.random.uniform(-10., 10., size=2), 0.5) site_id = self.sim.model.geom_name2id('obj' + str(i)) self.sim.model.geom_rgba[site_id] = random_color_array self.sim.model.geom_pos[site_id] = random_pos_array ================================================ FILE: MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/mujoco_multi.py ================================================ from functools import partial import gym from gym.spaces import Box from gym.wrappers import TimeLimit import numpy as np from .multiagentenv import MultiAgentEnv from .manyagent_ant import ManyAgentAntEnv from .manyagent_swimmer import ManyAgentSwimmerEnv from .obsk import get_joints_at_kdist, get_parts_and_edges, build_obs def env_fn(env, **kwargs) -> MultiAgentEnv: # TODO: this may be a more complex function # env_args = kwargs.get("env_args", {}) return env(**kwargs) # env_REGISTRY = {} # env_REGISTRY["manyagent_ant"] = partial(env_fn, env=ManyAgentAntEnv) # # env_REGISTRY = {} # env_REGISTRY["manyagent_swimmer"] = partial(env_fn, env=ManyAgentSwimmerEnv) # using code from 
https://github.com/ikostrikov/pytorch-ddpg-naf class NormalizedActions(gym.ActionWrapper): def _action(self, action): action = (action + 1) / 2 action *= (self.action_space.high - self.action_space.low) action += self.action_space.low return action def action(self, action_): return self._action(action_) def _reverse_action(self, action): action -= self.action_space.low action /= (self.action_space.high - self.action_space.low) action = action * 2 - 1 return action class MujocoMulti(MultiAgentEnv): def __init__(self, batch_size=None, **kwargs): super().__init__(batch_size, **kwargs) self.scenario = kwargs["env_args"]["scenario"] # e.g. Ant-v2 self.agent_conf = kwargs["env_args"]["agent_conf"] # e.g. '2x3' self.agent_partitions, self.mujoco_edges, self.mujoco_globals = get_parts_and_edges(self.scenario, self.agent_conf) self.n_agents = len(self.agent_partitions) self.n_actions = max([len(l) for l in self.agent_partitions]) self.obs_add_global_pos = kwargs["env_args"].get("obs_add_global_pos", False) self.agent_obsk = kwargs["env_args"].get("agent_obsk", None) # if None, fully observable else k>=0 implies observe nearest k agents or joints self.agent_obsk_agents = kwargs["env_args"].get("agent_obsk_agents", False) # observe full k nearest agents (True) or just single joints (False) if self.agent_obsk is not None: # print("this is agent_obsk") self.k_categories_label = kwargs["env_args"].get("k_categories") if self.k_categories_label is None: if self.scenario in ["Ant-v2", "manyagent_ant"]: self.k_categories_label = "qpos,qvel,cfrc_ext|qpos" # print("this is agent_obsk --- ant") elif self.scenario in ["Swimmer-v2", "manyagent_swimmer"]: self.k_categories_label = "qpos,qvel|qpos" # print("this is agent_obsk --- swimmer") elif self.scenario in ["Humanoid-v2", "HumanoidStandup-v2"]: self.k_categories_label = "qpos,qvel,cfrc_ext,cvel,cinert,qfrc_actuator|qpos" elif self.scenario in ["Reacher-v2"]: self.k_categories_label = "qpos,qvel,fingertip_dist|qpos" elif self.scenario in ["coupled_half_cheetah"]: self.k_categories_label = "qpos,qvel,ten_J,ten_length,ten_velocity|" else: self.k_categories_label = "qpos,qvel|qpos" k_split = self.k_categories_label.split("|") self.k_categories = [k_split[k if k < len(k_split) else -1].split(",") for k in range(self.agent_obsk + 1)] self.global_categories_label = kwargs["env_args"].get("global_categories") self.global_categories = self.global_categories_label.split( ",") if self.global_categories_label is not None else [] if self.agent_obsk is not None: self.k_dicts = [get_joints_at_kdist(agent_id, self.agent_partitions, self.mujoco_edges, k=self.agent_obsk, kagents=False, ) for agent_id in range(self.n_agents)] # load scenario from script self.episode_limit = self.args.episode_limit self.env_version = kwargs["env_args"].get("env_version", 2) if self.env_version == 2: if self.scenario in ["manyagent_ant"]: from .manyagent_ant import ManyAgentAntEnv as this_env elif self.scenario in ["manyagent_swimmer"]: from .manyagent_swimmer import ManyAgentSwimmerEnv as this_env elif self.scenario in ["coupled_half_cheetah"]: from .coupled_half_cheetah import CoupledHalfCheetah as this_env elif self.scenario in ["HalfCheetah-v2"]: from .half_cheetah import HalfCheetahEnv as this_env # print("HalfCheetahEnv1111") Hopper-v2 # elif self.scenario in ["Hopper-v2"]: from .hopper import HopperEnv as this_env # print("Hopper-v2") elif self.scenario in ["Humanoid-v2"]: from .humanoid import HumanoidEnv as this_env # print("Hopper-v2") elif self.scenario in ["Ant-v2"]: from .ant import 
AntEnv as this_env else: raise NotImplementedError('Custom env not implemented!') # print("self.scenario", self.scenario) # aaa= this_env(**kwargs["env_args"]) # print("aaa", aaa) self.wrapped_env = NormalizedActions( TimeLimit(this_env(**kwargs["env_args"]), max_episode_steps=self.episode_limit)) # try: # self.wrapped_env = NormalizedActions(gym.make(self.scenario)) # print("this managent1") # except gym.error.Error: # if self.scenario in ["manyagent_ant"]: # from .manyagent_ant import ManyAgentAntEnv as this_env # elif self.scenario in ["manyagent_swimmer"]: # from .manyagent_swimmer import ManyAgentSwimmerEnv as this_env # elif self.scenario in ["coupled_half_cheetah"]: # from .coupled_half_cheetah import CoupledHalfCheetah as this_env # elif self.scenario in ["HalfCheetah-v2"]: # from .half_cheetah import HalfCheetahEnv as this_env # print("HalfCheetahEnv1111") # else: # raise NotImplementedError('Custom env not implemented!') # self.wrapped_env = NormalizedActions( # TimeLimit(this_env(**kwargs["env_args"]), max_episode_steps=self.episode_limit)) # if self.scenario == "manyagent_swimmer": # env_REGISTRY = {} # env_REGISTRY["manyagent_swimmer"] = partial(env_fn, env=ManyAgentSwimmerEnv) # print("this is swimmer 2") # elif self.scenario == "manyagent_ant": # env_REGISTRY = {} # env_REGISTRY["manyagent_ant"] = partial(env_fn, env=ManyAgentAntEnv) # print("this managent2") # self.wrapped_env = NormalizedActions( # TimeLimit(partial(env_REGISTRY[self.scenario], **kwargs["env_args"])(), # max_episode_steps=self.episode_limit)) else: assert False, "not implemented!" self.timelimit_env = self.wrapped_env.env self.timelimit_env._max_episode_steps = self.episode_limit self.env = self.timelimit_env.env self.timelimit_env.reset() self.obs_size = self.get_obs_size() self.share_obs_size = self.get_state_size() # COMPATIBILITY self.n = self.n_agents # self.observation_space = [Box(low=np.array([-10]*self.n_agents), high=np.array([10]*self.n_agents)) for _ in range(self.n_agents)] self.observation_space = [Box(low=-10, high=10, shape=(self.obs_size,)) for _ in range(self.n_agents)] self.share_observation_space = [Box(low=-10, high=10, shape=(self.share_obs_size,)) for _ in range(self.n_agents)] acdims = [len(ap) for ap in self.agent_partitions] self.action_space = tuple([Box(self.env.action_space.low[sum(acdims[:a]):sum(acdims[:a + 1])], self.env.action_space.high[sum(acdims[:a]):sum(acdims[:a + 1])]) for a in range(self.n_agents)]) pass def step(self, actions): # need to remove dummy actions that arise due to unequal action vector sizes across agents flat_actions = np.concatenate([actions[i][:self.action_space[i].low.shape[0]] for i in range(self.n_agents)]) obs_n, reward_n, done_n, info_n = self.wrapped_env.step(flat_actions) self.steps += 1 info = {} info.update(info_n) # if done_n: # if self.steps < self.episode_limit: # info["episode_limit"] = False # the next state will be masked out # else: # info["episode_limit"] = True # the next state will not be masked out if done_n: if self.steps < self.episode_limit: info["bad_transition"] = False # the next state will be masked out else: info["bad_transition"] = True # the next state will not be masked out # return reward_n, done_n, info rewards = [[reward_n]] * self.n_agents # print("self.n_agents", self.n_agents) info["cost"] = [[info["cost"]]] * self.n_agents dones = [done_n] * self.n_agents infos = [info for _ in range(self.n_agents)] return self.get_obs(), self.get_state(), rewards, dones, infos, self.get_avail_actions() def get_obs(self): """ 
        Returns all agent observations in a list
        """
        state = self.env._get_obs()
        obs_n = []
        for a in range(self.n_agents):
            agent_id_feats = np.zeros(self.n_agents, dtype=np.float32)
            agent_id_feats[a] = 1.0
            # obs_n.append(self.get_obs_agent(a))
            # obs_n.append(np.concatenate([state, self.get_obs_agent(a), agent_id_feats]))
            # obs_n.append(np.concatenate([self.get_obs_agent(a), agent_id_feats]))
            obs_i = np.concatenate([state, agent_id_feats])
            obs_i = (obs_i - np.mean(obs_i)) / np.std(obs_i)
            obs_n.append(obs_i)
        return obs_n

    def get_obs_agent(self, agent_id):
        if self.agent_obsk is None:
            return self.env._get_obs()
        else:
            # return build_obs(self.env,
            #                  self.k_dicts[agent_id],
            #                  self.k_categories,
            #                  self.mujoco_globals,
            #                  self.global_categories,
            #                  vec_len=getattr(self, "obs_size", None))
            return build_obs(self.env,
                             self.k_dicts[agent_id],
                             self.k_categories,
                             self.mujoco_globals,
                             self.global_categories)

    def get_obs_size(self):
        """ Returns the shape of the observation """
        if self.agent_obsk is None:
            return self.get_obs_agent(0).size
        else:
            return len(self.get_obs()[0])
            # return max([len(self.get_obs_agent(agent_id)) for agent_id in range(self.n_agents)])

    def get_state(self, team=None):
        # TODO: May want global states for different teams (so cannot see what the other team is communicating e.g.)
        state = self.env._get_obs()
        share_obs = []
        for a in range(self.n_agents):
            agent_id_feats = np.zeros(self.n_agents, dtype=np.float32)
            agent_id_feats[a] = 1.0
            # share_obs.append(np.concatenate([state, self.get_obs_agent(a), agent_id_feats]))
            state_i = np.concatenate([state, agent_id_feats])
            state_i = (state_i - np.mean(state_i)) / np.std(state_i)
            share_obs.append(state_i)
        return share_obs

    def get_state_size(self):
        """ Returns the shape of the state"""
        return len(self.get_state()[0])

    def get_avail_actions(self):
        # all actions are always available
        return np.ones(shape=(self.n_agents, self.n_actions,))

    def get_avail_agent_actions(self, agent_id):
        """ Returns the available actions for agent_id """
        return np.ones(shape=(self.n_actions,))

    def get_total_actions(self):
        """ Returns the total number of actions an agent could ever take """
        return self.n_actions  # CAREFUL!
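The file continues below with the remaining MujocoMulti accessors (state, available actions, reset, and env info), shown as extracted.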
- for continuous dims, this is action space dim rather # return self.env.action_space.shape[0] def get_stats(self): return {} # TODO: Temp hack def get_agg_stats(self, stats): return {} def reset(self, **kwargs): """ Returns initial observations and states""" self.steps = 0 self.timelimit_env.reset() return self.get_obs(), self.get_state(), self.get_avail_actions() def render(self, **kwargs): self.env.render(**kwargs) def close(self): pass def seed(self, args): pass def get_env_info(self): env_info = {"state_shape": self.get_state_size(), "obs_shape": self.get_obs_size(), "n_actions": self.get_total_actions(), "n_agents": self.n_agents, "episode_limit": self.episode_limit, "action_spaces": self.action_space, "actions_dtype": np.float32, "normalise_actions": False } return env_info ================================================ FILE: MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/multiagentenv.py ================================================ from collections import namedtuple import numpy as np def convert(dictionary): return namedtuple('GenericDict', dictionary.keys())(**dictionary) class MultiAgentEnv(object): def __init__(self, batch_size=None, **kwargs): # Unpack arguments from sacred args = kwargs["env_args"] if isinstance(args, dict): args = convert(args) self.args = args if getattr(args, "seed", None) is not None: self.seed = args.seed self.rs = np.random.RandomState(self.seed) # initialise numpy random state def step(self, actions): """ Returns reward, terminated, info """ raise NotImplementedError def get_obs(self): """ Returns all agent observations in a list """ raise NotImplementedError def get_obs_agent(self, agent_id): """ Returns observation for agent_id """ raise NotImplementedError def get_obs_size(self): """ Returns the shape of the observation """ raise NotImplementedError def get_state(self): raise NotImplementedError def get_state_size(self): """ Returns the shape of the state""" raise NotImplementedError def get_avail_actions(self): raise NotImplementedError def get_avail_agent_actions(self, agent_id): """ Returns the available actions for agent_id """ raise NotImplementedError def get_total_actions(self): """ Returns the total number of actions an agent could ever take """ # TODO: This is only suitable for a discrete 1 dimensional action space for each agent raise NotImplementedError def get_stats(self): raise NotImplementedError # TODO: Temp hack def get_agg_stats(self, stats): return {} def reset(self): """ Returns initial observations and states""" raise NotImplementedError def render(self): raise NotImplementedError def close(self): raise NotImplementedError def seed(self, seed): raise NotImplementedError def get_env_info(self): env_info = {"state_shape": self.get_state_size(), "obs_shape": self.get_obs_size(), "n_actions": self.get_total_actions(), "n_agents": self.n_agents, "episode_limit": self.episode_limit} return env_info ================================================ FILE: MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/obsk.py ================================================ import itertools import numpy as np from copy import deepcopy class Node(): def __init__(self, label, qpos_ids, qvel_ids, act_ids, body_fn=None, bodies=None, extra_obs=None, tendons=None): self.label = label self.qpos_ids = qpos_ids self.qvel_ids = qvel_ids self.act_ids = act_ids self.bodies = bodies self.extra_obs = {} if extra_obs is None else extra_obs self.body_fn = body_fn self.tendons = tendons pass def __str__(self): return self.label def 
__repr__(self): return self.label class HyperEdge(): def __init__(self, *edges): self.edges = set(edges) def __contains__(self, item): return item in self.edges def __str__(self): return "HyperEdge({})".format(self.edges) def __repr__(self): return "HyperEdge({})".format(self.edges) def get_joints_at_kdist(agent_id, agent_partitions, hyperedges, k=0, kagents=False,): """ Identify all joints at distance <= k from agent agent_id :param agent_id: id of agent to be considered :param agent_partitions: list of joint tuples in order of agentids :param edges: list of tuples (joint1, joint2) :param k: kth degree :param kagents: True (observe all joints of an agent if a single one is) or False (individual joint granularity) :return: dict with k as key, and list of joints at that distance """ assert not kagents, "kagents not implemented!" agent_joints = agent_partitions[agent_id] def _adjacent(lst, kagents=False): # return all sets adjacent to any element in lst ret = set([]) for l in lst: ret = ret.union(set(itertools.chain(*[e.edges.difference({l}) for e in hyperedges if l in e]))) return ret seen = set([]) new = set([]) k_dict = {} for _k in range(k+1): if not _k: new = set(agent_joints) else: print(hyperedges) new = _adjacent(new) - seen seen = seen.union(new) k_dict[_k] = sorted(list(new), key=lambda x:x.label) return k_dict def build_obs(env, k_dict, k_categories, global_dict, global_categories, vec_len=None): """Given a k_dict from get_joints_at_kdist, extract observation vector. :param k_dict: k_dict :param qpos: qpos numpy array :param qvel: qvel numpy array :param vec_len: if None no padding, else zero-pad to vec_len :return: observation vector """ # TODO: This needs to be fixed, it was designed for half-cheetah only! #if add_global_pos: # obs_qpos_lst.append(global_qpos) # obs_qvel_lst.append(global_qvel) body_set_dict = {} obs_lst = [] # Add parts attributes for k in sorted(list(k_dict.keys())): cats = k_categories[k] for _t in k_dict[k]: for c in cats: if c in _t.extra_obs: items = _t.extra_obs[c](env).tolist() obs_lst.extend(items if isinstance(items, list) else [items]) else: if c in ["qvel","qpos"]: # this is a "joint position/velocity" item items = getattr(env.sim.data, c)[getattr(_t, "{}_ids".format(c))] obs_lst.extend(items if isinstance(items, list) else [items]) elif c in ["qfrc_actuator"]: # this is a "vel position" item items = getattr(env.sim.data, c)[getattr(_t, "{}_ids".format("qvel"))] obs_lst.extend(items if isinstance(items, list) else [items]) elif c in ["cvel", "cinert", "cfrc_ext"]: # this is a "body position" item if _t.bodies is not None: for b in _t.bodies: if c not in body_set_dict: body_set_dict[c] = set() if b not in body_set_dict[c]: items = getattr(env.sim.data, c)[b].tolist() items = getattr(_t, "body_fn", lambda _id,x:x)(b, items) obs_lst.extend(items if isinstance(items, list) else [items]) body_set_dict[c].add(b) # Add global attributes body_set_dict = {} for c in global_categories: if c in ["qvel", "qpos"]: # this is a "joint position" item for j in global_dict.get("joints", []): items = getattr(env.sim.data, c)[getattr(j, "{}_ids".format(c))] obs_lst.extend(items if isinstance(items, list) else [items]) else: for b in global_dict.get("bodies", []): if c not in body_set_dict: body_set_dict[c] = set() if b not in body_set_dict[c]: obs_lst.extend(getattr(env.sim.data, c)[b].tolist()) body_set_dict[c].add(b) if vec_len is not None: pad = np.array((vec_len - len(obs_lst))*[0]) if len(pad): return np.concatenate([np.array(obs_lst), pad]) return 
np.array(obs_lst) def build_actions(agent_partitions, k_dict): # Composes agent actions output from networks # into coherent joint action vector to be sent to the env. pass def get_parts_and_edges(label, partitioning): if label in ["half_cheetah", "HalfCheetah-v2"]: # define Mujoco graph bthigh = Node("bthigh", -6, -6, 0) bshin = Node("bshin", -5, -5, 1) bfoot = Node("bfoot", -4, -4, 2) fthigh = Node("fthigh", -3, -3, 3) fshin = Node("fshin", -2, -2, 4) ffoot = Node("ffoot", -1, -1, 5) edges = [HyperEdge(bfoot, bshin), HyperEdge(bshin, bthigh), HyperEdge(bthigh, fthigh), HyperEdge(fthigh, fshin), HyperEdge(fshin, ffoot)] root_x = Node("root_x", 0, 0, -1, extra_obs={"qpos": lambda env: np.array([])}) root_z = Node("root_z", 1, 1, -1) root_y = Node("root_y", 2, 2, -1) globals = {"joints":[root_x, root_y, root_z]} if partitioning == "2x3": parts = [(bfoot, bshin, bthigh), (ffoot, fshin, fthigh)] elif partitioning == "6x1": parts = [(bfoot,), (bshin,), (bthigh,), (ffoot,), (fshin,), (fthigh,)] elif partitioning == "3x2": parts = [(bfoot, bshin,), (bthigh, ffoot,), (fshin, fthigh,)] else: raise Exception("UNKNOWN partitioning config: {}".format(partitioning)) return parts, edges, globals elif label in ["Ant-v2"]: # define Mujoco graph torso = 1 front_left_leg = 2 aux_1 = 3 ankle_1 = 4 front_right_leg = 5 aux_2 = 6 ankle_2 = 7 back_leg = 8 aux_3 = 9 ankle_3 = 10 right_back_leg = 11 aux_4 = 12 ankle_4 = 13 hip1 = Node("hip1", -8, -8, 2, bodies=[torso, front_left_leg], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist()) # ankle1 = Node("ankle1", -7, -7, 3, bodies=[front_left_leg, aux_1, ankle_1], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())#, hip2 = Node("hip2", -6, -6, 4, bodies=[torso, front_right_leg], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())#, ankle2 = Node("ankle2", -5, -5, 5, bodies=[front_right_leg, aux_2, ankle_2], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())#, hip3 = Node("hip3", -4, -4, 6, bodies=[torso, back_leg], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())#, ankle3 = Node("ankle3", -3, -3, 7, bodies=[back_leg, aux_3, ankle_3], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())#, hip4 = Node("hip4", -2, -2, 0, bodies=[torso, right_back_leg], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())#, ankle4 = Node("ankle4", -1, -1, 1, bodies=[right_back_leg, aux_4, ankle_4], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())#, edges = [HyperEdge(ankle4, hip4), HyperEdge(ankle1, hip1), HyperEdge(ankle2, hip2), HyperEdge(ankle3, hip3), HyperEdge(hip4, hip1, hip2, hip3), ] free_joint = Node("free", 0, 0, -1, extra_obs={"qpos": lambda env: env.sim.data.qpos[:7], "qvel": lambda env: env.sim.data.qvel[:6], "cfrc_ext": lambda env: np.clip(env.sim.data.cfrc_ext[0:1], -1, 1)}) globals = {"joints": [free_joint]} if partitioning == "2x4": # neighbouring legs together parts = [(hip1, ankle1, hip2, ankle2), (hip3, ankle3, hip4, ankle4)] elif partitioning == "2x4d": # diagonal legs together parts = [(hip1, ankle1, hip3, ankle3), (hip2, ankle2, hip4, ankle4)] elif partitioning == "4x2": parts = [(hip1, ankle1), (hip2, ankle2), (hip3, ankle3), (hip4, ankle4)] elif partitioning == "8x1": parts = [(hip1,), (ankle1,), (hip2,), (ankle2,), (hip3,), (ankle3,), (hip4,), (ankle4,)] else: raise Exception("UNKNOWN partitioning config: {}".format(partitioning)) return parts, edges, globals elif label in ["Hopper-v2"]: # define Mujoco-Graph thigh_joint = Node("thigh_joint", -3, -3, 0, extra_obs={"qvel": lambda env: np.clip(np.array([env.sim.data.qvel[-3]]), -10, 10)}) leg_joint = Node("leg_joint", -2, 
-2, 1, extra_obs={"qvel": lambda env: np.clip(np.array([env.sim.data.qvel[-2]]), -10, 10)}) foot_joint = Node("foot_joint", -1, -1, 2, extra_obs={"qvel": lambda env: np.clip(np.array([env.sim.data.qvel[-1]]), -10, 10)}) edges = [HyperEdge(foot_joint, leg_joint), HyperEdge(leg_joint, thigh_joint)] root_x = Node("root_x", 0, 0, -1, extra_obs={"qpos": lambda env: np.array([]), "qvel": lambda env: np.clip(np.array([env.sim.data.qvel[1]]), -10, 10)}) root_z = Node("root_z", 1, 1, -1, extra_obs={"qvel": lambda env: np.clip(np.array([env.sim.data.qvel[1]]), -10, 10)}) root_y = Node("root_y", 2, 2, -1, extra_obs={"qvel": lambda env: np.clip(np.array([env.sim.data.qvel[2]]), -10, 10)}) globals = {"joints":[root_x, root_y, root_z]} if partitioning == "3x1": parts = [(thigh_joint,), (leg_joint,), (foot_joint,)] else: raise Exception("UNKNOWN partitioning config: {}".format(partitioning)) return parts, edges, globals elif label in ["Humanoid-v2", "HumanoidStandup-v2"]: # define Mujoco-Graph abdomen_y = Node("abdomen_y", -16, -16, 0) # act ordering bug in env -- double check! abdomen_z = Node("abdomen_z", -17, -17, 1) abdomen_x = Node("abdomen_x", -15, -15, 2) right_hip_x = Node("right_hip_x", -14, -14, 3) right_hip_z = Node("right_hip_z", -13, -13, 4) right_hip_y = Node("right_hip_y", -12, -12, 5) right_knee = Node("right_knee", -11, -11, 6) left_hip_x = Node("left_hip_x", -10, -10, 7) left_hip_z = Node("left_hip_z", -9, -9, 8) left_hip_y = Node("left_hip_y", -8, -8, 9) left_knee = Node("left_knee", -7, -7, 10) right_shoulder1 = Node("right_shoulder1", -6, -6, 11) right_shoulder2 = Node("right_shoulder2", -5, -5, 12) right_elbow = Node("right_elbow", -4, -4, 13) left_shoulder1 = Node("left_shoulder1", -3, -3, 14) left_shoulder2 = Node("left_shoulder2", -2, -2, 15) left_elbow = Node("left_elbow", -1, -1, 16) edges = [HyperEdge(abdomen_x, abdomen_y, abdomen_z), HyperEdge(right_hip_x, right_hip_y, right_hip_z), HyperEdge(left_hip_x, left_hip_y, left_hip_z), HyperEdge(left_elbow, left_shoulder1, left_shoulder2), HyperEdge(right_elbow, right_shoulder1, right_shoulder2), HyperEdge(left_knee, left_hip_x, left_hip_y, left_hip_z), HyperEdge(right_knee, right_hip_x, right_hip_y, right_hip_z), HyperEdge(left_shoulder1, left_shoulder2, abdomen_x, abdomen_y, abdomen_z), HyperEdge(right_shoulder1, right_shoulder2, abdomen_x, abdomen_y, abdomen_z), HyperEdge(abdomen_x, abdomen_y, abdomen_z, left_hip_x, left_hip_y, left_hip_z), HyperEdge(abdomen_x, abdomen_y, abdomen_z, right_hip_x, right_hip_y, right_hip_z), ] globals = {} if partitioning == "9|8": # 17 in total, so one action is a dummy (to be handled by pymarl) # isolate upper and lower body parts = [(left_shoulder1, left_shoulder2, abdomen_x, abdomen_y, abdomen_z, right_shoulder1, right_shoulder2, right_elbow, left_elbow), (left_hip_x, left_hip_y, left_hip_z, right_hip_x, right_hip_y, right_hip_z, right_knee, left_knee)] # TODO: There could be tons of decompositions here elif partitioning == "17x1": # 17 in total, so one action is a dummy (to be handled by pymarl) # isolate upper and lower body parts = [(left_shoulder1,), (left_shoulder2,), (abdomen_x,), (abdomen_y,), (abdomen_z,), (right_shoulder1,), (right_shoulder2,), (right_elbow,), (left_elbow,), (left_hip_x,), (left_hip_y,), (left_hip_z,), (right_hip_x,), (right_hip_y,), (right_hip_z,), (right_knee,), (left_knee,)] else: raise Exception("UNKNOWN partitioning config: {}".format(partitioning)) return parts, edges, globals elif label in ["Reacher-v2"]: # define Mujoco-Graph body0 = 1 body1 = 2 fingertip = 3 
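# Descriptive note on the Node convention used throughout this file: Node(label, qpos_ids, qvel_ids, act_ids, ...)
# stores the joint's indices into sim.data.qpos / sim.data.qvel (negative values count from the end of those arrays)
# and the joint's actuator index; an extra_obs lambda, where given, overrides how build_obs() reads that
# observation category for the joint.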
joint0 = Node("joint0", -4, -4, 0, bodies=[body0, body1], extra_obs={"qpos":(lambda env:np.array([np.sin(env.sim.data.qpos[-4]), np.cos(env.sim.data.qpos[-4])]))}) joint1 = Node("joint1", -3, -3, 1, bodies=[body1, fingertip], extra_obs={"fingertip_dist":(lambda env:env.get_body_com("fingertip") - env.get_body_com("target")), "qpos":(lambda env:np.array([np.sin(env.sim.data.qpos[-3]), np.cos(env.sim.data.qpos[-3])]))}) edges = [HyperEdge(joint0, joint1)] worldbody = 0 target = 4 target_x = Node("target_x", -2, -2, -1, extra_obs={"qvel":(lambda env:np.array([]))}) target_y = Node("target_y", -1, -1, -1, extra_obs={"qvel":(lambda env:np.array([]))}) globals = {"bodies":[worldbody, target], "joints":[target_x, target_y]} if partitioning == "2x1": # isolate upper and lower arms parts = [(joint0,), (joint1,)] # TODO: There could be tons of decompositions here else: raise Exception("UNKNOWN partitioning config: {}".format(partitioning)) return parts, edges, globals elif label in ["Swimmer-v2"]: # define Mujoco-Graph joint0 = Node("rot2", -2, -2, 0) # TODO: double-check ids joint1 = Node("rot3", -1, -1, 1) edges = [HyperEdge(joint0, joint1)] globals = {} if partitioning == "2x1": # isolate upper and lower body parts = [(joint0,), (joint1,)] # TODO: There could be tons of decompositions here else: raise Exception("UNKNOWN partitioning config: {}".format(partitioning)) return parts, edges, globals elif label in ["Walker2d-v2"]: # define Mujoco-Graph thigh_joint = Node("thigh_joint", -6, -6, 0) leg_joint = Node("leg_joint", -5, -5, 1) foot_joint = Node("foot_joint", -4, -4, 2) thigh_left_joint = Node("thigh_left_joint", -3, -3, 3) leg_left_joint = Node("leg_left_joint", -2, -2, 4) foot_left_joint = Node("foot_left_joint", -1, -1, 5) edges = [HyperEdge(foot_joint, leg_joint), HyperEdge(leg_joint, thigh_joint), HyperEdge(foot_left_joint, leg_left_joint), HyperEdge(leg_left_joint, thigh_left_joint), HyperEdge(thigh_joint, thigh_left_joint) ] globals = {} if partitioning == "2x3": # isolate upper and lower body parts = [(foot_joint, leg_joint, thigh_joint), (foot_left_joint, leg_left_joint, thigh_left_joint,)] # TODO: There could be tons of decompositions here else: raise Exception("UNKNOWN partitioning config: {}".format(partitioning)) return parts, edges, globals elif label in ["coupled_half_cheetah"]: # define Mujoco graph tendon = 0 bthigh = Node("bthigh", -6, -6, 0, tendons=[tendon], extra_obs = {"ten_J": lambda env: env.sim.data.ten_J[tendon], "ten_length": lambda env: env.sim.data.ten_length, "ten_velocity": lambda env: env.sim.data.ten_velocity}) bshin = Node("bshin", -5, -5, 1) bfoot = Node("bfoot", -4, -4, 2) fthigh = Node("fthigh", -3, -3, 3) fshin = Node("fshin", -2, -2, 4) ffoot = Node("ffoot", -1, -1, 5) bthigh2 = Node("bthigh2", -6, -6, 0, tendons=[tendon], extra_obs={"ten_J": lambda env: env.sim.data.ten_J[tendon], "ten_length": lambda env: env.sim.data.ten_length, "ten_velocity": lambda env: env.sim.data.ten_velocity}) bshin2 = Node("bshin2", -5, -5, 1) bfoot2 = Node("bfoot2", -4, -4, 2) fthigh2 = Node("fthigh2", -3, -3, 3) fshin2 = Node("fshin2", -2, -2, 4) ffoot2 = Node("ffoot2", -1, -1, 5) edges = [HyperEdge(bfoot, bshin), HyperEdge(bshin, bthigh), HyperEdge(bthigh, fthigh), HyperEdge(fthigh, fshin), HyperEdge(fshin, ffoot), HyperEdge(bfoot2, bshin2), HyperEdge(bshin2, bthigh2), HyperEdge(bthigh2, fthigh2), HyperEdge(fthigh2, fshin2), HyperEdge(fshin2, ffoot2) ] globals = {} root_x = Node("root_x", 0, 0, -1, extra_obs={"qpos": lambda env: np.array([])}) root_z = Node("root_z", 1, 1, 
-1) root_y = Node("root_y", 2, 2, -1) globals = {"joints":[root_x, root_y, root_z]} if partitioning == "1p1": parts = [(bfoot, bshin, bthigh, ffoot, fshin, fthigh), (bfoot2, bshin2, bthigh2, ffoot2, fshin2, fthigh2) ] else: raise Exception("UNKNOWN partitioning config: {}".format(partitioning)) return parts, edges, globals elif label in ["manyagent_swimmer"]: # Generate asset file try: n_agents = int(partitioning.split("x")[0]) n_segs_per_agents = int(partitioning.split("x")[1]) n_segs = n_agents * n_segs_per_agents except Exception as e: raise Exception("UNKNOWN partitioning config: {}".format(partitioning)) # Note: Default Swimmer corresponds to n_segs = 3 # define Mujoco-Graph joints = [Node("rot{:d}".format(i), -n_segs + i, -n_segs + i, i) for i in range(0, n_segs)] edges = [HyperEdge(joints[i], joints[i+1]) for i in range(n_segs-1)] globals = {} parts = [tuple(joints[i * n_segs_per_agents:(i + 1) * n_segs_per_agents]) for i in range(n_agents)] return parts, edges, globals elif label in ["manyagent_ant"]: # TODO: FIX! # Generate asset file try: n_agents = int(partitioning.split("x")[0]) n_segs_per_agents = int(partitioning.split("x")[1]) n_segs = n_agents * n_segs_per_agents except Exception as e: raise Exception("UNKNOWN partitioning config: {}".format(partitioning)) # # define Mujoco graph # torso = 1 # front_left_leg = 2 # aux_1 = 3 # ankle_1 = 4 # right_back_leg = 11 # aux_4 = 12 # ankle_4 = 13 # # off = -4*(n_segs-1) # hip1 = Node("hip1", -4-off, -4-off, 2, bodies=[torso, front_left_leg], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist()) # # ankle1 = Node("ankle1", -3-off, -3-off, 3, bodies=[front_left_leg, aux_1, ankle_1], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())#, # hip4 = Node("hip4", -2-off, -2-off, 0, bodies=[torso, right_back_leg], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())#, # ankle4 = Node("ankle4", -1-off, -1-off, 1, bodies=[right_back_leg, aux_4, ankle_4], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())#, # # edges = [HyperEdge(ankle4, hip4), # HyperEdge(ankle1, hip1), # HyperEdge(hip4, hip1), # ] edges = [] joints = [] for si in range(n_segs): torso = 1 + si*7 front_right_leg = 2 + si*7 aux1 = 3 + si*7 ankle1 = 4 + si*7 back_leg = 5 + si*7 aux2 = 6 + si*7 ankle2 = 7 + si*7 off = -4 * (n_segs - 1 - si) hip1n = Node("hip1_{:d}".format(si), -4-off, -4-off, 2+4*si, bodies=[torso, front_right_leg], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist()) ankle1n = Node("ankle1_{:d}".format(si), -3-off, -3-off, 3+4*si, bodies=[front_right_leg, aux1, ankle1], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist()) hip2n = Node("hip2_{:d}".format(si), -2-off, -2-off, 0+4*si, bodies=[torso, back_leg], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist()) ankle2n = Node("ankle2_{:d}".format(si), -1-off, -1-off, 1+4*si, bodies=[back_leg, aux2, ankle2], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist()) edges += [HyperEdge(ankle1n, hip1n), HyperEdge(ankle2n, hip2n), HyperEdge(hip1n, hip2n)] if si: edges += [HyperEdge(hip1m, hip2m, hip1n, hip2n)] hip1m = deepcopy(hip1n) hip2m = deepcopy(hip2n) joints.append([hip1n, ankle1n, hip2n, ankle2n]) free_joint = Node("free", 0, 0, -1, extra_obs={"qpos": lambda env: env.sim.data.qpos[:7], "qvel": lambda env: env.sim.data.qvel[:6], "cfrc_ext": lambda env: np.clip(env.sim.data.cfrc_ext[0:1], -1, 1)}) globals = {"joints": [free_joint]} parts = [[x for sublist in joints[i * n_segs_per_agents:(i + 1) * n_segs_per_agents] for x in sublist] for i in range(n_agents)] return parts, edges, globals ================================================ FILE: 
MACPO/macpo/envs/safety_ma_mujoco/test.py ================================================ from safety_multiagent_mujoco.mujoco_multi import MujocoMulti import numpy as np import time def main(): # Swimmer # env_args = {"scenario": "manyagent_swimmer", # "agent_conf": "10x2", # "agent_obsk": 1, # "episode_limit": 1000} # coupled_half_cheetah # env_args = {"scenario": "coupled_half_cheetah", # "agent_conf": "1p1", # "agent_obsk": 1, # "episode_limit": 1000} # ANT 4 # env_args = {"scenario": "manyagent_ant", # "agent_conf": "3x2", # "agent_obsk": 1, # "episode_limit": 1000} # env_args = {"scenario": "manyagent_swimmer", # "agent_conf": "10x2", # "agent_obsk": 1, # "episode_limit": 1000} env_args = {"scenario": "HalfCheetah-v2", "agent_conf": "2x3", "agent_obsk": 1, "episode_limit": 1000} # env_args = {"scenario": "Hopper-v2", # "agent_conf": "3x1", # "agent_obsk": 1, # "episode_limit": 1000} # env_args = {"scenario": "Humanoid-v2", # "agent_conf": "9|8", # "agent_obsk": 1, # "episode_limit": 1000} # env_args = {"scenario": "Humanoid-v2", # "agent_conf": "17x1", # "agent_obsk": 1, # "episode_limit": 1000} # env_args = {"scenario": "Ant-v2", # "agent_conf": "2x4", # "agent_obsk": 1, # "episode_limit": 1000} # env_args = {"scenario": "Ant-v2", # "agent_conf": "2x4d", # "agent_obsk": 1, # "episode_limit": 1000} # env_args = {"scenario": "Ant-v2", # "agent_conf": "4x2", # "agent_obsk": 1, # "episode_limit": 1000} env = MujocoMulti(env_args=env_args) env_info = env.get_env_info() n_actions = env_info["n_actions"] n_agents = env_info["n_agents"] n_episodes = 10 for e in range(n_episodes): ob=env.reset() terminated = False episode_reward = 0 while not terminated: obs = env.get_obs() state = env.get_state() actions = [] for agent_id in range(n_agents): avail_actions = env.get_avail_agent_actions(agent_id) avail_actions_ind = np.nonzero(avail_actions)[0] action = np.random.uniform(-10, 0.0, n_actions) actions.append(action) # reward, terminated, _ = env.step(actions) # print("env.step(actions): ", env.step(actions)) get_obs, get_state, reward, dones, infos, get_avail_actions= env.step(actions) # episode_reward += reward # print("reward: ", reward) cost_x= [[item['cost']] for item in infos] print("cost_x:", cost_x) print("reward:", reward) # time.sleep(0.1) env.render() # print("Total reward in episode {} = {}".format(e, episode_reward)) env.close() if __name__ == "__main__": main() """ infos[cost]: [{'cost': 0.0, 'reward_forward': -0.6434413402233052, 'reward_ctrl': -4.010836585120964, 'reward_contact': -1.2071856383999997e-13, 'reward_survive': 1.0, 'cost_obj': 0.0, 'cost_done': 0.0}, {'cost': 0.0, 'reward_forward': -0.6434413402233052, 'reward_ctrl': -4.010836585120964, 'reward_contact': -1.2071856383999997e-13, 'reward_survive': 1.0, 'cost_obj': 0.0, 'cost_done': 0.0}, {'cost': 0.0, 'reward_forward': -0.6434413402233052, 'reward_ctrl': -4.010836585120964, 'reward_contact': -1.2071856383999997e-13, 'reward_survive': 1.0, 'cost_obj': 0.0, 'cost_done': 0.0}] """ ================================================ FILE: MACPO/macpo/runner/__init__.py ================================================ from macpo.runner import separated __all__=[ "separated" ] ================================================ FILE: MACPO/macpo/runner/separated/__init__.py ================================================ from macpo.runner.separated import base_runner __all__=[ "base_runner" ] ================================================ FILE: MACPO/macpo/runner/separated/base_runner.py 
================================================ import time import wandb import os import numpy as np from itertools import chain import torch from tensorboardX import SummaryWriter from macpo.utils.separated_buffer import SeparatedReplayBuffer from macpo.utils.util import update_linear_schedule def _t2n(x): return x.detach().cpu().numpy() class Runner(object): def __init__(self, config): self.all_args = config['all_args'] self.envs = config['envs'] self.eval_envs = config['eval_envs'] self.device = config['device'] self.num_agents = config['num_agents'] # parameters self.env_name = self.all_args.env_name self.algorithm_name = self.all_args.algorithm_name self.experiment_name = self.all_args.experiment_name self.use_centralized_V = self.all_args.use_centralized_V self.use_obs_instead_of_state = self.all_args.use_obs_instead_of_state self.num_env_steps = self.all_args.num_env_steps self.episode_length = self.all_args.episode_length self.n_rollout_threads = self.all_args.n_rollout_threads self.n_eval_rollout_threads = self.all_args.n_eval_rollout_threads self.use_linear_lr_decay = self.all_args.use_linear_lr_decay self.hidden_size = self.all_args.hidden_size self.use_wandb = self.all_args.use_wandb self.use_render = self.all_args.use_render self.recurrent_N = self.all_args.recurrent_N self.use_single_network = self.all_args.use_single_network # interval self.save_interval = self.all_args.save_interval self.use_eval = self.all_args.use_eval self.eval_interval = self.all_args.eval_interval self.log_interval = self.all_args.log_interval # dir self.model_dir = self.all_args.model_dir if self.use_render: import imageio self.run_dir = config["run_dir"] self.gif_dir = str(self.run_dir / 'gifs') if not os.path.exists(self.gif_dir): os.makedirs(self.gif_dir) else: if self.use_wandb: self.save_dir = str(wandb.run.dir) else: self.run_dir = config["run_dir"] self.log_dir = str(self.run_dir / 'logs') if not os.path.exists(self.log_dir): os.makedirs(self.log_dir) self.writter = SummaryWriter(self.log_dir) self.save_dir = str(self.run_dir / 'models') if not os.path.exists(self.save_dir): os.makedirs(self.save_dir) from macpo.algorithms.r_mappo.r_mappo import R_MAPPO as TrainAlgo from macpo.algorithms.r_mappo.algorithm.rMAPPOPolicy import R_MAPPOPolicy as Policy print("share_observation_space: ", self.envs.share_observation_space) print("observation_space: ", self.envs.observation_space) print("action_space: ", self.envs.action_space) self.policy = [] for agent_id in range(self.num_agents): share_observation_space = self.envs.share_observation_space[agent_id] if self.use_centralized_V else self.envs.observation_space[agent_id] # policy network po = Policy(self.all_args, self.envs.observation_space[agent_id], share_observation_space, self.envs.action_space[agent_id], device = self.device) self.policy.append(po) if self.model_dir is not None: self.restore() self.trainer = [] self.buffer = [] for agent_id in range(self.num_agents): # algorithm tr = TrainAlgo(self.all_args, self.policy[agent_id], device = self.device) # buffer share_observation_space = self.envs.share_observation_space[agent_id] if self.use_centralized_V else self.envs.observation_space[agent_id] bu = SeparatedReplayBuffer(self.all_args, self.envs.observation_space[agent_id], share_observation_space, self.envs.action_space[agent_id]) self.buffer.append(bu) self.trainer.append(tr) def run(self): raise NotImplementedError def warmup(self): raise NotImplementedError def collect(self, step): raise NotImplementedError def insert(self, data): raise 
NotImplementedError @torch.no_grad() def compute(self): for agent_id in range(self.num_agents): self.trainer[agent_id].prep_rollout() next_value = self.trainer[agent_id].policy.get_values(self.buffer[agent_id].share_obs[-1], self.buffer[agent_id].rnn_states_critic[-1], self.buffer[agent_id].masks[-1]) next_value = _t2n(next_value) self.buffer[agent_id].compute_returns(next_value, self.trainer[agent_id].value_normalizer) def train(self): # have modified for SAD_PPO train_infos = [] for agent_id in torch.randperm(self.num_agents): self.trainer[agent_id].prep_training() train_info = self.trainer[agent_id].train(self.buffer[agent_id]) train_infos.append(train_info) self.buffer[agent_id].after_update() return train_infos def save(self): for agent_id in range(self.num_agents): if self.use_single_network: policy_model = self.trainer[agent_id].policy.model torch.save(policy_model.state_dict(), str(self.save_dir) + "/model_agent" + str(agent_id) + ".pt") else: policy_actor = self.trainer[agent_id].policy.actor torch.save(policy_actor.state_dict(), str(self.save_dir) + "/actor_agent" + str(agent_id) + ".pt") policy_critic = self.trainer[agent_id].policy.critic torch.save(policy_critic.state_dict(), str(self.save_dir) + "/critic_agent" + str(agent_id) + ".pt") def restore(self): for agent_id in range(self.num_agents): if self.use_single_network: policy_model_state_dict = torch.load(str(self.model_dir) + '/model_agent' + str(agent_id) + '.pt') self.policy[agent_id].model.load_state_dict(policy_model_state_dict) else: policy_actor_state_dict = torch.load(str(self.model_dir) + '/actor_agent' + str(agent_id) + '.pt') self.policy[agent_id].actor.load_state_dict(policy_actor_state_dict) policy_critic_state_dict = torch.load(str(self.model_dir) + '/critic_agent' + str(agent_id) + '.pt') self.policy[agent_id].critic.load_state_dict(policy_critic_state_dict) def log_train(self, train_infos, total_num_steps): for agent_id in range(self.num_agents): for k, v in train_infos[agent_id].items(): agent_k = "agent%i/" % agent_id + k if self.use_wandb: wandb.log({agent_k: v}, step=total_num_steps) else: self.writter.add_scalars(agent_k, {agent_k: v}, total_num_steps) def log_env(self, env_infos, total_num_steps): for k, v in env_infos.items(): if len(v) > 0: if self.use_wandb: wandb.log({k: np.mean(v)}, step=total_num_steps) else: self.writter.add_scalars(k, {k: np.mean(v)}, total_num_steps) ================================================ FILE: MACPO/macpo/runner/separated/base_runner_macpo.py ================================================ import copy import time import wandb import os import numpy as np from itertools import chain import torch from tensorboardX import SummaryWriter from macpo.utils.separated_buffer import SeparatedReplayBuffer from macpo.utils.util import update_linear_schedule def _t2n(x): return x.detach().cpu().numpy() class Runner(object): def __init__(self, config): self.all_args = config['all_args'] self.envs = config['envs'] self.eval_envs = config['eval_envs'] self.device = config['device'] self.num_agents = config['num_agents'] # parameters self.env_name = self.all_args.env_name self.algorithm_name = self.all_args.algorithm_name self.experiment_name = self.all_args.experiment_name self.use_centralized_V = self.all_args.use_centralized_V self.use_obs_instead_of_state = self.all_args.use_obs_instead_of_state self.num_env_steps = self.all_args.num_env_steps self.episode_length = self.all_args.episode_length self.n_rollout_threads = self.all_args.n_rollout_threads self.n_eval_rollout_threads 
= self.all_args.n_eval_rollout_threads self.use_linear_lr_decay = self.all_args.use_linear_lr_decay self.hidden_size = self.all_args.hidden_size self.use_wandb = self.all_args.use_wandb self.use_render = self.all_args.use_render self.recurrent_N = self.all_args.recurrent_N self.use_single_network = self.all_args.use_single_network # interval self.save_interval = self.all_args.save_interval self.use_eval = self.all_args.use_eval self.eval_interval = self.all_args.eval_interval self.log_interval = self.all_args.log_interval self.gamma = self.all_args.gamma self.use_popart = self.all_args.use_popart self.safty_bound = self.all_args.safty_bound # dir self.model_dir = self.all_args.model_dir if self.use_render: import imageio self.run_dir = config["run_dir"] self.gif_dir = str(self.run_dir / 'gifs') if not os.path.exists(self.gif_dir): os.makedirs(self.gif_dir) else: if self.use_wandb: self.save_dir = str(wandb.run.dir) else: self.run_dir = config["run_dir"] self.log_dir = str(self.run_dir / 'logs') if not os.path.exists(self.log_dir): os.makedirs(self.log_dir) self.writter = SummaryWriter(self.log_dir) self.save_dir = str(self.run_dir / 'models') if not os.path.exists(self.save_dir): os.makedirs(self.save_dir) from macpo.algorithms.r_mappo.r_macpo import R_MACTRPO_CPO as TrainAlgo from macpo.algorithms.r_mappo.algorithm.MACPPOPolicy import MACPPOPolicy as Policy self.policy = [] for agent_id in range(self.num_agents): share_observation_space = self.envs.share_observation_space[agent_id] if self.use_centralized_V else \ self.envs.observation_space[agent_id] # policy network po = Policy(self.all_args, self.envs.observation_space[agent_id], share_observation_space, self.envs.action_space[agent_id], device=self.device) self.policy.append(po) if self.model_dir is not None: self.restore() self.trainer = [] self.buffer = [] # todo: revise this for trpo for agent_id in range(self.num_agents): # algorithm tr = TrainAlgo(self.all_args, self.policy[agent_id], device=self.device) # buffer share_observation_space = self.envs.share_observation_space[agent_id] if self.use_centralized_V else \ self.envs.observation_space[agent_id] bu = SeparatedReplayBuffer(self.all_args, self.envs.observation_space[agent_id], share_observation_space, self.envs.action_space[agent_id]) self.buffer.append(bu) self.trainer.append(tr) def run(self): raise NotImplementedError def warmup(self): raise NotImplementedError def collect(self, step): raise NotImplementedError def insert(self, data): raise NotImplementedError @torch.no_grad() def compute(self): for agent_id in range(self.num_agents): self.trainer[agent_id].prep_rollout() next_value = self.trainer[agent_id].policy.get_values(self.buffer[agent_id].share_obs[-1], self.buffer[agent_id].rnn_states_critic[-1], self.buffer[agent_id].masks[-1]) next_value = _t2n(next_value) self.buffer[agent_id].compute_returns(next_value, self.trainer[agent_id].value_normalizer) next_costs = self.trainer[agent_id].policy.get_cost_values(self.buffer[agent_id].share_obs[-1], self.buffer[agent_id].rnn_states_cost[-1], self.buffer[agent_id].masks[-1]) next_costs = _t2n(next_costs) self.buffer[agent_id].compute_cost_returns(next_costs, self.trainer[agent_id].value_normalizer) def train(self): # have modified for SAD_PPO train_infos = [] cost_train_infos = [] # random update order action_dim = self.buffer[0].actions.shape[-1] factor = np.ones((self.episode_length, self.n_rollout_threads, action_dim), dtype=np.float32) for agent_id in torch.randperm(self.num_agents): 
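# Agents are updated one at a time in a random permutation. "factor" starts as ones
# and, after each agent's update, is multiplied by that agent's probability ratio
# pi_new(a|o) / pi_old(a|o), computed below from the two evaluate_actions calls that
# bracket trainer.train. Conceptually, for the i-th agent in the sampled order:
#
#     factor_i(t) = prod_{j updated before i} exp(logp_new_j(t) - logp_old_j(t))
#
# so each later agent sees the already-updated policies of the earlier ones through
# buffer.update_factor(factor); this is the sequential-update scheme this runner
# relies on.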
self.trainer[agent_id].prep_training() self.buffer[agent_id].update_factor(factor) available_actions = None if self.buffer[agent_id].available_actions is None \ else self.buffer[agent_id].available_actions[:-1].reshape(-1, *self.buffer[ agent_id].available_actions.shape[ 2:]) if self.all_args.algorithm_name == "macpo": old_actions_logprob, _, _, _ = self.trainer[agent_id].policy.actor.evaluate_actions( self.buffer[agent_id].obs[:-1].reshape(-1, *self.buffer[agent_id].obs.shape[2:]), self.buffer[agent_id].rnn_states[0:1].reshape(-1, *self.buffer[agent_id].rnn_states.shape[2:]), self.buffer[agent_id].actions.reshape(-1, *self.buffer[agent_id].actions.shape[2:]), self.buffer[agent_id].masks[:-1].reshape(-1, *self.buffer[agent_id].masks.shape[2:]), available_actions, self.buffer[agent_id].active_masks[:-1].reshape(-1, *self.buffer[agent_id].active_masks.shape[2:])) else: old_actions_logprob, _ = self.trainer[agent_id].policy.actor.evaluate_actions( self.buffer[agent_id].obs[:-1].reshape(-1, *self.buffer[agent_id].obs.shape[2:]), self.buffer[agent_id].rnn_states[0:1].reshape(-1, *self.buffer[agent_id].rnn_states.shape[2:]), self.buffer[agent_id].actions.reshape(-1, *self.buffer[agent_id].actions.shape[2:]), self.buffer[agent_id].masks[:-1].reshape(-1, *self.buffer[agent_id].masks.shape[2:]), available_actions, self.buffer[agent_id].active_masks[:-1].reshape(-1, *self.buffer[agent_id].active_masks.shape[2:])) # safe_buffer, cost_adv = self.buffer_filter(agent_id) # train_info = self.trainer[agent_id].train(safe_buffer, cost_adv) train_info = self.trainer[agent_id].train(self.buffer[agent_id]) new_actions_logprob, dist_entropy, action_mu, action_std = self.trainer[agent_id].policy.actor.evaluate_actions( self.buffer[agent_id].obs[:-1].reshape(-1, *self.buffer[agent_id].obs.shape[2:]), self.buffer[agent_id].rnn_states[0:1].reshape(-1, *self.buffer[agent_id].rnn_states.shape[2:]), self.buffer[agent_id].actions.reshape(-1, *self.buffer[agent_id].actions.shape[2:]), self.buffer[agent_id].masks[:-1].reshape(-1, *self.buffer[agent_id].masks.shape[2:]), available_actions, self.buffer[agent_id].active_masks[:-1].reshape(-1, *self.buffer[agent_id].active_masks.shape[2:])) factor = factor * _t2n(torch.exp(new_actions_logprob - old_actions_logprob).reshape(self.episode_length, self.n_rollout_threads, action_dim)) train_infos.append(train_info) self.buffer[agent_id].after_update() return train_infos, cost_train_infos # episode length of envs is exactly equal to buffer size, that is, num_thread = num_episode def buffer_filter(self, agent_id): episode_length = len(self.buffer[0].rewards) # J constraints for all agents, just a toy example J = np.zeros((self.n_rollout_threads, 1), dtype=np.float32) for t in reversed(range(episode_length)): J = self.buffer[agent_id].costs[t] + self.gamma * J factor = self.buffer[agent_id].factor if self.use_popart: cost_adv = self.buffer[agent_id].cost_returns[:-1] - \ self.trainer[agent_id].value_normalizer.denormalize(self.buffer[agent_id].cost_preds[:-1]) else: cost_adv = self.buffer[agent_id].cost_returns[:-1] - self.buffer[agent_id].cost_preds[:-1] expectation = np.mean(factor * cost_adv, axis=(0, 2)) constraints_value = J + np.expand_dims(expectation, -1) del_id = [] for i in range(self.n_rollout_threads): if constraints_value[i][0] > self.safty_bound: del_id.append(i) buffer_filterd = self.remove_episodes(agent_id, del_id) return buffer_filterd, cost_adv def remove_episodes(self, agent_id, del_ids): buffer = copy.deepcopy(self.buffer[agent_id]) buffer.share_obs = 
np.delete(buffer.share_obs, del_ids, 1) buffer.obs = np.delete(buffer.obs, del_ids, 1) buffer.rnn_states = np.delete(buffer.rnn_states, del_ids, 1) buffer.rnn_states_critic = np.delete(buffer.rnn_states_critic, del_ids, 1) buffer.rnn_states_cost = np.delete(buffer.rnn_states_cost, del_ids, 1) buffer.value_preds = np.delete(buffer.value_preds, del_ids, 1) buffer.returns = np.delete(buffer.returns, del_ids, 1) if buffer.available_actions is not None: buffer.available_actions = np.delete(buffer.available_actions, del_ids, 1) buffer.actions = np.delete(buffer.actions, del_ids, 1) buffer.action_log_probs = np.delete(buffer.action_log_probs, del_ids, 1) buffer.rewards = np.delete(buffer.rewards, del_ids, 1) # todo: cost should be calculated entirely buffer.costs = np.delete(buffer.costs, del_ids, 1) buffer.cost_preds = np.delete(buffer.cost_preds, del_ids, 1) buffer.cost_returns = np.delete(buffer.cost_returns, del_ids, 1) buffer.masks = np.delete(buffer.masks, del_ids, 1) buffer.bad_masks = np.delete(buffer.bad_masks, del_ids, 1) buffer.active_masks = np.delete(buffer.active_masks, del_ids, 1) if buffer.factor is not None: buffer.factor = np.delete(buffer.factor, del_ids, 1) return buffer def save(self): for agent_id in range(self.num_agents): if self.use_single_network: policy_model = self.trainer[agent_id].policy.model torch.save(policy_model.state_dict(), str(self.save_dir) + "/model_agent" + str(agent_id) + ".pt") else: policy_actor = self.trainer[agent_id].policy.actor torch.save(policy_actor.state_dict(), str(self.save_dir) + "/actor_agent" + str(agent_id) + ".pt") policy_critic = self.trainer[agent_id].policy.critic torch.save(policy_critic.state_dict(), str(self.save_dir) + "/critic_agent" + str(agent_id) + ".pt") def restore(self): for agent_id in range(self.num_agents): if self.use_single_network: policy_model_state_dict = torch.load(str(self.model_dir) + '/model_agent' + str(agent_id) + '.pt') self.policy[agent_id].model.load_state_dict(policy_model_state_dict) else: policy_actor_state_dict = torch.load(str(self.model_dir) + '/actor_agent' + str(agent_id) + '.pt') self.policy[agent_id].actor.load_state_dict(policy_actor_state_dict) policy_critic_state_dict = torch.load(str(self.model_dir) + '/critic_agent' + str(agent_id) + '.pt') self.policy[agent_id].critic.load_state_dict(policy_critic_state_dict) def log_train(self, train_infos, total_num_steps): for agent_id in range(self.num_agents): for k, v in train_infos[agent_id].items(): agent_k = "agent%i/" % agent_id + k if self.use_wandb: wandb.log({agent_k: v}, step=total_num_steps) else: self.writter.add_scalars(agent_k, {agent_k: v}, total_num_steps) def log_env(self, env_infos, total_num_steps): for k, v in env_infos.items(): if len(v) > 0: if self.use_wandb: wandb.log({k: np.mean(v)}, step=total_num_steps) else: self.writter.add_scalars(k, {k: np.mean(v)}, total_num_steps) ================================================ FILE: MACPO/macpo/runner/separated/mujoco_runner.py ================================================ import time import wandb import numpy as np from functools import reduce import torch from macpo.runner.separated.base_runner import Runner def _t2n(x): return x.detach().cpu().numpy() class MujocoRunner(Runner): """Runner class to perform training, evaluation. and data collection for SMAC. 
See parent class for details.""" def __init__(self, config): super(MujocoRunner, self).__init__(config) def run(self): self.warmup() start = time.time() episodes = int(self.num_env_steps) // self.episode_length // self.n_rollout_threads train_episode_rewards = [0 for _ in range(self.n_rollout_threads)] for episode in range(episodes): if self.use_linear_lr_decay: self.trainer.policy.lr_decay(episode, episodes) done_episodes_rewards = [] for step in range(self.episode_length): # Sample actions values, actions, action_log_probs, rnn_states, rnn_states_critic = self.collect(step) # Obser reward and next obs obs, share_obs, rewards, dones, infos, _ = self.envs.step(actions) dones_env = np.all(dones, axis=1) reward_env = np.mean(rewards, axis=1).flatten() train_episode_rewards += reward_env for t in range(self.n_rollout_threads): if dones_env[t]: done_episodes_rewards.append(train_episode_rewards[t]) train_episode_rewards[t] = 0 data = obs, share_obs, rewards, dones, infos, \ values, actions, action_log_probs, \ rnn_states, rnn_states_critic # insert data into buffer self.insert(data) # compute return and update network self.compute() train_infos = self.train() # post process total_num_steps = (episode + 1) * self.episode_length * self.n_rollout_threads # save model if (episode % self.save_interval == 0 or episode == episodes - 1): self.save() # log information if episode % self.log_interval == 0: end = time.time() print("\n Scenario {} Algo {} Exp {} updates {}/{} episodes, total num timesteps {}/{}, FPS {}.\n" .format(self.all_args.scenario, self.algorithm_name, self.experiment_name, episode, episodes, total_num_steps, self.num_env_steps, int(total_num_steps / (end - start)))) self.log_train(train_infos, total_num_steps) if len(done_episodes_rewards) > 0: aver_episode_rewards = np.mean(done_episodes_rewards) print("some episodes done, average rewards: ", aver_episode_rewards) self.writter.add_scalars("train_episode_rewards", {"aver_rewards": aver_episode_rewards}, total_num_steps) # eval if episode % self.eval_interval == 0 and self.use_eval: self.eval(total_num_steps) def warmup(self): # reset env obs, share_obs, _ = self.envs.reset() # replay buffer if not self.use_centralized_V: share_obs = obs for agent_id in range(self.num_agents): self.buffer[agent_id].share_obs[0] = share_obs[:, agent_id].copy() self.buffer[agent_id].obs[0] = obs[:, agent_id].copy() @torch.no_grad() def collect(self, step): value_collector = [] action_collector = [] action_log_prob_collector = [] rnn_state_collector = [] rnn_state_critic_collector = [] for agent_id in range(self.num_agents): self.trainer[agent_id].prep_rollout() value, action, action_log_prob, rnn_state, rnn_state_critic \ = self.trainer[agent_id].policy.get_actions(self.buffer[agent_id].share_obs[step], self.buffer[agent_id].obs[step], self.buffer[agent_id].rnn_states[step], self.buffer[agent_id].rnn_states_critic[step], self.buffer[agent_id].masks[step]) value_collector.append(_t2n(value)) action_collector.append(_t2n(action)) action_log_prob_collector.append(_t2n(action_log_prob)) rnn_state_collector.append(_t2n(rnn_state)) rnn_state_critic_collector.append(_t2n(rnn_state_critic)) # [self.envs, agents, dim] values = np.array(value_collector).transpose(1, 0, 2) actions = np.array(action_collector).transpose(1, 0, 2) action_log_probs = np.array(action_log_prob_collector).transpose(1, 0, 2) rnn_states = np.array(rnn_state_collector).transpose(1, 0, 2, 3) rnn_states_critic = np.array(rnn_state_critic_collector).transpose(1, 0, 2, 3) return values, 
actions, action_log_probs, rnn_states, rnn_states_critic def insert(self, data): obs, share_obs, rewards, dones, infos, \ values, actions, action_log_probs, rnn_states, rnn_states_critic = data dones_env = np.all(dones, axis=1) rnn_states[dones_env == True] = np.zeros( ((dones_env == True).sum(), self.num_agents, self.recurrent_N, self.hidden_size), dtype=np.float32) rnn_states_critic[dones_env == True] = np.zeros( ((dones_env == True).sum(), self.num_agents, *self.buffer[0].rnn_states_critic.shape[2:]), dtype=np.float32) masks = np.ones((self.n_rollout_threads, self.num_agents, 1), dtype=np.float32) masks[dones_env == True] = np.zeros(((dones_env == True).sum(), self.num_agents, 1), dtype=np.float32) active_masks = np.ones((self.n_rollout_threads, self.num_agents, 1), dtype=np.float32) active_masks[dones == True] = np.zeros(((dones == True).sum(), 1), dtype=np.float32) active_masks[dones_env == True] = np.ones(((dones_env == True).sum(), self.num_agents, 1), dtype=np.float32) if not self.use_centralized_V: share_obs = obs for agent_id in range(self.num_agents): self.buffer[agent_id].insert(share_obs[:, agent_id], obs[:, agent_id], rnn_states[:, agent_id], rnn_states_critic[:, agent_id], actions[:, agent_id], action_log_probs[:, agent_id], values[:, agent_id], rewards[:, agent_id], masks[:, agent_id], None, active_masks[:, agent_id], None) def log_train(self, train_infos, total_num_steps): print("average_step_rewards is {}.".format(np.mean(self.buffer[0].rewards))) for agent_id in range(self.num_agents): train_infos[agent_id]["average_step_rewards"] = np.mean(self.buffer[agent_id].rewards) for k, v in train_infos[agent_id].items(): agent_k = "agent%i/" % agent_id + k if self.use_wandb: wandb.log({agent_k: v}, step=total_num_steps) else: self.writter.add_scalars(agent_k, {agent_k: v}, total_num_steps) @torch.no_grad() def eval(self, total_num_steps): eval_episode = 0 eval_episode_rewards = [] one_episode_rewards = [] for eval_i in range(self.n_eval_rollout_threads): one_episode_rewards.append([]) eval_episode_rewards.append([]) eval_obs, eval_share_obs, _ = self.eval_envs.reset() eval_rnn_states = np.zeros((self.n_eval_rollout_threads, self.num_agents, self.recurrent_N, self.hidden_size), dtype=np.float32) eval_masks = np.ones((self.n_eval_rollout_threads, self.num_agents, 1), dtype=np.float32) while True: eval_actions_collector = [] eval_rnn_states_collector = [] for agent_id in range(self.num_agents): self.trainer[agent_id].prep_rollout() eval_actions, temp_rnn_state = \ self.trainer[agent_id].policy.act(eval_obs[:, agent_id], eval_rnn_states[:, agent_id], eval_masks[:, agent_id], deterministic=True) eval_rnn_states[:, agent_id] = _t2n(temp_rnn_state) eval_actions_collector.append(_t2n(eval_actions)) eval_actions = np.array(eval_actions_collector).transpose(1, 0, 2) # Obser reward and next obs eval_obs, eval_share_obs, eval_rewards, eval_dones, eval_infos, _ = self.eval_envs.step( eval_actions) for eval_i in range(self.n_eval_rollout_threads): one_episode_rewards[eval_i].append(eval_rewards[eval_i]) eval_dones_env = np.all(eval_dones, axis=1) eval_rnn_states[eval_dones_env == True] = np.zeros( ((eval_dones_env == True).sum(), self.num_agents, self.recurrent_N, self.hidden_size), dtype=np.float32) eval_masks = np.ones((self.all_args.n_eval_rollout_threads, self.num_agents, 1), dtype=np.float32) eval_masks[eval_dones_env == True] = np.zeros(((eval_dones_env == True).sum(), self.num_agents, 1), dtype=np.float32) for eval_i in range(self.n_eval_rollout_threads): if eval_dones_env[eval_i]: 
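# When a thread's evaluation episode terminates, its accumulated per-step rewards
# are summed into eval_episode_rewards and the per-thread accumulator is reset;
# evaluation stops once eval_episodes finished episodes have been collected across
# all eval threads (handled just below).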
eval_episode += 1 eval_episode_rewards[eval_i].append(np.sum(one_episode_rewards[eval_i], axis=0)) one_episode_rewards[eval_i] = [] if eval_episode >= self.all_args.eval_episodes: eval_episode_rewards = np.concatenate(eval_episode_rewards) eval_env_infos = {'eval_average_episode_rewards': eval_episode_rewards, 'eval_max_episode_rewards': [np.max(eval_episode_rewards)]} self.log_env(eval_env_infos, total_num_steps) print("eval_average_episode_rewards is {}.".format(np.mean(eval_episode_rewards))) break ================================================ FILE: MACPO/macpo/runner/separated/mujoco_runner_macpo.py ================================================ import time from itertools import chain import wandb import numpy as np from functools import reduce import torch from macpo.runner.separated.base_runner_macpo import Runner def _t2n(x): return x.detach().cpu().numpy() class MujocoRunner(Runner): """Runner class to perform training, evaluation. and data collection for SMAC. See parent class for details.""" def __init__(self, config): super(MujocoRunner, self).__init__(config) self.retrun_average_cost = 0 def run(self): self.warmup() start = time.time() episodes = int(self.num_env_steps) // self.episode_length // self.n_rollout_threads train_episode_rewards = [0 for _ in range(self.n_rollout_threads)] train_episode_costs = [0 for _ in range(self.n_rollout_threads)] for episode in range(episodes): if self.use_linear_lr_decay: self.trainer.policy.lr_decay(episode, episodes) done_episodes_rewards = [] done_episodes_costs = [] for step in range(self.episode_length): # Sample actions values, actions, action_log_probs, rnn_states, rnn_states_critic, cost_preds, \ rnn_states_cost = self.collect(step) # Obser reward cost and next obs obs, share_obs, rewards, costs, dones, infos, _ = self.envs.step(actions) dones_env = np.all(dones, axis=1) reward_env = np.mean(rewards, axis=1).flatten() cost_env = np.mean(costs, axis=1).flatten() train_episode_rewards += reward_env train_episode_costs += cost_env # print("reward_env--mujoco_runner_mappo_lagr", reward_env) # print("cost_env--mujoco_runner_mappo_lagr", cost_env) for t in range(self.n_rollout_threads): # print("dones_env--mujoco_runner_mappo_lagr", dones_env) if dones_env[t]: done_episodes_rewards.append(train_episode_rewards[t]) train_episode_rewards[t] = 0 done_episodes_costs.append(train_episode_costs[t]) train_episode_costs[t] = 0 # print("done_episodes_rewards--mujoco_runner_mappo_lagr", done_episodes_rewards) # print("done_episodes_costs--mujoco_runner_mappo_lagr", done_episodes_costs) done_episodes_costs_aver = np.mean(train_episode_costs) # print("train_episode_costs_aver",train_episode_costs_aver) data = obs, share_obs, rewards, costs, dones, infos, \ values, actions, action_log_probs, \ rnn_states, rnn_states_critic, cost_preds, rnn_states_cost, done_episodes_costs_aver # fixme: it's important!!! 
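# Compared with mujoco_runner.py, the per-step tuple packed into "data" above
# additionally carries the cost signals and the running average episode cost, and
# its field order must match the unpacking in insert() further down in this file.
# A sketch of the layout (names taken from this file):
#
#     data = (obs, share_obs, rewards, costs, dones, infos,
#             values, actions, action_log_probs,
#             rnn_states, rnn_states_critic,
#             cost_preds, rnn_states_cost, done_episodes_costs_aver)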
# insert data into buffer self.insert(data) # compute return and update network self.compute() train_infos = self.train() # post process total_num_steps = (episode + 1) * self.episode_length * self.n_rollout_threads # save model if (episode % self.save_interval == 0 or episode == episodes - 1): self.save() # log information if episode % self.log_interval == 0: end = time.time() print("\n Scenario {} Algo {} Exp {} updates {}/{} episodes, total num timesteps {}/{}, FPS {}.\n" .format(self.all_args.scenario, self.algorithm_name, self.experiment_name, episode, episodes, total_num_steps, self.num_env_steps, int(total_num_steps / (end - start)))) self.log_train(train_infos, total_num_steps) if len(done_episodes_rewards) > 0: aver_episode_rewards = np.mean(done_episodes_rewards) aver_episode_costs = np.mean(done_episodes_costs) # self.retrun_average_cost = aver_episode_costs self.return_aver_cost(aver_episode_costs) # self.insert(data, aver_episode_costs=aver_episode_costs) # print("+++++++=aver_episode_costs++++++++=", aver_episode_costs) # print("+++++++=data++++++++=", data) print("some episodes done, average rewards: {}, average costs: {}".format(aver_episode_rewards, aver_episode_costs)) self.writter.add_scalars("train_episode_rewards", {"aver_rewards": aver_episode_rewards}, total_num_steps) self.writter.add_scalars("train_episode_costs", {"aver_costs": aver_episode_costs}, total_num_steps) # eval if episode % self.eval_interval == 0 and self.use_eval: self.eval(total_num_steps) def return_aver_cost(self, aver_episode_costs): for agent_id in range(self.num_agents): self.buffer[agent_id].return_aver_insert(aver_episode_costs) # pass def warmup(self): # reset env obs, share_obs, _ = self.envs.reset() # replay buffer if not self.use_centralized_V: share_obs = obs for agent_id in range(self.num_agents): # print(share_obs[:, agent_id]) self.buffer[agent_id].share_obs[0] = share_obs[:, agent_id].copy() self.buffer[agent_id].obs[0] = obs[:, agent_id].copy() @torch.no_grad() def collect(self, step): # values, actions, action_log_probs, rnn_states, rnn_states_critic, cost_preds, \ # rnn_states_cost = self.collect(step) value_collector = [] action_collector = [] action_log_prob_collector = [] rnn_state_collector = [] rnn_state_critic_collector = [] cost_preds_collector = [] rnn_states_cost_collector = [] for agent_id in range(self.num_agents): self.trainer[agent_id].prep_rollout() value, action, action_log_prob, rnn_state, rnn_state_critic, cost_pred, rnn_state_cost \ = self.trainer[agent_id].policy.get_actions(self.buffer[agent_id].share_obs[step], self.buffer[agent_id].obs[step], self.buffer[agent_id].rnn_states[step], self.buffer[agent_id].rnn_states_critic[step], self.buffer[agent_id].masks[step], rnn_states_cost=self.buffer[agent_id].rnn_states_cost[step] ) value_collector.append(_t2n(value)) action_collector.append(_t2n(action)) action_log_prob_collector.append(_t2n(action_log_prob)) rnn_state_collector.append(_t2n(rnn_state)) rnn_state_critic_collector.append(_t2n(rnn_state_critic)) cost_preds_collector.append(_t2n(cost_pred)) rnn_states_cost_collector.append(_t2n(rnn_state_cost)) # [self.envs, agents, dim] values = np.array(value_collector).transpose(1, 0, 2) actions = np.array(action_collector).transpose(1, 0, 2) action_log_probs = np.array(action_log_prob_collector).transpose(1, 0, 2) rnn_states = np.array(rnn_state_collector).transpose(1, 0, 2, 3) rnn_states_critic = np.array(rnn_state_critic_collector).transpose(1, 0, 2, 3) cost_preds = np.array(cost_preds_collector).transpose(1, 0, 2) 
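# The per-agent collectors above are stacked to shape
# [num_agents, n_rollout_threads, dim] and then transposed so the rollout thread
# becomes the leading axis, i.e. [n_rollout_threads, num_agents, dim]
# (or [..., recurrent_N, hidden_size] for the RNN states), which is the layout the
# per-agent replay buffers index with [:, agent_id] in insert().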
rnn_states_cost = np.array(rnn_states_cost_collector).transpose(1, 0, 2, 3) return values, actions, action_log_probs, rnn_states, rnn_states_critic, cost_preds, rnn_states_cost def insert(self, data, aver_episode_costs = 0): aver_episode_costs = aver_episode_costs # print("self.insert(data, aver_episode_costs)", aver_episode_costs) obs, share_obs, rewards, costs, dones, infos, \ values, actions, action_log_probs, rnn_states, rnn_states_critic, cost_preds, rnn_states_cost, done_episodes_costs_aver = data # fixme:!!! # print("insert--rewards", rewards) dones_env = np.all(dones, axis=1) rnn_states[dones_env == True] = np.zeros( ((dones_env == True).sum(), self.num_agents, self.recurrent_N, self.hidden_size), dtype=np.float32) rnn_states_critic[dones_env == True] = np.zeros( ((dones_env == True).sum(), self.num_agents, *self.buffer[0].rnn_states_critic.shape[2:]), dtype=np.float32) rnn_states_cost[dones_env == True] = np.zeros( ((dones_env == True).sum(), self.num_agents, *self.buffer[0].rnn_states_cost.shape[2:]), dtype=np.float32) masks = np.ones((self.n_rollout_threads, self.num_agents, 1), dtype=np.float32) masks[dones_env == True] = np.zeros(((dones_env == True).sum(), self.num_agents, 1), dtype=np.float32) active_masks = np.ones((self.n_rollout_threads, self.num_agents, 1), dtype=np.float32) active_masks[dones == True] = np.zeros(((dones == True).sum(), 1), dtype=np.float32) active_masks[dones_env == True] = np.ones(((dones_env == True).sum(), self.num_agents, 1), dtype=np.float32) if not self.use_centralized_V: share_obs = obs for agent_id in range(self.num_agents): self.buffer[agent_id].insert(share_obs[:, agent_id], obs[:, agent_id], rnn_states[:, agent_id], rnn_states_critic[:, agent_id], actions[:, agent_id], action_log_probs[:, agent_id], values[:, agent_id], rewards[:, agent_id], masks[:, agent_id], None, active_masks[:, agent_id], None, costs=costs[:, agent_id], cost_preds=cost_preds[:, agent_id], rnn_states_cost=rnn_states_cost[:, agent_id], done_episodes_costs_aver=done_episodes_costs_aver, aver_episode_costs=aver_episode_costs) def log_train(self, train_infos, total_num_steps): print("average_step_rewards is {}.".format(np.mean(self.buffer[0].rewards))) train_infos[0][0]["average_step_rewards"] = 0 for agent_id in range(self.num_agents): train_infos[0][agent_id]["average_step_rewards"]= np.mean(self.buffer[agent_id].rewards) for k, v in train_infos[0][agent_id].items(): agent_k = "agent%i/" % agent_id + k if self.use_wandb: wandb.log({agent_k: v}, step=total_num_steps) else: self.writter.add_scalars(agent_k, {agent_k: v}, total_num_steps) @torch.no_grad() def eval(self, total_num_steps): eval_episode = 0 eval_episode_rewards = [] one_episode_rewards = [] eval_episode_costs = [] one_episode_costs = [] for eval_i in range(self.n_eval_rollout_threads): one_episode_rewards.append([]) eval_episode_rewards.append([]) one_episode_costs.append([]) eval_episode_costs.append([]) eval_obs, eval_share_obs, _ = self.eval_envs.reset() eval_rnn_states = np.zeros((self.n_eval_rollout_threads, self.num_agents, self.recurrent_N, self.hidden_size), dtype=np.float32) eval_masks = np.ones((self.n_eval_rollout_threads, self.num_agents, 1), dtype=np.float32) while True: eval_actions_collector = [] eval_rnn_states_collector = [] for agent_id in range(self.num_agents): self.trainer[agent_id].prep_rollout() eval_actions, temp_rnn_state = \ self.trainer[agent_id].policy.act(eval_obs[:, agent_id], eval_rnn_states[:, agent_id], eval_masks[:, agent_id], deterministic=True) eval_rnn_states[:, agent_id] = 
_t2n(temp_rnn_state) eval_actions_collector.append(_t2n(eval_actions)) eval_actions = np.array(eval_actions_collector).transpose(1, 0, 2) # Obser reward and next obs eval_obs, eval_share_obs, eval_rewards, eval_dones, eval_infos, _ = self.eval_envs.step( eval_actions) for eval_i in range(self.n_eval_rollout_threads): one_episode_rewards[eval_i].append(eval_rewards[eval_i]) one_episode_costs[eval_i].append(eval_costs[eval_i]) eval_dones_env = np.all(eval_dones, axis=1) eval_rnn_states[eval_dones_env == True] = np.zeros( ((eval_dones_env == True).sum(), self.num_agents, self.recurrent_N, self.hidden_size), dtype=np.float32) eval_masks = np.ones((self.all_args.n_eval_rollout_threads, self.num_agents, 1), dtype=np.float32) eval_masks[eval_dones_env == True] = np.zeros(((eval_dones_env == True).sum(), self.num_agents, 1), dtype=np.float32) for eval_i in range(self.n_eval_rollout_threads): if eval_dones_env[eval_i]: eval_episode += 1 eval_episode_rewards[eval_i].append(np.sum(one_episode_rewards[eval_i], axis=0)) one_episode_rewards[eval_i] = [] if eval_episode >= self.all_args.eval_episodes: eval_episode_rewards = np.concatenate(eval_episode_rewards) eval_env_infos = {'eval_average_episode_rewards': eval_episode_rewards, 'eval_max_episode_rewards': [np.max(eval_episode_rewards)]} self.log_env(eval_env_infos, total_num_steps) print("eval_average_episode_rewards is {}.".format(np.mean(eval_episode_rewards))) break ================================================ FILE: MACPO/macpo/scripts/__init__.py ================================================ ================================================ FILE: MACPO/macpo/scripts/train/__init__.py ================================================ ================================================ FILE: MACPO/macpo/scripts/train/train_mujoco.py ================================================ #!/usr/bin/env python import sys import os curPath = os.path.abspath(__file__) if len(curPath.split('/'))==1: rootPath = '\\'.join(curPath.split('\\')[:-3]) else: rootPath = '/'.join(curPath.split('/')[:-3]) sys.path.append(os.path.split(rootPath)[0]) import wandb import socket import setproctitle import numpy as np from pathlib import Path import torch from macpo.config import get_config from macpo.envs.safety_ma_mujoco.safety_multiagent_mujoco import MujocoMulti from macpo.envs.env_wrappers import ShareSubprocVecEnv, ShareDummyVecEnv def make_train_env(all_args): def get_env_fn(rank): def init_env(): if all_args.env_name == "mujoco": env_args = {"scenario": all_args.scenario, "agent_conf": all_args.agent_conf, "agent_obsk": all_args.agent_obsk, "episode_limit": 1000} env = MujocoMulti(env_args=env_args) else: print("Can not support the " + all_args.env_name + "environment.") raise NotImplementedError env.seed(all_args.seed + rank * 1000) return env return init_env if all_args.n_rollout_threads == 1: return ShareDummyVecEnv([get_env_fn(0)]) else: return ShareSubprocVecEnv([get_env_fn(i) for i in range(all_args.n_rollout_threads)]) def make_eval_env(all_args): def get_env_fn(rank): def init_env(): if all_args.env_name == "mujoco": env_args = {"scenario": all_args.scenario, "agent_conf": all_args.agent_conf, "agent_obsk": all_args.agent_obsk, "episode_limit": 1000} env = MujocoMulti(env_args=env_args) else: print("Can not support the " + all_args.env_name + "environment.") raise NotImplementedError env.seed(all_args.seed * 50000 + rank * 10000) return env return init_env if all_args.n_eval_rollout_threads == 1: return ShareDummyVecEnv([get_env_fn(0)]) else: return 
ShareSubprocVecEnv([get_env_fn(i) for i in range(all_args.n_eval_rollout_threads)]) def parse_args(args, parser): parser.add_argument('--scenario', type=str, default='Hopper-v2', help="Which mujoco task to run on") parser.add_argument('--agent_conf', type=str, default='3x1') parser.add_argument('--agent_obsk', type=int, default=0) parser.add_argument("--add_move_state", action='store_true', default=False) parser.add_argument("--add_local_obs", action='store_true', default=False) parser.add_argument("--add_distance_state", action='store_true', default=False) parser.add_argument("--add_enemy_action_state", action='store_true', default=False) parser.add_argument("--add_agent_id", action='store_true', default=False) parser.add_argument("--add_visible_state", action='store_true', default=False) parser.add_argument("--add_xy_state", action='store_true', default=False) # agent-specific state should be designed carefully parser.add_argument("--use_state_agent", action='store_true', default=False) parser.add_argument("--use_mustalive", action='store_false', default=True) parser.add_argument("--add_center_xy", action='store_true', default=False) parser.add_argument("--use_single_network", action='store_true', default=False) all_args = parser.parse_known_args(args)[0] return all_args def main(args): parser = get_config() all_args = parse_args(args, parser) print("mumu config: ", all_args) if all_args.algorithm_name == "macpo": all_args.share_policy=False else: raise NotImplementedError # cuda # all_args.cuda = True if all_args.cuda and torch.cuda.is_available(): print("choose to use gpu...") device = torch.device("cuda:0") torch.set_num_threads(all_args.n_training_threads) if all_args.cuda_deterministic: torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True else: print("cuda flag: ", all_args.cuda, "Torch: ", torch.cuda.is_available()) print("choose to use cpu...") device = torch.device("cpu") torch.set_num_threads(all_args.n_training_threads) run_dir = Path(os.path.split(os.path.dirname(os.path.abspath(__file__)))[ 0] + "/results") / all_args.env_name / all_args.scenario / all_args.algorithm_name / all_args.experiment_name if not run_dir.exists(): os.makedirs(str(run_dir)) if all_args.use_wandb: run = wandb.init(config=all_args, project=all_args.env_name, entity=all_args.user_name, notes=socket.gethostname(), name=str(all_args.algorithm_name) + "_" + str(all_args.experiment_name) + "_seed" + str(all_args.seed), group=all_args.map_name, dir=str(run_dir), job_type="training", reinit=True) else: if not run_dir.exists(): curr_run = 'run1' else: exst_run_nums = [int(str(folder.name).split('run')[1]) for folder in run_dir.iterdir() if str(folder.name).startswith('run')] if len(exst_run_nums) == 0: curr_run = 'run1' else: curr_run = 'run%i' % (max(exst_run_nums) + 1) run_dir = run_dir / curr_run if not run_dir.exists(): os.makedirs(str(run_dir)) setproctitle.setproctitle( str(all_args.algorithm_name) + "-" + str(all_args.env_name) + "-" + str(all_args.experiment_name) + "@" + str( all_args.user_name)) # seed torch.manual_seed(all_args.seed) torch.cuda.manual_seed_all(all_args.seed) np.random.seed(all_args.seed) # env envs = make_train_env(all_args) eval_envs = make_eval_env(all_args) if all_args.use_eval else None num_agents = envs.n_agents config = { "all_args": all_args, "envs": envs, "eval_envs": eval_envs, "num_agents": num_agents, "device": device, "run_dir": run_dir } # run experiments if all_args.share_policy: from macpo.runner.shared.mujoco_runner import MujocoRunner as 
Runner else: #in origin code not implement this method if all_args.algorithm_name == "macpo": from macpo.runner.separated.mujoco_runner_macpo import MujocoRunner as Runner else: from macpo.runner.separated.mujoco_runner import MujocoRunner as Runner runner = Runner(config) runner.run() # post process envs.close() if all_args.use_eval and eval_envs is not envs: eval_envs.close() if all_args.use_wandb: run.finish() else: runner.writter.export_scalars_to_json(str(runner.log_dir + '/summary.json')) runner.writter.close() if __name__ == "__main__": main(sys.argv[1:]) ================================================ FILE: MACPO/macpo/scripts/train_mujoco.sh ================================================ #!/bin/sh env="mujoco" scenario="Ant-v2" agent_conf="2x4" agent_obsk=1 algo="macpo" exp="rnn" seed_max=1 echo "env is ${env}, scenario is ${scenario}, algo is ${algo}, exp is ${exp}, max seed is ${seed_max}" for seed in `seq ${seed_max}`; do echo "seed is ${seed}:" CUDA_VISIBLE_DEVICES=0 python train/train_mujoco.py --env_name ${env} --algorithm_name ${algo} --experiment_name ${exp} --scenario ${scenario} --agent_conf ${agent_conf} --agent_obsk ${agent_obsk} --lr 9e-5 --critic_lr 5e-3 --std_x_coef 1 --std_y_coef 5e-1 --seed 50 --n_training_threads 4 --n_rollout_threads 16 --num_mini_batch 40 --episode_length 1000 --num_env_steps 10000000 --ppo_epoch 1 --use_value_active_masks --add_center_xy --use_state_agent --kl_threshold 0.0065 --safety_bound 10 --safety_gamma 0.09 --line_search_fraction 0.5 --fraction_coef 0.27 done ================================================ FILE: MACPO/macpo/utils/__init__.py ================================================ ================================================ FILE: MACPO/macpo/utils/multi_discrete.py ================================================ import gym import numpy as np # An old version of OpenAI Gym's multi_discrete.py. (Was getting affected by Gym updates) # (https://github.com/openai/gym/blob/1fb81d4e3fb780ccf77fec731287ba07da35eb84/gym/spaces/multi_discrete.py) class MultiDiscrete(gym.Space): """ - The multi-discrete action space consists of a series of discrete action spaces with different parameters - It can be adapted to both a Discrete action space or a continuous (Box) action space - It is useful to represent game controllers or keyboards where each key can be represented as a discrete action space - It is parametrized by passing an array of arrays containing [min, max] for each discrete action space where the discrete action space can take any integers from `min` to `max` (both inclusive) Note: A value of 0 always need to represent the NOOP action. e.g. 
Nintendo Game Controller - Can be conceptualized as 3 discrete action spaces: 1) Arrow Keys: Discrete 5 - NOOP[0], UP[1], RIGHT[2], DOWN[3], LEFT[4] - params: min: 0, max: 4 2) Button A: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1 3) Button B: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1 - Can be initialized as MultiDiscrete([ [0,4], [0,1], [0,1] ]) """ def __init__(self, array_of_param_array): self.low = np.array([x[0] for x in array_of_param_array]) self.high = np.array([x[1] for x in array_of_param_array]) self.num_discrete_space = self.low.shape[0] self.n = np.sum(self.high) + 2 def sample(self): """ Returns a array with one sample from each discrete action space """ # For each row: round(random .* (max - min) + min, 0) random_array = np.random.rand(self.num_discrete_space) return [int(x) for x in np.floor(np.multiply((self.high - self.low + 1.), random_array) + self.low)] def contains(self, x): return len(x) == self.num_discrete_space and (np.array(x) >= self.low).all() and (np.array(x) <= self.high).all() @property def shape(self): return self.num_discrete_space def __repr__(self): return "MultiDiscrete" + str(self.num_discrete_space) def __eq__(self, other): return np.array_equal(self.low, other.low) and np.array_equal(self.high, other.high) ================================================ FILE: MACPO/macpo/utils/popart.py ================================================ import numpy as np import torch import torch.nn as nn class PopArt(nn.Module): """ Normalize a vector of observations - across the first norm_axes dimensions""" def __init__(self, input_shape, norm_axes=1, beta=0.99999, per_element_update=False, epsilon=1e-5, device=torch.device("cpu")): super(PopArt, self).__init__() self.input_shape = input_shape self.norm_axes = norm_axes self.epsilon = epsilon self.beta = beta self.per_element_update = per_element_update self.tpdv = dict(dtype=torch.float32, device=device) self.running_mean = nn.Parameter(torch.zeros(input_shape), requires_grad=False).to(**self.tpdv) self.running_mean_sq = nn.Parameter(torch.zeros(input_shape), requires_grad=False).to(**self.tpdv) self.debiasing_term = nn.Parameter(torch.tensor(0.0), requires_grad=False).to(**self.tpdv) def reset_parameters(self): self.running_mean.zero_() self.running_mean_sq.zero_() self.debiasing_term.zero_() def running_mean_var(self): debiased_mean = self.running_mean / self.debiasing_term.clamp(min=self.epsilon) debiased_mean_sq = self.running_mean_sq / self.debiasing_term.clamp(min=self.epsilon) debiased_var = (debiased_mean_sq - debiased_mean ** 2).clamp(min=1e-2) return debiased_mean, debiased_var def forward(self, input_vector, train=True): # Make sure input is float32 if type(input_vector) == np.ndarray: input_vector = torch.from_numpy(input_vector) input_vector = input_vector.to(**self.tpdv) if train: # Detach input before adding it to running means to avoid backpropping through it on # subsequent batches. 
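# PopArt keeps exponentially weighted running estimates of the mean and of the mean
# of squares, plus a debiasing term that corrects for their zero initialisation
# (the same idea as Adam's bias correction): with update weight beta, after t
# updates debiasing_term ~= 1 - beta**t, so
#
#     debiased_mean = running_mean / debiasing_term
#     debiased_var  = running_mean_sq / debiasing_term - debiased_mean**2
#
# forward() normalises targets with these statistics and denormalize() maps network
# outputs back to the original return scale.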
detached_input = input_vector.detach() batch_mean = detached_input.mean(dim=tuple(range(self.norm_axes))) batch_sq_mean = (detached_input ** 2).mean(dim=tuple(range(self.norm_axes))) if self.per_element_update: batch_size = np.prod(detached_input.size()[:self.norm_axes]) weight = self.beta ** batch_size else: weight = self.beta self.running_mean.mul_(weight).add_(batch_mean * (1.0 - weight)) self.running_mean_sq.mul_(weight).add_(batch_sq_mean * (1.0 - weight)) self.debiasing_term.mul_(weight).add_(1.0 * (1.0 - weight)) mean, var = self.running_mean_var() out = (input_vector - mean[(None,) * self.norm_axes]) / torch.sqrt(var)[(None,) * self.norm_axes] return out def denormalize(self, input_vector): """ Transform normalized data back into original distribution """ if type(input_vector) == np.ndarray: input_vector = torch.from_numpy(input_vector) input_vector = input_vector.to(**self.tpdv) mean, var = self.running_mean_var() out = input_vector * torch.sqrt(var)[(None,) * self.norm_axes] + mean[(None,) * self.norm_axes] out = out.cpu().numpy() return out ================================================ FILE: MACPO/macpo/utils/separated_buffer.py ================================================ import torch import numpy as np from collections import defaultdict from macpo.utils.util import check, get_shape_from_obs_space, get_shape_from_act_space def _flatten(T, N, x): return x.reshape(T * N, *x.shape[2:]) def _cast(x): return x.transpose(1,0,2).reshape(-1, *x.shape[2:]) class SeparatedReplayBuffer(object): def __init__(self, args, obs_space, share_obs_space, act_space): self.episode_length = args.episode_length self.n_rollout_threads = args.n_rollout_threads self.rnn_hidden_size = args.hidden_size self.recurrent_N = args.recurrent_N self.gamma = args.gamma self.gae_lambda = args.gae_lambda self._use_gae = args.use_gae self._use_popart = args.use_popart self._use_valuenorm = args.use_valuenorm self._use_proper_time_limits = args.use_proper_time_limits self.algo = args.algorithm_name obs_shape = get_shape_from_obs_space(obs_space) share_obs_shape = get_shape_from_obs_space(share_obs_space) if type(obs_shape[-1]) == list: obs_shape = obs_shape[:1] if type(share_obs_shape[-1]) == list: share_obs_shape = share_obs_shape[:1] self.aver_episode_costs = np.zeros((self.episode_length + 1, self.n_rollout_threads, *obs_shape), dtype=np.float32) self.share_obs = np.zeros((self.episode_length + 1, self.n_rollout_threads, *share_obs_shape), dtype=np.float32) self.obs = np.zeros((self.episode_length + 1, self.n_rollout_threads, *obs_shape), dtype=np.float32) self.rnn_states = np.zeros((self.episode_length + 1, self.n_rollout_threads, self.recurrent_N, self.rnn_hidden_size), dtype=np.float32) self.rnn_states_critic = np.zeros_like(self.rnn_states) self.rnn_states_cost = np.zeros_like(self.rnn_states) self.value_preds = np.zeros((self.episode_length + 1, self.n_rollout_threads, 1), dtype=np.float32) self.returns = np.zeros((self.episode_length + 1, self.n_rollout_threads, 1), dtype=np.float32) if act_space.__class__.__name__ == 'Discrete': self.available_actions = np.ones((self.episode_length + 1, self.n_rollout_threads, act_space.n), dtype=np.float32) else: self.available_actions = None act_shape = get_shape_from_act_space(act_space) self.actions = np.zeros((self.episode_length, self.n_rollout_threads, act_shape), dtype=np.float32) self.action_log_probs = np.zeros((self.episode_length, self.n_rollout_threads, act_shape), dtype=np.float32) self.rewards = np.zeros((self.episode_length, self.n_rollout_threads, 
1), dtype=np.float32) self.costs = np.zeros_like(self.rewards) self.cost_preds = np.zeros_like(self.value_preds) self.cost_returns = np.zeros_like(self.returns) self.masks = np.ones((self.episode_length + 1, self.n_rollout_threads, 1), dtype=np.float32) self.bad_masks = np.ones_like(self.masks) self.active_masks = np.ones_like(self.masks) self.factor = None self.step = 0 def update_factor(self, factor): self.factor = factor.copy() def return_aver_insert(self, aver_episode_costs): self.aver_episode_costs = aver_episode_costs.copy() def insert(self, share_obs, obs, rnn_states, rnn_states_critic, actions, action_log_probs, value_preds, rewards, masks, bad_masks=None, active_masks=None, available_actions=None, costs=None, cost_preds=None, rnn_states_cost=None, done_episodes_costs_aver=None, aver_episode_costs = 0): # print("separated_buffer--aver_episode_costs:", aver_episode_costs) self.share_obs[self.step + 1] = share_obs.copy() self.obs[self.step + 1] = obs.copy() self.rnn_states[self.step + 1] = rnn_states.copy() self.rnn_states_critic[self.step + 1] = rnn_states_critic.copy() self.actions[self.step] = actions.copy() self.action_log_probs[self.step] = action_log_probs.copy() self.value_preds[self.step] = value_preds.copy() self.rewards[self.step] = rewards.copy() self.masks[self.step + 1] = masks.copy() if bad_masks is not None: self.bad_masks[self.step + 1] = bad_masks.copy() if active_masks is not None: self.active_masks[self.step + 1] = active_masks.copy() if available_actions is not None: self.available_actions[self.step + 1] = available_actions.copy() if costs is not None: self.costs[self.step] = costs.copy() if cost_preds is not None: self.cost_preds[self.step] = cost_preds.copy() if rnn_states_cost is not None: self.rnn_states_cost[self.step + 1] = rnn_states_cost.copy() # if train_episode_costs_aver is not None: # self.train_episode_costs_aver[self.step + 1] = train_episode_costs_aver.copy() self.step = (self.step + 1) % self.episode_length def chooseinsert(self, share_obs, obs, rnn_states, rnn_states_critic, actions, action_log_probs, value_preds, rewards, masks, bad_masks=None, active_masks=None, available_actions=None): self.share_obs[self.step] = share_obs.copy() self.obs[self.step] = obs.copy() self.rnn_states[self.step + 1] = rnn_states.copy() self.rnn_states_critic[self.step + 1] = rnn_states_critic.copy() self.actions[self.step] = actions.copy() self.action_log_probs[self.step] = action_log_probs.copy() self.value_preds[self.step] = value_preds.copy() self.rewards[self.step] = rewards.copy() self.masks[self.step + 1] = masks.copy() if bad_masks is not None: self.bad_masks[self.step + 1] = bad_masks.copy() if active_masks is not None: self.active_masks[self.step] = active_masks.copy() if available_actions is not None: self.available_actions[self.step] = available_actions.copy() self.step = (self.step + 1) % self.episode_length def after_update(self): self.share_obs[0] = self.share_obs[-1].copy() self.obs[0] = self.obs[-1].copy() self.rnn_states[0] = self.rnn_states[-1].copy() self.rnn_states_critic[0] = self.rnn_states_critic[-1].copy() self.rnn_states_cost[0] = self.rnn_states_cost[-1].copy() self.masks[0] = self.masks[-1].copy() self.bad_masks[0] = self.bad_masks[-1].copy() self.active_masks[0] = self.active_masks[-1].copy() if self.available_actions is not None: self.available_actions[0] = self.available_actions[-1].copy() def chooseafter_update(self): self.rnn_states[0] = self.rnn_states[-1].copy() self.rnn_states_critic[0] = self.rnn_states_critic[-1].copy() 
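# masks and bad_masks are rolled forward in the same way, so the next collection segment starts from the correct reset flags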
self.masks[0] = self.masks[-1].copy() self.bad_masks[0] = self.bad_masks[-1].copy() def compute_returns(self, next_value, value_normalizer=None): """ use proper time limits, the difference of use or not is whether use bad_mask """ if self._use_proper_time_limits: if self._use_gae: self.value_preds[-1] = next_value gae = 0 for step in reversed(range(self.rewards.shape[0])): if self._use_popart or self._use_valuenorm: delta = self.rewards[step] + self.gamma * value_normalizer.denormalize(self.value_preds[ step + 1]) * self.masks[step + 1] - value_normalizer.denormalize(self.value_preds[step]) gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae gae = gae * self.bad_masks[step + 1] self.returns[step] = gae + value_normalizer.denormalize(self.value_preds[step]) else: delta = self.rewards[step] + self.gamma * self.value_preds[step + 1] * self.masks[step + 1] - self.value_preds[step] gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae gae = gae * self.bad_masks[step + 1] self.returns[step] = gae + self.value_preds[step] else: self.returns[-1] = next_value for step in reversed(range(self.rewards.shape[0])): if self._use_popart: self.returns[step] = (self.returns[step + 1] * self.gamma * self.masks[step + 1] + self.rewards[step]) * self.bad_masks[step + 1] \ + (1 - self.bad_masks[step + 1]) * value_normalizer.denormalize(self.value_preds[step]) else: self.returns[step] = (self.returns[step + 1] * self.gamma * self.masks[step + 1] + self.rewards[step]) * self.bad_masks[step + 1] \ + (1 - self.bad_masks[step + 1]) * self.value_preds[step] else: if self._use_gae: self.value_preds[-1] = next_value gae = 0 for step in reversed(range(self.rewards.shape[0])): if self._use_popart or self._use_valuenorm: delta = self.rewards[step] + self.gamma * value_normalizer.denormalize(self.value_preds[step + 1]) * self.masks[step + 1] - value_normalizer.denormalize(self.value_preds[step]) gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae self.returns[step] = gae + value_normalizer.denormalize(self.value_preds[step]) else: delta = self.rewards[step] + self.gamma * self.value_preds[step + 1] * self.masks[step + 1] - self.value_preds[step] gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae self.returns[step] = gae + self.value_preds[step] else: self.returns[-1] = next_value for step in reversed(range(self.rewards.shape[0])): self.returns[step] = self.returns[step + 1] * self.gamma * self.masks[step + 1] + self.rewards[step] def compute_cost_returns(self, next_cost, value_normalizer=None): if self._use_proper_time_limits: if self._use_gae: self.cost_preds[-1] = next_cost gae = 0 for step in reversed(range(self.costs.shape[0])): if self._use_popart or self._use_valuenorm: delta = self.costs[step] + self.gamma * value_normalizer.denormalize(self.cost_preds[step + 1]) * self.masks[step + 1] - value_normalizer.denormalize(self.cost_preds[step]) gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae gae = gae * self.bad_masks[step + 1] self.cost_returns[step] = gae + value_normalizer.denormalize(self.cost_preds[step]) else: delta = self.costs[step] + self.gamma * self.cost_preds[step + 1] * self.masks[step + 1] - self.cost_preds[step] gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae gae = gae * self.bad_masks[step + 1] self.cost_returns[step] = gae + self.cost_preds[step] else: self.cost_returns[-1] = next_cost for step in reversed(range(self.costs.shape[0])): if self._use_popart: self.cost_returns[step] = 
(self.cost_returns[step + 1] * self.gamma * self.masks[step + 1] + self.costs[step]) * self.bad_masks[step + 1] \ + (1 - self.bad_masks[step + 1]) * value_normalizer.denormalize(self.cost_preds[step]) else: self.cost_returns[step] = (self.cost_returns[step + 1] * self.gamma * self.masks[step + 1] + self.costs[step]) * self.bad_masks[step + 1] \ + (1 - self.bad_masks[step + 1]) * self.cost_preds[step] else: if self._use_gae: self.cost_preds[-1] = next_cost gae = 0 for step in reversed(range(self.costs.shape[0])): if self._use_popart or self._use_valuenorm: delta = self.costs[step] + self.gamma * value_normalizer.denormalize(self.cost_preds[step + 1]) * self.masks[step + 1] - value_normalizer.denormalize(self.cost_preds[step]) gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae self.cost_returns[step] = gae + value_normalizer.denormalize(self.cost_preds[step]) else: delta = self.costs[step] + self.gamma * self.cost_preds[step + 1] * self.masks[step + 1] - self.cost_preds[step] gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae self.cost_returns[step] = gae + self.cost_preds[step] else: self.cost_returns[-1] = next_cost for step in reversed(range(self.costs.shape[0])): self.cost_returns[step] = self.cost_returns[step + 1] * self.gamma * self.masks[step + 1] + self.costs[step] def feed_forward_generator(self, advantages, num_mini_batch=None, mini_batch_size=None, cost_adv=None): episode_length, n_rollout_threads = self.rewards.shape[0:2] batch_size = n_rollout_threads * episode_length if mini_batch_size is None: assert batch_size >= num_mini_batch, ( "PPO requires the number of processes ({}) " "* number of steps ({}) = {} " "to be greater than or equal to the number of PPO mini batches ({})." "".format(n_rollout_threads, episode_length, n_rollout_threads * episode_length, num_mini_batch)) mini_batch_size = batch_size // num_mini_batch rand = torch.randperm(batch_size).numpy() sampler = [rand[i*mini_batch_size:(i+1)*mini_batch_size] for i in range(num_mini_batch)] share_obs = self.share_obs[:-1].reshape(-1, *self.share_obs.shape[2:]) obs = self.obs[:-1].reshape(-1, *self.obs.shape[2:]) rnn_states = self.rnn_states[:-1].reshape(-1, *self.rnn_states.shape[2:]) rnn_states_critic = self.rnn_states_critic[:-1].reshape(-1, *self.rnn_states_critic.shape[2:]) rnn_states_cost = self.rnn_states_cost[:-1].reshape(-1, *self.rnn_states_cost.shape[2:]) actions = self.actions.reshape(-1, self.actions.shape[-1]) if self.available_actions is not None: available_actions = self.available_actions[:-1].reshape(-1, self.available_actions.shape[-1]) value_preds = self.value_preds[:-1].reshape(-1, 1) returns = self.returns[:-1].reshape(-1, 1) cost_preds = self.cost_preds[:-1].reshape(-1, 1) cost_returns = self.cost_returns[:-1].reshape(-1, 1) masks = self.masks[:-1].reshape(-1, 1) active_masks = self.active_masks[:-1].reshape(-1, 1) action_log_probs = self.action_log_probs.reshape(-1, self.action_log_probs.shape[-1]) # print("self.aver_episode_costs--separated--buffer", self.aver_episode_costs.mean()) aver_episode_costs = self.aver_episode_costs # self.aver_episode_costs[:-1].reshape(-1, *self.aver_episode_costs.shape[2:]) if self.factor is not None: # factor = self.factor.reshape(-1,1) factor = self.factor.reshape(-1, self.factor.shape[-1]) advantages = advantages.reshape(-1, 1) if cost_adv is not None: cost_adv = cost_adv.reshape(-1, 1) for indices in sampler: # obs size [T+1 N Dim]-->[T N Dim]-->[T*N,Dim]-->[index,Dim] share_obs_batch = share_obs[indices] obs_batch = 
obs[indices] rnn_states_batch = rnn_states[indices] rnn_states_critic_batch = rnn_states_critic[indices] rnn_states_cost_batch = rnn_states_cost[indices] actions_batch = actions[indices] if self.available_actions is not None: available_actions_batch = available_actions[indices] else: available_actions_batch = None value_preds_batch = value_preds[indices] return_batch = returns[indices] cost_preds_batch = cost_preds[indices] cost_return_batch = cost_returns[indices] masks_batch = masks[indices] active_masks_batch = active_masks[indices] old_action_log_probs_batch = action_log_probs[indices] if advantages is None: adv_targ = None else: adv_targ = advantages[indices] if cost_adv is None: cost_adv_targ = None else: cost_adv_targ = cost_adv[indices] if self.factor is None: yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch else: if self.algo == "macppo": factor_batch = factor[indices] yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch, factor_batch, cost_preds_batch, cost_return_batch, rnn_states_cost_batch, cost_adv_targ elif self.algo == "mappo_lagr": factor_batch = factor[indices] yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch, factor_batch, cost_preds_batch, cost_return_batch, rnn_states_cost_batch, cost_adv_targ elif self.algo == "macpo": factor_batch = factor[indices] yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch, factor_batch, cost_preds_batch, cost_return_batch, rnn_states_cost_batch, cost_adv_targ, aver_episode_costs else: factor_batch = factor[indices] yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch, factor_batch def naive_recurrent_generator(self, advantages, num_mini_batch, cost_adv=None): n_rollout_threads = self.rewards.shape[1] assert n_rollout_threads >= num_mini_batch, ( "PPO requires the number of processes ({}) " "to be greater than or equal to the number of " "PPO mini batches ({}).".format(n_rollout_threads, num_mini_batch)) num_envs_per_batch = n_rollout_threads // num_mini_batch perm = torch.randperm(n_rollout_threads).numpy() for start_ind in range(0, n_rollout_threads, num_envs_per_batch): share_obs_batch = [] obs_batch = [] rnn_states_batch = [] rnn_states_critic_batch = [] rnn_states_cost_batch = [] actions_batch = [] available_actions_batch = [] value_preds_batch = [] cost_preds_batch = [] return_batch = [] cost_return_batch = [] masks_batch = [] active_masks_batch = [] old_action_log_probs_batch = [] adv_targ = [] cost_adv_targ = [] factor_batch = [] for offset in range(num_envs_per_batch): ind = perm[start_ind + offset] share_obs_batch.append(self.share_obs[:-1, ind]) obs_batch.append(self.obs[:-1, ind]) rnn_states_batch.append(self.rnn_states[0:1, ind]) rnn_states_critic_batch.append(self.rnn_states_critic[0:1, ind]) 
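# only the RNN state at the first timestep of each sampled environment is kept; later hidden states are recomputed when the whole sequence is replayed through the network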
rnn_states_cost_batch.append(self.rnn_states_cost[0:1, ind]) actions_batch.append(self.actions[:, ind]) if self.available_actions is not None: available_actions_batch.append(self.available_actions[:-1, ind]) value_preds_batch.append(self.value_preds[:-1, ind]) cost_preds_batch.append(self.cost_preds[:-1, ind]) return_batch.append(self.returns[:-1, ind]) cost_return_batch.append(self.cost_returns[:-1, ind]) masks_batch.append(self.masks[:-1, ind]) active_masks_batch.append(self.active_masks[:-1, ind]) old_action_log_probs_batch.append(self.action_log_probs[:, ind]) adv_targ.append(advantages[:, ind]) if cost_adv is not None: cost_adv_targ.append(cost_adv[:, ind]) if self.factor is not None: factor_batch.append(self.factor[:, ind]) # [N[T, dim]] T, N = self.episode_length, num_envs_per_batch # These are all from_numpys of size (T, N, -1) share_obs_batch = np.stack(share_obs_batch, 1) obs_batch = np.stack(obs_batch, 1) actions_batch = np.stack(actions_batch, 1) if self.available_actions is not None: available_actions_batch = np.stack(available_actions_batch, 1) if self.factor is not None: factor_batch=np.stack(factor_batch,1) value_preds_batch = np.stack(value_preds_batch, 1) cost_preds_batch = np.stack(cost_preds_batch, 1) return_batch = np.stack(return_batch, 1) cost_return_batch = np.stack(cost_return_batch, 1) masks_batch = np.stack(masks_batch, 1) active_masks_batch = np.stack(active_masks_batch, 1) old_action_log_probs_batch = np.stack(old_action_log_probs_batch, 1) adv_targ = np.stack(adv_targ, 1) if cost_adv is not None: cost_adv_targ = np.stack(cost_adv_targ, 1) # States is just a (N, -1) from_numpy [N[1,dim]] rnn_states_batch = np.stack(rnn_states_batch, 1).reshape(N, *self.rnn_states.shape[2:]) rnn_states_critic_batch = np.stack(rnn_states_critic_batch, 1).reshape(N, *self.rnn_states_critic.shape[2:]) rnn_states_cost_batch = np.stack(rnn_states_cost_batch, 1).reshape(N, *self.rnn_states_cost.shape[2:]) # Flatten the (T, N, ...) from_numpys to (T * N, ...) 
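# after _flatten, each row is a single (timestep, env) transition, i.e. the flat batch layout the trainer consumes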
share_obs_batch = _flatten(T, N, share_obs_batch) obs_batch = _flatten(T, N, obs_batch) actions_batch = _flatten(T, N, actions_batch) if self.available_actions is not None: available_actions_batch = _flatten(T, N, available_actions_batch) else: available_actions_batch = None if self.factor is not None: factor_batch=_flatten(T,N,factor_batch) value_preds_batch = _flatten(T, N, value_preds_batch) cost_preds_batch = _flatten(T, N, cost_preds_batch) return_batch = _flatten(T, N, return_batch) cost_return_batch = _flatten(T, N, cost_return_batch) masks_batch = _flatten(T, N, masks_batch) active_masks_batch = _flatten(T, N, active_masks_batch) old_action_log_probs_batch = _flatten(T, N, old_action_log_probs_batch) adv_targ = _flatten(T, N, adv_targ) if cost_adv is not None: cost_adv_targ = _flatten(T, N, cost_adv_targ) if self.factor is not None: if self.algo == "mappo_lagr": yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch, factor_batch, cost_preds_batch, cost_return_batch, rnn_states_cost_batch, cost_adv_targ # 17 value elif self.algo == "macppo": yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch, factor_batch, cost_preds_batch, cost_return_batch, rnn_states_cost_batch, cost_adv_targ # 17 value elif self.algo == "macpo": yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch, factor_batch, cost_preds_batch, cost_return_batch, rnn_states_cost_batch, cost_adv_targ # 17 value else: yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch, factor_batch # value else: yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch # def recurrent_generator(self, advantages, num_mini_batch, data_chunk_length, cost_adv=None): # episode_length, n_rollout_threads = self.rewards.shape[0:2] # batch_size = n_rollout_threads * episode_length # data_chunks = batch_size // data_chunk_length # [C=r*T/L] # mini_batch_size = data_chunks // num_mini_batch # # assert episode_length * n_rollout_threads >= data_chunk_length, ( # "PPO requires the number of processes ({}) * episode length ({}) " # "to be greater than or equal to the number of " # "data chunk length ({}).".format(n_rollout_threads, episode_length, data_chunk_length)) # assert data_chunks >= 2, ("need larger batch size") # # rand = torch.randperm(data_chunks).numpy() # sampler = [rand[i*mini_batch_size:(i+1)*mini_batch_size] for i in range(num_mini_batch)] # # if len(self.share_obs.shape) > 3: # share_obs = self.share_obs[:-1].transpose(1, 0, 2, 3, 4).reshape(-1, *self.share_obs.shape[2:]) # obs = self.obs[:-1].transpose(1, 0, 2, 3, 4).reshape(-1, *self.obs.shape[2:]) # else: # share_obs = _cast(self.share_obs[:-1]) # obs = _cast(self.obs[:-1]) # # actions = _cast(self.actions) # action_log_probs = _cast(self.action_log_probs) # advantages = 
_cast(advantages) # value_preds = _cast(self.value_preds[:-1]) # returns = _cast(self.returns[:-1]) # masks = _cast(self.masks[:-1]) # active_masks = _cast(self.active_masks[:-1]) # if self.factor is not None: # factor = _cast(self.factor) # # rnn_states = _cast(self.rnn_states[:-1]) # # rnn_states_critic = _cast(self.rnn_states_critic[:-1]) # rnn_states = self.rnn_states[:-1].transpose(1, 0, 2, 3).reshape(-1, *self.rnn_states.shape[2:]) # rnn_states_critic = self.rnn_states_critic[:-1].transpose(1, 0, 2, 3).reshape(-1, *self.rnn_states_critic.shape[2:]) # # if self.available_actions is not None: # available_actions = _cast(self.available_actions[:-1]) # # for indices in sampler: # share_obs_batch = [] # obs_batch = [] # rnn_states_batch = [] # rnn_states_critic_batch = [] # actions_batch = [] # available_actions_batch = [] # value_preds_batch = [] # return_batch = [] # masks_batch = [] # active_masks_batch = [] # old_action_log_probs_batch = [] # adv_targ = [] # factor_batch = [] # for index in indices: # ind = index * data_chunk_length # # size [T+1 N M Dim]-->[T N Dim]-->[N T Dim]-->[T*N,Dim]-->[L,Dim] # share_obs_batch.append(share_obs[ind:ind+data_chunk_length]) # obs_batch.append(obs[ind:ind+data_chunk_length]) # actions_batch.append(actions[ind:ind+data_chunk_length]) # if self.available_actions is not None: # available_actions_batch.append(available_actions[ind:ind+data_chunk_length]) # value_preds_batch.append(value_preds[ind:ind+data_chunk_length]) # return_batch.append(returns[ind:ind+data_chunk_length]) # masks_batch.append(masks[ind:ind+data_chunk_length]) # active_masks_batch.append(active_masks[ind:ind+data_chunk_length]) # old_action_log_probs_batch.append(action_log_probs[ind:ind+data_chunk_length]) # adv_targ.append(advantages[ind:ind+data_chunk_length]) # # size [T+1 N Dim]-->[T N Dim]-->[T*N,Dim]-->[1,Dim] # rnn_states_batch.append(rnn_states[ind]) # rnn_states_critic_batch.append(rnn_states_critic[ind]) # if self.factor is not None: # factor_batch.append(factor[ind:ind+data_chunk_length]) # L, N = data_chunk_length, mini_batch_size # # # These are all from_numpys of size (N, L, Dim) # share_obs_batch = np.stack(share_obs_batch) # obs_batch = np.stack(obs_batch) # # actions_batch = np.stack(actions_batch) # if self.available_actions is not None: # available_actions_batch = np.stack(available_actions_batch) # if self.factor is not None: # factor_batch = np.stack(factor_batch) # value_preds_batch = np.stack(value_preds_batch) # return_batch = np.stack(return_batch) # masks_batch = np.stack(masks_batch) # active_masks_batch = np.stack(active_masks_batch) # old_action_log_probs_batch = np.stack(old_action_log_probs_batch) # adv_targ = np.stack(adv_targ) # # # States is just a (N, -1) from_numpy # rnn_states_batch = np.stack(rnn_states_batch).reshape(N, *self.rnn_states.shape[2:]) # rnn_states_critic_batch = np.stack(rnn_states_critic_batch).reshape(N, *self.rnn_states_critic.shape[2:]) # # # Flatten the (L, N, ...) from_numpys to (L * N, ...) 
# share_obs_batch = _flatten(L, N, share_obs_batch) # obs_batch = _flatten(L, N, obs_batch) # actions_batch = _flatten(L, N, actions_batch) # if self.available_actions is not None: # available_actions_batch = _flatten(L, N, available_actions_batch) # else: # available_actions_batch = None # if self.factor is not None: # factor_batch = _flatten(L, N, factor_batch) # value_preds_batch = _flatten(L, N, value_preds_batch) # return_batch = _flatten(L, N, return_batch) # masks_batch = _flatten(L, N, masks_batch) # active_masks_batch = _flatten(L, N, active_masks_batch) # old_action_log_probs_batch = _flatten(L, N, old_action_log_probs_batch) # adv_targ = _flatten(L, N, adv_targ) # if self.factor is not None: # yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch, factor_batch # else: # yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch ================================================ FILE: MACPO/macpo/utils/util.py ================================================ import numpy as np import math import torch def check(input): if type(input) == np.ndarray: return torch.from_numpy(input) def get_gard_norm(it): sum_grad = 0 for x in it: if x.grad is None: continue sum_grad += x.grad.norm() ** 2 return math.sqrt(sum_grad) def update_linear_schedule(optimizer, epoch, total_num_epochs, initial_lr): """Decreases the learning rate linearly""" lr = initial_lr - (initial_lr * (epoch / float(total_num_epochs))) for param_group in optimizer.param_groups: param_group['lr'] = lr def huber_loss(e, d): a = (abs(e) <= d).float() b = (e > d).float() return a*e**2/2 + b*d*(abs(e)-d/2) def mse_loss(e): return e**2/2 def get_shape_from_obs_space(obs_space): if obs_space.__class__.__name__ == 'Box': obs_shape = obs_space.shape elif obs_space.__class__.__name__ == 'list': obs_shape = obs_space else: raise NotImplementedError return obs_shape def get_shape_from_act_space(act_space): if act_space.__class__.__name__ == 'Discrete': act_shape = 1 elif act_space.__class__.__name__ == "MultiDiscrete": act_shape = act_space.shape elif act_space.__class__.__name__ == "Box": act_shape = act_space.shape[0] elif act_space.__class__.__name__ == "MultiBinary": act_shape = act_space.shape[0] else: # agar act_shape = act_space[0].shape[0] + 1 return act_shape def tile_images(img_nhwc): """ Tile N images into one big PxQ image (P,Q) are chosen to be as close as possible, and if N is square, then P=Q. 
input: img_nhwc, list or array of images, ndim=4 once turned into array n = batch index, h = height, w = width, c = channel returns: bigim_HWc, ndarray with ndim=3 """ img_nhwc = np.asarray(img_nhwc) N, h, w, c = img_nhwc.shape H = int(np.ceil(np.sqrt(N))) W = int(np.ceil(float(N)/H)) img_nhwc = np.array(list(img_nhwc) + [img_nhwc[0]*0 for _ in range(N, H*W)]) img_HWhwc = img_nhwc.reshape(H, W, h, w, c) img_HhWwc = img_HWhwc.transpose(0, 2, 1, 3, 4) img_Hh_Ww_c = img_HhWwc.reshape(H*h, W*w, c) return img_Hh_Ww_c ================================================ FILE: MACPO/macpo.egg-info/PKG-INFO ================================================ Metadata-Version: 2.1 Name: macpo Version: 0.1.0 Summary: macpo algorithms of marlbenchmark Home-page: UNKNOWN Author: marl Author-email: marl@gmail.com License: UNKNOWN Description: # MAPPO Chao Yu*, Akash Velu*, Eugene Vinitsky, Yu Wang, Alexandre Bayen, and Yi Wu. Website: https://sites.google.com/view/mappo This repository implements MAPPO, a multi-agent variant of PPO. The implementation in this repository is used in the paper "The Surprising Effectiveness of MAPPO in Cooperative Multi-Agent Games" (https://arxiv.org/abs/2103.01955). This repository is heavily based on https://github.com/ikostrikov/pytorch-a2c-ppo-acktr-gail. ## Environments supported: - [StarCraftII (SMAC)](https://github.com/oxwhirl/smac) - [Hanabi](https://github.com/deepmind/hanabi-learning-environment) - [Multiagent Particle-World Environments (MPEs)](https://github.com/openai/multiagent-particle-envs) ## 1. Usage All core code is located within the onpolicy folder. The algorithms/ subfolder contains algorithm-specific code for MAPPO. * The envs/ subfolder contains environment wrapper implementations for the MPEs, SMAC, and Hanabi. * Code to perform training rollouts and policy updates is contained within the runner/ folder - there is a runner for each environment. * Executable scripts for training with default hyperparameters can be found in the scripts/ folder. The files are named in the following manner: train_algo_environment.sh. Within each file, the map name (in the case of SMAC and the MPEs) can be altered. * Python training scripts for each environment can be found in the scripts/train/ folder. * The config.py file contains relevant hyperparameter and env settings. Most hyperparameters are defaulted to the ones used in the paper; however, please refer to the appendix for a full list of hyperparameters used. ## 2. Installation Here we give an example installation on CUDA == 10.1. For non-GPU & other CUDA version installation, please refer to the [PyTorch website](https://pytorch.org/get-started/locally/). ``` Bash # create conda environment conda create -n marl python==3.6.1 conda activate marl pip install torch==1.5.1+cu101 torchvision==0.6.1+cu101 -f https://download.pytorch.org/whl/torch_stable.html ``` ``` # install on-policy package cd on-policy pip install -e . ``` Even though we provide requirement.txt, it may have redundancy. We recommend that the user try to install other required packages by running the code and finding which required package hasn't been installed yet. ### 2.1 Install StarCraftII [4.10](http://blzdistsc2-a.akamaihd.net/Linux/SC2.4.10.zip) ``` Bash unzip SC2.4.10.zip # password is iagreetotheeula echo "export SC2PATH=~/StarCraftII/" >> ~/.bashrc ``` * download SMAC Maps, and move them to `~/StarCraftII/Maps/`. * To use a stableid, copy `stableid.json` from https://github.com/Blizzard/s2client-proto.git to `~/StarCraftII/`.
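As a quick sanity check before training, you can verify that the game and the maps sit where the steps above put them. This is a minimal sketch assuming the default `~/StarCraftII/` location and the `Maps/` folder named above; adjust the paths if you unpacked the game elsewhere.

``` Bash
# confirm SC2PATH is exported and points at the unpacked game
echo "$SC2PATH"
# the SMAC maps copied above should be listed here (the exact subfolder name depends on the map package you downloaded)
ls "$SC2PATH"/Maps
```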
### 2.2 Hanabi Environment code for Hanabi is developed from the open-source environment code, but has been slightly modified to fit the algorithms used here. To install, execute the following: ``` Bash pip install cffi cd envs/hanabi mkdir build && cd build cmake .. make -j ``` ### 2.3 Install MPE ``` Bash # install this package first pip install seaborn ``` There are 3 Cooperative scenarios in MPE: * simple_spread * simple_speaker_listener, which is the 'Comm' scenario in the paper * simple_reference ## 3. Train Here we use train_mpe.sh as an example: ``` cd onpolicy/scripts chmod +x ./train_mpe.sh ./train_mpe.sh ``` Local results are stored in the subfolder scripts/results. Note that we use Weights & Biases as the default visualization platform; to use Weights & Biases, please register and log in to the platform first. More instructions for using Weights & Biases can be found in the official [documentation](https://docs.wandb.ai/). Adding the `--use_wandb` flag in the command line or in the .sh file will use Tensorboard instead of Weights & Biases. We additionally provide `./eval_hanabi_forward.sh` for evaluating the Hanabi score over 100k trials. ## 4. Publication If you find this repository useful, please cite our [paper](https://arxiv.org/abs/2103.01955): ``` @misc{yu2021surprising, title={The Surprising Effectiveness of MAPPO in Cooperative Multi-Agent Games}, author={Chao Yu and Akash Velu and Eugene Vinitsky and Yu Wang and Alexandre Bayen and Yi Wu}, year={2021}, eprint={2103.01955}, archivePrefix={arXiv}, primaryClass={cs.LG} } ``` Keywords: multi-agent reinforcement learning platform pytorch Platform: UNKNOWN Classifier: Development Status :: 3 - Alpha Classifier: Intended Audience :: Science/Research Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence Classifier: Topic :: Software Development :: Libraries :: Python Modules Classifier: Programming Language :: Python :: 3 Classifier: License :: OSI Approved :: MIT License Classifier: Operating System :: OS Independent Requires-Python: >=3.6 Description-Content-Type: text/markdown ================================================ FILE: MACPO/macpo.egg-info/SOURCES.txt ================================================ README.md setup.py macpo/__init__.py macpo/config.py macpo.egg-info/PKG-INFO macpo.egg-info/SOURCES.txt macpo.egg-info/dependency_links.txt macpo.egg-info/top_level.txt macpo/algorithms/__init__.py macpo/algorithms/r_mappo/__init__.py macpo/algorithms/r_mappo/r_mactrpo_based_cpo.py macpo/envs/__init__.py macpo/envs/env_wrappers.py macpo/envs/safety_ma_mujoco/__init__.py macpo/envs/safety_ma_mujoco/test.py macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/__init__.py macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/ant.py macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/coupled_half_cheetah.py macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/half_cheetah.py macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/hopper.py macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/humanoid.py macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/manyagent_ant.py macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/manyagent_swimmer.py macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/mujoco_env.py macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/mujoco_multi.py macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/multiagentenv.py macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/obsk.py macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/__init__.py macpo/runner/__init__.py macpo/runner/separated/__init__.py
macpo/runner/separated/base_runner.py macpo/runner/separated/base_runner_mactrpo_based_matrpo.py macpo/runner/separated/mujoco_runner.py macpo/runner/separated/mujoco_runner_mactrpo_based_matrpo.py macpo/scripts/__init__.py macpo/scripts/train/__init__.py macpo/scripts/train/train_mujoco.py macpo/utils/__init__.py macpo/utils/multi_discrete.py macpo/utils/popart.py macpo/utils/separated_buffer.py macpo/utils/util.py ================================================ FILE: MACPO/macpo.egg-info/dependency_links.txt ================================================ ================================================ FILE: MACPO/macpo.egg-info/top_level.txt ================================================ macpo ================================================ FILE: MACPO/setup.py ================================================ #!/usr/bin/env python # -*- coding: utf-8 -*- import os from setuptools import setup, find_packages import setuptools def get_version() -> str: # https://packaging.python.org/guides/single-sourcing-package-version/ init = open(os.path.join("macpo", "__init__.py"), "r").read().split() return init[init.index("__version__") + 2][1:-1] setup( name="macpo", # Replace with your own username version=get_version(), description="macpo algorithms of marlbenchmark", # long_description=open("README.md", encoding="utf8").read(), long_description_content_type="text/markdown", author="marl", author_email="marl@gmail.com", packages=setuptools.find_packages(), classifiers=[ "Development Status :: 3 - Alpha", "Intended Audience :: Science/Research", "Topic :: Scientific/Engineering :: Artificial Intelligence", "Topic :: Software Development :: Libraries :: Python Modules", "Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", ], keywords="multi-agent reinforcement learning platform pytorch", python_requires='>=3.6', ) ================================================ FILE: MAPPO-Lagrangian/.gitignore ================================================ /.idea/ */__pycache__/ ================================================ FILE: MAPPO-Lagrangian/environment.yaml ================================================ name: marl channels: - defaults dependencies: - _libgcc_mutex=0.1=main - _tflow_select=2.1.0=gpu - absl-py=0.9.0=py36_0 - astor=0.8.0=py36_0 - blas=1.0=mkl - c-ares=1.15.0=h7b6447c_1001 - ca-certificates=2020.1.1=0 - certifi=2020.4.5.2=py36_0 - cudatoolkit=10.0.130=0 - cudnn=7.6.5=cuda10.0_0 - cupti=10.0.130=0 - gast=0.2.2=py36_0 - google-pasta=0.2.0=py_0 - grpcio=1.14.1=py36h9ba97e2_0 - h5py=2.10.0=py36h7918eee_0 - hdf5=1.10.4=hb1b8bf9_0 - intel-openmp=2020.1=217 - keras-applications=1.0.8=py_0 - keras-preprocessing=1.1.0=py_1 - libedit=3.1=heed3624_0 - libffi=3.2.1=hd88cf55_4 - libgcc-ng=9.1.0=hdf63c60_0 - libgfortran-ng=7.3.0=hdf63c60_0 - libprotobuf=3.12.3=hd408876_0 - libstdcxx-ng=9.1.0=hdf63c60_0 - markdown=3.1.1=py36_0 - mkl=2020.1=217 - mkl-service=2.3.0=py36he904b0f_0 - mkl_fft=1.1.0=py36h23d657b_0 - mkl_random=1.1.1=py36h0573a6f_0 - ncurses=6.0=h9df7e31_2 - numpy=1.18.1=py36h4f9e942_0 - numpy-base=1.18.1=py36hde5b4d6_1 - openssl=1.0.2u=h7b6447c_0 - opt_einsum=3.1.0=py_0 - pip=20.1.1=py36_1 - protobuf=3.12.3=py36he6710b0_0 - python=3.6.2=hca45abc_19 - readline=7.0=ha6073c6_4 - scipy=1.4.1=py36h0b6359f_0 - setuptools=47.3.0=py36_0 - six=1.15.0=py_0 - sqlite=3.23.1=he433501_0 - tensorboard=2.0.0=pyhb38c66f_1 - tensorflow=2.0.0=gpu_py36h6b29c10_0 - tensorflow-base=2.0.0=gpu_py36h0ec5d1f_0 - 
tensorflow-estimator=2.0.0=pyh2649769_0 - tensorflow-gpu=2.0.0=h0d30ee6_0 - termcolor=1.1.0=py36_1 - tk=8.6.8=hbc83047_0 - werkzeug=0.16.1=py_0 - wheel=0.34.2=py36_0 - wrapt=1.12.1=py36h7b6447c_1 - xz=5.2.5=h7b6447c_0 - zlib=1.2.11=h7b6447c_3 - pip: - aiohttp==3.6.2 - aioredis==1.3.1 - astunparse==1.6.3 - async-timeout==3.0.1 - atari-py==0.2.6 - atomicwrites==1.2.1 - attrs==18.2.0 - beautifulsoup4==4.9.1 - blessings==1.7 - cachetools==4.1.1 - cffi==1.14.1 - chardet==3.0.4 - click==7.1.2 - cloudpickle==1.3.0 - colorama==0.4.3 - colorful==0.5.4 - configparser==5.0.1 - contextvars==2.4 - cycler==0.10.0 - cython==0.29.21 - deepdiff==4.3.2 - dill==0.3.2 - docker-pycreds==0.4.0 - docopt==0.6.2 - fasteners==0.15 - filelock==3.0.12 - funcsigs==1.0.2 - future==0.16.0 - gin==0.1.6 - gin-config==0.3.0 - gitdb==4.0.5 - gitpython==3.1.9 - glfw==1.12.0 - google==3.0.0 - google-api-core==1.22.1 - google-auth==1.21.0 - google-auth-oauthlib==0.4.1 - googleapis-common-protos==1.52.0 - gpustat==0.6.0 - gql==0.2.0 - graphql-core==1.1 - gym==0.17.2 - hiredis==1.1.0 - idna==2.7 - idna-ssl==1.1.0 - imageio==2.4.1 - immutables==0.14 - importlib-metadata==1.7.0 - joblib==0.16.0 - jsonnet==0.16.0 - jsonpickle==0.9.6 - jsonschema==3.2.0 - kiwisolver==1.0.1 - lockfile==0.12.2 - mappo==0.0.1 - matplotlib==3.0.0 - mock==2.0.0 - monotonic==1.5 - more-itertools==4.3.0 - mpi4py==3.0.3 - mpyq==0.2.5 - msgpack==1.0.0 - mujoco-py==2.0.2.13 - mujoco-worldgen==0.0.0 - multidict==4.7.6 - munch==2.3.2 - nvidia-ml-py3==7.352.0 - oauthlib==3.1.0 - opencensus==0.7.10 - opencensus-context==0.1.1 - opencv-python==4.2.0.34 - ordered-set==4.0.2 - packaging==20.4 - pandas==1.1.1 - pathlib2==2.3.2 - pathtools==0.1.2 - pbr==4.3.0 - pillow==5.3.0 - pluggy==0.7.1 - portpicker==1.2.0 - probscale==0.2.3 - progressbar2==3.53.1 - prometheus-client==0.8.0 - promise==2.3 - psutil==5.7.2 - py==1.6.0 - py-spy==0.3.3 - pyasn1==0.4.8 - pyasn1-modules==0.2.8 - pycparser==2.20 - pygame==1.9.4 - pyglet==1.5.0 - pyopengl==3.1.5 - pyopengl-accelerate==3.1.5 - pyparsing==2.2.2 - pyrsistent==0.16.0 - pysc2==3.0.0 - pytest==3.8.2 - python-dateutil==2.7.3 - python-utils==2.4.0 - pytz==2020.1 - pyyaml==3.13 - pyzmq==19.0.2 - ray==0.8.0 - redis==3.4.1 - requests==2.24.0 - requests-oauthlib==1.3.0 - rsa==4.6 - s2clientprotocol==4.10.1.75800.0 - s2protocol==4.11.4.78285.0 - sacred==0.7.2 - seaborn==0.10.1 - sentry-sdk==0.18.0 - shortuuid==1.0.1 - sk-video==1.1.10 - smmap==3.0.4 - snakeviz==1.0.0 - soupsieve==2.0.1 - subprocess32==3.5.4 - tabulate==0.8.7 - tensorboard-logger==0.1.0 - tensorboard-plugin-wit==1.7.0 - tensorboardx==2.0 - torch==1.5.1+cu101 - torchvision==0.6.1+cu101 - tornado==5.1.1 - tqdm==4.48.2 - typing-extensions==3.7.4.3 - urllib3==1.23 - wandb==0.10.5 - watchdog==0.10.3 - websocket-client==0.53.0 - whichcraft==0.5.2 - xmltodict==0.12.0 - yarl==1.5.1 - zipp==3.1.0 - zmq==0.0.0 ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/__init__.py ================================================ from mappo_lagrangian import algorithms, envs, runner, scripts, utils, config __version__ = "0.1.0" __all__ = [ "algorithms", "envs", "runner", "scripts", "utils", "config", ] ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/algorithms/__init__.py ================================================ ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/algorithms/r_mappo/__init__.py ================================================ def cost_trpo_macppo(): 
return None ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/algorithms/r_mappo/algorithm/MACPPOPolicy.py ================================================ import torch from mappo_lagrangian.algorithms.r_mappo.algorithm.r_actor_critic import R_Actor, R_Critic from mappo_lagrangian.utils.util import update_linear_schedule class MACPPOPolicy: """ Safe MAPPO Policy class. Wraps actor and critic networks to compute actions and value function predictions. :param args: (argparse.Namespace) arguments containing relevant model and policy information. :param obs_space: (gym.Space) observation space. :param cent_obs_space: (gym.Space) value function input space (centralized input for MAPPO, decentralized for IPPO). :param action_space: (gym.Space) action space. :param device: (torch.device) specifies the device to run on (cpu/gpu). """ def __init__(self, args, obs_space, cent_obs_space, act_space, device=torch.device("cpu")): self.device = device self.lr = args.lr self.critic_lr = args.critic_lr self.opti_eps = args.opti_eps self.weight_decay = args.weight_decay self.obs_space = obs_space self.share_obs_space = cent_obs_space self.act_space = act_space self.actor = R_Actor(args, self.obs_space, self.act_space, self.device) self.critic = R_Critic(args, self.share_obs_space, self.device) self.cost_critic = R_Critic(args, self.share_obs_space, self.device) self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=self.lr, eps=self.opti_eps, weight_decay=self.weight_decay) self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=self.critic_lr, eps=self.opti_eps, weight_decay=self.weight_decay) self.cost_optimizer = torch.optim.Adam(self.cost_critic.parameters(), lr=self.critic_lr, eps=self.opti_eps, weight_decay=self.weight_decay) def lr_decay(self, episode, episodes): """ Decay the actor and critic learning rates. :param episode: (int) current training episode. :param episodes: (int) total number of training episodes. """ update_linear_schedule(self.actor_optimizer, episode, episodes, self.lr) update_linear_schedule(self.critic_optimizer, episode, episodes, self.critic_lr) update_linear_schedule(self.cost_optimizer, episode, episodes, self.critic_lr) def get_actions(self, cent_obs, obs, rnn_states_actor, rnn_states_critic, masks, available_actions=None, deterministic=False, rnn_states_cost=None): """ Compute actions and value function predictions for the given inputs. :param cent_obs (np.ndarray): centralized input to the critic. :param obs (np.ndarray): local agent inputs to the actor. :param rnn_states_actor: (np.ndarray) if actor is RNN, RNN states for actor. :param rnn_states_critic: (np.ndarray) if critic is RNN, RNN states for critic. :param masks: (np.ndarray) denotes points at which RNN states should be reset. :param available_actions: (np.ndarray) denotes which actions are available to agent (if None, all actions available) :param deterministic: (bool) whether the action should be mode of distribution or should be sampled. :return values: (torch.Tensor) value function predictions. :return actions: (torch.Tensor) actions to take. :return action_log_probs: (torch.Tensor) log probabilities of chosen actions. :return rnn_states_actor: (torch.Tensor) updated actor network RNN states. :return rnn_states_critic: (torch.Tensor) updated critic network RNN states. 
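:return cost_preds: (torch.Tensor) cost critic predictions (only returned when rnn_states_cost is given). :return rnn_states_cost: (torch.Tensor) updated cost critic RNN states (only returned when rnn_states_cost is given).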
""" actions, action_log_probs, rnn_states_actor = self.actor(obs, rnn_states_actor, masks, available_actions, deterministic) values, rnn_states_critic = self.critic(cent_obs, rnn_states_critic, masks) if rnn_states_cost is None: return values, actions, action_log_probs, rnn_states_actor, rnn_states_critic else: cost_preds, rnn_states_cost = self.cost_critic(cent_obs, rnn_states_cost, masks) return values, actions, action_log_probs, rnn_states_actor, rnn_states_critic, cost_preds, rnn_states_cost def get_values(self, cent_obs, rnn_states_critic, masks): """ Get value function predictions. :param cent_obs (np.ndarray): centralized input to the critic. :param rnn_states_critic: (np.ndarray) if critic is RNN, RNN states for critic. :param masks: (np.ndarray) denotes points at which RNN states should be reset. :return values: (torch.Tensor) value function predictions. """ values, _ = self.critic(cent_obs, rnn_states_critic, masks) return values def get_cost_values(self, cent_obs, rnn_states_cost, masks): """ Get constraint cost predictions. :param cent_obs (np.ndarray): centralized input to the critic. :param rnn_states_critic: (np.ndarray) if critic is RNN, RNN states for critic. :param masks: (np.ndarray) denotes points at which RNN states should be reset. :return values: (torch.Tensor) value function predictions. """ cost_preds, _ = self.cost_critic(cent_obs, rnn_states_cost, masks) return cost_preds def evaluate_actions(self, cent_obs, obs, rnn_states_actor, rnn_states_critic, action, masks, available_actions=None, active_masks=None, rnn_states_cost=None): """ Get action logprobs / entropy and value function predictions for actor update. :param cent_obs (np.ndarray): centralized input to the critic. :param obs (np.ndarray): local agent inputs to the actor. :param rnn_states_actor: (np.ndarray) if actor is RNN, RNN states for actor. :param rnn_states_critic: (np.ndarray) if critic is RNN, RNN states for critic. :param action: (np.ndarray) actions whose log probabilites and entropy to compute. :param masks: (np.ndarray) denotes points at which RNN states should be reset. :param available_actions: (np.ndarray) denotes which actions are available to agent (if None, all actions available) :param active_masks: (torch.Tensor) denotes whether an agent is active or dead. :return values: (torch.Tensor) value function predictions. :return action_log_probs: (torch.Tensor) log probabilities of the input actions. :return dist_entropy: (torch.Tensor) action distribution entropy for the given inputs. """ action_log_probs, dist_entropy = self.actor.evaluate_actions(obs, rnn_states_actor, action, masks, available_actions, active_masks) values, _ = self.critic(cent_obs, rnn_states_critic, masks) if rnn_states_cost is None: return values, action_log_probs, dist_entropy else: cost_values, _ = self.cost_critic(cent_obs, rnn_states_cost, masks) return values, action_log_probs, dist_entropy, cost_values def act(self, obs, rnn_states_actor, masks, available_actions=None, deterministic=False): """ Compute actions using the given inputs. :param obs (np.ndarray): local agent inputs to the actor. :param rnn_states_actor: (np.ndarray) if actor is RNN, RNN states for actor. :param masks: (np.ndarray) denotes points at which RNN states should be reset. :param available_actions: (np.ndarray) denotes which actions are available to agent (if None, all actions available) :param deterministic: (bool) whether the action should be mode of distribution or should be sampled. 
""" actions, _, rnn_states_actor = self.actor(obs, rnn_states_actor, masks, available_actions, deterministic) return actions, rnn_states_actor ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/algorithms/r_mappo/algorithm/rMAPPOPolicy.py ================================================ import torch from mappo_lagrangian.algorithms.r_mappo.algorithm.r_actor_critic import R_Actor, R_Critic from mappo_lagrangian.utils.util import update_linear_schedule class R_MAPPOPolicy: """ MAPPO Policy class. Wraps actor and critic networks to compute actions and value function predictions. :param args: (argparse.Namespace) arguments containing relevant model and policy information. :param obs_space: (gym.Space) observation space. :param cent_obs_space: (gym.Space) value function input space (centralized input for MAPPO, decentralized for IPPO). :param action_space: (gym.Space) action space. :param device: (torch.device) specifies the device to run on (cpu/gpu). """ def __init__(self, args, obs_space, cent_obs_space, act_space, device=torch.device("cpu")): self.device = device self.lr = args.lr self.critic_lr = args.critic_lr self.opti_eps = args.opti_eps self.weight_decay = args.weight_decay self.obs_space = obs_space self.share_obs_space = cent_obs_space self.act_space = act_space self.actor = R_Actor(args, self.obs_space, self.act_space, self.device) self.critic = R_Critic(args, self.share_obs_space, self.device) self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=self.lr, eps=self.opti_eps, weight_decay=self.weight_decay) self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=self.critic_lr, eps=self.opti_eps, weight_decay=self.weight_decay) def lr_decay(self, episode, episodes): """ Decay the actor and critic learning rates. :param episode: (int) current training episode. :param episodes: (int) total number of training episodes. """ update_linear_schedule(self.actor_optimizer, episode, episodes, self.lr) update_linear_schedule(self.critic_optimizer, episode, episodes, self.critic_lr) def get_actions(self, cent_obs, obs, rnn_states_actor, rnn_states_critic, masks, available_actions=None, deterministic=False): """ Compute actions and value function predictions for the given inputs. :param cent_obs (np.ndarray): centralized input to the critic. :param obs (np.ndarray): local agent inputs to the actor. :param rnn_states_actor: (np.ndarray) if actor is RNN, RNN states for actor. :param rnn_states_critic: (np.ndarray) if critic is RNN, RNN states for critic. :param masks: (np.ndarray) denotes points at which RNN states should be reset. :param available_actions: (np.ndarray) denotes which actions are available to agent (if None, all actions available) :param deterministic: (bool) whether the action should be mode of distribution or should be sampled. :return values: (torch.Tensor) value function predictions. :return actions: (torch.Tensor) actions to take. :return action_log_probs: (torch.Tensor) log probabilities of chosen actions. :return rnn_states_actor: (torch.Tensor) updated actor network RNN states. :return rnn_states_critic: (torch.Tensor) updated critic network RNN states. 
""" actions, action_log_probs, rnn_states_actor = self.actor(obs, rnn_states_actor, masks, available_actions, deterministic) values, rnn_states_critic = self.critic(cent_obs, rnn_states_critic, masks) return values, actions, action_log_probs, rnn_states_actor, rnn_states_critic def get_values(self, cent_obs, rnn_states_critic, masks): """ Get value function predictions. :param cent_obs (np.ndarray): centralized input to the critic. :param rnn_states_critic: (np.ndarray) if critic is RNN, RNN states for critic. :param masks: (np.ndarray) denotes points at which RNN states should be reset. :return values: (torch.Tensor) value function predictions. """ values, _ = self.critic(cent_obs, rnn_states_critic, masks) return values def evaluate_actions(self, cent_obs, obs, rnn_states_actor, rnn_states_critic, action, masks, available_actions=None, active_masks=None): """ Get action logprobs / entropy and value function predictions for actor update. :param cent_obs (np.ndarray): centralized input to the critic. :param obs (np.ndarray): local agent inputs to the actor. :param rnn_states_actor: (np.ndarray) if actor is RNN, RNN states for actor. :param rnn_states_critic: (np.ndarray) if critic is RNN, RNN states for critic. :param action: (np.ndarray) actions whose log probabilites and entropy to compute. :param masks: (np.ndarray) denotes points at which RNN states should be reset. :param available_actions: (np.ndarray) denotes which actions are available to agent (if None, all actions available) :param active_masks: (torch.Tensor) denotes whether an agent is active or dead. :return values: (torch.Tensor) value function predictions. :return action_log_probs: (torch.Tensor) log probabilities of the input actions. :return dist_entropy: (torch.Tensor) action distribution entropy for the given inputs. """ action_log_probs, dist_entropy = self.actor.evaluate_actions(obs, rnn_states_actor, action, masks, available_actions, active_masks) values, _ = self.critic(cent_obs, rnn_states_critic, masks) return values, action_log_probs, dist_entropy def act(self, obs, rnn_states_actor, masks, available_actions=None, deterministic=False): """ Compute actions using the given inputs. :param obs (np.ndarray): local agent inputs to the actor. :param rnn_states_actor: (np.ndarray) if actor is RNN, RNN states for actor. :param masks: (np.ndarray) denotes points at which RNN states should be reset. :param available_actions: (np.ndarray) denotes which actions are available to agent (if None, all actions available) :param deterministic: (bool) whether the action should be mode of distribution or should be sampled. """ actions, _, rnn_states_actor = self.actor(obs, rnn_states_actor, masks, available_actions, deterministic) return actions, rnn_states_actor ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/algorithms/r_mappo/algorithm/r_actor_critic.py ================================================ import torch import torch.nn as nn from mappo_lagrangian.algorithms.utils.util import init, check from mappo_lagrangian.algorithms.utils.cnn import CNNBase from mappo_lagrangian.algorithms.utils.mlp import MLPBase from mappo_lagrangian.algorithms.utils.rnn import RNNLayer from mappo_lagrangian.algorithms.utils.act import ACTLayer from mappo_lagrangian.utils.util import get_shape_from_obs_space class R_Actor(nn.Module): """ Actor network class for MAPPO. Outputs actions given observations. :param args: (argparse.Namespace) arguments containing relevant model information. 
:param obs_space: (gym.Space) observation space. :param action_space: (gym.Space) action space. :param device: (torch.device) specifies the device to run on (cpu/gpu). """ def __init__(self, args, obs_space, action_space, device=torch.device("cpu")): super(R_Actor, self).__init__() self.hidden_size = args.hidden_size self._gain = args.gain self._use_orthogonal = args.use_orthogonal self._use_policy_active_masks = args.use_policy_active_masks self._use_naive_recurrent_policy = args.use_naive_recurrent_policy self._use_recurrent_policy = args.use_recurrent_policy self._recurrent_N = args.recurrent_N self.tpdv = dict(dtype=torch.float32, device=device) obs_shape = get_shape_from_obs_space(obs_space) base = CNNBase if len(obs_shape) == 3 else MLPBase self.base = base(args, obs_shape) if self._use_naive_recurrent_policy or self._use_recurrent_policy: self.rnn = RNNLayer(self.hidden_size, self.hidden_size, self._recurrent_N, self._use_orthogonal) self.act = ACTLayer(action_space, self.hidden_size, self._use_orthogonal, self._gain, args) self.to(device) def forward(self, obs, rnn_states, masks, available_actions=None, deterministic=False): """ Compute actions from the given inputs. :param obs: (np.ndarray / torch.Tensor) observation inputs into network. :param rnn_states: (np.ndarray / torch.Tensor) if RNN network, hidden states for RNN. :param masks: (np.ndarray / torch.Tensor) mask tensor denoting if hidden states should be reinitialized to zeros. :param available_actions: (np.ndarray / torch.Tensor) denotes which actions are available to agent (if None, all actions available) :param deterministic: (bool) whether to sample from action distribution or return the mode. :return actions: (torch.Tensor) actions to take. :return action_log_probs: (torch.Tensor) log probabilities of taken actions. :return rnn_states: (torch.Tensor) updated RNN hidden states. """ obs = check(obs).to(**self.tpdv) rnn_states = check(rnn_states).to(**self.tpdv) masks = check(masks).to(**self.tpdv) if available_actions is not None: available_actions = check(available_actions).to(**self.tpdv) actor_features = self.base(obs) if self._use_naive_recurrent_policy or self._use_recurrent_policy: actor_features, rnn_states = self.rnn(actor_features, rnn_states, masks) actions, action_log_probs = self.act(actor_features, available_actions, deterministic) return actions, action_log_probs, rnn_states def evaluate_actions(self, obs, rnn_states, action, masks, available_actions=None, active_masks=None): """ Compute log probability and entropy of given actions. :param obs: (torch.Tensor) observation inputs into network. :param action: (torch.Tensor) actions whose entropy and log probability to evaluate. :param rnn_states: (torch.Tensor) if RNN network, hidden states for RNN. :param masks: (torch.Tensor) mask tensor denoting if hidden states should be reinitialized to zeros. :param available_actions: (torch.Tensor) denotes which actions are available to agent (if None, all actions available) :param active_masks: (torch.Tensor) denotes whether an agent is active or dead. :return action_log_probs: (torch.Tensor) log probabilities of the input actions. :return dist_entropy: (torch.Tensor) action distribution entropy for the given inputs. 
""" obs = check(obs).to(**self.tpdv) rnn_states = check(rnn_states).to(**self.tpdv) action = check(action).to(**self.tpdv) masks = check(masks).to(**self.tpdv) if available_actions is not None: available_actions = check(available_actions).to(**self.tpdv) if active_masks is not None: active_masks = check(active_masks).to(**self.tpdv) actor_features = self.base(obs) if self._use_naive_recurrent_policy or self._use_recurrent_policy: actor_features, rnn_states = self.rnn(actor_features, rnn_states, masks) action_log_probs, dist_entropy = self.act.evaluate_actions(actor_features, action, available_actions, active_masks= active_masks if self._use_policy_active_masks else None) return action_log_probs, dist_entropy class R_Critic(nn.Module): """ Critic network class for MAPPO. Outputs value function predictions given centralized input (MAPPO) or local observations (IPPO). :param args: (argparse.Namespace) arguments containing relevant model information. :param cent_obs_space: (gym.Space) (centralized) observation space. :param device: (torch.device) specifies the device to run on (cpu/gpu). """ def __init__(self, args, cent_obs_space, device=torch.device("cpu")): super(R_Critic, self).__init__() self.hidden_size = args.hidden_size self._use_orthogonal = args.use_orthogonal self._use_naive_recurrent_policy = args.use_naive_recurrent_policy self._use_recurrent_policy = args.use_recurrent_policy self._recurrent_N = args.recurrent_N self.tpdv = dict(dtype=torch.float32, device=device) init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][self._use_orthogonal] cent_obs_shape = get_shape_from_obs_space(cent_obs_space) base = CNNBase if len(cent_obs_shape) == 3 else MLPBase self.base = base(args, cent_obs_shape) if self._use_naive_recurrent_policy or self._use_recurrent_policy: self.rnn = RNNLayer(self.hidden_size, self.hidden_size, self._recurrent_N, self._use_orthogonal) def init_(m): return init(m, init_method, lambda x: nn.init.constant_(x, 0)) self.v_out = init_(nn.Linear(self.hidden_size, 1)) self.to(device) def forward(self, cent_obs, rnn_states, masks): """ Compute actions from the given inputs. :param cent_obs: (np.ndarray / torch.Tensor) observation inputs into network. :param rnn_states: (np.ndarray / torch.Tensor) if RNN network, hidden states for RNN. :param masks: (np.ndarray / torch.Tensor) mask tensor denoting if RNN states should be reinitialized to zeros. :return values: (torch.Tensor) value function predictions. :return rnn_states: (torch.Tensor) updated RNN hidden states. """ cent_obs = check(cent_obs).to(**self.tpdv) rnn_states = check(rnn_states).to(**self.tpdv) masks = check(masks).to(**self.tpdv) critic_features = self.base(cent_obs) if self._use_naive_recurrent_policy or self._use_recurrent_policy: critic_features, rnn_states = self.rnn(critic_features, rnn_states, masks) values = self.v_out(critic_features) return values, rnn_states ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/algorithms/r_mappo/r_mappo_lagr.py ================================================ import numpy as np import torch import torch.nn as nn from mappo_lagrangian.utils.util import get_gard_norm, huber_loss, mse_loss from mappo_lagrangian.utils.popart import PopArt from mappo_lagrangian.algorithms.utils.util import check class R_MAPPO_Lagr: """ Trainer class for MAPPO-L to update policies. :param args: (argparse.Namespace) arguments containing relevant model, policy, and env information. :param policy: (R_MAPPO_Policy) policy to update. 
:param device: (torch.device) specifies the device to run on (cpu/gpu). :param precompute: Use an 'input' for the linearization constant instead of true_linear_leq_constraint. If present, overrides surrogate When using precompute, the last input is the precomputed linearization constant :param attempt_(in)feasible_recovery: deals with cases where x=0 is infeasible point but problem still feasible (where optimization problem is entirely infeasible) :param revert_to_last_safe_point: Behavior protocol for situation when optimization problem is entirely infeasible. Specifies that we should just reset the parameters to the last point that satisfied constraint. """ def __init__(self, args, policy, hvp_approach=None, attempt_feasible_recovery=False, attempt_infeasible_recovery=False, revert_to_last_safe_point=False, delta_bound=0.02, safety_bound=10, _backtrack_ratio=0.8, _max_backtracks=15, _constraint_name_1="trust_region", _constraint_name_2="safety_region", linesearch_infeasible_recovery=True, accept_violation=False, device=torch.device("cpu")): self.args = args self.device = device self.tpdv = dict(dtype=torch.float32, device=device) self.policy = policy # todo hyper parameters for compute hessian self._damping = 0.00001 self.clip_param = args.clip_param self.ppo_epoch = args.ppo_epoch self.num_mini_batch = args.num_mini_batch self.data_chunk_length = args.data_chunk_length self.value_loss_coef = args.value_loss_coef self.entropy_coef = args.entropy_coef self.max_grad_norm = args.max_grad_norm self.huber_delta = args.huber_delta self.gamma = args.gamma self._use_recurrent_policy = args.use_recurrent_policy self._use_naive_recurrent = args.use_naive_recurrent_policy self._use_max_grad_norm = args.use_max_grad_norm self._use_clipped_value_loss = args.use_clipped_value_loss self._use_huber_loss = args.use_huber_loss self._use_popart = args.use_popart self._use_value_active_masks = args.use_value_active_masks self._use_policy_active_masks = args.use_policy_active_masks self.attempt_feasible_recovery = attempt_feasible_recovery self.attempt_infeasible_recovery = attempt_infeasible_recovery self.revert_to_last_safe_point = revert_to_last_safe_point num_slices = 1 self._max_quad_constraint_val = delta_bound self._max_lin_constraint_val = safety_bound self._backtrack_ratio = _backtrack_ratio self._max_backtracks = _max_backtracks self._constraint_name_1 = _constraint_name_1 self._constraint_name_2 = _constraint_name_2 self._linesearch_infeasible_recovery = linesearch_infeasible_recovery self._accept_violation = accept_violation self.lagrangian_coef = args.lagrangian_coef_rate # lagrangian_coef self.lamda_lagr = args.lamda_lagr # 0.78 self.safety_bound = args.safety_bound # 0.2 Ant self._hvp_approach = hvp_approach if self._use_popart: self.value_normalizer = PopArt(1, device=self.device) else: self.value_normalizer = None def cal_value_loss(self, values, value_preds_batch, return_batch, active_masks_batch): """ Calculate value function loss. :param values: (torch.Tensor) value function predictions. :param value_preds_batch: (torch.Tensor) "old" value predictions from data batch (used for value clip loss) :param return_batch: (torch.Tensor) reward to go returns. :param active_masks_batch: (torch.Tensor) denotes if agent is active or dead at a given timesep. :return value_loss: (torch.Tensor) value function loss. 
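        Sketch of the computation below, written with the parameter names above
        (illustrative pseudocode, not executed):

            v_clip     = value_preds_batch + clamp(values - value_preds_batch, -clip_param, clip_param)
            value_loss = max(l(return_batch - values), l(return_batch - v_clip))

        where l is a Huber loss when use_huber_loss is set and a squared error otherwise,
        the elementwise maximum is only taken when use_clipped_value_loss is set, the
        returns are first passed through the value normalizer when use_popart is set,
        and the result is mean-reduced (masked by active_masks_batch when
        use_value_active_masks is set).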
""" if self._use_popart: value_pred_clipped = value_preds_batch + (values - value_preds_batch).clamp(-self.clip_param, self.clip_param) error_clipped = self.value_normalizer(return_batch) - value_pred_clipped error_original = self.value_normalizer(return_batch) - values else: value_pred_clipped = value_preds_batch + (values - value_preds_batch).clamp(-self.clip_param, self.clip_param) error_clipped = return_batch - value_pred_clipped error_original = return_batch - values if self._use_huber_loss: value_loss_clipped = huber_loss(error_clipped, self.huber_delta) value_loss_original = huber_loss(error_original, self.huber_delta) else: value_loss_clipped = mse_loss(error_clipped) value_loss_original = mse_loss(error_original) if self._use_clipped_value_loss: value_loss = torch.max(value_loss_original, value_loss_clipped) else: value_loss = value_loss_original if self._use_value_active_masks: value_loss = (value_loss * active_masks_batch).sum() / active_masks_batch.sum() else: value_loss = value_loss.mean() return value_loss def _get_flat_grad(self, y: torch.Tensor, model: nn.Module, **kwargs) -> torch.Tensor: # caculate first order gradient of kl with respect to theta grads = torch.autograd.grad(y, model.parameters(), **kwargs, allow_unused=True) # type: ignore # a = torch.where(grads.dtype = None, zero, grads)) _grads = [] for val in grads: if val != None: _grads.append(val); return torch.cat([grad.reshape(-1) for grad in _grads]) def _conjugate_gradients(self, b: torch.Tensor, flat_kl_grad: torch.Tensor, nsteps: int = 10, residual_tol: float = 1e-10) -> torch.Tensor: x = torch.zeros_like(b) r, p = b.clone(), b.clone() # Note: should be 'r, p = b - MVP(x)', but for x=0, MVP(x)=0. # Change if doing warm start. rdotr = r.dot(r) for i in range(nsteps): z = self.cal_second_hessian(p, flat_kl_grad) alpha = rdotr / p.dot(z) x += alpha * p r -= alpha * z new_rdotr = r.dot(r) if new_rdotr < residual_tol: break p = r + new_rdotr / rdotr * p rdotr = new_rdotr return x def cal_second_hessian(self, v: torch.Tensor, flat_kl_grad: torch.Tensor) -> torch.Tensor: """Matrix vector product.""" # caculate second order gradient of kl with respect to theta kl_v = (flat_kl_grad * v).sum() flat_kl_grad_grad = self._get_flat_grad( kl_v, self.policy.actor, retain_graph=True).detach() return flat_kl_grad_grad + v * self._damping def _set_from_flat_params(self, model: nn.Module, flat_params: torch.Tensor) -> nn.Module: prev_ind = 0 for param in model.parameters(): flat_size = int(np.prod(list(param.size()))) param.data.copy_( flat_params[prev_ind:prev_ind + flat_size].view(param.size())) prev_ind += flat_size return model def ppo_update(self, sample, update_actor=True, precomputed_eval=None, precomputed_threshold=None, diff_threshold=False): """ Update actor and critic networks. :param sample: (Tuple) contains data batch with which to update networks. :update_actor: (bool) whether to update actor network. :return value_loss: (torch.Tensor) value function loss. :return critic_grad_norm: (torch.Tensor) gradient norm from critic update. ;return policy_loss: (torch.Tensor) actor(policy) loss value. :return dist_entropy: (torch.Tensor) action entropies. :return actor_grad_norm: (torch.Tensor) gradient norm from actor update. :return imp_weights: (torch.Tensor) importance sampling weights. :param precompute: Use an 'input' for the linearization constant instead of true_linear_leq_constraint. 
If present, overrides surrogate When using precompute, the last input is the precomputed linearization constant :param attempt_(in)feasible_recovery: deals with cases where x=0 is infeasible point but problem still feasible (where optimization problem is entirely infeasible) :param revert_to_last_safe_point: Behavior protocol for situation when optimization problem is entirely infeasible. Specifies that we should just reset the parameters to the last point that satisfied constraint. precomputed_eval : The value of the safety constraint at theta = theta_old. Provide this when the lin_constraint function is a surrogate, and evaluating it at theta_old will not give you the correct value. precomputed_threshold & diff_threshold : These relate to the linesearch that is used to ensure constraint satisfaction. If the lin_constraint function is indeed the safety constraint function, then it suffices to check that lin_constraint < max_lin_constraint_val to ensure satisfaction. But if the lin_constraint function is a surrogate - ie, it only has the same /gradient/ as the safety constraint - then the threshold we check it against has to be adjusted. You can provide a fixed adjusted threshold via "precomputed_threshold." When "diff_threshold" == True, instead of checking lin_constraint < threshold, it will check lin_constraint - old_lin_constraint < threshold. """ share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, \ value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, \ adv_targ, available_actions_batch, factor_batch, cost_preds_batch, cost_returns_barch, rnn_states_cost_batch, \ cost_adv_targ, aver_episode_costs = sample old_action_log_probs_batch = check(old_action_log_probs_batch).to(**self.tpdv) adv_targ = check(adv_targ).to(**self.tpdv) cost_adv_targ = check(cost_adv_targ).to(**self.tpdv) value_preds_batch = check(value_preds_batch).to(**self.tpdv) return_batch = check(return_batch).to(**self.tpdv) active_masks_batch = check(active_masks_batch).to(**self.tpdv) factor_batch = check(factor_batch).to(**self.tpdv) cost_returns_barch = check(cost_returns_barch).to(**self.tpdv) cost_preds_batch = check(cost_preds_batch).to(**self.tpdv) # Reshape to do in a single forward pass for all steps values, action_log_probs, dist_entropy, cost_values = self.policy.evaluate_actions(share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, masks_batch, available_actions_batch, active_masks_batch, rnn_states_cost_batch) # todo: lagrangian coef adv_targ_hybrid = adv_targ - self.lamda_lagr*cost_adv_targ # todo: lagrangian actor update step # actor update imp_weights = torch.exp(action_log_probs - old_action_log_probs_batch) surr1 = imp_weights * adv_targ_hybrid surr2 = torch.clamp(imp_weights, 1.0 - self.clip_param, 1.0 + self.clip_param) * adv_targ_hybrid if self._use_policy_active_masks: policy_action_loss = (-torch.sum(factor_batch * torch.min(surr1, surr2), dim=-1, keepdim=True) * active_masks_batch).sum() / active_masks_batch.sum() else: policy_action_loss = -torch.sum(factor_batch * torch.min(surr1, surr2), dim=-1, keepdim=True).mean() policy_loss = policy_action_loss self.policy.actor_optimizer.zero_grad() if update_actor: (policy_loss - dist_entropy * self.entropy_coef).backward() if self._use_max_grad_norm: actor_grad_norm = nn.utils.clip_grad_norm_(self.policy.actor.parameters(), self.max_grad_norm) else: actor_grad_norm = get_gard_norm(self.policy.actor.parameters()) self.policy.actor_optimizer.step() # 
todo: update lamda_lagr delta_lamda_lagr = -(( aver_episode_costs.mean() - self.safety_bound) * (1 - self.gamma) + (imp_weights * cost_adv_targ)).mean().detach() R_Relu = torch.nn.ReLU() new_lamda_lagr = R_Relu(self.lamda_lagr - (delta_lamda_lagr * self.lagrangian_coef)) self.lamda_lagr = new_lamda_lagr # todo: reward critic update value_loss = self.cal_value_loss(values, value_preds_batch, return_batch, active_masks_batch) self.policy.critic_optimizer.zero_grad() (value_loss * self.value_loss_coef).backward() if self._use_max_grad_norm: critic_grad_norm = nn.utils.clip_grad_norm_(self.policy.critic.parameters(), self.max_grad_norm) else: critic_grad_norm = get_gard_norm(self.policy.critic.parameters()) self.policy.critic_optimizer.step() # todo: cost critic update cost_loss = self.cal_value_loss(cost_values, cost_preds_batch, cost_returns_barch, active_masks_batch) self.policy.cost_optimizer.zero_grad() (cost_loss * self.value_loss_coef).backward() if self._use_max_grad_norm: cost_grad_norm = nn.utils.clip_grad_norm_(self.policy.cost_critic.parameters(), self.max_grad_norm) else: cost_grad_norm = get_gard_norm(self.policy.cost_critic.parameters()) self.policy.cost_optimizer.step() return value_loss, critic_grad_norm, policy_loss, dist_entropy, actor_grad_norm, imp_weights, cost_loss, cost_grad_norm def train(self, buffer, update_actor=True): """ Perform a training update using minibatch GD. :param buffer: (SharedReplayBuffer) buffer containing training data. :param update_actor: (bool) whether to update actor network. :return train_info: (dict) contains information regarding training update (e.g. loss, grad norms, etc). """ if self._use_popart: advantages = buffer.returns[:-1] - self.value_normalizer.denormalize(buffer.value_preds[:-1]) else: advantages = buffer.returns[:-1] - buffer.value_preds[:-1] advantages_copy = advantages.copy() advantages_copy[buffer.active_masks[:-1] == 0.0] = np.nan mean_advantages = np.nanmean(advantages_copy) std_advantages = np.nanstd(advantages_copy) advantages = (advantages - mean_advantages) / (std_advantages + 1e-5) if self._use_popart: cost_adv = buffer.cost_returns[:-1] - self.value_normalizer.denormalize(buffer.cost_preds[:-1]) else: cost_adv = buffer.cost_returns[:-1] - buffer.cost_preds[:-1] cost_adv_copy = cost_adv.copy() cost_adv_copy[buffer.active_masks[:-1] == 0.0] = np.nan mean_cost_adv = np.nanmean(cost_adv_copy) std_cost_adv = np.nanstd(cost_adv_copy) cost_adv = (cost_adv - mean_cost_adv) / (std_cost_adv + 1e-5) train_info = {} train_info['value_loss'] = 0 train_info['policy_loss'] = 0 train_info['dist_entropy'] = 0 train_info['actor_grad_norm'] = 0 train_info['critic_grad_norm'] = 0 train_info['ratio'] = 0 train_info['cost_grad_norm'] = 0 train_info['cost_loss'] = 0 for _ in range(self.ppo_epoch): if self._use_naive_recurrent: data_generator = buffer.naive_recurrent_generator(advantages, self.num_mini_batch, cost_adv) else: data_generator = buffer.feed_forward_generator(advantages, self.num_mini_batch, cost_adv=cost_adv) for sample in data_generator: value_loss, critic_grad_norm, policy_loss, dist_entropy, actor_grad_norm, imp_weights, cost_loss, cost_grad_norm \ = self.ppo_update(sample, update_actor, precomputed_threshold=None, diff_threshold=False) train_info['value_loss'] += value_loss.item() train_info['policy_loss'] += policy_loss.item() train_info['dist_entropy'] += dist_entropy.item() train_info['actor_grad_norm'] += actor_grad_norm train_info['critic_grad_norm'] += critic_grad_norm train_info['ratio'] += imp_weights.mean() 
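                # Sketch of the Lagrange-multiplier update that ppo_update performs on each
                # call above, using the names from that method (illustrative only, not executed here):
                #     delta      = -((aver_episode_costs.mean() - safety_bound) * (1 - gamma)
                #                    + imp_weights * cost_adv_targ).mean()
                #     lamda_lagr = relu(lamda_lagr - lagrangian_coef * delta)
                # so lamda_lagr grows when the constraint term (cost above the bound plus the
                # importance-weighted cost advantage) is positive on average, and the ReLU
                # projects it back to zero otherwise.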
train_info['cost_loss'] += cost_loss.item() train_info['cost_grad_norm'] += cost_grad_norm num_updates = self.ppo_epoch * self.num_mini_batch for k in train_info.keys(): train_info[k] /= num_updates return train_info def prep_training(self): self.policy.actor.train() self.policy.critic.train() self.policy.cost_critic.train() def prep_rollout(self): self.policy.actor.eval() self.policy.critic.eval() self.policy.cost_critic.eval() ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/algorithms/utils/act.py ================================================ from .distributions import Bernoulli, Categorical, DiagGaussian import torch import torch.nn as nn class ACTLayer(nn.Module): """ MLP Module to compute actions. :param action_space: (gym.Space) action space. :param inputs_dim: (int) dimension of network input. :param use_orthogonal: (bool) whether to use orthogonal initialization. :param gain: (float) gain of the output layer of the network. """ def __init__(self, action_space, inputs_dim, use_orthogonal, gain, args=None): super(ACTLayer, self).__init__() self.mixed_action = False self.multi_discrete = False if action_space.__class__.__name__ == "Discrete": action_dim = action_space.n self.action_out = Categorical(inputs_dim, action_dim, use_orthogonal, gain) elif action_space.__class__.__name__ == "Box": action_dim = action_space.shape[0] self.action_out = DiagGaussian(inputs_dim, action_dim, use_orthogonal, gain, args) elif action_space.__class__.__name__ == "MultiBinary": action_dim = action_space.shape[0] self.action_out = Bernoulli(inputs_dim, action_dim, use_orthogonal, gain) elif action_space.__class__.__name__ == "MultiDiscrete": self.multi_discrete = True action_dims = action_space.high - action_space.low + 1 self.action_outs = [] for action_dim in action_dims: self.action_outs.append(Categorical(inputs_dim, action_dim, use_orthogonal, gain)) self.action_outs = nn.ModuleList(self.action_outs) else: # discrete + continous self.mixed_action = True continous_dim = action_space[0].shape[0] discrete_dim = action_space[1].n self.action_outs = nn.ModuleList([DiagGaussian(inputs_dim, continous_dim, use_orthogonal, gain, args), Categorical(inputs_dim, discrete_dim, use_orthogonal, gain)]) def forward(self, x, available_actions=None, deterministic=False): """ Compute actions and action logprobs from given input. :param x: (torch.Tensor) input to network. :param available_actions: (torch.Tensor) denotes which actions are available to agent (if None, all actions available) :param deterministic: (bool) whether to sample from action distribution or return the mode. :return actions: (torch.Tensor) actions to take. :return action_log_probs: (torch.Tensor) log probabilities of taken actions. 
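        Illustrative usage for a Box (continuous) action space, assuming act_layer is a
        constructed ACTLayer and x has shape (batch, inputs_dim):

            actions, action_log_probs = act_layer(x)                  # sample from the distribution
            greedy_actions, _ = act_layer(x, deterministic=True)      # take the distribution mode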
""" if self.mixed_action : actions = [] action_log_probs = [] for action_out in self.action_outs: action_logit = action_out(x) action = action_logit.mode() if deterministic else action_logit.sample() action_log_prob = action_logit.log_probs(action) actions.append(action.float()) action_log_probs.append(action_log_prob) actions = torch.cat(actions, -1) action_log_probs = torch.sum(torch.cat(action_log_probs, -1), -1, keepdim=True) elif self.multi_discrete: actions = [] action_log_probs = [] for action_out in self.action_outs: action_logit = action_out(x) action = action_logit.mode() if deterministic else action_logit.sample() action_log_prob = action_logit.log_probs(action) actions.append(action) action_log_probs.append(action_log_prob) actions = torch.cat(actions, -1) action_log_probs = torch.cat(action_log_probs, -1) else: action_logits = self.action_out(x, available_actions) actions = action_logits.mode() if deterministic else action_logits.sample() action_log_probs = action_logits.log_probs(actions) return actions, action_log_probs def get_probs(self, x, available_actions=None): """ Compute action probabilities from inputs. :param x: (torch.Tensor) input to network. :param available_actions: (torch.Tensor) denotes which actions are available to agent (if None, all actions available) :return action_probs: (torch.Tensor) """ if self.mixed_action or self.multi_discrete: action_probs = [] for action_out in self.action_outs: action_logit = action_out(x) action_prob = action_logit.probs action_probs.append(action_prob) action_probs = torch.cat(action_probs, -1) else: action_logits = self.action_out(x, available_actions) action_probs = action_logits.probs return action_probs def evaluate_actions(self, x, action, available_actions=None, active_masks=None): """ Compute log probability and entropy of given actions. :param x: (torch.Tensor) input to network. :param action: (torch.Tensor) actions whose entropy and log probability to evaluate. :param available_actions: (torch.Tensor) denotes which actions are available to agent (if None, all actions available) :param active_masks: (torch.Tensor) denotes whether an agent is active or dead. :return action_log_probs: (torch.Tensor) log probabilities of the input actions. :return dist_entropy: (torch.Tensor) action distribution entropy for the given inputs. """ if self.mixed_action: a, b = action.split((2, 1), -1) b = b.long() action = [a, b] action_log_probs = [] dist_entropy = [] for action_out, act in zip(self.action_outs, action): action_logit = action_out(x) action_log_probs.append(action_logit.log_probs(act)) if active_masks is not None: if len(action_logit.entropy().shape) == len(active_masks.shape): dist_entropy.append((action_logit.entropy() * active_masks).sum()/active_masks.sum()) else: dist_entropy.append((action_logit.entropy() * active_masks.squeeze(-1)).sum()/active_masks.sum()) else: dist_entropy.append(action_logit.entropy().mean()) action_log_probs = torch.sum(torch.cat(action_log_probs, -1), -1, keepdim=True) dist_entropy = dist_entropy[0] / 2.0 + dist_entropy[1] / 0.98 #! 
dosen't make sense elif self.multi_discrete: action = torch.transpose(action, 0, 1) action_log_probs = [] dist_entropy = [] for action_out, act in zip(self.action_outs, action): action_logit = action_out(x) action_log_probs.append(action_logit.log_probs(act)) if active_masks is not None: dist_entropy.append((action_logit.entropy()*active_masks.squeeze(-1)).sum()/active_masks.sum()) else: dist_entropy.append(action_logit.entropy().mean()) action_log_probs = torch.cat(action_log_probs, -1) # ! could be wrong dist_entropy = torch.tensor(dist_entropy).mean() else: action_logits = self.action_out(x, available_actions) action_log_probs = action_logits.log_probs(action) if active_masks is not None: dist_entropy = (action_logits.entropy()*active_masks).sum()/active_masks.sum() # dist_entropy = (action_logits.entropy()*active_masks.squeeze(-1)).sum()/active_masks.sum() else: dist_entropy = action_logits.entropy().mean() return action_log_probs, dist_entropy ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/algorithms/utils/cnn.py ================================================ import torch.nn as nn from .util import init """CNN Modules and utils.""" class Flatten(nn.Module): def forward(self, x): return x.view(x.size(0), -1) class CNNLayer(nn.Module): def __init__(self, obs_shape, hidden_size, use_orthogonal, use_ReLU, kernel_size=3, stride=1): super(CNNLayer, self).__init__() active_func = [nn.Tanh(), nn.ReLU()][use_ReLU] init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal] gain = nn.init.calculate_gain(['tanh', 'relu'][use_ReLU]) def init_(m): return init(m, init_method, lambda x: nn.init.constant_(x, 0), gain=gain) input_channel = obs_shape[0] input_width = obs_shape[1] input_height = obs_shape[2] self.cnn = nn.Sequential( init_(nn.Conv2d(in_channels=input_channel, out_channels=hidden_size // 2, kernel_size=kernel_size, stride=stride) ), active_func, Flatten(), init_(nn.Linear(hidden_size // 2 * (input_width - kernel_size + stride) * (input_height - kernel_size + stride), hidden_size) ), active_func, init_(nn.Linear(hidden_size, hidden_size)), active_func) def forward(self, x): x = x / 255.0 x = self.cnn(x) return x class CNNBase(nn.Module): def __init__(self, args, obs_shape): super(CNNBase, self).__init__() self._use_orthogonal = args.use_orthogonal self._use_ReLU = args.use_ReLU self.hidden_size = args.hidden_size self.cnn = CNNLayer(obs_shape, self.hidden_size, self._use_orthogonal, self._use_ReLU) def forward(self, x): x = self.cnn(x) return x ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/algorithms/utils/distributions.py ================================================ import torch import torch.nn as nn from .util import init """ Modify standard PyTorch distributions so they to make compatible with this codebase. 
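For example (shapes are illustrative), the wrappers defined below are used as:

    dist = DiagGaussian(num_inputs=64, num_outputs=6)(features)   # features: (batch, 64)
    action = dist.sample()                                         # (batch, 6)
    log_prob = dist.log_probs(action)                              # per-dimension log-probs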
""" # # Standardize distribution interfaces # # Categorical class FixedCategorical(torch.distributions.Categorical): def sample(self): return super().sample().unsqueeze(-1) def log_probs(self, actions): return ( super() .log_prob(actions.squeeze(-1)) .view(actions.size(0), -1) .sum(-1) .unsqueeze(-1) ) def mode(self): return self.probs.argmax(dim=-1, keepdim=True) # Normal class FixedNormal(torch.distributions.Normal): def log_probs(self, actions): return super().log_prob(actions) # return super().log_prob(actions).sum(-1, keepdim=True) def entrop(self): return super.entropy().sum(-1) def mode(self): return self.mean # Bernoulli class FixedBernoulli(torch.distributions.Bernoulli): def log_probs(self, actions): return super.log_prob(actions).view(actions.size(0), -1).sum(-1).unsqueeze(-1) def entropy(self): return super().entropy().sum(-1) def mode(self): return torch.gt(self.probs, 0.5).float() class Categorical(nn.Module): def __init__(self, num_inputs, num_outputs, use_orthogonal=True, gain=0.01): super(Categorical, self).__init__() init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal] def init_(m): return init(m, init_method, lambda x: nn.init.constant_(x, 0), gain) self.linear = init_(nn.Linear(num_inputs, num_outputs)) def forward(self, x, available_actions=None): x = self.linear(x) if available_actions is not None: x[available_actions == 0] = -1e10 return FixedCategorical(logits=x) # class DiagGaussian(nn.Module): # def __init__(self, num_inputs, num_outputs, use_orthogonal=True, gain=0.01): # super(DiagGaussian, self).__init__() # # init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal] # def init_(m): # return init(m, init_method, lambda x: nn.init.constant_(x, 0), gain) # # self.fc_mean = init_(nn.Linear(num_inputs, num_outputs)) # self.logstd = AddBias(torch.zeros(num_outputs)) # # def forward(self, x, available_actions=None): # action_mean = self.fc_mean(x) # # # An ugly hack for my KFAC implementation. # zeros = torch.zeros(action_mean.size()) # if x.is_cuda: # zeros = zeros.cuda() # # action_logstd = self.logstd(zeros) # return FixedNormal(action_mean, action_logstd.exp()) class DiagGaussian(nn.Module): def __init__(self, num_inputs, num_outputs, use_orthogonal=True, gain=0.01, args=None): super(DiagGaussian, self).__init__() init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal] def init_(m): return init(m, init_method, lambda x: nn.init.constant_(x, 0), gain) if args is not None: self.std_x_coef = args.std_x_coef self.std_y_coef = args.std_y_coef else: self.std_x_coef = 1. 
self.std_y_coef = 0.5 self.fc_mean = init_(nn.Linear(num_inputs, num_outputs)) log_std = torch.ones(num_outputs) * self.std_x_coef self.log_std = torch.nn.Parameter(log_std) def forward(self, x, available_actions=None): action_mean = self.fc_mean(x) action_std = torch.sigmoid(self.log_std / self.std_x_coef) * self.std_y_coef return FixedNormal(action_mean, action_std) class Bernoulli(nn.Module): def __init__(self, num_inputs, num_outputs, use_orthogonal=True, gain=0.01): super(Bernoulli, self).__init__() init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal] def init_(m): return init(m, init_method, lambda x: nn.init.constant_(x, 0), gain) self.linear = init_(nn.Linear(num_inputs, num_outputs)) def forward(self, x): x = self.linear(x) return FixedBernoulli(logits=x) class AddBias(nn.Module): def __init__(self, bias): super(AddBias, self).__init__() self._bias = nn.Parameter(bias.unsqueeze(1)) def forward(self, x): if x.dim() == 2: bias = self._bias.t().view(1, -1) else: bias = self._bias.t().view(1, -1, 1, 1) return x + bias ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/algorithms/utils/mlp.py ================================================ import torch.nn as nn from .util import init, get_clones """MLP modules.""" class MLPLayer(nn.Module): def __init__(self, input_dim, hidden_size, layer_N, use_orthogonal, use_ReLU): super(MLPLayer, self).__init__() self._layer_N = layer_N active_func = [nn.Tanh(), nn.ReLU()][use_ReLU] init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal] gain = nn.init.calculate_gain(['tanh', 'relu'][use_ReLU]) def init_(m): return init(m, init_method, lambda x: nn.init.constant_(x, 0), gain=gain) self.fc1 = nn.Sequential( init_(nn.Linear(input_dim, hidden_size)), active_func, nn.LayerNorm(hidden_size)) self.fc_h = nn.Sequential(init_( nn.Linear(hidden_size, hidden_size)), active_func, nn.LayerNorm(hidden_size)) self.fc2 = get_clones(self.fc_h, self._layer_N) def forward(self, x): x = self.fc1(x) for i in range(self._layer_N): x = self.fc2[i](x) return x class MLPBase(nn.Module): def __init__(self, args, obs_shape, cat_self=True, attn_internal=False): super(MLPBase, self).__init__() self._use_feature_normalization = args.use_feature_normalization self._use_orthogonal = args.use_orthogonal self._use_ReLU = args.use_ReLU self._stacked_frames = args.stacked_frames self._layer_N = args.layer_N self.hidden_size = args.hidden_size obs_dim = obs_shape[0] if self._use_feature_normalization: self.feature_norm = nn.LayerNorm(obs_dim) self.mlp = MLPLayer(obs_dim, self.hidden_size, self._layer_N, self._use_orthogonal, self._use_ReLU) def forward(self, x): if self._use_feature_normalization: x = self.feature_norm(x) x = self.mlp(x) return x ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/algorithms/utils/rnn.py ================================================ import torch import torch.nn as nn """RNN modules.""" class RNNLayer(nn.Module): def __init__(self, inputs_dim, outputs_dim, recurrent_N, use_orthogonal): super(RNNLayer, self).__init__() self._recurrent_N = recurrent_N self._use_orthogonal = use_orthogonal self.rnn = nn.GRU(inputs_dim, outputs_dim, num_layers=self._recurrent_N) for name, param in self.rnn.named_parameters(): if 'bias' in name: nn.init.constant_(param, 0) elif 'weight' in name: if self._use_orthogonal: nn.init.orthogonal_(param) else: nn.init.xavier_uniform_(param) self.norm = nn.LayerNorm(outputs_dim) def forward(self, x, hxs, 
masks): if x.size(0) == hxs.size(0): x, hxs = self.rnn(x.unsqueeze(0), (hxs * masks.repeat(1, self._recurrent_N).unsqueeze(-1)).transpose(0, 1).contiguous()) x = x.squeeze(0) hxs = hxs.transpose(0, 1) else: # x is a (T, N, -1) tensor that has been flatten to (T * N, -1) N = hxs.size(0) T = int(x.size(0) / N) # unflatten x = x.view(T, N, x.size(1)) # Same deal with masks masks = masks.view(T, N) # Let's figure out which steps in the sequence have a zero for any agent # We will always assume t=0 has a zero in it as that makes the logic cleaner has_zeros = ((masks[1:] == 0.0) .any(dim=-1) .nonzero() .squeeze() .cpu()) # +1 to correct the masks[1:] if has_zeros.dim() == 0: # Deal with scalar has_zeros = [has_zeros.item() + 1] else: has_zeros = (has_zeros + 1).numpy().tolist() # add t=0 and t=T to the list has_zeros = [0] + has_zeros + [T] hxs = hxs.transpose(0, 1) outputs = [] for i in range(len(has_zeros) - 1): # We can now process steps that don't have any zeros in masks together! # This is much faster start_idx = has_zeros[i] end_idx = has_zeros[i + 1] temp = (hxs * masks[start_idx].view(1, -1, 1).repeat(self._recurrent_N, 1, 1)).contiguous() rnn_scores, hxs = self.rnn(x[start_idx:end_idx], temp) outputs.append(rnn_scores) # assert len(outputs) == T # x is a (T, N, -1) tensor x = torch.cat(outputs, dim=0) # flatten x = x.reshape(T * N, -1) hxs = hxs.transpose(0, 1) x = self.norm(x) return x, hxs ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/algorithms/utils/util.py ================================================ import copy import numpy as np import torch import torch.nn as nn def init(module, weight_init, bias_init, gain=1): weight_init(module.weight.data, gain=gain) bias_init(module.bias.data) return module def get_clones(module, N): return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) def check(input): output = torch.from_numpy(input) if type(input) == np.ndarray else input return output ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/config.py ================================================ import argparse def get_config(): """ The configuration parser for common hyperparameters of all environment. Please reach each `scripts/train/_runner.py` file to find private hyperparameters only used in . Prepare parameters: --algorithm_name specifiy the algorithm, including `["rmappo", "mappo", "rmappg", "mappg", "trpo"]` --experiment_name an identifier to distinguish different experiment. --seed set seed for numpy and torch --cuda by default True, will use GPU to train; or else will use CPU; --cuda_deterministic by default, make sure random seed effective. if set, bypass such function. --n_training_threads number of training threads working in parallel. by default 1 --n_rollout_threads number of parallel envs for training rollout. by default 32 --n_eval_rollout_threads number of parallel envs for evaluating rollout. by default 1 --n_render_rollout_threads number of parallel envs for rendering, could only be set as 1 for some environments. --num_env_steps number of env steps to train (default: 10e6) --user_name [for wandb usage], to specify user's name for simply collecting training data. --use_wandb [for wandb usage], by default True, will log date to wandb server. or else will use tensorboard to log data. Env parameters: --env_name specify the name of environment --use_obs_instead_of_state [only for some env] by default False, will use global state; or else will use concatenated local obs. 
Replay Buffer parameters: --episode_length the max length of episode in the buffer. Network parameters: --share_policy by default True, all agents will share the same network; set to make training agents use different policies. --use_centralized_V by default True, use centralized training mode; or else will decentralized training mode. --stacked_frames Number of input frames which should be stack together. --hidden_size Dimension of hidden layers for actor/critic networks --layer_N Number of layers for actor/critic networks --use_ReLU by default True, will use ReLU. or else will use Tanh. --use_popart by default True, use running mean and std to normalize rewards. --use_feature_normalization by default True, apply layernorm to normalize inputs. --use_orthogonal by default True, use Orthogonal initialization for weights and 0 initialization for biases. or else, will use xavier uniform inilialization. --gain by default 0.01, use the gain # of last action layer --use_naive_recurrent_policy by default False, use the whole trajectory to calculate hidden states. --use_recurrent_policy by default, use Recurrent Policy. If set, do not use. --recurrent_N The number of recurrent layers ( default 1). --data_chunk_length Time length of chunks used to train a recurrent_policy, default 10. Optimizer parameters: --lr learning rate parameter, (default: 5e-4, fixed). --critic_lr learning rate of critic (default: 5e-4, fixed) --opti_eps RMSprop optimizer epsilon (default: 1e-5) --weight_decay coefficience of weight decay (default: 0) PPO parameters: --ppo_epoch number of ppo epochs (default: 15) --use_clipped_value_loss by default, clip loss value. If set, do not clip loss value. --clip_param ppo clip parameter (default: 0.2) --num_mini_batch number of batches for ppo (default: 1) --entropy_coef entropy term coefficient (default: 0.01) --use_max_grad_norm by default, use max norm of gradients. If set, do not use. --max_grad_norm max norm of gradients (default: 0.5) --use_gae by default, use generalized advantage estimation. If set, do not use gae. --gamma discount factor for rewards (default: 0.99) --gae_lambda gae lambda parameter (default: 0.95) --use_proper_time_limits by default, the return value does consider limits of time. If set, compute returns with considering time limits factor. --use_huber_loss by default, use huber loss. If set, do not use huber loss. --use_value_active_masks by default True, whether to mask useless data in value loss. --huber_delta coefficient of huber loss. PPG parameters: --aux_epoch number of auxiliary epochs. (default: 4) --clone_coef clone term coefficient (default: 0.01) Run parameters: --use_linear_lr_decay by default, do not apply linear decay to learning rate. If set, use a linear schedule on the learning rate Save & Log parameters: --save_interval time duration between contiunous twice models saving. --log_interval time duration between contiunous twice log printing. Eval parameters: --use_eval by default, do not start evaluation. If set`, start evaluation alongside with training. --eval_interval time duration between contiunous twice evaluation progress. --eval_episodes number of episodes of a single evaluation. Render parameters: --save_gifs by default, do not save render video. If set, save video. --use_render by default, do not render the env during training. If set, start render. Note: something, the environment has internal render process which is not controlled by this hyperparam. 
--render_episodes the number of episodes to render a given env --ifi the play interval of each rendered image in saved video. Pretrained parameters: --model_dir by default None. set the path to pretrained model. """ parser = argparse.ArgumentParser( description='mappo_lagrangian', formatter_class=argparse.RawDescriptionHelpFormatter) # prepare parameters parser.add_argument("--algorithm_name", type=str, default=' ', choices=[ "mappo_lagr"]) parser.add_argument("--experiment_name", type=str, default="check", help="an identifier to distinguish different experiment.") parser.add_argument("--seed", type=int, default=1, help="Random seed for numpy/torch") parser.add_argument("--cuda", action='store_false', default=False, help="by default True, will use GPU to train; or else will use CPU;") parser.add_argument("--cuda_deterministic", action='store_false', default=True, help="by default, make sure random seed effective. if set, bypass such function.") parser.add_argument("--n_training_threads", type=int, default=1, help="Number of torch threads for training") parser.add_argument("--n_rollout_threads", type=int, default=32, help="Number of parallel envs for training rollouts") parser.add_argument("--n_eval_rollout_threads", type=int, default=1, help="Number of parallel envs for evaluating rollouts") parser.add_argument("--n_render_rollout_threads", type=int, default=1, help="Number of parallel envs for rendering rollouts") parser.add_argument("--num_env_steps", type=int, default=10e6, help='Number of environment steps to train (default: 10e6)') parser.add_argument("--user_name", type=str, default='marl',help="[for wandb usage], to specify user's name for simply collecting training data.") parser.add_argument("--use_wandb", action='store_false', default=False, help="[for wandb usage], by default True, will log date to wandb server. 
or else will use tensorboard to log data.") # env parameters parser.add_argument("--env_name", type=str, default='StarCraft2', help="specify the name of environment") parser.add_argument("--use_obs_instead_of_state", action='store_true', default=False, help="Whether to use global state or concatenated obs") # replay buffer parameters parser.add_argument("--episode_length", type=int, default=200, help="Max length for any episode") # network parameters parser.add_argument("--share_policy", action='store_false', default=True, help='Whether agent share the same policy') parser.add_argument("--use_centralized_V", action='store_false', default=True, help="Whether to use centralized V function") parser.add_argument("--stacked_frames", type=int, default=1, help="Dimension of hidden layers for actor/critic networks") parser.add_argument("--use_stacked_frames", action='store_true', default=False, help="Whether to use stacked_frames") parser.add_argument("--hidden_size", type=int, default=64, help="Dimension of hidden layers for actor/critic networks") parser.add_argument("--layer_N", type=int, default=1, help="Number of layers for actor/critic networks") parser.add_argument("--use_ReLU", action='store_false', default=True, help="Whether to use ReLU") parser.add_argument("--use_popart", action='store_false', default=True, help="by default True, use running mean and std to normalize rewards.") parser.add_argument("--use_valuenorm", action='store_false', default=True, help="by default True, use running mean and std to normalize rewards.") parser.add_argument("--use_feature_normalization", action='store_false', default=True, help="Whether to apply layernorm to the inputs") parser.add_argument("--use_orthogonal", action='store_false', default=True, help="Whether to use Orthogonal initialization for weights and 0 initialization for biases") parser.add_argument("--gain", type=float, default=0.01, help="The gain # of last action layer") # recurrent parameters parser.add_argument("--use_naive_recurrent_policy", action='store_true', default=False, help='Whether to use a naive recurrent policy') parser.add_argument("--use_recurrent_policy", action='store_true', default=False, help='use a recurrent policy') parser.add_argument("--recurrent_N", type=int, default=1, help="The number of recurrent layers.") parser.add_argument("--data_chunk_length", type=int, default=10, help="Time length of chunks used to train a recurrent_policy") # optimizer parameters parser.add_argument("--lr", type=float, default=5e-4, help='learning rate (default: 5e-4)') parser.add_argument("--critic_lr", type=float, default=5e-4, help='critic learning rate (default: 5e-4)') parser.add_argument("--opti_eps", type=float, default=1e-5, help='RMSprop optimizer epsilon (default: 1e-5)') parser.add_argument("--weight_decay", type=float, default=0) parser.add_argument("--std_x_coef", type=float, default=1) parser.add_argument("--std_y_coef", type=float, default=0.5) # ppo parameters parser.add_argument("--ppo_epoch", type=int, default=15, help='number of ppo epochs (default: 15)') parser.add_argument("--use_clipped_value_loss", action='store_false', default=True, help="by default, clip loss value. 
If set, do not clip loss value.") parser.add_argument("--clip_param", type=float, default=0.2, help='ppo clip parameter (default: 0.2)') parser.add_argument("--num_mini_batch", type=int, default=1, help='number of batches for ppo (default: 1)') parser.add_argument("--entropy_coef", type=float, default=0.01, help='entropy term coefficient (default: 0.01)') # todo: lagrangian_coef is the lagrangian coefficient for mappo_lagrangian parser.add_argument("--lamda_lagr", type=float, default=0.78, help='lagrangrian coef coefficient (default: 0.78)') parser.add_argument("--lagrangian_coef_rate", type=float, default=5e-4, help='lagrangrian coef learning rate (default: 5e-4)') parser.add_argument("--lagrangian_coef", type=float, default=0.01, help='entropy term coefficient (default: 0.01)') parser.add_argument("--value_loss_coef", type=float, default=1, help='value loss coefficient (default: 0.5)') parser.add_argument("--use_max_grad_norm", action='store_false', default=True, help="by default, use max norm of gradients. If set, do not use.") parser.add_argument("--max_grad_norm", type=float, default=10.0, help='max norm of gradients (default: 0.5)') parser.add_argument("--use_gae", action='store_false', default=True, help='use generalized advantage estimation') parser.add_argument("--gamma", type=float, default=0.99, help='discount factor for rewards (default: 0.99)') parser.add_argument("--gae_lambda", type=float, default=0.95, help='gae lambda parameter (default: 0.95)') parser.add_argument("--use_proper_time_limits", action='store_true', default=False, help='compute returns taking into account time limits') parser.add_argument("--use_huber_loss", action='store_false', default=True, help="by default, use huber loss. If set, do not use huber loss.") parser.add_argument("--use_value_active_masks", action='store_false', default=True, help="by default True, whether to mask useless data in value loss.") parser.add_argument("--use_policy_active_masks", action='store_false', default=True, help="by default True, whether to mask useless data in policy loss.") parser.add_argument("--huber_delta", type=float, default=10.0, help=" coefficience of huber loss.") # run parameters parser.add_argument("--use_linear_lr_decay", action='store_true', default=False, help='use a linear schedule on the learning rate') # save parameters parser.add_argument("--save_interval", type=int, default=1, help="time duration between contiunous twice models saving.") # log parameters parser.add_argument("--log_interval", type=int, default=5, help="time duration between contiunous twice log printing.") # eval parameters parser.add_argument("--use_eval", action='store_true', default=False, help="by default, do not start evaluation. If set`, start evaluation alongside with training.") parser.add_argument("--eval_interval", type=int, default=25, help="time duration between contiunous twice evaluation progress.") parser.add_argument("--eval_episodes", type=int, default=32, help="number of episodes of a single evaluation.") # render parameters parser.add_argument("--save_gifs", action='store_true', default=False, help="by default, do not save render video. If set, save video.") parser.add_argument("--use_render", action='store_true', default=False, help="by default, do not render the env during training. If set, start render. 
Note: something, the environment has internal render process which is not controlled by this hyperparam.") parser.add_argument("--render_episodes", type=int, default=5, help="the number of episodes to render a given env") parser.add_argument("--ifi", type=float, default=0.1, help="the play interval of each rendered image in saved video.") # pretrained parameters parser.add_argument("--model_dir", type=str, default=None, help="by default None. set the path to pretrained model.") # safe parameters parser.add_argument("--safety_bound", type=float, default=1, help="constraint upper bound") return parser ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/envs/__init__.py ================================================ import socket from absl import flags FLAGS = flags.FLAGS FLAGS(['train_sc.py']) ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/envs/env_wrappers.py ================================================ """ Modified from OpenAI Baselines code to work with multi-agent envs """ import numpy as np import torch from multiprocessing import Process, Pipe from abc import ABC, abstractmethod from mappo_lagrangian.utils.util import tile_images class CloudpickleWrapper(object): """ Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle) """ def __init__(self, x): self.x = x def __getstate__(self): import cloudpickle return cloudpickle.dumps(self.x) def __setstate__(self, ob): import pickle self.x = pickle.loads(ob) class ShareVecEnv(ABC): """ An abstract asynchronous, vectorized environment. Used to batch data from multiple copies of an environment, so that each observation becomes an batch of observations, and expected action is a batch of actions to be applied per-environment. """ closed = False viewer = None metadata = { 'render.modes': ['human', 'rgb_array'] } def __init__(self, num_envs, observation_space, share_observation_space, action_space): self.num_envs = num_envs self.observation_space = observation_space self.share_observation_space = share_observation_space self.action_space = action_space @abstractmethod def reset(self): """ Reset all the environments and return an array of observations, or a dict of observation arrays. If step_async is still doing work, that work will be cancelled and step_wait() should not be called until step_async() is invoked again. """ pass @abstractmethod def step_async(self, actions): """ Tell all the environments to start taking a step with the given actions. Call step_wait() to get the results of the step. You should not call this if a step_async run is already pending. """ pass @abstractmethod def step_wait(self): """ Wait for the step taken with step_async(). Returns (obs, rews, cos, dones, infos): - obs: an array of observations, or a dict of arrays of observations. - rews: an array of rewards - cos: an array of costs - dones: an array of "episode done" booleans - infos: a sequence of info objects """ pass def close_extras(self): """ Clean up the extra resources, beyond what's in this base class. Only runs when not self.closed. """ pass def close(self): if self.closed: return if self.viewer is not None: self.viewer.close() self.close_extras() self.closed = True def step(self, actions): """ Step the environments synchronously. This is available for backwards compatibility. 
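        Illustrative call (variable names and shapes are assumptions): with a
        subprocess-based vectorized safety environment,

            obs, rews, costs, dones, infos = envs.step(actions)

        where actions has shape (num_envs, num_agents, act_dim); costs mirrors rews
        and carries the per-step safety cost that the workers read from info["cost"].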
""" self.step_async(actions) return self.step_wait() def render(self, mode='human'): imgs = self.get_images() bigimg = tile_images(imgs) if mode == 'human': self.get_viewer().imshow(bigimg) return self.get_viewer().isopen elif mode == 'rgb_array': return bigimg else: raise NotImplementedError def get_images(self): """ Return RGB images from each environment """ raise NotImplementedError @property def unwrapped(self): if isinstance(self, VecEnvWrapper): return self.venv.unwrapped else: return self def get_viewer(self): if self.viewer is None: from gym.envs.classic_control import rendering self.viewer = rendering.SimpleImageViewer() return self.viewer def worker(remote, parent_remote, env_fn_wrapper): parent_remote.close() env = env_fn_wrapper.x() while True: cmd, data = remote.recv() if cmd == 'step': ob, reward, done, info = env.step(data) if 'bool' in done.__class__.__name__: if done: ob = env.reset() else: if np.all(done): ob = env.reset() remote.send((ob, reward, info["cost"], done, info)) elif cmd == 'reset': ob = env.reset() remote.send((ob)) elif cmd == 'render': if data == "rgb_array": fr = env.render(mode=data) remote.send(fr) elif data == "human": env.render(mode=data) elif cmd == 'reset_task': ob = env.reset_task() remote.send(ob) elif cmd == 'close': env.close() remote.close() break elif cmd == 'get_spaces': remote.send((env.observation_space, env.share_observation_space, env.action_space)) else: raise NotImplementedError class GuardSubprocVecEnv(ShareVecEnv): def __init__(self, env_fns, spaces=None): """ envs: list of gym environments to run in subprocesses """ self.waiting = False self.closed = False nenvs = len(env_fns) self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)]) self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn))) for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)] for p in self.ps: p.daemon = False # could cause zombie process p.start() for remote in self.work_remotes: remote.close() self.remotes[0].send(('get_spaces', None)) observation_space, share_observation_space, action_space = self.remotes[0].recv() ShareVecEnv.__init__(self, len(env_fns), observation_space, share_observation_space, action_space) def step_async(self, actions): for remote, action in zip(self.remotes, actions): remote.send(('step', action)) self.waiting = True def step_wait(self): results = [remote.recv() for remote in self.remotes] self.waiting = False obs, rews, cos, dones, infos = zip(*results) return np.stack(obs), np.stack(rews), np.stack(cos), np.stack(dones), infos def reset(self): for remote in self.remotes: remote.send(('reset', None)) obs = [remote.recv() for remote in self.remotes] return np.stack(obs) def reset_task(self): for remote in self.remotes: remote.send(('reset_task', None)) return np.stack([remote.recv() for remote in self.remotes]) def close(self): if self.closed: return if self.waiting: for remote in self.remotes: remote.recv() for remote in self.remotes: remote.send(('close', None)) for p in self.ps: p.join() self.closed = True class SubprocVecEnv(ShareVecEnv): def __init__(self, env_fns, spaces=None): """ envs: list of gym environments to run in subprocesses """ self.waiting = False self.closed = False nenvs = len(env_fns) self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)]) self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn))) for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)] for p in self.ps: 
p.daemon = True # if the main process crashes, we should not cause things to hang p.start() for remote in self.work_remotes: remote.close() self.remotes[0].send(('get_spaces', None)) observation_space, share_observation_space, action_space = self.remotes[0].recv() ShareVecEnv.__init__(self, len(env_fns), observation_space, share_observation_space, action_space) def step_async(self, actions): for remote, action in zip(self.remotes, actions): remote.send(('step', action)) self.waiting = True def step_wait(self): results = [remote.recv() for remote in self.remotes] self.waiting = False obs, rews, cos, dones, infos = zip(*results) return np.stack(obs), np.stack(rews), np.stack(cos), np.stack(dones), infos def reset(self): for remote in self.remotes: remote.send(('reset', None)) obs = [remote.recv() for remote in self.remotes] return np.stack(obs) def reset_task(self): for remote in self.remotes: remote.send(('reset_task', None)) return np.stack([remote.recv() for remote in self.remotes]) def close(self): if self.closed: return if self.waiting: for remote in self.remotes: remote.recv() for remote in self.remotes: remote.send(('close', None)) for p in self.ps: p.join() self.closed = True def render(self, mode="rgb_array"): for remote in self.remotes: remote.send(('render', mode)) if mode == "rgb_array": frame = [remote.recv() for remote in self.remotes] return np.stack(frame) def shareworker(remote, parent_remote, env_fn_wrapper): parent_remote.close() env = env_fn_wrapper.x() while True: cmd, data = remote.recv() if cmd == 'step': ob, s_ob, reward, done, info, available_actions = env.step(data) if 'bool' in done.__class__.__name__: if done: ob, s_ob, available_actions = env.reset() else: if np.all(done): ob, s_ob, available_actions = env.reset() remote.send((ob, s_ob, reward, done, info, available_actions)) elif cmd == 'reset': ob, s_ob, available_actions = env.reset() remote.send((ob, s_ob, available_actions)) elif cmd == 'reset_task': ob = env.reset_task() remote.send(ob) elif cmd == 'render': if data == "rgb_array": fr = env.render(mode=data) remote.send(fr) elif data == "human": env.render(mode=data) elif cmd == 'close': env.close() remote.close() break elif cmd == 'get_spaces': remote.send( (env.observation_space, env.share_observation_space, env.action_space)) elif cmd == 'render_vulnerability': fr = env.render_vulnerability(data) remote.send((fr)) elif cmd == 'get_num_agents': remote.send((env.n_agents)) else: raise NotImplementedError class ShareSubprocVecEnv(ShareVecEnv): def __init__(self, env_fns, spaces=None): """ envs: list of gym environments to run in subprocesses """ self.waiting = False self.closed = False nenvs = len(env_fns) self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)]) self.ps = [Process(target=shareworker, args=(work_remote, remote, CloudpickleWrapper(env_fn))) for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)] for p in self.ps: p.daemon = True # if the main process crashes, we should not cause things to hang p.start() for remote in self.work_remotes: remote.close() self.remotes[0].send(('get_num_agents', None)) self.n_agents = self.remotes[0].recv() self.remotes[0].send(('get_spaces', None)) observation_space, share_observation_space, action_space = self.remotes[0].recv( ) # print("wrapper:", share_observation_space) ShareVecEnv.__init__(self, len(env_fns), observation_space, share_observation_space, action_space) def step_async(self, actions): for remote, action in zip(self.remotes, actions): remote.send(('step', 
action)) self.waiting = True def step_wait(self): results = [remote.recv() for remote in self.remotes] self.waiting = False obs, share_obs, rews, dones, infos, available_actions = zip(*results) cost_x= np.array([item[0]['cost'] for item in infos]) # print("=====cost_x=====: ", cost_x.sum()) # print("=====np.stack(dones)=====: ", np.stack(dones)) return np.stack(obs), np.stack(share_obs), np.stack(rews), np.stack(cost_x), np.stack(dones), infos, np.stack(available_actions) def reset(self): for remote in self.remotes: remote.send(('reset', None)) results = [remote.recv() for remote in self.remotes] obs, share_obs, available_actions = zip(*results) return np.stack(obs), np.stack(share_obs), np.stack(available_actions) def reset_task(self): for remote in self.remotes: remote.send(('reset_task', None)) return np.stack([remote.recv() for remote in self.remotes]) def close(self): if self.closed: return if self.waiting: for remote in self.remotes: remote.recv() for remote in self.remotes: remote.send(('close', None)) for p in self.ps: p.join() self.closed = True def choosesimpleworker(remote, parent_remote, env_fn_wrapper): parent_remote.close() env = env_fn_wrapper.x() while True: cmd, data = remote.recv() if cmd == 'step': ob, reward, done, info = env.step(data) remote.send((ob, reward, info["cost"], done, info)) elif cmd == 'reset': ob = env.reset(data) remote.send((ob)) elif cmd == 'reset_task': ob = env.reset_task() remote.send(ob) elif cmd == 'close': env.close() remote.close() break elif cmd == 'render': if data == "rgb_array": fr = env.render(mode=data) remote.send(fr) elif data == "human": env.render(mode=data) elif cmd == 'get_spaces': remote.send( (env.observation_space, env.share_observation_space, env.action_space)) else: raise NotImplementedError class ChooseSimpleSubprocVecEnv(ShareVecEnv): def __init__(self, env_fns, spaces=None): """ envs: list of gym environments to run in subprocesses """ self.waiting = False self.closed = False nenvs = len(env_fns) self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)]) self.ps = [Process(target=choosesimpleworker, args=(work_remote, remote, CloudpickleWrapper(env_fn))) for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)] for p in self.ps: p.daemon = True # if the main process crashes, we should not cause things to hang p.start() for remote in self.work_remotes: remote.close() self.remotes[0].send(('get_spaces', None)) observation_space, share_observation_space, action_space = self.remotes[0].recv() ShareVecEnv.__init__(self, len(env_fns), observation_space, share_observation_space, action_space) def step_async(self, actions): for remote, action in zip(self.remotes, actions): remote.send(('step', action)) self.waiting = True def step_wait(self): results = [remote.recv() for remote in self.remotes] self.waiting = False obs, rews, cos, dones, infos = zip(*results) return np.stack(obs), np.stack(rews), np.stack(cos), np.stack(dones), infos def reset(self, reset_choose): for remote, choose in zip(self.remotes, reset_choose): remote.send(('reset', choose)) obs = [remote.recv() for remote in self.remotes] return np.stack(obs) def render(self, mode="rgb_array"): for remote in self.remotes: remote.send(('render', mode)) if mode == "rgb_array": frame = [remote.recv() for remote in self.remotes] return np.stack(frame) def reset_task(self): for remote in self.remotes: remote.send(('reset_task', None)) return np.stack([remote.recv() for remote in self.remotes]) def close(self): if self.closed: return if 
self.waiting: for remote in self.remotes: remote.recv() for remote in self.remotes: remote.send(('close', None)) for p in self.ps: p.join() self.closed = True def chooseworker(remote, parent_remote, env_fn_wrapper): parent_remote.close() env = env_fn_wrapper.x() while True: cmd, data = remote.recv() if cmd == 'step': ob, s_ob, reward, done, info, available_actions = env.step(data) remote.send((ob, s_ob, reward, info["cost"], done, info, available_actions)) elif cmd == 'reset': ob, s_ob, available_actions = env.reset(data) remote.send((ob, s_ob, available_actions)) elif cmd == 'reset_task': ob = env.reset_task() remote.send(ob) elif cmd == 'close': env.close() remote.close() break elif cmd == 'render': remote.send(env.render(mode='rgb_array')) elif cmd == 'get_spaces': remote.send( (env.observation_space, env.share_observation_space, env.action_space)) else: raise NotImplementedError class ChooseSubprocVecEnv(ShareVecEnv): def __init__(self, env_fns, spaces=None): """ envs: list of gym environments to run in subprocesses """ self.waiting = False self.closed = False nenvs = len(env_fns) self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)]) self.ps = [Process(target=chooseworker, args=(work_remote, remote, CloudpickleWrapper(env_fn))) for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)] for p in self.ps: p.daemon = True # if the main process crashes, we should not cause things to hang p.start() for remote in self.work_remotes: remote.close() self.remotes[0].send(('get_spaces', None)) observation_space, share_observation_space, action_space = self.remotes[0].recv( ) ShareVecEnv.__init__(self, len(env_fns), observation_space, share_observation_space, action_space) def step_async(self, actions): for remote, action in zip(self.remotes, actions): remote.send(('step', action)) self.waiting = True def step_wait(self): results = [remote.recv() for remote in self.remotes] self.waiting = False obs, share_obs, rews, cos, dones, infos, available_actions = zip(*results) return np.stack(obs), np.stack(share_obs), np.stack(rews), np.stack(cos), np.stack(dones), infos, np.stack(available_actions) def reset(self, reset_choose): for remote, choose in zip(self.remotes, reset_choose): remote.send(('reset', choose)) results = [remote.recv() for remote in self.remotes] obs, share_obs, available_actions = zip(*results) return np.stack(obs), np.stack(share_obs), np.stack(available_actions) def reset_task(self): for remote in self.remotes: remote.send(('reset_task', None)) return np.stack([remote.recv() for remote in self.remotes]) def close(self): if self.closed: return if self.waiting: for remote in self.remotes: remote.recv() for remote in self.remotes: remote.send(('close', None)) for p in self.ps: p.join() self.closed = True def chooseguardworker(remote, parent_remote, env_fn_wrapper): parent_remote.close() env = env_fn_wrapper.x() while True: cmd, data = remote.recv() if cmd == 'step': ob, reward, done, info = env.step(data) remote.send((ob, reward, info["cost"], done, info)) elif cmd == 'reset': ob = env.reset(data) remote.send((ob)) elif cmd == 'reset_task': ob = env.reset_task() remote.send(ob) elif cmd == 'close': env.close() remote.close() break elif cmd == 'get_spaces': remote.send( (env.observation_space, env.share_observation_space, env.action_space)) else: raise NotImplementedError class ChooseGuardSubprocVecEnv(ShareVecEnv): def __init__(self, env_fns, spaces=None): """ envs: list of gym environments to run in subprocesses """ self.waiting = False 
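# Note: the chooseguardworker processes spawned here reply to a 'step' command
# with (obs, reward, info["cost"], done, info), so the per-step safety cost is
# returned alongside the reward and step_wait() unpacks an extra cost array
# compared to the standard gym VecEnv interface.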
self.closed = False nenvs = len(env_fns) self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)]) self.ps = [Process(target=chooseguardworker, args=(work_remote, remote, CloudpickleWrapper(env_fn))) for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)] for p in self.ps: p.daemon = False # if the main process crashes, we should not cause things to hang p.start() for remote in self.work_remotes: remote.close() self.remotes[0].send(('get_spaces', None)) observation_space, share_observation_space, action_space = self.remotes[0].recv( ) ShareVecEnv.__init__(self, len(env_fns), observation_space, share_observation_space, action_space) def step_async(self, actions): for remote, action in zip(self.remotes, actions): remote.send(('step', action)) self.waiting = True def step_wait(self): results = [remote.recv() for remote in self.remotes] self.waiting = False obs, rews, cos, dones, infos = zip(*results) return np.stack(obs), np.stack(rews), np.stack(cos), np.stack(dones), infos def reset(self, reset_choose): for remote, choose in zip(self.remotes, reset_choose): remote.send(('reset', choose)) obs = [remote.recv() for remote in self.remotes] return np.stack(obs) def reset_task(self): for remote in self.remotes: remote.send(('reset_task', None)) return np.stack([remote.recv() for remote in self.remotes]) def close(self): if self.closed: return if self.waiting: for remote in self.remotes: remote.recv() for remote in self.remotes: remote.send(('close', None)) for p in self.ps: p.join() self.closed = True # single env class DummyVecEnv(ShareVecEnv): def __init__(self, env_fns): self.envs = [fn() for fn in env_fns] env = self.envs[0] ShareVecEnv.__init__(self, len( env_fns), env.observation_space, env.share_observation_space, env.action_space) self.actions = None def step_async(self, actions): self.actions = actions def step_wait(self): results = [env.step(a) for (a, env) in zip(self.actions, self.envs)] obs, rews, cos, dones, infos = map(np.array, zip(*results)) for (i, done) in enumerate(dones): if 'bool' in done.__class__.__name__: if done: obs[i] = self.envs[i].reset() else: if np.all(done): obs[i] = self.envs[i].reset() self.actions = None return obs, rews, cos, dones, infos def reset(self): obs = [env.reset() for env in self.envs] return np.array(obs) def close(self): for env in self.envs: env.close() def render(self, mode="human"): if mode == "rgb_array": return np.array([env.render(mode=mode) for env in self.envs]) elif mode == "human": for env in self.envs: env.render(mode=mode) else: raise NotImplementedError class ShareDummyVecEnv(ShareVecEnv): def __init__(self, env_fns): self.envs = [fn() for fn in env_fns] env = self.envs[0] ShareVecEnv.__init__(self, len( env_fns), env.observation_space, env.share_observation_space, env.action_space) self.actions = None def step_async(self, actions): self.actions = actions def step_wait(self): results = [env.step(a) for (a, env) in zip(self.actions, self.envs)] obs, share_obs, rews, cos, dones, infos, available_actions = map( np.array, zip(*results)) for (i, done) in enumerate(dones): if 'bool' in done.__class__.__name__: if done: obs[i], share_obs[i], available_actions[i] = self.envs[i].reset() else: if np.all(done): obs[i], share_obs[i], available_actions[i] = self.envs[i].reset() self.actions = None return obs, share_obs, rews, cos, dones, infos, available_actions def reset(self): results = [env.reset() for env in self.envs] obs, share_obs, available_actions = map(np.array, zip(*results)) return obs, 
share_obs, available_actions def close(self): for env in self.envs: env.close() def render(self, mode="human"): if mode == "rgb_array": return np.array([env.render(mode=mode) for env in self.envs]) elif mode == "human": for env in self.envs: env.render(mode=mode) else: raise NotImplementedError class ChooseDummyVecEnv(ShareVecEnv): def __init__(self, env_fns): self.envs = [fn() for fn in env_fns] env = self.envs[0] ShareVecEnv.__init__(self, len( env_fns), env.observation_space, env.share_observation_space, env.action_space) self.actions = None def step_async(self, actions): self.actions = actions def step_wait(self): results = [env.step(a) for (a, env) in zip(self.actions, self.envs)] obs, share_obs, rews, cos, dones, infos, available_actions = map( np.array, zip(*results)) self.actions = None return obs, share_obs, rews, cos, dones, infos, available_actions def reset(self, reset_choose): results = [env.reset(choose) for (env, choose) in zip(self.envs, reset_choose)] obs, share_obs, available_actions = map(np.array, zip(*results)) return obs, share_obs, available_actions def close(self): for env in self.envs: env.close() def render(self, mode="human"): if mode == "rgb_array": return np.array([env.render(mode=mode) for env in self.envs]) elif mode == "human": for env in self.envs: env.render(mode=mode) else: raise NotImplementedError class ChooseSimpleDummyVecEnv(ShareVecEnv): def __init__(self, env_fns): self.envs = [fn() for fn in env_fns] env = self.envs[0] ShareVecEnv.__init__(self, len( env_fns), env.observation_space, env.share_observation_space, env.action_space) self.actions = None def step_async(self, actions): self.actions = actions def step_wait(self): results = [env.step(a) for (a, env) in zip(self.actions, self.envs)] obs, rews, cos, dones, infos = map(np.array, zip(*results)) self.actions = None return obs, rews, cos, dones, infos def reset(self, reset_choose): obs = [env.reset(choose) for (env, choose) in zip(self.envs, reset_choose)] return np.array(obs) def close(self): for env in self.envs: env.close() def render(self, mode="human"): if mode == "rgb_array": return np.array([env.render(mode=mode) for env in self.envs]) elif mode == "human": for env in self.envs: env.render(mode=mode) else: raise NotImplementedError ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/MUJOCO_LOG.TXT ================================================ Sun Aug 29 11:16:41 2021 ERROR: Expired activation key ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/README.md ================================================ #### Safety Multi-agent Mujoco ## 1. 
Safe Many-Agent Ant

Following Zanger et al. [1], the reward function matches that of the standard Ant-v2 environment: it comprises the torso velocity along the global x-direction, a negative control cost on the exerted torque, a negative contact cost, and a constant survival bonus, which results in

```python
xposafter = self.get_body_com("torso_0")[0]
forward_reward = (xposafter - xposbefore)/self.dt
ctrl_cost = .5 * np.square(a).sum()
contact_cost = 0.5 * 1e-3 * np.sum(np.square(np.clip(self.sim.data.cfrc_ext, -1, 1)))
survive_reward = 1.0
reward = forward_reward - ctrl_cost - contact_cost + survive_reward
```

The safety cost combines a wall-proximity term and a termination (falling-over) term:

```python
yposafter = self.get_body_com("torso_0")[1]
ywall = np.array([-5, 5])
if xposafter < 20:
    y_walldist = yposafter - xposafter * np.tan(30 / 360 * 2 * np.pi) + ywall
elif xposafter > 20 and xposafter < 60:
    y_walldist = yposafter + (xposafter - 40) * np.tan(30 / 360 * 2 * np.pi) - ywall
elif xposafter > 60 and xposafter < 100:
    y_walldist = yposafter - (xposafter - 80) * np.tan(30 / 360 * 2 * np.pi) + ywall
else:
    y_walldist = yposafter - 20 * np.tan(30 / 360 * 2 * np.pi) + ywall
obj_cost = (abs(y_walldist) < 1.8).any() * 1.0

body_quat = self.data.get_body_xquat('torso_0')
z_rot = 1 - 2 * (body_quat[1]**2 + body_quat[2]**2)  ### normally xx-rotation, not sure what axes mujoco uses
state = self.state_vector()
notdone = np.isfinite(state).all() \
    and state[2] >= 0.2 and state[2] <= 1.0 \
    and z_rot >= -0.7  # ADDED
done = not notdone
done_cost = done * 1.0
cost = np.clip(obj_cost + done_cost, 0, 1)
```

[1] Zanger, Moritz A., Karam Daaboul, and J. Marius Zöllner. 2021. "Safe Continuous Control with Constrained Model-Based Policy Optimization." arXiv [cs.LG]. http://arxiv.org/abs/2104.06922.
================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/__init__.py ================================================
================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/__init__.py ================================================
from .mujoco_multi import MujocoMulti
from .coupled_half_cheetah import CoupledHalfCheetah
from .manyagent_swimmer import ManyAgentSwimmerEnv
from .manyagent_ant import ManyAgentAntEnv
================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/ant.py ================================================
import numpy as np # from mujoco_safety_gym.envs import mujoco_env from mappo_lagrangian.envs.safety_ma_mujoco.safety_multiagent_mujoco import mujoco_env from gym import utils import mujoco_py as mjp class AntEnv(mujoco_env.MujocoEnv, utils.EzPickle): def __init__(self, **kwargs): mujoco_env.MujocoEnv.__init__(self, 'ant.xml', 5) utils.EzPickle.__init__(self) def step(self, a): xposbefore = self.get_body_com("torso")[0] self.do_simulation(a, self.frame_skip) mjp.functions.mj_rnePostConstraint(self.sim.model, self.sim.data) #### calc contacts, this is a mujoco py version mismatch issue with mujoco200 xposafter = self.get_body_com("torso")[0] forward_reward = (xposafter - xposbefore) / self.dt ctrl_cost = .5 * np.square(a).sum() contact_cost = 0.5 * 1e-3 * np.sum( np.square(np.clip(self.sim.data.cfrc_ext, -1, 1))) survive_reward = 1.0 ### safety stuff yposafter = self.get_body_com("torso")[1] ywall = np.array([-5, 5]) if xposafter < 20: y_walldist = yposafter - xposafter * np.tan(30 / 360 * 2 * np.pi) + ywall elif xposafter > 20 and xposafter < 60: y_walldist = yposafter +
(xposafter - 40) * np.tan(30 / 360 * 2 * np.pi) - ywall elif xposafter > 60 and xposafter < 100: y_walldist = yposafter - (xposafter - 80) * np.tan(30 / 360 * 2 * np.pi) + ywall else: y_walldist = yposafter - 20 * np.tan(30 / 360 * 2 * np.pi) + ywall obj_cost = (abs(y_walldist) < 1.8).any() * 1.0 reward = forward_reward - ctrl_cost - contact_cost + survive_reward body_quat = self.data.get_body_xquat('torso') z_rot = 1 - 2 * ( body_quat[1] ** 2 + body_quat[2] ** 2) ### normally xx-rotation, not sure what axes mujoco uses state = self.state_vector() notdone = np.isfinite(state).all() \ and state[2] >= 0.2 and state[2] <= 1.0 \ and z_rot >= -0.7 done = not notdone done_cost = done * 1.0 cost = np.clip(obj_cost + done_cost, 0, 1) ob = self._get_obs() return ob, reward, done, dict( reward_forward=forward_reward, reward_ctrl=-ctrl_cost, reward_contact=-contact_cost, reward_survive=survive_reward, cost_obj=obj_cost, cost_done=done_cost, cost=cost, ) def _get_obs(self): x = self.sim.data.qpos.flat[0] y = self.sim.data.qpos.flat[1] if x < 20: y_off = y - x * np.tan(30 / 360 * 2 * np.pi) elif x > 20 and x < 60: y_off = y + (x - 40) * np.tan(30 / 360 * 2 * np.pi) elif x > 60 and x < 100: y_off = y - (x - 80) * np.tan(30 / 360 * 2 * np.pi) else: y_off = y - 20 * np.tan(30 / 360 * 2 * np.pi) return np.concatenate([ self.sim.data.qpos.flat[2:-42], self.sim.data.qvel.flat[:-36], [x / 5], [y_off], # np.clip(self.sim.data.cfrc_ext, -1, 1).flat, ]) def reset_model(self): qpos = self.init_qpos + self.np_random.uniform(size=self.model.nq, low=-.1, high=.1) qpos[-42:] = self.init_qpos[-42:] qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1 qvel[-36:] = self.init_qvel[-36:] self.set_state(qpos, qvel) return self._get_obs() def viewer_setup(self): self.viewer.cam.distance = self.model.stat.extent * 0.5 ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/.gitignore ================================================ *.auto.xml ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/__init__.py ================================================ ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/ant.xml ================================================ ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/beifen_hopper.xml ================================================ ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/coupled_half_cheetah.xml ================================================ ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/half_cheetah.xml ================================================ ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/hopper.xml ================================================ ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/humanoid.xml ================================================ ================================================ FILE: 
MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/manyagent_ant.xml ================================================ ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/manyagent_ant.xml.template ================================================ ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/manyagent_ant__stage1.xml ================================================ ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/manyagent_swimmer.xml.template ================================================ ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/manyagent_swimmer__bckp2.xml ================================================ ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/manyagent_swimmer_bckp.xml ================================================ ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/coupled_half_cheetah.py ================================================ import numpy as np from gym import utils from gym.envs.mujoco import mujoco_env from mappo_lagrangian.envs.safety_ma_mujoco.safety_multiagent_mujoco import mujoco_env import os import mujoco_py as mjp from gym import error, spaces class CoupledHalfCheetah(mujoco_env.MujocoEnv, utils.EzPickle): def __init__(self, **kwargs): mujoco_env.MujocoEnv.__init__(self, os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets', 'coupled_half_cheetah.xml'), 5) utils.EzPickle.__init__(self) def step(self, action): #ADDED # xposbefore = self.sim.data.qpos[1] # t = self.data.time # wall_act = .02 * np.sin(t / 3) ** 2 - .004 # mjp.functions.mj_rnePostConstraint(self.sim.model, # self.sim.data) #### calc contacts, this is a mujoco py version mismatch issue with mujoco200 # action_p_wall = np.concatenate((np.squeeze(action), [wall_act])) # self.do_simulation(action_p_wall, self.frame_skip) # xposafter = self.sim.data.qpos[1] # wallpos = self.data.get_geom_xpos("obj_geom")[0] # wallvel = self.data.get_body_xvelp("obj1")[0] # xdist = wallpos - xposafter # obj_cost = int(np.abs(xdist) < 2) # if obj_cost > 0: # self.model.geom_rgba[9] = [1.0, 0, 0, 1.0] # else: # self.model.geom_rgba[9] = [1.0, 0.5, 0.5, .8] # ob = self._get_obs() # reward_ctrl = - 0.1 * np.square(action).sum() # reward_run = (xposafter - xposbefore) / self.dt # reward = reward_ctrl + reward_run # done = False # xposbefore1 = self.sim.data.qpos[0] # xposbefore2 = self.sim.data.qpos[len(self.sim.data.qpos) // 2] # print("self.sim.data.qpos", self.sim.data.qpos) xposbefore1 = self.get_body_com("torso")[0] xposbefore2 = self.get_body_com("torso2")[0] yposbefore1 = self.get_body_com("torso")[1] yposbefore2 = self.get_body_com("torso2")[1] # ADDED t = self.data.time wall_act = .02 * np.sin(t / 3) ** 2 - .004 mjp.functions.mj_rnePostConstraint(self.sim.model, self.sim.data) #### calc contacts, this is a mujoco py version mismatch issue with mujoco200 action_p_wall = np.concatenate((np.squeeze(action), [wall_act])) # print("action_p_wall", np.array(action_p_wall).shape) # print("action", 
np.array(action).shape) # print("self.frame_skip", self.frame_skip) self.do_simulation(action_p_wall, self.frame_skip) # self.do_simulation(action, self.frame_skip) # xposafter1 = self.sim.data.qpos[0] # xposafter2 = self.sim.data.qpos[len(self.sim.data.qpos)//2] xposafter1 = self.get_body_com("torso")[0] xposafter2 = self.get_body_com("torso2")[0] yposafter1 = self.get_body_com("torso")[1] yposafter2 = self.get_body_com("torso2")[1] # ADDED wallpos = self.data.get_geom_xpos("obj_geom")[0] # wallpos1 = self.data.get_geom_xpos("obj_geom1")[0] y_wallpos1 = self.data.get_geom_xpos("wall1")[1] y_wallpos2 = self.data.get_geom_xpos("wall2")[1] # print("x_wallpos1 = self.data.get_geom_xpos", x_wallpos1) # print("x_wallpos2 = self.data.get_geom_xpos", x_wallpos2) wallvel = self.data.get_body_xvelp("obj1")[0] xdist = np.abs(wallpos - xposafter1) #+ np.abs(wallpos - xposafter2) #+ (wallpos1 - xposafter1) + (wallpos1 - xposafter2) obj_cost = 0 # or int(np.abs(wallpos1 - xposafter2) < 5) or int(np.abs(wallpos1 - xposafter2) < 5)\ # if int(np.abs(wallpos - xposafter1) < 5) or int(np.abs(wallpos - xposafter2) < 5) \ or int(np.abs(y_wallpos1 - yposafter1) < 5) or int(np.abs(y_wallpos2 - yposafter2) < 5): obj_cost = 1 # obj_cost = int(np.abs(xdist) < 5) # print("xposbefore1", xposbefore1) # print("xposbefore2", xposbefore2) # print("yposafter1", yposafter1) # print("yposafter2", yposafter2) # print("np.abs(x_wallpos1 - yposafter1)", np.abs(x_wallpos1 - yposafter1)) # print("xposafter1", xposafter1) # print("xposafter2", xposafter2) # print("wallpos", wallpos) # print("wallpos1", wallpos1) # print("xdist", xdist) # print("(wallpos1 - xposafter2)", (wallpos1 - xposafter2)) # print("(wallpos - xposafter1)", (wallpos - xposafter1)) # print("(wallpos - xposafter2)", (wallpos - xposafter2)) if obj_cost > 0: self.model.geom_rgba[9] = [1.0, 0, 0, 1.0] else: self.model.geom_rgba[9] = [1.0, 0.5, 0.5, .8] ob = self._get_obs() ob = self._get_obs() reward_ctrl1 = - 0.1 * np.square(action[0:len(action)//2]).sum() reward_ctrl2 = - 0.1 * np.square(action[len(action)//2:]).sum() reward_run1 = (xposafter1 - xposbefore1)/self.dt reward_run2 = (xposafter2 - xposbefore2) / self.dt reward = (reward_ctrl1 + reward_ctrl2)/2.0 + (reward_run1 + reward_run2)/2.0 done = False return ob, reward, done, dict(cost=obj_cost, reward_run1=reward_run1, reward_ctrl1=reward_ctrl1, reward_run2=reward_run2, reward_ctrl2=reward_ctrl2) def _get_obs(self): #AADED wallvel = self.data.get_body_xvelp("obj1")[0] wall_f = .02 * np.sin(self.data.time / 3) ** 2 - .004 xdist = (self.data.get_geom_xpos("obj_geom")[0] - self.sim.data.qpos[1]) / 10 return np.concatenate([ self.sim.data.qpos.flat[2:], self.sim.data.qvel.flat[1:], [wallvel], [wall_f], np.clip([xdist], -5, 5), ]) # return np.concatenate([ # self.sim.data.qpos.flat[1:], # self.sim.data.qvel.flat, # ]) def reset_model(self): qpos = self.init_qpos + self.np_random.uniform(low=-.1, high=.1, size=self.model.nq) qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1 self.set_state(qpos, qvel) return self._get_obs() def viewer_setup(self): self.viewer.cam.distance = self.model.stat.extent * 0.5 def get_env_info(self): return {"episode_limit": self.episode_limit} def _set_action_space(self): bounds = self.model.actuator_ctrlrange.copy().astype(np.float32) low, high = bounds.T low, high = low[:-1], high[:-1] self.action_space = spaces.Box(low=low, high=high, dtype=np.float32) return self.action_space ================================================ FILE: 
MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/half_cheetah.py ================================================ import numpy as np from gym import utils # from mujoco_safety_gym.envs import mujoco_env # from gym.envs.mujoco import mujoco_env from mappo_lagrangian.envs.safety_ma_mujoco.safety_multiagent_mujoco import mujoco_env import mujoco_py as mjp from gym import error, spaces class HalfCheetahEnv(mujoco_env.MujocoEnv, utils.EzPickle): def __init__(self, **kwargs): # print("half_aaaa") mujoco_env.MujocoEnv.__init__(self, 'half_cheetah.xml', 5) utils.EzPickle.__init__(self) def step(self, action): xposbefore = self.sim.data.qpos[1] t = self.data.time wall_act = .02 * np.sin(t / 3) ** 2 - .004 mjp.functions.mj_rnePostConstraint(self.sim.model, self.sim.data) #### calc contacts, this is a mujoco py version mismatch issue with mujoco200 action_p_wall = np.concatenate((np.squeeze(action), [wall_act])) self.do_simulation(action_p_wall, self.frame_skip) xposafter = self.sim.data.qpos[1] wallpos = self.data.get_geom_xpos("obj_geom")[0] wallvel = self.data.get_body_xvelp("obj1")[0] xdist = wallpos - xposafter # print("wallpos", wallpos) # print("xposafter", xposafter) # print("xdist", xdist) obj_cost = int(np.abs(xdist) < 9) if obj_cost > 0: self.model.geom_rgba[9] = [1.0, 0, 0, 1.0] else: self.model.geom_rgba[9] = [1.0, 0.5, 0.5, .8] ob = self._get_obs() reward_ctrl = - 0.1 * np.square(action).sum() reward_run = (xposafter - xposbefore) / self.dt reward = reward_ctrl + reward_run cost = obj_cost # print("cost1", cost) done = False return ob, reward, done, dict(cost=cost, reward_run=reward_run, reward_ctrl=reward_ctrl) def _get_obs(self): wallvel = self.data.get_body_xvelp("obj1")[0] wall_f = .02 * np.sin(self.data.time / 3) ** 2 - .004 xdist = (self.data.get_geom_xpos("obj_geom")[0] - self.sim.data.qpos[1]) / 10 return np.concatenate([ self.sim.data.qpos.flat[2:], self.sim.data.qvel.flat[1:], [wallvel], [wall_f], np.clip([xdist], -5, 5), ]) def reset_model(self): qpos = self.init_qpos + self.np_random.uniform(low=-.1, high=.1, size=self.model.nq) qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1 self.set_state(qpos, qvel) return self._get_obs() def viewer_setup(self): self.viewer.cam.distance = self.model.stat.extent * 0.5 def _set_action_space(self): bounds = self.model.actuator_ctrlrange.copy().astype(np.float32) low, high = bounds.T low, high = low[:-1], high[:-1] self.action_space = spaces.Box(low=low, high=high, dtype=np.float32) return self.action_space ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/hopper.py ================================================ import numpy as np from mappo_lagrangian.envs.safety_ma_mujoco.safety_multiagent_mujoco import mujoco_env from gym import utils import mujoco_py as mjp class HopperEnv(mujoco_env.MujocoEnv, utils.EzPickle): def __init__(self, **kwargs): mujoco_env.MujocoEnv.__init__(self, 'hopper.xml', 4) utils.EzPickle.__init__(self) self.last_mocx = 5 #### vel readings are super noisy for mocap weld def step(self, a): posbefore = self.sim.data.qpos[3] t = self.data.time pos = (t + np.sin(t)) + 3 self.data.set_mocap_pos('mocap1', [pos, 0, 0.5]) mjp.functions.mj_rnePostConstraint(self.sim.model, self.sim.data) #### calc contacts, this is a mujoco py version mismatch issue with mujoco200 self.do_simulation(a, self.frame_skip) posafter, height, ang = self.sim.data.qpos[3:6] alive_bonus = 1.0 mocapx = 
self.sim.data.qpos[0] xdist = mocapx - posafter cost = int(np.abs(xdist) < 1) reward = (posafter - posbefore) / self.dt reward += alive_bonus reward -= 1e-3 * np.square(a).sum() s = self.state_vector() # done = not (np.isfinite(s).all() and (np.abs(s[5:]) < 100).all() and # (height > .7) and (abs(ang) < .2)) done = not ( np.isfinite(s).all() and (np.abs(s[2:]) < 100).all() and (height > 0.7) and (abs(ang) < 0.2) ) print("np.isfinite(s).all()", np.isfinite(s).all()) print("np.abs(s[5:])", (np.abs(s[2:]) < 100).all()) print("height", (height > 0.7)) print("abs(ang) ", (abs(ang) < 0.2)) ob = self._get_obs() return ob, reward, done, dict(cost=cost) def _get_obs(self): x = self.sim.data.qpos[3] mocapx = self.sim.data.qpos[0] mocvel = 1 + np.cos(self.data.time) mocacc = -np.sin(self.data.time) return np.concatenate([ self.sim.data.qpos.flat[4:], np.clip(self.sim.data.qvel[3:].flat, -10, 10), [mocvel], [mocacc], [mocapx - x], ]) def reset_model(self): qpos = self.init_qpos + self.np_random.uniform(low=-.005, high=.005, size=self.model.nq) qvel = self.init_qvel + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv) self.set_state(qpos, qvel) return self._get_obs() def last_mocap_x(self): return self.last_mocx def viewer_setup(self): self.viewer.cam.trackbodyid = 2 self.viewer.cam.distance = self.model.stat.extent * 0.75 self.viewer.cam.lookat[2] = 1.15 self.viewer.cam.elevation = -20 ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/humanoid.py ================================================ import numpy as np # from mujoco_safety_gym.envs import mujoco_env from mappo_lagrangian.envs.safety_ma_mujoco.safety_multiagent_mujoco import mujoco_env from gym import utils import mujoco_py as mjp def mass_center(model, sim): mass = np.expand_dims(model.body_mass, 1) xpos = sim.data.xipos return (np.sum(mass * xpos, 0) / np.sum(mass))[0] class HumanoidEnv(mujoco_env.MujocoEnv, utils.EzPickle): def __init__(self, **kwargs): mujoco_env.MujocoEnv.__init__(self, 'humanoid.xml', 5) utils.EzPickle.__init__(self) def _get_obs(self): data = self.sim.data x = data.qpos.flat[0] y = data.qpos.flat[1] if x < 20: y_off = y - x * np.tan(30 / 360 * 2 * np.pi) elif x > 20 and x < 60: y_off = y + (x - 40) * np.tan(30 / 360 * 2 * np.pi) elif x > 60 and x < 100: y_off = y - (x - 80) * np.tan(30 / 360 * 2 * np.pi) else: y_off = y - 20 * np.tan(30 / 360 * 2 * np.pi) return np.concatenate([data.qpos.flat[2:-42], data.qvel.flat[:-36], [x / 5], [y_off]]) # return np.concatenate([data.qpos.flat[2:], # data.qvel.flat, # data.cinert.flat, # data.cvel.flat, # data.qfrc_actuator.flat, # data.cfrc_ext.flat]) def step(self, a): pos_before = mass_center(self.model, self.sim) self.do_simulation(a, self.frame_skip) mjp.functions.mj_rnePostConstraint(self.sim.model, self.sim.data) #### calc contacts, this is a mujoco py version mismatch issue with mujoco200 pos_after = mass_center(self.model, self.sim) alive_bonus = 5.0 data = self.sim.data lin_vel_cost = 1.25 * (pos_after - pos_before) / self.dt quad_ctrl_cost = 0.1 * np.square(data.ctrl).sum() quad_impact_cost = .5e-6 * np.square(data.cfrc_ext).sum() quad_impact_cost = min(quad_impact_cost, 10) reward = lin_vel_cost - quad_ctrl_cost - quad_impact_cost + alive_bonus yposafter = self.get_body_com("torso")[1] ywall = np.array([-2.3, 2.3]) if pos_after < 20: y_walldist = yposafter - pos_after * np.tan(30 / 360 * 2 * np.pi) + ywall elif pos_after > 20 and pos_after < 60: y_walldist = yposafter + 
(pos_after - 40) * np.tan(30 / 360 * 2 * np.pi) - ywall elif pos_after > 60 and pos_after < 100: y_walldist = yposafter - (pos_after - 80) * np.tan(30 / 360 * 2 * np.pi) + ywall else: y_walldist = yposafter - 20 * np.tan(30 / 360 * 2 * np.pi) + ywall obj_cost = (abs(y_walldist) < 1.8).any() * 1.0 qpos = self.sim.data.qpos done = bool((qpos[2] < 1.0) or (qpos[2] > 2.0)) done_cost = done * 1.0 cost = np.clip(obj_cost + done_cost, 0, 1) return self._get_obs(), reward, done, dict(reward_linvel=lin_vel_cost, reward_quadctrl=-quad_ctrl_cost, reward_alive=alive_bonus, reward_impact=-quad_impact_cost, cost_obj=obj_cost, cost_done=done_cost, cost=cost, ) def reset_model(self): c = 0.01 # self.set_state( # self.init_qpos + self.np_random.uniform(low=-c, high=c, size=self.model.nq), # self.init_qvel + self.np_random.uniform(low=-c, high=c, size=self.model.nv,) # ) # return self._get_obs() qpos = self.init_qpos + self.np_random.uniform(low=-c, high=c, size=self.model.nq) qpos[-42:] = self.init_qpos[-42:] qvel = self.init_qvel + self.np_random.uniform(low=-c, high=c, size=self.model.nv, ) qvel[-36:] = self.init_qvel[-36:] self.set_state(qpos, qvel) return self._get_obs() def viewer_setup(self): self.viewer.cam.trackbodyid = 1 self.viewer.cam.distance = self.model.stat.extent * 1.0 self.viewer.cam.lookat[2] = 2.0 self.viewer.cam.elevation = -20 ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/manyagent_ant.py ================================================ import numpy as np from gym import utils from gym.envs.mujoco import mujoco_env from jinja2 import Template import mujoco_py as mjp import os class ManyAgentAntEnv(mujoco_env.MujocoEnv, utils.EzPickle): def __init__(self, **kwargs): # Return Flag: Distinguish the mujoco and Wrapper env. 
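# The "agent_conf" keyword encodes the agent/segment split as
# "<n_agents>x<n_segments_per_agent>". A minimal illustration (the value "2x3"
# below is only an example, not a fixed default):
#     n_agents, n_segs_per_agents = (int(v) for v in "2x3".split("x"))  # -> 2, 3
#     n_segs = n_agents * n_segs_per_agents                             # -> 6 segments in the auto-generated asset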
self.rflag = 0 agent_conf = kwargs.get("agent_conf") n_agents = int(agent_conf.split("x")[0]) n_segs_per_agents = int(agent_conf.split("x")[1]) n_segs = n_agents * n_segs_per_agents # Check whether asset file exists already, otherwise create it asset_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets', 'manyagent_ant_{}_agents_each_{}_segments.auto.xml'.format(n_agents, n_segs_per_agents)) # if not os.path.exists(asset_path): # print("Auto-Generating Manyagent Ant asset with {} segments at {}.".format(n_segs, asset_path)) self._generate_asset(n_segs=n_segs, asset_path=asset_path) #asset_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets',git p # 'manyagent_swimmer.xml') mujoco_env.MujocoEnv.__init__(self, asset_path, 4) utils.EzPickle.__init__(self) def _generate_asset(self, n_segs, asset_path): template_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets', 'manyagent_ant.xml.template') with open(template_path, "r") as f: t = Template(f.read()) body_str_template = """ """ body_close_str_template ="\n" actuator_str_template = """\t \n""" body_str = "" for i in range(1,n_segs): body_str += body_str_template.format(*([i]*16)) body_str += body_close_str_template*(n_segs-1) actuator_str = "" for i in range(n_segs): actuator_str += actuator_str_template.format(*([i]*8)) rt = t.render(body=body_str, actuators=actuator_str) with open(asset_path, "w") as f: f.write(rt) pass def step(self, a): xposbefore = self.get_body_com("torso_0")[0] self.do_simulation(a, self.frame_skip) #ADDED mjp.functions.mj_rnePostConstraint(self.sim.model, self.sim.data) #### calc contacts, this is a mujoco py version mismatch issue with mujoco200 xposafter = self.get_body_com("torso_0")[0] forward_reward = (xposafter - xposbefore)/self.dt ctrl_cost = .5 * np.square(a).sum() contact_cost = 0.5 * 1e-3 * np.sum( np.square(np.clip(self.sim.data.cfrc_ext, -1, 1))) survive_reward = 1.0 ### ADDED safety stuff yposafter = self.get_body_com("torso_0")[1] ywall = np.array([-4.5, 4.5]) if xposafter < 20: y_walldist = yposafter - xposafter * np.tan(30 / 360 * 2 * np.pi) + ywall elif xposafter>20 and xposafter<60: y_walldist = yposafter + (xposafter-40)*np.tan(30/360*2*np.pi) - ywall elif xposafter>60 and xposafter<100: y_walldist = yposafter - (xposafter-80)*np.tan(30/360*2*np.pi) + ywall else: y_walldist = yposafter - 20*np.tan(30/360*2*np.pi) + ywall obj_cost = (abs(y_walldist) < 1.8).any() * 1.0 reward = forward_reward - ctrl_cost - contact_cost + survive_reward #### ADDED body_quat = self.data.get_body_xquat('torso_0') z_rot = 1-2*(body_quat[1]**2+body_quat[2]**2) ### normally xx-rotation, not sure what axes mujoco uses state = self.state_vector() notdone = np.isfinite(state).all() \ and state[2] >= 0.2 and state[2] <= 1.0\ and z_rot>=-0.7 #ADDED done = not notdone #ADDED done_cost = done * 1.0 cost = np.clip(obj_cost + done_cost, 0, 1) ob = self._get_obs() if self.rflag == 0: self.rflag += 1 return ob, reward, done, dict( cost=cost, reward_forward=forward_reward, # reward_ctrl=-ctrl_cost, reward_contact=-contact_cost, reward_survive=survive_reward, cost_obj=obj_cost, # ADDED cost_done=done_cost, # ADDED ) else: return ob, reward, done, dict( cost=cost, reward_forward=forward_reward, # cost = cost, reward_ctrl=-ctrl_cost, reward_contact=-contact_cost, reward_survive=survive_reward, cost_obj=obj_cost, #ADDED cost_done=done_cost, #ADDED ) def _get_obs(self): x = self.sim.data.qpos.flat[0] #ADDED y = self.sim.data.qpos.flat[1] #ADDED #ADDED if x<20: y_off = y - 
x*np.tan(30/360*2*np.pi) elif x>20 and x<60: y_off = y + (x-40)*np.tan(30/360*2*np.pi) elif x>60 and x<100: y_off = y - (x-80)*np.tan(30/360*2*np.pi) else: y_off = y - 20*np.tan(30/360*2*np.pi) # return np.concatenate([ # self.sim.data.qpos.flat[2:], # self.sim.data.qvel.flat, # # np.clip(self.sim.data.cfrc_ext, -1, 1).flat, # ]) return np.concatenate([ self.sim.data.qpos.flat[2:-42], # size = 3 self.sim.data.qvel.flat[:-36], # size = 6 [x/5], [y_off], # np.clip(self.sim.data.cfrc_ext, -1, 1).flat, ]) # def reset_model(self): # qpos = self.init_qpos + self.np_random.uniform(size=self.model.nq, low=-.1, high=.1) # qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1 # self.set_state(qpos, qvel) # return self._get_obs() def reset_model(self): qpos = self.init_qpos + self.np_random.uniform(size=self.model.nq, low=-.1, high=.1) qpos[-42:] = self.init_qpos[-42:] qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1 qvel[-36:] = self.init_qvel[-36:] self.set_state(qpos, qvel) return self._get_obs() def viewer_setup(self): self.viewer.cam.distance = self.model.stat.extent * 0.5 ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/manyagent_swimmer.py ================================================ import numpy as np from gym import utils from gym.envs.mujoco import mujoco_env import os from jinja2 import Template import mujoco_py as mjp class ManyAgentSwimmerEnv(mujoco_env.MujocoEnv, utils.EzPickle): def __init__(self, **kwargs): agent_conf = kwargs.get("agent_conf") n_agents = int(agent_conf.split("x")[0]) n_segs_per_agents = int(agent_conf.split("x")[1]) n_segs = n_agents * n_segs_per_agents # Check whether asset file exists already, otherwise create it asset_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets', 'manyagent_swimmer_{}_agents_each_{}_segments.auto.xml'.format(n_agents, n_segs_per_agents)) # if not os.path.exists(asset_path): print("Auto-Generating Manyagent Swimmer asset with {} segments at {}.".format(n_segs, asset_path)) self._generate_asset(n_segs=n_segs, asset_path=asset_path) #asset_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets',git p # 'manyagent_swimmer.xml') mujoco_env.MujocoEnv.__init__(self, asset_path, 4) utils.EzPickle.__init__(self) def _generate_asset(self, n_segs, asset_path): template_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets', 'manyagent_swimmer.xml.template') with open(template_path, "r") as f: t = Template(f.read()) body_str_template = """ """ body_end_str_template = """ """ body_close_str_template ="\n" actuator_str_template = """\t \n""" body_str = "" for i in range(1,n_segs-1): body_str += body_str_template.format(i, (-1)**(i+1), i) body_str += body_end_str_template.format(n_segs-1) body_str += body_close_str_template*(n_segs-2) actuator_str = "" for i in range(n_segs): actuator_str += actuator_str_template.format(i) rt = t.render(body=body_str, actuators=actuator_str) with open(asset_path, "w") as f: f.write(rt) pass def step(self, a): # ctrl_cost_coeff = 0.0001 # xposbefore = self.sim.data.qpos[0] # self.do_simulation(a, self.frame_skip) # xposafter = self.sim.data.qpos[0] # reward_fwd = (xposafter - xposbefore) / self.dt # reward_ctrl = -ctrl_cost_coeff * np.square(a).sum() # reward = reward_fwd + reward_ctrl ctrl_cost_coeff = 0.0001 xposbefore = self.sim.data.qpos[0] # yposbefore = self.sim.data.qpos[1] self.do_simulation(a, self.frame_skip) # ADDED 
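# As in the other environments in this package (see ant.py / half_cheetah.py),
# mj_rnePostConstraint is invoked right after do_simulation to recompute the
# constraint/contact quantities such as cfrc_ext; the comments elsewhere in the
# repo attribute the need for this explicit call to a mujoco-py / mujoco200
# version mismatch.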
mjp.functions.mj_rnePostConstraint(self.sim.model, self.sim.data) xposafter = self.sim.data.qpos[0] # yposbefore = self.sim.data.qpos[1] y_wallpos1 = self.data.get_geom_xpos("wall1")[1] y_wallpos2 = self.data.get_geom_xpos("wall2")[1] reward_fwd = (xposafter - xposbefore) / self.dt reward_ctrl = - ctrl_cost_coeff * np.square(a).sum() reward = reward_fwd + reward_ctrl ### ADDED safety stuff yposafter = self.get_body_com("torso")[1] ywall = np.array([-2.3, 2.3]) if xposafter < 20: y_walldist = yposafter - xposafter * np.tan(30 / 360 * 2 * np.pi) + ywall elif xposafter > 20 and xposafter < 60: y_walldist = yposafter + (xposafter - 40) * np.tan(30 / 360 * 2 * np.pi) - ywall elif xposafter > 60 and xposafter < 100: y_walldist = yposafter - (xposafter - 80) * np.tan(30 / 360 * 2 * np.pi) + ywall else: y_walldist = yposafter - 20 * np.tan(30 / 360 * 2 * np.pi) + ywall obj_cost = (abs(y_walldist) < 1.8).any() * 1.0 # print("y_wallpos1-yposafter", y_wallpos1-yposafter) # print("y_wallpos2-yposafter", y_wallpos2-yposafter) #### ADDED # body_quat = self.data.get_body_xquat('torso') # z_rot = 1 - 2 * ( # body_quat[1] ** 2 + body_quat[2] ** 2) ### normally xx-rotation, not sure what axes mujoco uses # # state = self.state_vector() done = False # ADDED # print("y_walldist", y_walldist) # print("obj_cost", obj_cost) # print("done_cost", done_cost) cost = np.clip(obj_cost, 0, 1) #cost = obj_cost ob = self._get_obs() return ob, reward, done, dict(cost=cost, reward_fwd=reward_fwd, reward_ctrl=reward_ctrl) def _get_obs(self): qpos = self.sim.data.qpos qvel = self.sim.data.qvel #ADDED x = self.sim.data.qpos.flat[0] # ADDED y = self.sim.data.qpos.flat[1] # ADDED # ADDED if x < 20: y_off = y - x * np.tan(30 / 360 * 2 * np.pi) elif x > 20 and x < 60: y_off = y + (x - 40) * np.tan(30 / 360 * 2 * np.pi) elif x > 60 and x < 100: y_off = y - (x - 80) * np.tan(30 / 360 * 2 * np.pi) else: y_off = y - 20 * np.tan(30 / 360 * 2 * np.pi) return np.concatenate([qpos.flat[2:], qvel.flat, [x/5], [y_off]]) def reset_model(self): self.set_state( self.init_qpos + self.np_random.uniform(low=-.1, high=.1, size=self.model.nq), self.init_qvel + self.np_random.uniform(low=-.1, high=.1, size=self.model.nv) ) return self._get_obs() ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/mujoco_env.py ================================================ from collections import OrderedDict import os from gym import error, spaces from gym.utils import seeding import numpy as np from os import path import gym try: import mujoco_py except ImportError as e: raise error.DependencyNotInstalled("{}. (HINT: you need to install mujoco_py, and also perform the setup instructions here: https://github.com/openai/mujoco-py/.)".format(e)) DEFAULT_SIZE = 500 def convert_observation_to_space(observation): if isinstance(observation, dict): space = spaces.Dict(OrderedDict([ (key, convert_observation_to_space(value)) for key, value in observation.items() ])) elif isinstance(observation, np.ndarray): low = np.full(observation.shape, -float('inf'), dtype=np.float32) high = np.full(observation.shape, float('inf'), dtype=np.float32) space = spaces.Box(low, high, dtype=observation.dtype) else: raise NotImplementedError(type(observation), observation) return space class MujocoEnv(gym.Env): """Superclass for all MuJoCo environments. 
""" def __init__(self, model_path, frame_skip): if model_path.startswith("/"): fullpath = model_path else: fullpath = os.path.join(os.path.dirname(__file__), "./assets", model_path) if not path.exists(fullpath): raise IOError("File %s does not exist" % fullpath) self.frame_skip = frame_skip self.model = mujoco_py.load_model_from_path(fullpath) self.sim = mujoco_py.MjSim(self.model) self.data = self.sim.data self.viewer = None self._viewers = {} self.metadata = { 'render.modes': ['human', 'rgb_array', 'depth_array'], 'video.frames_per_second': int(np.round(1.0 / self.dt)) } self.init_qpos = self.sim.data.qpos.ravel().copy() self.init_qvel = self.sim.data.qvel.ravel().copy() self._set_action_space() action = self.action_space.sample() observation, _reward, done, _info = self.step(action) # assert not done self._set_observation_space(observation) self.seed() def _set_action_space(self): bounds = self.model.actuator_ctrlrange.copy().astype(np.float32) low, high = bounds.T self.action_space = spaces.Box(low=low, high=high, dtype=np.float32) return self.action_space def _set_observation_space(self, observation): self.observation_space = convert_observation_to_space(observation) return self.observation_space def seed(self, seed=None): self.np_random, seed = seeding.np_random(seed) return [seed] # methods to override: # ---------------------------- def reset_model(self): """ Reset the robot degrees of freedom (qpos and qvel). Implement this in each subclass. """ raise NotImplementedError def viewer_setup(self): """ This method is called when the viewer is initialized. Optionally implement this method, if you need to tinker with camera position and so forth. """ pass # ----------------------------- def reset(self): self.sim.reset() ob = self.reset_model() return ob def set_state(self, qpos, qvel): assert qpos.shape == (self.model.nq,) and qvel.shape == (self.model.nv,) old_state = self.sim.get_state() new_state = mujoco_py.MjSimState(old_state.time, qpos, qvel, old_state.act, old_state.udd_state) self.sim.set_state(new_state) self.sim.forward() @property def dt(self): return self.model.opt.timestep * self.frame_skip def do_simulation(self, ctrl, n_frames): self.sim.data.ctrl[:] = ctrl for _ in range(n_frames): self.sim.step() def render(self, mode='human', width=DEFAULT_SIZE, height=DEFAULT_SIZE, camera_id=None, camera_name=None): if mode == 'rgb_array': if camera_id is not None and camera_name is not None: raise ValueError("Both `camera_id` and `camera_name` cannot be" " specified at the same time.") no_camera_specified = camera_name is None and camera_id is None if no_camera_specified: camera_name = 'track' if camera_id is None and camera_name in self.model._camera_name2id: camera_id = self.model.camera_name2id(camera_name) self._get_viewer(mode).render(width, height, camera_id=camera_id) # window size used for old mujoco-py: data = self._get_viewer(mode).read_pixels(width, height, depth=False) # original image is upside-down, so flip it return data[::-1, :, :] elif mode == 'depth_array': self._get_viewer(mode).render(width, height) # window size used for old mujoco-py: # Extract depth part of the read_pixels() tuple data = self._get_viewer(mode).read_pixels(width, height, depth=True)[1] # original image is upside-down, so flip it return data[::-1, :] elif mode == 'human': self._get_viewer(mode).render() def close(self): if self.viewer is not None: # self.viewer.finish() self.viewer = None self._viewers = {} def _get_viewer(self, mode): self.viewer = self._viewers.get(mode) if self.viewer is 
None: if mode == 'human': self.viewer = mujoco_py.MjViewer(self.sim) elif mode == 'rgb_array' or mode == 'depth_array': self.viewer = mujoco_py.MjRenderContextOffscreen(self.sim, -1) self.viewer_setup() self._viewers[mode] = self.viewer return self.viewer def get_body_com(self, body_name): return self.data.get_body_xpos(body_name) def state_vector(self): return np.concatenate([ self.sim.data.qpos.flat, self.sim.data.qvel.flat ]) def place_random_objects(self): for i in range(9): random_color_array = np.append(np.random.uniform(0, 1, size=3), 1) random_pos_array = np.append(np.random.uniform(-10., 10., size=2), 0.5) site_id = self.sim.model.geom_name2id('obj' + str(i)) self.sim.model.geom_rgba[site_id] = random_color_array self.sim.model.geom_pos[site_id] = random_pos_array ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/mujoco_multi.py ================================================ from functools import partial import gym from gym.spaces import Box from gym.wrappers import TimeLimit import numpy as np from .multiagentenv import MultiAgentEnv from .manyagent_ant import ManyAgentAntEnv from .manyagent_swimmer import ManyAgentSwimmerEnv from .obsk import get_joints_at_kdist, get_parts_and_edges, build_obs def env_fn(env, **kwargs) -> MultiAgentEnv: # TODO: this may be a more complex function # env_args = kwargs.get("env_args", {}) return env(**kwargs) # env_REGISTRY = {} # env_REGISTRY["manyagent_ant"] = partial(env_fn, env=ManyAgentAntEnv) # # env_REGISTRY = {} # env_REGISTRY["manyagent_swimmer"] = partial(env_fn, env=ManyAgentSwimmerEnv) # using code from https://github.com/ikostrikov/pytorch-ddpg-naf class NormalizedActions(gym.ActionWrapper): def _action(self, action): action = (action + 1) / 2 action *= (self.action_space.high - self.action_space.low) action += self.action_space.low return action def action(self, action_): return self._action(action_) def _reverse_action(self, action): action -= self.action_space.low action /= (self.action_space.high - self.action_space.low) action = action * 2 - 1 return action class MujocoMulti(MultiAgentEnv): def __init__(self, batch_size=None, **kwargs): super().__init__(batch_size, **kwargs) self.scenario = kwargs["env_args"]["scenario"] # e.g. Ant-v2 self.agent_conf = kwargs["env_args"]["agent_conf"] # e.g. 
'2x3' self.agent_partitions, self.mujoco_edges, self.mujoco_globals = get_parts_and_edges(self.scenario, self.agent_conf) self.n_agents = len(self.agent_partitions) self.n_actions = max([len(l) for l in self.agent_partitions]) self.obs_add_global_pos = kwargs["env_args"].get("obs_add_global_pos", False) self.agent_obsk = kwargs["env_args"].get("agent_obsk", None) # if None, fully observable else k>=0 implies observe nearest k agents or joints self.agent_obsk_agents = kwargs["env_args"].get("agent_obsk_agents", False) # observe full k nearest agents (True) or just single joints (False) if self.agent_obsk is not None: # print("this is agent_obsk") self.k_categories_label = kwargs["env_args"].get("k_categories") if self.k_categories_label is None: if self.scenario in ["Ant-v2", "manyagent_ant"]: self.k_categories_label = "qpos,qvel,cfrc_ext|qpos" # print("this is agent_obsk --- ant") elif self.scenario in ["Swimmer-v2", "manyagent_swimmer"]: self.k_categories_label = "qpos,qvel|qpos" # print("this is agent_obsk --- swimmer") elif self.scenario in ["Humanoid-v2", "HumanoidStandup-v2"]: self.k_categories_label = "qpos,qvel,cfrc_ext,cvel,cinert,qfrc_actuator|qpos" elif self.scenario in ["Reacher-v2"]: self.k_categories_label = "qpos,qvel,fingertip_dist|qpos" elif self.scenario in ["coupled_half_cheetah"]: self.k_categories_label = "qpos,qvel,ten_J,ten_length,ten_velocity|" else: self.k_categories_label = "qpos,qvel|qpos" k_split = self.k_categories_label.split("|") self.k_categories = [k_split[k if k < len(k_split) else -1].split(",") for k in range(self.agent_obsk + 1)] self.global_categories_label = kwargs["env_args"].get("global_categories") self.global_categories = self.global_categories_label.split( ",") if self.global_categories_label is not None else [] if self.agent_obsk is not None: self.k_dicts = [get_joints_at_kdist(agent_id, self.agent_partitions, self.mujoco_edges, k=self.agent_obsk, kagents=False, ) for agent_id in range(self.n_agents)] # load scenario from script self.episode_limit = self.args.episode_limit self.env_version = kwargs["env_args"].get("env_version", 2) if self.env_version == 2: if self.scenario in ["manyagent_ant"]: from .manyagent_ant import ManyAgentAntEnv as this_env elif self.scenario in ["manyagent_swimmer"]: from .manyagent_swimmer import ManyAgentSwimmerEnv as this_env elif self.scenario in ["coupled_half_cheetah"]: from .coupled_half_cheetah import CoupledHalfCheetah as this_env elif self.scenario in ["HalfCheetah-v2"]: from .half_cheetah import HalfCheetahEnv as this_env # print("HalfCheetahEnv1111") Hopper-v2 # elif self.scenario in ["Hopper-v2"]: from .hopper import HopperEnv as this_env # print("Hopper-v2") elif self.scenario in ["Humanoid-v2"]: from .humanoid import HumanoidEnv as this_env # print("Hopper-v2") elif self.scenario in ["Ant-v2"]: from .ant import AntEnv as this_env else: raise NotImplementedError('Custom env not implemented!') # print("self.scenario", self.scenario) # aaa= this_env(**kwargs["env_args"]) # print("aaa", aaa) self.wrapped_env = NormalizedActions( TimeLimit(this_env(**kwargs["env_args"]), max_episode_steps=self.episode_limit)) # try: # self.wrapped_env = NormalizedActions(gym.make(self.scenario)) # print("this managent1") # except gym.error.Error: # if self.scenario in ["manyagent_ant"]: # from .manyagent_ant import ManyAgentAntEnv as this_env # elif self.scenario in ["manyagent_swimmer"]: # from .manyagent_swimmer import ManyAgentSwimmerEnv as this_env # elif self.scenario in ["coupled_half_cheetah"]: # from 
.coupled_half_cheetah import CoupledHalfCheetah as this_env # elif self.scenario in ["HalfCheetah-v2"]: # from .half_cheetah import HalfCheetahEnv as this_env # print("HalfCheetahEnv1111") # else: # raise NotImplementedError('Custom env not implemented!') # self.wrapped_env = NormalizedActions( # TimeLimit(this_env(**kwargs["env_args"]), max_episode_steps=self.episode_limit)) # if self.scenario == "manyagent_swimmer": # env_REGISTRY = {} # env_REGISTRY["manyagent_swimmer"] = partial(env_fn, env=ManyAgentSwimmerEnv) # print("this is swimmer 2") # elif self.scenario == "manyagent_ant": # env_REGISTRY = {} # env_REGISTRY["manyagent_ant"] = partial(env_fn, env=ManyAgentAntEnv) # print("this managent2") # self.wrapped_env = NormalizedActions( # TimeLimit(partial(env_REGISTRY[self.scenario], **kwargs["env_args"])(), # max_episode_steps=self.episode_limit)) else: assert False, "not implemented!" self.timelimit_env = self.wrapped_env.env self.timelimit_env._max_episode_steps = self.episode_limit self.env = self.timelimit_env.env self.timelimit_env.reset() self.obs_size = self.get_obs_size() self.share_obs_size = self.get_state_size() # COMPATIBILITY self.n = self.n_agents # self.observation_space = [Box(low=np.array([-10]*self.n_agents), high=np.array([10]*self.n_agents)) for _ in range(self.n_agents)] self.observation_space = [Box(low=-10, high=10, shape=(self.obs_size,)) for _ in range(self.n_agents)] self.share_observation_space = [Box(low=-10, high=10, shape=(self.share_obs_size,)) for _ in range(self.n_agents)] acdims = [len(ap) for ap in self.agent_partitions] self.action_space = tuple([Box(self.env.action_space.low[sum(acdims[:a]):sum(acdims[:a + 1])], self.env.action_space.high[sum(acdims[:a]):sum(acdims[:a + 1])]) for a in range(self.n_agents)]) pass def step(self, actions): # need to remove dummy actions that arise due to unequal action vector sizes across agents flat_actions = np.concatenate([actions[i][:self.action_space[i].low.shape[0]] for i in range(self.n_agents)]) obs_n, reward_n, done_n, info_n = self.wrapped_env.step(flat_actions) self.steps += 1 info = {} info.update(info_n) # if done_n: # if self.steps < self.episode_limit: # info["episode_limit"] = False # the next state will be masked out # else: # info["episode_limit"] = True # the next state will not be masked out if done_n: if self.steps < self.episode_limit: info["bad_transition"] = False # the next state will be masked out else: info["bad_transition"] = True # the next state will not be masked out # return reward_n, done_n, info rewards = [[reward_n]] * self.n_agents # print("self.n_agents", self.n_agents) info["cost"] = [[info["cost"]]] * self.n_agents dones = [done_n] * self.n_agents infos = [info for _ in range(self.n_agents)] return self.get_obs(), self.get_state(), rewards, dones, infos, self.get_avail_actions() def get_obs(self): """ Returns all agent observat3ions in a list """ state = self.env._get_obs() obs_n = [] for a in range(self.n_agents): agent_id_feats = np.zeros(self.n_agents, dtype=np.float32) agent_id_feats[a] = 1.0 # obs_n.append(self.get_obs_agent(a)) # obs_n.append(np.concatenate([state, self.get_obs_agent(a), agent_id_feats])) # obs_n.append(np.concatenate([self.get_obs_agent(a), agent_id_feats])) obs_i = np.concatenate([state, agent_id_feats]) obs_i = (obs_i - np.mean(obs_i)) / np.std(obs_i) obs_n.append(obs_i) return obs_n def get_obs_agent(self, agent_id): if self.agent_obsk is None: return self.env._get_obs() else: # return build_obs(self.env, # self.k_dicts[agent_id], # self.k_categories, # 
self.mujoco_globals, # self.global_categories, # vec_len=getattr(self, "obs_size", None)) return build_obs(self.env, self.k_dicts[agent_id], self.k_categories, self.mujoco_globals, self.global_categories) def get_obs_size(self): """ Returns the shape of the observation """ if self.agent_obsk is None: return self.get_obs_agent(0).size else: return len(self.get_obs()[0]) # return max([len(self.get_obs_agent(agent_id)) for agent_id in range(self.n_agents)]) def get_state(self, team=None): # TODO: May want global states for different teams (so cannot see what the other team is communicating e.g.) state = self.env._get_obs() share_obs = [] for a in range(self.n_agents): agent_id_feats = np.zeros(self.n_agents, dtype=np.float32) agent_id_feats[a] = 1.0 # share_obs.append(np.concatenate([state, self.get_obs_agent(a), agent_id_feats])) state_i = np.concatenate([state, agent_id_feats]) state_i = (state_i - np.mean(state_i)) / np.std(state_i) share_obs.append(state_i) return share_obs def get_state_size(self): """ Returns the shape of the state""" return len(self.get_state()[0]) def get_avail_actions(self): # all actions are always available return np.ones(shape=(self.n_agents, self.n_actions,)) def get_avail_agent_actions(self, agent_id): """ Returns the available actions for agent_id """ return np.ones(shape=(self.n_actions,)) def get_total_actions(self): """ Returns the total number of actions an agent could ever take """ return self.n_actions # CAREFUL! - for continuous dims, this is action space dim rather # return self.env.action_space.shape[0] def get_stats(self): return {} # TODO: Temp hack def get_agg_stats(self, stats): return {} def reset(self, **kwargs): """ Returns initial observations and states""" self.steps = 0 self.timelimit_env.reset() return self.get_obs(), self.get_state(), self.get_avail_actions() def render(self, **kwargs): self.env.render(**kwargs) def close(self): pass def seed(self, args): pass def get_env_info(self): env_info = {"state_shape": self.get_state_size(), "obs_shape": self.get_obs_size(), "n_actions": self.get_total_actions(), "n_agents": self.n_agents, "episode_limit": self.episode_limit, "action_spaces": self.action_space, "actions_dtype": np.float32, "normalise_actions": False } return env_info ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/multiagentenv.py ================================================ from collections import namedtuple import numpy as np def convert(dictionary): return namedtuple('GenericDict', dictionary.keys())(**dictionary) class MultiAgentEnv(object): def __init__(self, batch_size=None, **kwargs): # Unpack arguments from sacred args = kwargs["env_args"] if isinstance(args, dict): args = convert(args) self.args = args if getattr(args, "seed", None) is not None: self.seed = args.seed self.rs = np.random.RandomState(self.seed) # initialise numpy random state def step(self, actions): """ Returns reward, terminated, info """ raise NotImplementedError def get_obs(self): """ Returns all agent observations in a list """ raise NotImplementedError def get_obs_agent(self, agent_id): """ Returns observation for agent_id """ raise NotImplementedError def get_obs_size(self): """ Returns the shape of the observation """ raise NotImplementedError def get_state(self): raise NotImplementedError def get_state_size(self): """ Returns the shape of the state""" raise NotImplementedError def get_avail_actions(self): raise NotImplementedError def 
get_avail_agent_actions(self, agent_id): """ Returns the available actions for agent_id """ raise NotImplementedError def get_total_actions(self): """ Returns the total number of actions an agent could ever take """ # TODO: This is only suitable for a discrete 1 dimensional action space for each agent raise NotImplementedError def get_stats(self): raise NotImplementedError # TODO: Temp hack def get_agg_stats(self, stats): return {} def reset(self): """ Returns initial observations and states""" raise NotImplementedError def render(self): raise NotImplementedError def close(self): raise NotImplementedError def seed(self, seed): raise NotImplementedError def get_env_info(self): env_info = {"state_shape": self.get_state_size(), "obs_shape": self.get_obs_size(), "n_actions": self.get_total_actions(), "n_agents": self.n_agents, "episode_limit": self.episode_limit} return env_info ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/obsk.py ================================================ import itertools import numpy as np from copy import deepcopy class Node(): def __init__(self, label, qpos_ids, qvel_ids, act_ids, body_fn=None, bodies=None, extra_obs=None, tendons=None): self.label = label self.qpos_ids = qpos_ids self.qvel_ids = qvel_ids self.act_ids = act_ids self.bodies = bodies self.extra_obs = {} if extra_obs is None else extra_obs self.body_fn = body_fn self.tendons = tendons pass def __str__(self): return self.label def __repr__(self): return self.label class HyperEdge(): def __init__(self, *edges): self.edges = set(edges) def __contains__(self, item): return item in self.edges def __str__(self): return "HyperEdge({})".format(self.edges) def __repr__(self): return "HyperEdge({})".format(self.edges) def get_joints_at_kdist(agent_id, agent_partitions, hyperedges, k=0, kagents=False,): """ Identify all joints at distance <= k from agent agent_id :param agent_id: id of agent to be considered :param agent_partitions: list of joint tuples in order of agentids :param edges: list of tuples (joint1, joint2) :param k: kth degree :param kagents: True (observe all joints of an agent if a single one is) or False (individual joint granularity) :return: dict with k as key, and list of joints at that distance """ assert not kagents, "kagents not implemented!" agent_joints = agent_partitions[agent_id] def _adjacent(lst, kagents=False): # return all sets adjacent to any element in lst ret = set([]) for l in lst: ret = ret.union(set(itertools.chain(*[e.edges.difference({l}) for e in hyperedges if l in e]))) return ret seen = set([]) new = set([]) k_dict = {} for _k in range(k+1): if not _k: new = set(agent_joints) else: print(hyperedges) new = _adjacent(new) - seen seen = seen.union(new) k_dict[_k] = sorted(list(new), key=lambda x:x.label) return k_dict def build_obs(env, k_dict, k_categories, global_dict, global_categories, vec_len=None): """Given a k_dict from get_joints_at_kdist, extract observation vector. :param k_dict: k_dict :param qpos: qpos numpy array :param qvel: qvel numpy array :param vec_len: if None no padding, else zero-pad to vec_len :return: observation vector """ # TODO: This needs to be fixed, it was designed for half-cheetah only! 
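# --- Editor's sketch (standalone, not part of the original file): how get_joints_at_kdist above walks
# the hyperedge graph. The import path is assumed to mirror test.py; adjust it to your installation.
# (Note: for k >= 1 the function currently also print()s the hyperedge list.)
from safety_multiagent_mujoco.obsk import Node, HyperEdge, get_joints_at_kdist

_a, _b, _c = Node("a", 0, 0, 0), Node("b", 1, 1, 1), Node("c", 2, 2, 2)
_edges = [HyperEdge(_a, _b), HyperEdge(_b, _c)]
_parts = [(_a,), (_b,), (_c,)]
# Agent 0 owns joint "a"; at distance 1 it also sees "b", at distance 2 also "c":
_k_dict = get_joints_at_kdist(0, _parts, _edges, k=2)
# -> {0: [a], 1: [b], 2: [c]}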
#if add_global_pos: # obs_qpos_lst.append(global_qpos) # obs_qvel_lst.append(global_qvel) body_set_dict = {} obs_lst = [] # Add parts attributes for k in sorted(list(k_dict.keys())): cats = k_categories[k] for _t in k_dict[k]: for c in cats: if c in _t.extra_obs: items = _t.extra_obs[c](env).tolist() obs_lst.extend(items if isinstance(items, list) else [items]) else: if c in ["qvel","qpos"]: # this is a "joint position/velocity" item items = getattr(env.sim.data, c)[getattr(_t, "{}_ids".format(c))] obs_lst.extend(items if isinstance(items, list) else [items]) elif c in ["qfrc_actuator"]: # this is a "vel position" item items = getattr(env.sim.data, c)[getattr(_t, "{}_ids".format("qvel"))] obs_lst.extend(items if isinstance(items, list) else [items]) elif c in ["cvel", "cinert", "cfrc_ext"]: # this is a "body position" item if _t.bodies is not None: for b in _t.bodies: if c not in body_set_dict: body_set_dict[c] = set() if b not in body_set_dict[c]: items = getattr(env.sim.data, c)[b].tolist() items = getattr(_t, "body_fn", lambda _id,x:x)(b, items) obs_lst.extend(items if isinstance(items, list) else [items]) body_set_dict[c].add(b) # Add global attributes body_set_dict = {} for c in global_categories: if c in ["qvel", "qpos"]: # this is a "joint position" item for j in global_dict.get("joints", []): items = getattr(env.sim.data, c)[getattr(j, "{}_ids".format(c))] obs_lst.extend(items if isinstance(items, list) else [items]) else: for b in global_dict.get("bodies", []): if c not in body_set_dict: body_set_dict[c] = set() if b not in body_set_dict[c]: obs_lst.extend(getattr(env.sim.data, c)[b].tolist()) body_set_dict[c].add(b) if vec_len is not None: pad = np.array((vec_len - len(obs_lst))*[0]) if len(pad): return np.concatenate([np.array(obs_lst), pad]) return np.array(obs_lst) def build_actions(agent_partitions, k_dict): # Composes agent actions output from networks # into coherent joint action vector to be sent to the env. 
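# --- Editor's sketch (standalone, not part of the original file): an extra_obs entry on a Node
# overrides the default qpos/qvel lookup in build_obs above, which is how root joints expose an
# empty qpos or clipped velocities. Import path assumed to mirror test.py.
import numpy as np
from safety_multiagent_mujoco.obsk import Node, build_obs

_root = Node("root_x", 0, 0, -1, extra_obs={"qpos": lambda env: np.array([0.5, -0.5])})
_obs = build_obs(env=None,                 # env is never touched when extra_obs handles the category
                 k_dict={0: [_root]},
                 k_categories=[["qpos"]],
                 global_dict={},
                 global_categories=[])
# _obs -> array([ 0.5, -0.5])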
pass def get_parts_and_edges(label, partitioning): if label in ["half_cheetah", "HalfCheetah-v2"]: # define Mujoco graph bthigh = Node("bthigh", -6, -6, 0) bshin = Node("bshin", -5, -5, 1) bfoot = Node("bfoot", -4, -4, 2) fthigh = Node("fthigh", -3, -3, 3) fshin = Node("fshin", -2, -2, 4) ffoot = Node("ffoot", -1, -1, 5) edges = [HyperEdge(bfoot, bshin), HyperEdge(bshin, bthigh), HyperEdge(bthigh, fthigh), HyperEdge(fthigh, fshin), HyperEdge(fshin, ffoot)] root_x = Node("root_x", 0, 0, -1, extra_obs={"qpos": lambda env: np.array([])}) root_z = Node("root_z", 1, 1, -1) root_y = Node("root_y", 2, 2, -1) globals = {"joints":[root_x, root_y, root_z]} if partitioning == "2x3": parts = [(bfoot, bshin, bthigh), (ffoot, fshin, fthigh)] elif partitioning == "6x1": parts = [(bfoot,), (bshin,), (bthigh,), (ffoot,), (fshin,), (fthigh,)] elif partitioning == "3x2": parts = [(bfoot, bshin,), (bthigh, ffoot,), (fshin, fthigh,)] else: raise Exception("UNKNOWN partitioning config: {}".format(partitioning)) return parts, edges, globals elif label in ["Ant-v2"]: # define Mujoco graph torso = 1 front_left_leg = 2 aux_1 = 3 ankle_1 = 4 front_right_leg = 5 aux_2 = 6 ankle_2 = 7 back_leg = 8 aux_3 = 9 ankle_3 = 10 right_back_leg = 11 aux_4 = 12 ankle_4 = 13 hip1 = Node("hip1", -8, -8, 2, bodies=[torso, front_left_leg], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist()) # ankle1 = Node("ankle1", -7, -7, 3, bodies=[front_left_leg, aux_1, ankle_1], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())#, hip2 = Node("hip2", -6, -6, 4, bodies=[torso, front_right_leg], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())#, ankle2 = Node("ankle2", -5, -5, 5, bodies=[front_right_leg, aux_2, ankle_2], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())#, hip3 = Node("hip3", -4, -4, 6, bodies=[torso, back_leg], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())#, ankle3 = Node("ankle3", -3, -3, 7, bodies=[back_leg, aux_3, ankle_3], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())#, hip4 = Node("hip4", -2, -2, 0, bodies=[torso, right_back_leg], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())#, ankle4 = Node("ankle4", -1, -1, 1, bodies=[right_back_leg, aux_4, ankle_4], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())#, edges = [HyperEdge(ankle4, hip4), HyperEdge(ankle1, hip1), HyperEdge(ankle2, hip2), HyperEdge(ankle3, hip3), HyperEdge(hip4, hip1, hip2, hip3), ] free_joint = Node("free", 0, 0, -1, extra_obs={"qpos": lambda env: env.sim.data.qpos[:7], "qvel": lambda env: env.sim.data.qvel[:6], "cfrc_ext": lambda env: np.clip(env.sim.data.cfrc_ext[0:1], -1, 1)}) globals = {"joints": [free_joint]} if partitioning == "2x4": # neighbouring legs together parts = [(hip1, ankle1, hip2, ankle2), (hip3, ankle3, hip4, ankle4)] elif partitioning == "2x4d": # diagonal legs together parts = [(hip1, ankle1, hip3, ankle3), (hip2, ankle2, hip4, ankle4)] elif partitioning == "4x2": parts = [(hip1, ankle1), (hip2, ankle2), (hip3, ankle3), (hip4, ankle4)] elif partitioning == "8x1": parts = [(hip1,), (ankle1,), (hip2,), (ankle2,), (hip3,), (ankle3,), (hip4,), (ankle4,)] else: raise Exception("UNKNOWN partitioning config: {}".format(partitioning)) return parts, edges, globals elif label in ["Hopper-v2"]: # define Mujoco-Graph thigh_joint = Node("thigh_joint", -3, -3, 0, extra_obs={"qvel": lambda env: np.clip(np.array([env.sim.data.qvel[-3]]), -10, 10)}) leg_joint = Node("leg_joint", -2, -2, 1, extra_obs={"qvel": lambda env: np.clip(np.array([env.sim.data.qvel[-2]]), -10, 10)}) foot_joint = Node("foot_joint", -1, -1, 2, extra_obs={"qvel": lambda env: 
np.clip(np.array([env.sim.data.qvel[-1]]), -10, 10)}) edges = [HyperEdge(foot_joint, leg_joint), HyperEdge(leg_joint, thigh_joint)] root_x = Node("root_x", 0, 0, -1, extra_obs={"qpos": lambda env: np.array([]), "qvel": lambda env: np.clip(np.array([env.sim.data.qvel[1]]), -10, 10)}) root_z = Node("root_z", 1, 1, -1, extra_obs={"qvel": lambda env: np.clip(np.array([env.sim.data.qvel[1]]), -10, 10)}) root_y = Node("root_y", 2, 2, -1, extra_obs={"qvel": lambda env: np.clip(np.array([env.sim.data.qvel[2]]), -10, 10)}) globals = {"joints":[root_x, root_y, root_z]} if partitioning == "3x1": parts = [(thigh_joint,), (leg_joint,), (foot_joint,)] else: raise Exception("UNKNOWN partitioning config: {}".format(partitioning)) return parts, edges, globals elif label in ["Humanoid-v2", "HumanoidStandup-v2"]: # define Mujoco-Graph abdomen_y = Node("abdomen_y", -16, -16, 0) # act ordering bug in env -- double check! abdomen_z = Node("abdomen_z", -17, -17, 1) abdomen_x = Node("abdomen_x", -15, -15, 2) right_hip_x = Node("right_hip_x", -14, -14, 3) right_hip_z = Node("right_hip_z", -13, -13, 4) right_hip_y = Node("right_hip_y", -12, -12, 5) right_knee = Node("right_knee", -11, -11, 6) left_hip_x = Node("left_hip_x", -10, -10, 7) left_hip_z = Node("left_hip_z", -9, -9, 8) left_hip_y = Node("left_hip_y", -8, -8, 9) left_knee = Node("left_knee", -7, -7, 10) right_shoulder1 = Node("right_shoulder1", -6, -6, 11) right_shoulder2 = Node("right_shoulder2", -5, -5, 12) right_elbow = Node("right_elbow", -4, -4, 13) left_shoulder1 = Node("left_shoulder1", -3, -3, 14) left_shoulder2 = Node("left_shoulder2", -2, -2, 15) left_elbow = Node("left_elbow", -1, -1, 16) edges = [HyperEdge(abdomen_x, abdomen_y, abdomen_z), HyperEdge(right_hip_x, right_hip_y, right_hip_z), HyperEdge(left_hip_x, left_hip_y, left_hip_z), HyperEdge(left_elbow, left_shoulder1, left_shoulder2), HyperEdge(right_elbow, right_shoulder1, right_shoulder2), HyperEdge(left_knee, left_hip_x, left_hip_y, left_hip_z), HyperEdge(right_knee, right_hip_x, right_hip_y, right_hip_z), HyperEdge(left_shoulder1, left_shoulder2, abdomen_x, abdomen_y, abdomen_z), HyperEdge(right_shoulder1, right_shoulder2, abdomen_x, abdomen_y, abdomen_z), HyperEdge(abdomen_x, abdomen_y, abdomen_z, left_hip_x, left_hip_y, left_hip_z), HyperEdge(abdomen_x, abdomen_y, abdomen_z, right_hip_x, right_hip_y, right_hip_z), ] globals = {} if partitioning == "9|8": # 17 in total, so one action is a dummy (to be handled by pymarl) # isolate upper and lower body parts = [(left_shoulder1, left_shoulder2, abdomen_x, abdomen_y, abdomen_z, right_shoulder1, right_shoulder2, right_elbow, left_elbow), (left_hip_x, left_hip_y, left_hip_z, right_hip_x, right_hip_y, right_hip_z, right_knee, left_knee)] # TODO: There could be tons of decompositions here elif partitioning == "17x1": # 17 in total, so one action is a dummy (to be handled by pymarl) # isolate upper and lower body parts = [(left_shoulder1,), (left_shoulder2,), (abdomen_x,), (abdomen_y,), (abdomen_z,), (right_shoulder1,), (right_shoulder2,), (right_elbow,), (left_elbow,), (left_hip_x,), (left_hip_y,), (left_hip_z,), (right_hip_x,), (right_hip_y,), (right_hip_z,), (right_knee,), (left_knee,)] else: raise Exception("UNKNOWN partitioning config: {}".format(partitioning)) return parts, edges, globals elif label in ["Reacher-v2"]: # define Mujoco-Graph body0 = 1 body1 = 2 fingertip = 3 joint0 = Node("joint0", -4, -4, 0, bodies=[body0, body1], extra_obs={"qpos":(lambda env:np.array([np.sin(env.sim.data.qpos[-4]), np.cos(env.sim.data.qpos[-4])]))}) 
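# --- Editor's sketch (standalone, not part of the original file): what the HalfCheetah-v2 branch
# above returns for the two-agent "2x3" split. Import path assumed to mirror test.py.
from safety_multiagent_mujoco.obsk import get_parts_and_edges

_parts, _edges, _globals = get_parts_and_edges("HalfCheetah-v2", "2x3")
# _parts  -> [(bfoot, bshin, bthigh), (ffoot, fshin, fthigh)]   (one tuple of joints per agent)
# _edges  -> 5 HyperEdges forming the back-foot-to-front-foot kinematic chain
# _globals["joints"] -> [root_x, root_y, root_z], shared by both agents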
joint1 = Node("joint1", -3, -3, 1, bodies=[body1, fingertip], extra_obs={"fingertip_dist":(lambda env:env.get_body_com("fingertip") - env.get_body_com("target")), "qpos":(lambda env:np.array([np.sin(env.sim.data.qpos[-3]), np.cos(env.sim.data.qpos[-3])]))}) edges = [HyperEdge(joint0, joint1)] worldbody = 0 target = 4 target_x = Node("target_x", -2, -2, -1, extra_obs={"qvel":(lambda env:np.array([]))}) target_y = Node("target_y", -1, -1, -1, extra_obs={"qvel":(lambda env:np.array([]))}) globals = {"bodies":[worldbody, target], "joints":[target_x, target_y]} if partitioning == "2x1": # isolate upper and lower arms parts = [(joint0,), (joint1,)] # TODO: There could be tons of decompositions here else: raise Exception("UNKNOWN partitioning config: {}".format(partitioning)) return parts, edges, globals elif label in ["Swimmer-v2"]: # define Mujoco-Graph joint0 = Node("rot2", -2, -2, 0) # TODO: double-check ids joint1 = Node("rot3", -1, -1, 1) edges = [HyperEdge(joint0, joint1)] globals = {} if partitioning == "2x1": # isolate upper and lower body parts = [(joint0,), (joint1,)] # TODO: There could be tons of decompositions here else: raise Exception("UNKNOWN partitioning config: {}".format(partitioning)) return parts, edges, globals elif label in ["Walker2d-v2"]: # define Mujoco-Graph thigh_joint = Node("thigh_joint", -6, -6, 0) leg_joint = Node("leg_joint", -5, -5, 1) foot_joint = Node("foot_joint", -4, -4, 2) thigh_left_joint = Node("thigh_left_joint", -3, -3, 3) leg_left_joint = Node("leg_left_joint", -2, -2, 4) foot_left_joint = Node("foot_left_joint", -1, -1, 5) edges = [HyperEdge(foot_joint, leg_joint), HyperEdge(leg_joint, thigh_joint), HyperEdge(foot_left_joint, leg_left_joint), HyperEdge(leg_left_joint, thigh_left_joint), HyperEdge(thigh_joint, thigh_left_joint) ] globals = {} if partitioning == "2x3": # isolate upper and lower body parts = [(foot_joint, leg_joint, thigh_joint), (foot_left_joint, leg_left_joint, thigh_left_joint,)] # TODO: There could be tons of decompositions here else: raise Exception("UNKNOWN partitioning config: {}".format(partitioning)) return parts, edges, globals elif label in ["coupled_half_cheetah"]: # define Mujoco graph tendon = 0 bthigh = Node("bthigh", -6, -6, 0, tendons=[tendon], extra_obs = {"ten_J": lambda env: env.sim.data.ten_J[tendon], "ten_length": lambda env: env.sim.data.ten_length, "ten_velocity": lambda env: env.sim.data.ten_velocity}) bshin = Node("bshin", -5, -5, 1) bfoot = Node("bfoot", -4, -4, 2) fthigh = Node("fthigh", -3, -3, 3) fshin = Node("fshin", -2, -2, 4) ffoot = Node("ffoot", -1, -1, 5) bthigh2 = Node("bthigh2", -6, -6, 0, tendons=[tendon], extra_obs={"ten_J": lambda env: env.sim.data.ten_J[tendon], "ten_length": lambda env: env.sim.data.ten_length, "ten_velocity": lambda env: env.sim.data.ten_velocity}) bshin2 = Node("bshin2", -5, -5, 1) bfoot2 = Node("bfoot2", -4, -4, 2) fthigh2 = Node("fthigh2", -3, -3, 3) fshin2 = Node("fshin2", -2, -2, 4) ffoot2 = Node("ffoot2", -1, -1, 5) edges = [HyperEdge(bfoot, bshin), HyperEdge(bshin, bthigh), HyperEdge(bthigh, fthigh), HyperEdge(fthigh, fshin), HyperEdge(fshin, ffoot), HyperEdge(bfoot2, bshin2), HyperEdge(bshin2, bthigh2), HyperEdge(bthigh2, fthigh2), HyperEdge(fthigh2, fshin2), HyperEdge(fshin2, ffoot2) ] globals = {} root_x = Node("root_x", 0, 0, -1, extra_obs={"qpos": lambda env: np.array([])}) root_z = Node("root_z", 1, 1, -1) root_y = Node("root_y", 2, 2, -1) globals = {"joints":[root_x, root_y, root_z]} if partitioning == "1p1": parts = [(bfoot, bshin, bthigh, ffoot, fshin, fthigh), 
(bfoot2, bshin2, bthigh2, ffoot2, fshin2, fthigh2) ] else: raise Exception("UNKNOWN partitioning config: {}".format(partitioning)) return parts, edges, globals elif label in ["manyagent_swimmer"]: # Generate asset file try: n_agents = int(partitioning.split("x")[0]) n_segs_per_agents = int(partitioning.split("x")[1]) n_segs = n_agents * n_segs_per_agents except Exception as e: raise Exception("UNKNOWN partitioning config: {}".format(partitioning)) # Note: Default Swimmer corresponds to n_segs = 3 # define Mujoco-Graph joints = [Node("rot{:d}".format(i), -n_segs + i, -n_segs + i, i) for i in range(0, n_segs)] edges = [HyperEdge(joints[i], joints[i+1]) for i in range(n_segs-1)] globals = {} parts = [tuple(joints[i * n_segs_per_agents:(i + 1) * n_segs_per_agents]) for i in range(n_agents)] return parts, edges, globals elif label in ["manyagent_ant"]: # TODO: FIX! # Generate asset file try: n_agents = int(partitioning.split("x")[0]) n_segs_per_agents = int(partitioning.split("x")[1]) n_segs = n_agents * n_segs_per_agents except Exception as e: raise Exception("UNKNOWN partitioning config: {}".format(partitioning)) # # define Mujoco graph # torso = 1 # front_left_leg = 2 # aux_1 = 3 # ankle_1 = 4 # right_back_leg = 11 # aux_4 = 12 # ankle_4 = 13 # # off = -4*(n_segs-1) # hip1 = Node("hip1", -4-off, -4-off, 2, bodies=[torso, front_left_leg], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist()) # # ankle1 = Node("ankle1", -3-off, -3-off, 3, bodies=[front_left_leg, aux_1, ankle_1], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())#, # hip4 = Node("hip4", -2-off, -2-off, 0, bodies=[torso, right_back_leg], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())#, # ankle4 = Node("ankle4", -1-off, -1-off, 1, bodies=[right_back_leg, aux_4, ankle_4], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())#, # # edges = [HyperEdge(ankle4, hip4), # HyperEdge(ankle1, hip1), # HyperEdge(hip4, hip1), # ] edges = [] joints = [] for si in range(n_segs): torso = 1 + si*7 front_right_leg = 2 + si*7 aux1 = 3 + si*7 ankle1 = 4 + si*7 back_leg = 5 + si*7 aux2 = 6 + si*7 ankle2 = 7 + si*7 off = -4 * (n_segs - 1 - si) hip1n = Node("hip1_{:d}".format(si), -4-off, -4-off, 2+4*si, bodies=[torso, front_right_leg], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist()) ankle1n = Node("ankle1_{:d}".format(si), -3-off, -3-off, 3+4*si, bodies=[front_right_leg, aux1, ankle1], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist()) hip2n = Node("hip2_{:d}".format(si), -2-off, -2-off, 0+4*si, bodies=[torso, back_leg], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist()) ankle2n = Node("ankle2_{:d}".format(si), -1-off, -1-off, 1+4*si, bodies=[back_leg, aux2, ankle2], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist()) edges += [HyperEdge(ankle1n, hip1n), HyperEdge(ankle2n, hip2n), HyperEdge(hip1n, hip2n)] if si: edges += [HyperEdge(hip1m, hip2m, hip1n, hip2n)] hip1m = deepcopy(hip1n) hip2m = deepcopy(hip2n) joints.append([hip1n, ankle1n, hip2n, ankle2n]) free_joint = Node("free", 0, 0, -1, extra_obs={"qpos": lambda env: env.sim.data.qpos[:7], "qvel": lambda env: env.sim.data.qvel[:6], "cfrc_ext": lambda env: np.clip(env.sim.data.cfrc_ext[0:1], -1, 1)}) globals = {"joints": [free_joint]} parts = [[x for sublist in joints[i * n_segs_per_agents:(i + 1) * n_segs_per_agents] for x in sublist] for i in range(n_agents)] return parts, edges, globals ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/test.py ================================================ from safety_multiagent_mujoco.mujoco_multi import 
MujocoMulti import numpy as np import time def main(): # Swimmer # env_args = {"scenario": "manyagent_swimmer", # "agent_conf": "10x2", # "agent_obsk": 1, # "episode_limit": 1000} # coupled_half_cheetah # env_args = {"scenario": "coupled_half_cheetah", # "agent_conf": "1p1", # "agent_obsk": 1, # "episode_limit": 1000} # ANT 4 # env_args = {"scenario": "manyagent_ant", # "agent_conf": "3x2", # "agent_obsk": 1, # "episode_limit": 1000} # env_args = {"scenario": "manyagent_ant", # "agent_conf": "1x1", # "agent_obsk": 1, # "episode_limit": 1000} # env_args = {"scenario": "manyagent_swimmer", # "agent_conf": "10x2", # "agent_obsk": 1, # "episode_limit": 1000} # env_args = {"scenario": "manyagent_swimmer", # "agent_conf": "4x2", # "agent_obsk": 1, # "episode_limit": 1000} # env_args = {"scenario": "HalfCheetah-v2", # "agent_conf": "2x3", # "agent_obsk": 1, # "episode_limit": 1000} # env_args = {"scenario": "Hopper-v2", # "agent_conf": "3x1", # "agent_obsk": 1, # "episode_limit": 1000} env_args = {"scenario": "Humanoid-v2", "agent_conf": "9|8", "agent_obsk": 1, "episode_limit": 1000} # env_args = {"scenario": "Humanoid-v2", # "agent_conf": "17x1", # "agent_obsk": 1, # "episode_limit": 1000} # env_args = {"scenario": "Ant-v2", # "agent_conf": "2x4", # "agent_obsk": 1, # "episode_limit": 1000} # env_args = {"scenario": "Ant-v2", # "agent_conf": "2x4d", # "agent_obsk": 1, # "episode_limit": 1000} # env_args = {"scenario": "Ant-v2", # "agent_conf": "4x2", # "agent_obsk": 1, # "episode_limit": 1000} env = MujocoMulti(env_args=env_args) env_info = env.get_env_info() n_actions = env_info["n_actions"] n_agents = env_info["n_agents"] n_episodes = 10 for e in range(n_episodes): ob=env.reset() terminated = False episode_reward = 0 while not terminated: obs = env.get_obs() state = env.get_state() actions = [] for agent_id in range(n_agents): avail_actions = env.get_avail_agent_actions(agent_id) avail_actions_ind = np.nonzero(avail_actions)[0] action = np.random.uniform(-10, 10.0, n_actions) actions.append(action) # reward, terminated, _ = env.step(actions) # print("env.step(actions): ", env.step(actions)) get_obs, get_state, reward, dones, infos, get_avail_actions= env.step(actions) # episode_reward += reward # print("reward: ", reward) cost_x= [[item['cost']] for item in infos] print("cost_x:", cost_x) print("reward:", reward) # time.sleep(0.1) env.render() # print("Total reward in episode {} = {}".format(e, episode_reward)) env.close() if __name__ == "__main__": main() """ infos[cost]: [{'cost': 0.0, 'reward_forward': -0.6434413402233052, 'reward_ctrl': -4.010836585120964, 'reward_contact': -1.2071856383999997e-13, 'reward_survive': 1.0, 'cost_obj': 0.0, 'cost_done': 0.0}, {'cost': 0.0, 'reward_forward': -0.6434413402233052, 'reward_ctrl': -4.010836585120964, 'reward_contact': -1.2071856383999997e-13, 'reward_survive': 1.0, 'cost_obj': 0.0, 'cost_done': 0.0}, {'cost': 0.0, 'reward_forward': -0.6434413402233052, 'reward_ctrl': -4.010836585120964, 'reward_contact': -1.2071856383999997e-13, 'reward_survive': 1.0, 'cost_obj': 0.0, 'cost_done': 0.0}] """ ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/runner/__init__.py ================================================ from mappo_lagrangian.runner import separated __all__=[ "separated" ] ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/runner/separated/__init__.py ================================================ from mappo_lagrangian.runner.separated import base_runner __all__=[ 
"base_runner" ] ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/runner/separated/base_runner.py ================================================ import time import wandb import os import numpy as np from itertools import chain import torch from tensorboardX import SummaryWriter from mappo_lagrangian.utils.separated_buffer import SeparatedReplayBuffer from mappo_lagrangian.utils.util import update_linear_schedule def _t2n(x): return x.detach().cpu().numpy() class Runner(object): def __init__(self, config): self.all_args = config['all_args'] self.envs = config['envs'] self.eval_envs = config['eval_envs'] self.device = config['device'] self.num_agents = config['num_agents'] # parameters self.env_name = self.all_args.env_name self.algorithm_name = self.all_args.algorithm_name self.experiment_name = self.all_args.experiment_name self.use_centralized_V = self.all_args.use_centralized_V self.use_obs_instead_of_state = self.all_args.use_obs_instead_of_state self.num_env_steps = self.all_args.num_env_steps self.episode_length = self.all_args.episode_length self.n_rollout_threads = self.all_args.n_rollout_threads self.n_eval_rollout_threads = self.all_args.n_eval_rollout_threads self.use_linear_lr_decay = self.all_args.use_linear_lr_decay self.hidden_size = self.all_args.hidden_size self.use_wandb = self.all_args.use_wandb self.use_render = self.all_args.use_render self.recurrent_N = self.all_args.recurrent_N self.use_single_network = self.all_args.use_single_network # interval self.save_interval = self.all_args.save_interval self.use_eval = self.all_args.use_eval self.eval_interval = self.all_args.eval_interval self.log_interval = self.all_args.log_interval # dir self.model_dir = self.all_args.model_dir if self.use_render: import imageio self.run_dir = config["run_dir"] self.gif_dir = str(self.run_dir / 'gifs') if not os.path.exists(self.gif_dir): os.makedirs(self.gif_dir) else: if self.use_wandb: self.save_dir = str(wandb.run.dir) else: self.run_dir = config["run_dir"] self.log_dir = str(self.run_dir / 'logs') if not os.path.exists(self.log_dir): os.makedirs(self.log_dir) self.writter = SummaryWriter(self.log_dir) self.save_dir = str(self.run_dir / 'models') if not os.path.exists(self.save_dir): os.makedirs(self.save_dir) from mappo_lagrangian.algorithms.r_mappo.r_mappo import R_MAPPO as TrainAlgo from mappo_lagrangian.algorithms.r_mappo.algorithm.rMAPPOPolicy import R_MAPPOPolicy as Policy print("share_observation_space: ", self.envs.share_observation_space) print("observation_space: ", self.envs.observation_space) print("action_space: ", self.envs.action_space) self.policy = [] for agent_id in range(self.num_agents): share_observation_space = self.envs.share_observation_space[agent_id] if self.use_centralized_V else self.envs.observation_space[agent_id] # policy network po = Policy(self.all_args, self.envs.observation_space[agent_id], share_observation_space, self.envs.action_space[agent_id], device = self.device) self.policy.append(po) if self.model_dir is not None: self.restore() self.trainer = [] self.buffer = [] for agent_id in range(self.num_agents): # algorithm tr = TrainAlgo(self.all_args, self.policy[agent_id], device = self.device) # buffer share_observation_space = self.envs.share_observation_space[agent_id] if self.use_centralized_V else self.envs.observation_space[agent_id] bu = SeparatedReplayBuffer(self.all_args, self.envs.observation_space[agent_id], share_observation_space, self.envs.action_space[agent_id]) self.buffer.append(bu) 
self.trainer.append(tr) def run(self): raise NotImplementedError def warmup(self): raise NotImplementedError def collect(self, step): raise NotImplementedError def insert(self, data): raise NotImplementedError @torch.no_grad() def compute(self): for agent_id in range(self.num_agents): self.trainer[agent_id].prep_rollout() next_value = self.trainer[agent_id].policy.get_values(self.buffer[agent_id].share_obs[-1], self.buffer[agent_id].rnn_states_critic[-1], self.buffer[agent_id].masks[-1]) next_value = _t2n(next_value) self.buffer[agent_id].compute_returns(next_value, self.trainer[agent_id].value_normalizer) def train(self): # have modified for SAD_PPO train_infos = [] for agent_id in torch.randperm(self.num_agents): self.trainer[agent_id].prep_training() train_info = self.trainer[agent_id].train(self.buffer[agent_id]) train_infos.append(train_info) self.buffer[agent_id].after_update() return train_infos def save(self): for agent_id in range(self.num_agents): if self.use_single_network: policy_model = self.trainer[agent_id].policy.model torch.save(policy_model.state_dict(), str(self.save_dir) + "/model_agent" + str(agent_id) + ".pt") else: policy_actor = self.trainer[agent_id].policy.actor torch.save(policy_actor.state_dict(), str(self.save_dir) + "/actor_agent" + str(agent_id) + ".pt") policy_critic = self.trainer[agent_id].policy.critic torch.save(policy_critic.state_dict(), str(self.save_dir) + "/critic_agent" + str(agent_id) + ".pt") def restore(self): for agent_id in range(self.num_agents): if self.use_single_network: policy_model_state_dict = torch.load(str(self.model_dir) + '/model_agent' + str(agent_id) + '.pt') self.policy[agent_id].model.load_state_dict(policy_model_state_dict) else: policy_actor_state_dict = torch.load(str(self.model_dir) + '/actor_agent' + str(agent_id) + '.pt') self.policy[agent_id].actor.load_state_dict(policy_actor_state_dict) policy_critic_state_dict = torch.load(str(self.model_dir) + '/critic_agent' + str(agent_id) + '.pt') self.policy[agent_id].critic.load_state_dict(policy_critic_state_dict) def log_train(self, train_infos, total_num_steps): for agent_id in range(self.num_agents): for k, v in train_infos[agent_id].items(): agent_k = "agent%i/" % agent_id + k if self.use_wandb: wandb.log({agent_k: v}, step=total_num_steps) else: self.writter.add_scalars(agent_k, {agent_k: v}, total_num_steps) def log_env(self, env_infos, total_num_steps): for k, v in env_infos.items(): if len(v) > 0: if self.use_wandb: wandb.log({k: np.mean(v)}, step=total_num_steps) else: self.writter.add_scalars(k, {k: np.mean(v)}, total_num_steps) ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/runner/separated/base_runner_mappo_lagr.py ================================================ import copy import time import wandb import os import numpy as np from itertools import chain import torch from tensorboardX import SummaryWriter from mappo_lagrangian.utils.separated_buffer import SeparatedReplayBuffer from mappo_lagrangian.utils.util import update_linear_schedule def _t2n(x): return x.detach().cpu().numpy() class Runner(object): def __init__(self, config): self.all_args = config['all_args'] self.envs = config['envs'] self.eval_envs = config['eval_envs'] self.device = config['device'] self.num_agents = config['num_agents'] # parameters self.env_name = self.all_args.env_name self.algorithm_name = self.all_args.algorithm_name self.experiment_name = self.all_args.experiment_name self.use_centralized_V = self.all_args.use_centralized_V 
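# --- Editor's sketch (standalone, simplified shapes, not part of the original file): the `factor`
# maintained in train() later in this class is a running product of probability ratios for agents
# already updated in the current round; the next agent weights its advantages by this factor
# (sequential, agent-by-agent update).
import numpy as np

_T, _threads, _act_dim = 4, 2, 1                                   # placeholder sizes
_factor = np.ones((_T, _threads, _act_dim), dtype=np.float32)
_old_logprob = np.zeros((_T, _threads, _act_dim), dtype=np.float32)
_new_logprob = _old_logprob + 0.1                                   # pretend the update raised log-probs
_factor = _factor * np.exp(_new_logprob - _old_logprob)             # ~1.105 everywhere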
self.use_obs_instead_of_state = self.all_args.use_obs_instead_of_state self.num_env_steps = self.all_args.num_env_steps self.episode_length = self.all_args.episode_length self.n_rollout_threads = self.all_args.n_rollout_threads self.n_eval_rollout_threads = self.all_args.n_eval_rollout_threads self.use_linear_lr_decay = self.all_args.use_linear_lr_decay self.hidden_size = self.all_args.hidden_size self.use_wandb = self.all_args.use_wandb self.use_render = self.all_args.use_render self.recurrent_N = self.all_args.recurrent_N self.use_single_network = self.all_args.use_single_network # interval self.save_interval = self.all_args.save_interval self.use_eval = self.all_args.use_eval self.eval_interval = self.all_args.eval_interval self.log_interval = self.all_args.log_interval self.gamma = self.all_args.gamma self.use_popart = self.all_args.use_popart self.safety_bound = self.all_args.safety_bound # dir self.model_dir = self.all_args.model_dir if self.use_render: import imageio self.run_dir = config["run_dir"] self.gif_dir = str(self.run_dir / 'gifs') if not os.path.exists(self.gif_dir): os.makedirs(self.gif_dir) else: if self.use_wandb: self.save_dir = str(wandb.run.dir) else: self.run_dir = config["run_dir"] self.log_dir = str(self.run_dir / 'logs') if not os.path.exists(self.log_dir): os.makedirs(self.log_dir) self.writter = SummaryWriter(self.log_dir) self.save_dir = str(self.run_dir / 'models') if not os.path.exists(self.save_dir): os.makedirs(self.save_dir) from mappo_lagrangian.algorithms.r_mappo.r_mappo_lagr import R_MAPPO_Lagr as TrainAlgo from mappo_lagrangian.algorithms.r_mappo.algorithm.MACPPOPolicy import MACPPOPolicy as Policy print("share_observation_space: ", self.envs.share_observation_space) print("observation_space: ", self.envs.observation_space) print("action_space: ", self.envs.action_space) self.policy = [] for agent_id in range(self.num_agents): share_observation_space = self.envs.share_observation_space[agent_id] if self.use_centralized_V else \ self.envs.observation_space[agent_id] # policy network po = Policy(self.all_args, self.envs.observation_space[agent_id], share_observation_space, self.envs.action_space[agent_id], device=self.device) self.policy.append(po) if self.model_dir is not None: self.restore() self.trainer = [] self.buffer = [] # todo: revise this for trpo for agent_id in range(self.num_agents): # algorithm tr = TrainAlgo(self.all_args, self.policy[agent_id], device=self.device) # buffer share_observation_space = self.envs.share_observation_space[agent_id] if self.use_centralized_V else \ self.envs.observation_space[agent_id] bu = SeparatedReplayBuffer(self.all_args, self.envs.observation_space[agent_id], share_observation_space, self.envs.action_space[agent_id]) self.buffer.append(bu) self.trainer.append(tr) def run(self): raise NotImplementedError def warmup(self): raise NotImplementedError def collect(self, step): raise NotImplementedError def insert(self, data): raise NotImplementedError @torch.no_grad() def compute(self): for agent_id in range(self.num_agents): self.trainer[agent_id].prep_rollout() next_value = self.trainer[agent_id].policy.get_values(self.buffer[agent_id].share_obs[-1], self.buffer[agent_id].rnn_states_critic[-1], self.buffer[agent_id].masks[-1]) next_value = _t2n(next_value) self.buffer[agent_id].compute_returns(next_value, self.trainer[agent_id].value_normalizer) next_costs = self.trainer[agent_id].policy.get_cost_values(self.buffer[agent_id].share_obs[-1], self.buffer[agent_id].rnn_states_cost[-1], 
self.buffer[agent_id].masks[-1]) next_costs = _t2n(next_costs) self.buffer[agent_id].compute_cost_returns(next_costs, self.trainer[agent_id].value_normalizer) def train(self): # have modified for SAD_PPO train_infos = [] cost_train_infos = [] # random update order action_dim = self.buffer[0].actions.shape[-1] factor = np.ones((self.episode_length, self.n_rollout_threads, action_dim), dtype=np.float32) for agent_id in torch.randperm(self.num_agents): self.trainer[agent_id].prep_training() self.buffer[agent_id].update_factor(factor) available_actions = None if self.buffer[agent_id].available_actions is None \ else self.buffer[agent_id].available_actions[:-1].reshape(-1, *self.buffer[ agent_id].available_actions.shape[ 2:]) old_actions_logprob, _ = self.trainer[agent_id].policy.actor.evaluate_actions( self.buffer[agent_id].obs[:-1].reshape(-1, *self.buffer[agent_id].obs.shape[2:]), self.buffer[agent_id].rnn_states[0:1].reshape(-1, *self.buffer[agent_id].rnn_states.shape[2:]), self.buffer[agent_id].actions.reshape(-1, *self.buffer[agent_id].actions.shape[2:]), self.buffer[agent_id].masks[:-1].reshape(-1, *self.buffer[agent_id].masks.shape[2:]), available_actions, self.buffer[agent_id].active_masks[:-1].reshape(-1, *self.buffer[agent_id].active_masks.shape[2:])) # safe_buffer, cost_adv = self.buffer_filter(agent_id) # train_info = self.trainer[agent_id].train(safe_buffer, cost_adv) train_info = self.trainer[agent_id].train(self.buffer[agent_id]) new_actions_logprob, _ = self.trainer[agent_id].policy.actor.evaluate_actions( self.buffer[agent_id].obs[:-1].reshape(-1, *self.buffer[agent_id].obs.shape[2:]), self.buffer[agent_id].rnn_states[0:1].reshape(-1, *self.buffer[agent_id].rnn_states.shape[2:]), self.buffer[agent_id].actions.reshape(-1, *self.buffer[agent_id].actions.shape[2:]), self.buffer[agent_id].masks[:-1].reshape(-1, *self.buffer[agent_id].masks.shape[2:]), available_actions, self.buffer[agent_id].active_masks[:-1].reshape(-1, *self.buffer[agent_id].active_masks.shape[2:])) factor = factor * _t2n(torch.exp(new_actions_logprob - old_actions_logprob).reshape(self.episode_length, self.n_rollout_threads, action_dim)) train_infos.append(train_info) self.buffer[agent_id].after_update() return train_infos, cost_train_infos # episode length of envs is exactly equal to buffer size, that is, num_thread = num_episode def buffer_filter(self, agent_id): episode_length = len(self.buffer[0].rewards) # J constraints for all agents, just a toy example J = np.zeros((self.n_rollout_threads, 1), dtype=np.float32) for t in reversed(range(episode_length)): J = self.buffer[agent_id].costs[t] + self.gamma * J factor = self.buffer[agent_id].factor if self.use_popart: cost_adv = self.buffer[agent_id].cost_returns[:-1] - \ self.trainer[agent_id].value_normalizer.denormalize(self.buffer[agent_id].cost_preds[:-1]) else: cost_adv = self.buffer[agent_id].cost_returns[:-1] - self.buffer[agent_id].cost_preds[:-1] expectation = np.mean(factor * cost_adv, axis=(0, 2)) constraints_value = J + np.expand_dims(expectation, -1) del_id = [] print("===================================================") print("safety_bound: ", self.safety_bound) for i in range(self.n_rollout_threads): if constraints_value[i][0] > self.safety_bound: del_id.append(i) buffer_filterd = self.remove_episodes(agent_id, del_id) return buffer_filterd, cost_adv def remove_episodes(self, agent_id, del_ids): buffer = copy.deepcopy(self.buffer[agent_id]) buffer.share_obs = (buffer.share_obs, del_ids, 1) buffer.obs = (buffer.obs, del_ids, 1) buffer.rnn_states 
= (buffer.rnn_states, del_ids, 1) buffer.rnn_states_critic = (buffer.rnn_states_critic, del_ids, 1) buffer.rnn_states_cost = (buffer.rnn_states_cost, del_ids, 1) buffer.value_preds = (buffer.value_preds, del_ids, 1) buffer.returns = (buffer.returns, del_ids, 1) if buffer.available_actions is not None: buffer.available_actions = (buffer.available_actions, del_ids, 1) buffer.actions = (buffer.actions, del_ids, 1) buffer.action_log_probs = (buffer.action_log_probs, del_ids, 1) buffer.rewards = (buffer.rewards, del_ids, 1) # todo: cost should be calculated entirely buffer.costs = (buffer.costs, del_ids, 1) buffer.cost_preds = (buffer.cost_preds, del_ids, 1) buffer.cost_returns = (buffer.cost_returns, del_ids, 1) buffer.masks = (buffer.masks, del_ids, 1) buffer.bad_masks = (buffer.bad_masks, del_ids, 1) buffer.active_masks = (buffer.active_masks, del_ids, 1) if buffer.factor is not None: buffer.factor = (buffer.factor, del_ids, 1) return buffer def save(self): for agent_id in range(self.num_agents): if self.use_single_network: policy_model = self.trainer[agent_id].policy.model torch.save(policy_model.state_dict(), str(self.save_dir) + "/model_agent" + str(agent_id) + ".pt") else: policy_actor = self.trainer[agent_id].policy.actor torch.save(policy_actor.state_dict(), str(self.save_dir) + "/actor_agent" + str(agent_id) + ".pt") policy_critic = self.trainer[agent_id].policy.critic torch.save(policy_critic.state_dict(), str(self.save_dir) + "/critic_agent" + str(agent_id) + ".pt") def restore(self): for agent_id in range(self.num_agents): if self.use_single_network: policy_model_state_dict = torch.load(str(self.model_dir) + '/model_agent' + str(agent_id) + '.pt') self.policy[agent_id].model.load_state_dict(policy_model_state_dict) else: policy_actor_state_dict = torch.load(str(self.model_dir) + '/actor_agent' + str(agent_id) + '.pt') self.policy[agent_id].actor.load_state_dict(policy_actor_state_dict) policy_critic_state_dict = torch.load(str(self.model_dir) + '/critic_agent' + str(agent_id) + '.pt') self.policy[agent_id].critic.load_state_dict(policy_critic_state_dict) def log_train(self, train_infos, total_num_steps): for agent_id in range(self.num_agents): for k, v in train_infos[agent_id].items(): agent_k = "agent%i/" % agent_id + k if self.use_wandb: wandb.log({agent_k: v}, step=total_num_steps) else: self.writter.add_scalars(agent_k, {agent_k: v}, total_num_steps) def log_env(self, env_infos, total_num_steps): for k, v in env_infos.items(): if len(v) > 0: if self.use_wandb: wandb.log({k: np.mean(v)}, step=total_num_steps) else: self.writter.add_scalars(k, {k: np.mean(v)}, total_num_steps) ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/runner/separated/mujoco_runner.py ================================================ import time import wandb import numpy as np from functools import reduce import torch from mappo_lagrangian.runner.separated.base_runner import Runner def _t2n(x): return x.detach().cpu().numpy() class MujocoRunner(Runner): """Runner class to perform training, evaluation. and data collection for SMAC. 
See parent class for details.""" def __init__(self, config): super(MujocoRunner, self).__init__(config) def run(self): self.warmup() start = time.time() episodes = int(self.num_env_steps) // self.episode_length // self.n_rollout_threads train_episode_rewards = [0 for _ in range(self.n_rollout_threads)] for episode in range(episodes): if self.use_linear_lr_decay: self.trainer.policy.lr_decay(episode, episodes) done_episodes_rewards = [] for step in range(self.episode_length): # Sample actions values, actions, action_log_probs, rnn_states, rnn_states_critic = self.collect(step) # Obser reward and next obs obs, share_obs, rewards, dones, infos, _ = self.envs.step(actions) dones_env = np.all(dones, axis=1) reward_env = np.mean(rewards, axis=1).flatten() train_episode_rewards += reward_env for t in range(self.n_rollout_threads): if dones_env[t]: done_episodes_rewards.append(train_episode_rewards[t]) train_episode_rewards[t] = 0 data = obs, share_obs, rewards, dones, infos, \ values, actions, action_log_probs, \ rnn_states, rnn_states_critic # insert data into buffer self.insert(data) # compute return and update network self.compute() train_infos = self.train() # post process total_num_steps = (episode + 1) * self.episode_length * self.n_rollout_threads # save model if (episode % self.save_interval == 0 or episode == episodes - 1): self.save() # log information if episode % self.log_interval == 0: end = time.time() print("\n Scenario {} Algo {} Exp {} updates {}/{} episodes, total num timesteps {}/{}, FPS {}.\n" .format(self.all_args.scenario, self.algorithm_name, self.experiment_name, episode, episodes, total_num_steps, self.num_env_steps, int(total_num_steps / (end - start)))) self.log_train(train_infos, total_num_steps) if len(done_episodes_rewards) > 0: aver_episode_rewards = np.mean(done_episodes_rewards) print("some episodes done, average rewards: ", aver_episode_rewards) self.writter.add_scalars("train_episode_rewards", {"aver_rewards": aver_episode_rewards}, total_num_steps) # eval if episode % self.eval_interval == 0 and self.use_eval: self.eval(total_num_steps) def warmup(self): # reset env obs, share_obs, _ = self.envs.reset() # replay buffer if not self.use_centralized_V: share_obs = obs for agent_id in range(self.num_agents): self.buffer[agent_id].share_obs[0] = share_obs[:, agent_id].copy() self.buffer[agent_id].obs[0] = obs[:, agent_id].copy() @torch.no_grad() def collect(self, step): value_collector = [] action_collector = [] action_log_prob_collector = [] rnn_state_collector = [] rnn_state_critic_collector = [] for agent_id in range(self.num_agents): self.trainer[agent_id].prep_rollout() value, action, action_log_prob, rnn_state, rnn_state_critic \ = self.trainer[agent_id].policy.get_actions(self.buffer[agent_id].share_obs[step], self.buffer[agent_id].obs[step], self.buffer[agent_id].rnn_states[step], self.buffer[agent_id].rnn_states_critic[step], self.buffer[agent_id].masks[step]) value_collector.append(_t2n(value)) action_collector.append(_t2n(action)) action_log_prob_collector.append(_t2n(action_log_prob)) rnn_state_collector.append(_t2n(rnn_state)) rnn_state_critic_collector.append(_t2n(rnn_state_critic)) # [self.envs, agents, dim] values = np.array(value_collector).transpose(1, 0, 2) actions = np.array(action_collector).transpose(1, 0, 2) action_log_probs = np.array(action_log_prob_collector).transpose(1, 0, 2) rnn_states = np.array(rnn_state_collector).transpose(1, 0, 2, 3) rnn_states_critic = np.array(rnn_state_critic_collector).transpose(1, 0, 2, 3) return values, 
actions, action_log_probs, rnn_states, rnn_states_critic def insert(self, data): obs, share_obs, rewards, dones, infos, \ values, actions, action_log_probs, rnn_states, rnn_states_critic = data dones_env = np.all(dones, axis=1) rnn_states[dones_env == True] = np.zeros( ((dones_env == True).sum(), self.num_agents, self.recurrent_N, self.hidden_size), dtype=np.float32) rnn_states_critic[dones_env == True] = np.zeros( ((dones_env == True).sum(), self.num_agents, *self.buffer[0].rnn_states_critic.shape[2:]), dtype=np.float32) masks = np.ones((self.n_rollout_threads, self.num_agents, 1), dtype=np.float32) masks[dones_env == True] = np.zeros(((dones_env == True).sum(), self.num_agents, 1), dtype=np.float32) active_masks = np.ones((self.n_rollout_threads, self.num_agents, 1), dtype=np.float32) active_masks[dones == True] = np.zeros(((dones == True).sum(), 1), dtype=np.float32) active_masks[dones_env == True] = np.ones(((dones_env == True).sum(), self.num_agents, 1), dtype=np.float32) if not self.use_centralized_V: share_obs = obs for agent_id in range(self.num_agents): self.buffer[agent_id].insert(share_obs[:, agent_id], obs[:, agent_id], rnn_states[:, agent_id], rnn_states_critic[:, agent_id], actions[:, agent_id], action_log_probs[:, agent_id], values[:, agent_id], rewards[:, agent_id], masks[:, agent_id], None, active_masks[:, agent_id], None) def log_train(self, train_infos, total_num_steps): print("average_step_rewards is {}.".format(np.mean(self.buffer[0].rewards))) for agent_id in range(self.num_agents): train_infos[agent_id]["average_step_rewards"] = np.mean(self.buffer[agent_id].rewards) for k, v in train_infos[agent_id].items(): agent_k = "agent%i/" % agent_id + k if self.use_wandb: wandb.log({agent_k: v}, step=total_num_steps) else: self.writter.add_scalars(agent_k, {agent_k: v}, total_num_steps) @torch.no_grad() def eval(self, total_num_steps): eval_episode = 0 eval_episode_rewards = [] one_episode_rewards = [] for eval_i in range(self.n_eval_rollout_threads): one_episode_rewards.append([]) eval_episode_rewards.append([]) eval_obs, eval_share_obs, _ = self.eval_envs.reset() eval_rnn_states = np.zeros((self.n_eval_rollout_threads, self.num_agents, self.recurrent_N, self.hidden_size), dtype=np.float32) eval_masks = np.ones((self.n_eval_rollout_threads, self.num_agents, 1), dtype=np.float32) while True: eval_actions_collector = [] eval_rnn_states_collector = [] for agent_id in range(self.num_agents): self.trainer[agent_id].prep_rollout() eval_actions, temp_rnn_state = \ self.trainer[agent_id].policy.act(eval_obs[:, agent_id], eval_rnn_states[:, agent_id], eval_masks[:, agent_id], deterministic=True) eval_rnn_states[:, agent_id] = _t2n(temp_rnn_state) eval_actions_collector.append(_t2n(eval_actions)) eval_actions = np.array(eval_actions_collector).transpose(1, 0, 2) # Obser reward and next obs eval_obs, eval_share_obs, eval_rewards, eval_dones, eval_infos, _ = self.eval_envs.step( eval_actions) for eval_i in range(self.n_eval_rollout_threads): one_episode_rewards[eval_i].append(eval_rewards[eval_i]) eval_dones_env = np.all(eval_dones, axis=1) eval_rnn_states[eval_dones_env == True] = np.zeros( ((eval_dones_env == True).sum(), self.num_agents, self.recurrent_N, self.hidden_size), dtype=np.float32) eval_masks = np.ones((self.all_args.n_eval_rollout_threads, self.num_agents, 1), dtype=np.float32) eval_masks[eval_dones_env == True] = np.zeros(((eval_dones_env == True).sum(), self.num_agents, 1), dtype=np.float32) for eval_i in range(self.n_eval_rollout_threads): if eval_dones_env[eval_i]: 
eval_episode += 1 eval_episode_rewards[eval_i].append(np.sum(one_episode_rewards[eval_i], axis=0)) one_episode_rewards[eval_i] = [] if eval_episode >= self.all_args.eval_episodes: eval_episode_rewards = np.concatenate(eval_episode_rewards) eval_env_infos = {'eval_average_episode_rewards': eval_episode_rewards, 'eval_max_episode_rewards': [np.max(eval_episode_rewards)]} self.log_env(eval_env_infos, total_num_steps) print("eval_average_episode_rewards is {}.".format(np.mean(eval_episode_rewards))) break ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/runner/separated/mujoco_runner_mappo_lagr.py ================================================ import time from itertools import chain import wandb import numpy as np from functools import reduce import torch from mappo_lagrangian.runner.separated.base_runner_mappo_lagr import Runner def _t2n(x): return x.detach().cpu().numpy() class MujocoRunner(Runner): """Runner class to perform training, evaluation. and data collection for SMAC. See parent class for details.""" def __init__(self, config): super(MujocoRunner, self).__init__(config) def run(self): self.warmup() start = time.time() episodes = int(self.num_env_steps) // self.episode_length // self.n_rollout_threads train_episode_rewards = [0 for _ in range(self.n_rollout_threads)] train_episode_costs = [0 for _ in range(self.n_rollout_threads)] for episode in range(episodes): if self.use_linear_lr_decay: self.trainer.policy.lr_decay(episode, episodes) done_episodes_rewards = [] done_episodes_costs = [] for step in range(self.episode_length): # Sample actions values, actions, action_log_probs, rnn_states, rnn_states_critic, cost_preds, \ rnn_states_cost = self.collect(step) # Obser reward cost and next obs obs, share_obs, rewards, costs, dones, infos, _ = self.envs.step(actions) dones_env = np.all(dones, axis=1) reward_env = np.mean(rewards, axis=1).flatten() cost_env = np.mean(costs, axis=1).flatten() train_episode_rewards += reward_env train_episode_costs += cost_env for t in range(self.n_rollout_threads): if dones_env[t]: done_episodes_rewards.append(train_episode_rewards[t]) train_episode_rewards[t] = 0 done_episodes_costs.append(train_episode_costs[t]) train_episode_costs[t] = 0 data = obs, share_obs, rewards, costs, dones, infos, \ values, actions, action_log_probs, \ rnn_states, rnn_states_critic, cost_preds, rnn_states_cost # fixme: it's important!!! 
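# --- Editor's sketch (hypothetical sizes, not part of the original file): shapes of the
# safety-augmented rollout data assembled above; the cost terms mirror the reward terms and are
# sliced per agent in insert() below.
import numpy as np

_threads, _agents = 8, 2
_rewards    = np.zeros((_threads, _agents, 1))    # shared team reward, broadcast to every agent
_costs      = np.zeros((_threads, _agents, 1))    # per-step safety cost taken from info["cost"]
_cost_preds = np.zeros((_threads, _agents, 1))    # cost-critic predictions returned by collect()
# per-agent slice handed to SeparatedReplayBuffer.insert(...): shape (n_rollout_threads, 1),
# e.g. _rewards[:, 0], _costs[:, 0], _cost_preds[:, 0]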
# insert data into buffer self.insert(data) # compute return and update network self.compute() train_infos = self.train() # post process total_num_steps = (episode + 1) * self.episode_length * self.n_rollout_threads # save model if (episode % self.save_interval == 0 or episode == episodes - 1): self.save() # log information if episode % self.log_interval == 0: end = time.time() print("\n Scenario {} Algo {} Exp {} updates {}/{} episodes, total num timesteps {}/{}, FPS {}.\n" .format(self.all_args.scenario, self.algorithm_name, self.experiment_name, episode, episodes, total_num_steps, self.num_env_steps, int(total_num_steps / (end - start)))) self.log_train(train_infos, total_num_steps) if len(done_episodes_rewards) > 0: aver_episode_rewards = np.mean(done_episodes_rewards) aver_episode_costs = np.mean(done_episodes_costs) self.return_aver_cost(aver_episode_costs) print("some episodes done, average rewards: {}, average costs: {}".format(aver_episode_rewards, aver_episode_costs)) self.writter.add_scalars("train_episode_rewards", {"aver_rewards": aver_episode_rewards}, total_num_steps) self.writter.add_scalars("train_episode_costs", {"aver_costs": aver_episode_costs}, total_num_steps) # eval if episode % self.eval_interval == 0 and self.use_eval: self.eval(total_num_steps) def return_aver_cost(self, aver_episode_costs): for agent_id in range(self.num_agents): self.buffer[agent_id].return_aver_insert(aver_episode_costs) def warmup(self): # reset env obs, share_obs, _ = self.envs.reset() # replay buffer if not self.use_centralized_V: share_obs = obs for agent_id in range(self.num_agents): # print(share_obs[:, agent_id]) self.buffer[agent_id].share_obs[0] = share_obs[:, agent_id].copy() self.buffer[agent_id].obs[0] = obs[:, agent_id].copy() @torch.no_grad() def collect(self, step): # values, actions, action_log_probs, rnn_states, rnn_states_critic, cost_preds, \ # rnn_states_cost = self.collect(step) value_collector = [] action_collector = [] action_log_prob_collector = [] rnn_state_collector = [] rnn_state_critic_collector = [] cost_preds_collector = [] rnn_states_cost_collector = [] for agent_id in range(self.num_agents): self.trainer[agent_id].prep_rollout() value, action, action_log_prob, rnn_state, rnn_state_critic, cost_pred, rnn_state_cost \ = self.trainer[agent_id].policy.get_actions(self.buffer[agent_id].share_obs[step], self.buffer[agent_id].obs[step], self.buffer[agent_id].rnn_states[step], self.buffer[agent_id].rnn_states_critic[step], self.buffer[agent_id].masks[step], rnn_states_cost=self.buffer[agent_id].rnn_states_cost[step] ) value_collector.append(_t2n(value)) action_collector.append(_t2n(action)) action_log_prob_collector.append(_t2n(action_log_prob)) rnn_state_collector.append(_t2n(rnn_state)) rnn_state_critic_collector.append(_t2n(rnn_state_critic)) cost_preds_collector.append(_t2n(cost_pred)) rnn_states_cost_collector.append(_t2n(rnn_state_cost)) # [self.envs, agents, dim] values = np.array(value_collector).transpose(1, 0, 2) actions = np.array(action_collector).transpose(1, 0, 2) action_log_probs = np.array(action_log_prob_collector).transpose(1, 0, 2) rnn_states = np.array(rnn_state_collector).transpose(1, 0, 2, 3) rnn_states_critic = np.array(rnn_state_critic_collector).transpose(1, 0, 2, 3) cost_preds = np.array(cost_preds_collector).transpose(1, 0, 2) rnn_states_cost = np.array(rnn_states_cost_collector).transpose(1, 0, 2, 3) return values, actions, action_log_probs, rnn_states, rnn_states_critic, cost_preds, rnn_states_cost def insert(self, data): obs, share_obs, 
rewards, costs, dones, infos, \ values, actions, action_log_probs, rnn_states, rnn_states_critic, cost_preds, rnn_states_cost = data # fixme:!!! # print("insert--rewards", rewards) dones_env = np.all(dones, axis=1) rnn_states[dones_env == True] = np.zeros( ((dones_env == True).sum(), self.num_agents, self.recurrent_N, self.hidden_size), dtype=np.float32) rnn_states_critic[dones_env == True] = np.zeros( ((dones_env == True).sum(), self.num_agents, *self.buffer[0].rnn_states_critic.shape[2:]), dtype=np.float32) rnn_states_cost[dones_env == True] = np.zeros( ((dones_env == True).sum(), self.num_agents, *self.buffer[0].rnn_states_cost.shape[2:]), dtype=np.float32) masks = np.ones((self.n_rollout_threads, self.num_agents, 1), dtype=np.float32) masks[dones_env == True] = np.zeros(((dones_env == True).sum(), self.num_agents, 1), dtype=np.float32) active_masks = np.ones((self.n_rollout_threads, self.num_agents, 1), dtype=np.float32) active_masks[dones == True] = np.zeros(((dones == True).sum(), 1), dtype=np.float32) active_masks[dones_env == True] = np.ones(((dones_env == True).sum(), self.num_agents, 1), dtype=np.float32) if not self.use_centralized_V: share_obs = obs for agent_id in range(self.num_agents): self.buffer[agent_id].insert(share_obs[:, agent_id], obs[:, agent_id], rnn_states[:, agent_id], rnn_states_critic[:, agent_id], actions[:, agent_id], action_log_probs[:, agent_id], values[:, agent_id], rewards[:, agent_id], masks[:, agent_id], None, active_masks[:, agent_id], None, costs=costs[:, agent_id], cost_preds=cost_preds[:, agent_id], rnn_states_cost=rnn_states_cost[:, agent_id]) def log_train(self, train_infos, total_num_steps): print("average_step_rewards is {}.".format(np.mean(self.buffer[0].rewards))) train_infos[0][0]["average_step_rewards"] = 0 for agent_id in range(self.num_agents): train_infos[0][agent_id]["average_step_rewards"] = np.mean(self.buffer[agent_id].rewards) for k, v in train_infos[0][agent_id].items(): agent_k = "agent%i/" % agent_id + k if self.use_wandb: wandb.log({agent_k: v}, step=total_num_steps) else: self.writter.add_scalars(agent_k, {agent_k: v}, total_num_steps) @torch.no_grad() def eval(self, total_num_steps): eval_episode = 0 eval_episode_rewards = [] one_episode_rewards = [] eval_episode_costs = [] one_episode_costs = [] for eval_i in range(self.n_eval_rollout_threads): one_episode_rewards.append([]) eval_episode_rewards.append([]) one_episode_costs.append([]) eval_episode_costs.append([]) eval_obs, eval_share_obs, _ = self.eval_envs.reset() eval_rnn_states = np.zeros((self.n_eval_rollout_threads, self.num_agents, self.recurrent_N, self.hidden_size), dtype=np.float32) eval_masks = np.ones((self.n_eval_rollout_threads, self.num_agents, 1), dtype=np.float32) while True: eval_actions_collector = [] eval_rnn_states_collector = [] for agent_id in range(self.num_agents): self.trainer[agent_id].prep_rollout() eval_actions, temp_rnn_state = \ self.trainer[agent_id].policy.act(eval_obs[:, agent_id], eval_rnn_states[:, agent_id], eval_masks[:, agent_id], deterministic=True) eval_rnn_states[:, agent_id] = _t2n(temp_rnn_state) eval_actions_collector.append(_t2n(eval_actions)) eval_actions = np.array(eval_actions_collector).transpose(1, 0, 2) # Obser reward and next obs eval_obs, eval_share_obs, eval_rewards, eval_costs, eval_dones, eval_infos, _ = self.eval_envs.step( eval_actions) for eval_i in range(self.n_eval_rollout_threads): one_episode_rewards[eval_i].append(eval_rewards[eval_i]) one_episode_costs[eval_i].append(eval_costs[eval_i]) eval_dones_env = 
np.all(eval_dones, axis=1) eval_rnn_states[eval_dones_env == True] = np.zeros( ((eval_dones_env == True).sum(), self.num_agents, self.recurrent_N, self.hidden_size), dtype=np.float32) eval_masks = np.ones((self.all_args.n_eval_rollout_threads, self.num_agents, 1), dtype=np.float32) eval_masks[eval_dones_env == True] = np.zeros(((eval_dones_env == True).sum(), self.num_agents, 1), dtype=np.float32) for eval_i in range(self.n_eval_rollout_threads): if eval_dones_env[eval_i]: eval_episode += 1 eval_episode_rewards[eval_i].append(np.sum(one_episode_rewards[eval_i], axis=0)) one_episode_rewards[eval_i] = [] if eval_episode >= self.all_args.eval_episodes: eval_episode_rewards = np.concatenate(eval_episode_rewards) eval_env_infos = {'eval_average_episode_rewards': eval_episode_rewards, 'eval_max_episode_rewards': [np.max(eval_episode_rewards)]} self.log_env(eval_env_infos, total_num_steps) print("eval_average_episode_rewards is {}.".format(np.mean(eval_episode_rewards))) break ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/scripts/__init__.py ================================================ ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/scripts/eval/eval_hanabi.py ================================================ #!/usr/bin/env python import sys import os import wandb import socket import setproctitle import numpy as np from pathlib import Path import torch from onpolicy.config import get_config from onpolicy.envs.hanabi.Hanabi_Env import HanabiEnv from onpolicy.envs.env_wrappers import ChooseSubprocVecEnv, ChooseDummyVecEnv def make_train_env(all_args): def get_env_fn(rank): def init_env(): if all_args.env_name == "Hanabi": assert all_args.num_agents > 1 and all_args.num_agents < 6, ( "num_agents can be only between 2-5.") env = HanabiEnv(all_args, (all_args.seed + rank * 1000)) else: print("Can not support the " + all_args.env_name + "environment.") raise NotImplementedError env.seed(all_args.seed + rank * 1000) return env return init_env if all_args.n_rollout_threads == 1: return ChooseDummyVecEnv([get_env_fn(0)]) else: return ChooseSubprocVecEnv([get_env_fn(i) for i in range(all_args.n_rollout_threads)]) def make_eval_env(all_args): def get_env_fn(rank): def init_env(): if all_args.env_name == "Hanabi": assert all_args.num_agents > 1 and all_args.num_agents < 6, ( "num_agents can be only between 2-5.") env = HanabiEnv( all_args, (all_args.seed * 50000 + rank * 10000)) else: print("Can not support the " + all_args.env_name + "environment.") raise NotImplementedError env.seed(all_args.seed * 50000 + rank * 10000) return env return init_env if all_args.n_eval_rollout_threads == 1: return ChooseDummyVecEnv([get_env_fn(0)]) else: return ChooseSubprocVecEnv([get_env_fn(i) for i in range(all_args.n_eval_rollout_threads)]) def parse_args(args, parser): parser.add_argument('--hanabi_name', type=str, default='Hanabi-Very-Small', help="Which env to run on") parser.add_argument('--num_agents', type=int, default=2, help="number of players") all_args = parser.parse_known_args(args)[0] return all_args def main(args): parser = get_config() all_args = parse_args(args, parser) if all_args.algorithm_name == "rmappo": assert (all_args.use_recurrent_policy or all_args.use_naive_recurrent_policy), ("check recurrent policy!") elif all_args.algorithm_name == "mappo": assert (all_args.use_recurrent_policy == False and all_args.use_naive_recurrent_policy == False), ("check recurrent policy!") else: raise NotImplementedError assert 
all_args.use_eval, ("u need to set use_eval be True") assert not (all_args.model_dir == None or all_args.model_dir == ""), ("set model_dir first") # cuda if all_args.cuda and torch.cuda.is_available(): print("choose to use gpu...") device = torch.device("cuda:0") torch.set_num_threads(all_args.n_training_threads) if all_args.cuda_deterministic: torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True else: print("choose to use cpu...") device = torch.device("cpu") torch.set_num_threads(all_args.n_training_threads) # run dir run_dir = Path(os.path.split(os.path.dirname(os.path.abspath(__file__)))[0] + "/results") / all_args.env_name / all_args.hanabi_name / all_args.algorithm_name / all_args.experiment_name if not run_dir.exists(): os.makedirs(str(run_dir)) # wandb if all_args.use_wandb: run = wandb.init(config=all_args, project=all_args.env_name, entity=all_args.user_name, notes=socket.gethostname(), name=str(all_args.algorithm_name) + "_" + str(all_args.experiment_name) + "_seed" + str(all_args.seed), group=all_args.hanabi_name, dir=str(run_dir), job_type="training", reinit=True) else: if not run_dir.exists(): curr_run = 'run1' else: exst_run_nums = [int(str(folder.name).split('run')[1]) for folder in run_dir.iterdir() if str(folder.name).startswith('run')] if len(exst_run_nums) == 0: curr_run = 'run1' else: curr_run = 'run%i' % (max(exst_run_nums) + 1) run_dir = run_dir / curr_run if not run_dir.exists(): os.makedirs(str(run_dir)) setproctitle.setproctitle(str(all_args.algorithm_name) + "-" + str( all_args.env_name) + "-" + str(all_args.experiment_name) + "@" + str(all_args.user_name)) # seed torch.manual_seed(all_args.seed) torch.cuda.manual_seed_all(all_args.seed) np.random.seed(all_args.seed) # env init envs = make_train_env(all_args) eval_envs = make_eval_env(all_args) if all_args.use_eval else None num_agents = all_args.num_agents config = { "all_args": all_args, "envs": envs, "eval_envs": eval_envs, "num_agents": num_agents, "device": device, "run_dir": run_dir } # run experiments if all_args.share_policy: from onpolicy.runner.shared.hanabi_runner_forward import HanabiRunner as Runner else: from onpolicy.runner.separated.hanabi_runner_forward import HanabiRunner as Runner runner = Runner(config) runner.eval_100k() # post process envs.close() if all_args.use_eval and eval_envs is not envs: eval_envs.close() if all_args.use_wandb: run.finish() else: runner.writter.export_scalars_to_json(str(runner.log_dir + '/summary.json')) runner.writter.close() if __name__ == "__main__": main(sys.argv[1:]) ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/scripts/train/__init__.py ================================================ ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/scripts/train/train_mujoco.py ================================================ #!/usr/bin/env python import sys import os curPath = os.path.abspath(__file__) if len(curPath.split('/'))==1: rootPath = '\\'.join(curPath.split('\\')[:-3]) else: rootPath = '/'.join(curPath.split('/')[:-3]) sys.path.append(os.path.split(rootPath)[0]) import wandb import socket import setproctitle import numpy as np from pathlib import Path import torch from mappo_lagrangian.config import get_config from mappo_lagrangian.envs.safety_ma_mujoco.safety_multiagent_mujoco import MujocoMulti from mappo_lagrangian.envs.env_wrappers import ShareSubprocVecEnv, ShareDummyVecEnv def make_train_env(all_args): def get_env_fn(rank): def init_env(): if 
all_args.env_name == "mujoco": env_args = {"scenario": all_args.scenario, "agent_conf": all_args.agent_conf, "agent_obsk": all_args.agent_obsk, "episode_limit": 1000} env = MujocoMulti(env_args=env_args) else: print("Can not support the " + all_args.env_name + "environment.") raise NotImplementedError env.seed(all_args.seed + rank * 1000) return env return init_env if all_args.n_rollout_threads == 1: return ShareDummyVecEnv([get_env_fn(0)]) else: return ShareSubprocVecEnv([get_env_fn(i) for i in range(all_args.n_rollout_threads)]) def make_eval_env(all_args): def get_env_fn(rank): def init_env(): if all_args.env_name == "mujoco": env_args = {"scenario": all_args.scenario, "agent_conf": all_args.agent_conf, "agent_obsk": all_args.agent_obsk, "episode_limit": 1000} env = MujocoMulti(env_args=env_args) else: print("Can not support the " + all_args.env_name + "environment.") raise NotImplementedError env.seed(all_args.seed * 50000 + rank * 10000) return env return init_env if all_args.n_eval_rollout_threads == 1: return ShareDummyVecEnv([get_env_fn(0)]) else: return ShareSubprocVecEnv([get_env_fn(i) for i in range(all_args.n_eval_rollout_threads)]) def parse_args(args, parser): parser.add_argument('--scenario', type=str, default='Hopper-v2', help="Which mujoco task to run on") parser.add_argument('--agent_conf', type=str, default='3x1') parser.add_argument('--agent_obsk', type=int, default=0) parser.add_argument("--add_move_state", action='store_true', default=False) parser.add_argument("--add_local_obs", action='store_true', default=False) parser.add_argument("--add_distance_state", action='store_true', default=False) parser.add_argument("--add_enemy_action_state", action='store_true', default=False) parser.add_argument("--add_agent_id", action='store_true', default=False) parser.add_argument("--add_visible_state", action='store_true', default=False) parser.add_argument("--add_xy_state", action='store_true', default=False) # agent-specific state should be designed carefully parser.add_argument("--use_state_agent", action='store_true', default=False) parser.add_argument("--use_mustalive", action='store_false', default=True) parser.add_argument("--add_center_xy", action='store_true', default=False) parser.add_argument("--use_single_network", action='store_true', default=False) all_args = parser.parse_known_args(args)[0] return all_args def main(args): parser = get_config() all_args = parse_args(args, parser) print("mumu config: ", all_args) if all_args.algorithm_name == "mappo_lagr": all_args.share_policy=False else: raise NotImplementedError # cuda # all_args.cuda = True if all_args.cuda and torch.cuda.is_available(): print("choose to use gpu...") device = torch.device("cuda:0") torch.set_num_threads(all_args.n_training_threads) if all_args.cuda_deterministic: torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True else: print("cuda flag: ", all_args.cuda, "Torch: ", torch.cuda.is_available()) print("choose to use cpu...") device = torch.device("cpu") torch.set_num_threads(all_args.n_training_threads) run_dir = Path(os.path.split(os.path.dirname(os.path.abspath(__file__)))[ 0] + "/results") / all_args.env_name / all_args.scenario / all_args.algorithm_name / all_args.experiment_name if not run_dir.exists(): os.makedirs(str(run_dir)) if all_args.use_wandb: run = wandb.init(config=all_args, project=all_args.env_name, entity=all_args.user_name, notes=socket.gethostname(), name=str(all_args.algorithm_name) + "_" + str(all_args.experiment_name) + "_seed" + 
str(all_args.seed), group=all_args.map_name, dir=str(run_dir), job_type="training", reinit=True) else: if not run_dir.exists(): curr_run = 'run1' else: exst_run_nums = [int(str(folder.name).split('run')[1]) for folder in run_dir.iterdir() if str(folder.name).startswith('run')] if len(exst_run_nums) == 0: curr_run = 'run1' else: curr_run = 'run%i' % (max(exst_run_nums) + 1) run_dir = run_dir / curr_run if not run_dir.exists(): os.makedirs(str(run_dir)) setproctitle.setproctitle( str(all_args.algorithm_name) + "-" + str(all_args.env_name) + "-" + str(all_args.experiment_name) + "@" + str( all_args.user_name)) # seed torch.manual_seed(all_args.seed) torch.cuda.manual_seed_all(all_args.seed) np.random.seed(all_args.seed) # env envs = make_train_env(all_args) eval_envs = make_eval_env(all_args) if all_args.use_eval else None num_agents = envs.n_agents config = { "all_args": all_args, "envs": envs, "eval_envs": eval_envs, "num_agents": num_agents, "device": device, "run_dir": run_dir } # run experiments if all_args.share_policy: from mappo_lagrangian.runner.shared.mujoco_runner import MujocoRunner as Runner else: #in origin code not implement this method if all_args.algorithm_name == "mappo_lagr": from mappo_lagrangian.runner.separated.mujoco_runner_mappo_lagr import MujocoRunner as Runner else: from mappo_lagrangian.runner.separated.mujoco_runner import MujocoRunner as Runner runner = Runner(config) runner.run() # post process envs.close() if all_args.use_eval and eval_envs is not envs: eval_envs.close() if all_args.use_wandb: run.finish() else: runner.writter.export_scalars_to_json(str(runner.log_dir + '/summary.json')) runner.writter.close() if __name__ == "__main__": main(sys.argv[1:]) ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/scripts/train_mujoco.sh ================================================ #!/bin/sh env="mujoco" scenario="Ant-v2" agent_conf="2x4" agent_obsk=1 algo="mappo_lagr" exp="rnn" seed_max=1 seed_=50 echo "env is ${env}, scenario is ${scenario}, algo is ${algo}, exp is ${exp}, max seed is ${seed_max}" for seed in `seq ${seed_max}`; do echo "seed is ${seed}:" CUDA_VISIBLE_DEVICES=0 python train/train_mujoco.py --env_name ${env} --algorithm_name ${algo} --experiment_name ${exp} --scenario ${scenario} --agent_conf ${agent_conf} --agent_obsk ${agent_obsk} --lr 9e-5 --critic_lr 5e-3 --std_x_coef 1 --std_y_coef 5e-1 --seed ${seed_} --n_training_threads 4 --n_rollout_threads 16 --num_mini_batch 40 --episode_length 1000 --num_env_steps 10000000 --ppo_epoch 5 --use_value_active_masks --add_center_xy --use_state_agent --safety_bound 0.2 --lamda_lagr 0.78 --lagrangian_coef_rate 1e-7 done ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/utils/__init__.py ================================================ ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/utils/multi_discrete.py ================================================ import gym import numpy as np # An old version of OpenAI Gym's multi_discrete.py. 
(Was getting affected by Gym updates) # (https://github.com/openai/gym/blob/1fb81d4e3fb780ccf77fec731287ba07da35eb84/gym/spaces/multi_discrete.py) class MultiDiscrete(gym.Space): """ - The multi-discrete action space consists of a series of discrete action spaces with different parameters - It can be adapted to both a Discrete action space or a continuous (Box) action space - It is useful to represent game controllers or keyboards where each key can be represented as a discrete action space - It is parametrized by passing an array of arrays containing [min, max] for each discrete action space where the discrete action space can take any integers from `min` to `max` (both inclusive) Note: A value of 0 always need to represent the NOOP action. e.g. Nintendo Game Controller - Can be conceptualized as 3 discrete action spaces: 1) Arrow Keys: Discrete 5 - NOOP[0], UP[1], RIGHT[2], DOWN[3], LEFT[4] - params: min: 0, max: 4 2) Button A: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1 3) Button B: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1 - Can be initialized as MultiDiscrete([ [0,4], [0,1], [0,1] ]) """ def __init__(self, array_of_param_array): self.low = np.array([x[0] for x in array_of_param_array]) self.high = np.array([x[1] for x in array_of_param_array]) self.num_discrete_space = self.low.shape[0] self.n = np.sum(self.high) + 2 def sample(self): """ Returns a array with one sample from each discrete action space """ # For each row: round(random .* (max - min) + min, 0) random_array = np.random.rand(self.num_discrete_space) return [int(x) for x in np.floor(np.multiply((self.high - self.low + 1.), random_array) + self.low)] def contains(self, x): return len(x) == self.num_discrete_space and (np.array(x) >= self.low).all() and (np.array(x) <= self.high).all() @property def shape(self): return self.num_discrete_space def __repr__(self): return "MultiDiscrete" + str(self.num_discrete_space) def __eq__(self, other): return np.array_equal(self.low, other.low) and np.array_equal(self.high, other.high) ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/utils/popart.py ================================================ import numpy as np import torch import torch.nn as nn class PopArt(nn.Module): """ Normalize a vector of observations - across the first norm_axes dimensions""" def __init__(self, input_shape, norm_axes=1, beta=0.99999, per_element_update=False, epsilon=1e-5, device=torch.device("cpu")): super(PopArt, self).__init__() self.input_shape = input_shape self.norm_axes = norm_axes self.epsilon = epsilon self.beta = beta self.per_element_update = per_element_update self.tpdv = dict(dtype=torch.float32, device=device) self.running_mean = nn.Parameter(torch.zeros(input_shape), requires_grad=False).to(**self.tpdv) self.running_mean_sq = nn.Parameter(torch.zeros(input_shape), requires_grad=False).to(**self.tpdv) self.debiasing_term = nn.Parameter(torch.tensor(0.0), requires_grad=False).to(**self.tpdv) def reset_parameters(self): self.running_mean.zero_() self.running_mean_sq.zero_() self.debiasing_term.zero_() def running_mean_var(self): debiased_mean = self.running_mean / self.debiasing_term.clamp(min=self.epsilon) debiased_mean_sq = self.running_mean_sq / self.debiasing_term.clamp(min=self.epsilon) debiased_var = (debiased_mean_sq - debiased_mean ** 2).clamp(min=1e-2) return debiased_mean, debiased_var def forward(self, input_vector, train=True): # Make sure input is float32 if type(input_vector) == np.ndarray: input_vector = 
torch.from_numpy(input_vector) input_vector = input_vector.to(**self.tpdv) if train: # Detach input before adding it to running means to avoid backpropping through it on # subsequent batches. detached_input = input_vector.detach() batch_mean = detached_input.mean(dim=tuple(range(self.norm_axes))) batch_sq_mean = (detached_input ** 2).mean(dim=tuple(range(self.norm_axes))) if self.per_element_update: batch_size = np.prod(detached_input.size()[:self.norm_axes]) weight = self.beta ** batch_size else: weight = self.beta self.running_mean.mul_(weight).add_(batch_mean * (1.0 - weight)) self.running_mean_sq.mul_(weight).add_(batch_sq_mean * (1.0 - weight)) self.debiasing_term.mul_(weight).add_(1.0 * (1.0 - weight)) mean, var = self.running_mean_var() out = (input_vector - mean[(None,) * self.norm_axes]) / torch.sqrt(var)[(None,) * self.norm_axes] return out def denormalize(self, input_vector): """ Transform normalized data back into original distribution """ if type(input_vector) == np.ndarray: input_vector = torch.from_numpy(input_vector) input_vector = input_vector.to(**self.tpdv) mean, var = self.running_mean_var() out = input_vector * torch.sqrt(var)[(None,) * self.norm_axes] + mean[(None,) * self.norm_axes] out = out.cpu().numpy() return out ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/utils/separated_buffer.py ================================================ import torch import numpy as np from collections import defaultdict from mappo_lagrangian.utils.util import check, get_shape_from_obs_space, get_shape_from_act_space def _flatten(T, N, x): return x.reshape(T * N, *x.shape[2:]) def _cast(x): return x.transpose(1,0,2).reshape(-1, *x.shape[2:]) class SeparatedReplayBuffer(object): def __init__(self, args, obs_space, share_obs_space, act_space): self.episode_length = args.episode_length self.n_rollout_threads = args.n_rollout_threads self.rnn_hidden_size = args.hidden_size self.recurrent_N = args.recurrent_N self.gamma = args.gamma self.gae_lambda = args.gae_lambda self._use_gae = args.use_gae self._use_popart = args.use_popart self._use_valuenorm = args.use_valuenorm self._use_proper_time_limits = args.use_proper_time_limits self.algo = args.algorithm_name obs_shape = get_shape_from_obs_space(obs_space) share_obs_shape = get_shape_from_obs_space(share_obs_space) if type(obs_shape[-1]) == list: obs_shape = obs_shape[:1] if type(share_obs_shape[-1]) == list: share_obs_shape = share_obs_shape[:1] self.aver_episode_costs = np.zeros((self.episode_length + 1, self.n_rollout_threads, *obs_shape), dtype=np.float32) self.share_obs = np.zeros((self.episode_length + 1, self.n_rollout_threads, *share_obs_shape), dtype=np.float32) self.obs = np.zeros((self.episode_length + 1, self.n_rollout_threads, *obs_shape), dtype=np.float32) self.rnn_states = np.zeros((self.episode_length + 1, self.n_rollout_threads, self.recurrent_N, self.rnn_hidden_size), dtype=np.float32) self.rnn_states_critic = np.zeros_like(self.rnn_states) self.rnn_states_cost = np.zeros_like(self.rnn_states) self.value_preds = np.zeros((self.episode_length + 1, self.n_rollout_threads, 1), dtype=np.float32) self.returns = np.zeros((self.episode_length + 1, self.n_rollout_threads, 1), dtype=np.float32) if act_space.__class__.__name__ == 'Discrete': self.available_actions = np.ones((self.episode_length + 1, self.n_rollout_threads, act_space.n), dtype=np.float32) else: self.available_actions = None act_shape = get_shape_from_act_space(act_space) self.actions = np.zeros((self.episode_length, 
self.n_rollout_threads, act_shape), dtype=np.float32) self.action_log_probs = np.zeros((self.episode_length, self.n_rollout_threads, act_shape), dtype=np.float32) self.rewards = np.zeros((self.episode_length, self.n_rollout_threads, 1), dtype=np.float32) self.costs = np.zeros_like(self.rewards) self.cost_preds = np.zeros_like(self.value_preds) self.cost_returns = np.zeros_like(self.returns) self.masks = np.ones((self.episode_length + 1, self.n_rollout_threads, 1), dtype=np.float32) self.bad_masks = np.ones_like(self.masks) self.active_masks = np.ones_like(self.masks) self.factor = None self.step = 0 def update_factor(self, factor): self.factor = factor.copy() def return_aver_insert(self, aver_episode_costs): self.aver_episode_costs = aver_episode_costs.copy() def insert(self, share_obs, obs, rnn_states, rnn_states_critic, actions, action_log_probs, value_preds, rewards, masks, bad_masks=None, active_masks=None, available_actions=None, costs=None, cost_preds=None, rnn_states_cost=None, aver_episode_costs = 0): self.share_obs[self.step + 1] = share_obs.copy() self.obs[self.step + 1] = obs.copy() self.rnn_states[self.step + 1] = rnn_states.copy() self.rnn_states_critic[self.step + 1] = rnn_states_critic.copy() self.actions[self.step] = actions.copy() self.action_log_probs[self.step] = action_log_probs.copy() self.value_preds[self.step] = value_preds.copy() self.rewards[self.step] = rewards.copy() self.masks[self.step + 1] = masks.copy() if bad_masks is not None: self.bad_masks[self.step + 1] = bad_masks.copy() if active_masks is not None: self.active_masks[self.step + 1] = active_masks.copy() if available_actions is not None: self.available_actions[self.step + 1] = available_actions.copy() if costs is not None: self.costs[self.step] = costs.copy() if cost_preds is not None: self.cost_preds[self.step] = cost_preds.copy() if rnn_states_cost is not None: self.rnn_states_cost[self.step + 1] = rnn_states_cost.copy() self.step = (self.step + 1) % self.episode_length def chooseinsert(self, share_obs, obs, rnn_states, rnn_states_critic, actions, action_log_probs, value_preds, rewards, masks, bad_masks=None, active_masks=None, available_actions=None): self.share_obs[self.step] = share_obs.copy() self.obs[self.step] = obs.copy() self.rnn_states[self.step + 1] = rnn_states.copy() self.rnn_states_critic[self.step + 1] = rnn_states_critic.copy() self.actions[self.step] = actions.copy() self.action_log_probs[self.step] = action_log_probs.copy() self.value_preds[self.step] = value_preds.copy() self.rewards[self.step] = rewards.copy() self.masks[self.step + 1] = masks.copy() if bad_masks is not None: self.bad_masks[self.step + 1] = bad_masks.copy() if active_masks is not None: self.active_masks[self.step] = active_masks.copy() if available_actions is not None: self.available_actions[self.step] = available_actions.copy() self.step = (self.step + 1) % self.episode_length def after_update(self): self.share_obs[0] = self.share_obs[-1].copy() self.obs[0] = self.obs[-1].copy() self.rnn_states[0] = self.rnn_states[-1].copy() self.rnn_states_critic[0] = self.rnn_states_critic[-1].copy() self.rnn_states_cost[0] = self.rnn_states_cost[-1].copy() self.masks[0] = self.masks[-1].copy() self.bad_masks[0] = self.bad_masks[-1].copy() self.active_masks[0] = self.active_masks[-1].copy() if self.available_actions is not None: self.available_actions[0] = self.available_actions[-1].copy() def chooseafter_update(self): self.rnn_states[0] = self.rnn_states[-1].copy() self.rnn_states_critic[0] = self.rnn_states_critic[-1].copy() 
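# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the original file). The buffer above keeps
# episode_length + 1 slots for observations, masks, and RNN states: step t of a
# rollout writes the next observation into slot t + 1 (insert()), and after_update()
# copies the last slot back to slot 0 so the next rollout can bootstrap from it.
# MiniBuffer below is a hypothetical, stripped-down version of that layout.
# ---------------------------------------------------------------------------
import numpy as np

class MiniBuffer:
    def __init__(self, episode_length, n_threads, obs_dim):
        self.episode_length = episode_length
        self.obs = np.zeros((episode_length + 1, n_threads, obs_dim), dtype=np.float32)
        self.rewards = np.zeros((episode_length, n_threads, 1), dtype=np.float32)
        self.step = 0

    def insert(self, obs, rewards):
        self.obs[self.step + 1] = obs          # next observation lands one slot ahead
        self.rewards[self.step] = rewards      # reward belongs to the current step
        self.step = (self.step + 1) % self.episode_length

    def after_update(self):
        self.obs[0] = self.obs[-1].copy()      # carry the final obs into the next rollout

buf = MiniBuffer(episode_length=4, n_threads=2, obs_dim=3)
for _ in range(4):
    buf.insert(np.random.randn(2, 3), np.random.randn(2, 1))
buf.after_update()
assert buf.step == 0 and buf.obs[0].shape == (2, 3)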
self.masks[0] = self.masks[-1].copy() self.bad_masks[0] = self.bad_masks[-1].copy() def compute_returns(self, next_value, value_normalizer=None): """ use proper time limits, the difference of use or not is whether use bad_mask """ if self._use_proper_time_limits: if self._use_gae: self.value_preds[-1] = next_value gae = 0 for step in reversed(range(self.rewards.shape[0])): if self._use_popart or self._use_valuenorm: delta = self.rewards[step] + self.gamma * value_normalizer.denormalize(self.value_preds[ step + 1]) * self.masks[step + 1] - value_normalizer.denormalize(self.value_preds[step]) gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae gae = gae * self.bad_masks[step + 1] self.returns[step] = gae + value_normalizer.denormalize(self.value_preds[step]) else: delta = self.rewards[step] + self.gamma * self.value_preds[step + 1] * self.masks[step + 1] - self.value_preds[step] gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae gae = gae * self.bad_masks[step + 1] self.returns[step] = gae + self.value_preds[step] else: self.returns[-1] = next_value for step in reversed(range(self.rewards.shape[0])): if self._use_popart: self.returns[step] = (self.returns[step + 1] * self.gamma * self.masks[step + 1] + self.rewards[step]) * self.bad_masks[step + 1] \ + (1 - self.bad_masks[step + 1]) * value_normalizer.denormalize(self.value_preds[step]) else: self.returns[step] = (self.returns[step + 1] * self.gamma * self.masks[step + 1] + self.rewards[step]) * self.bad_masks[step + 1] \ + (1 - self.bad_masks[step + 1]) * self.value_preds[step] else: if self._use_gae: self.value_preds[-1] = next_value gae = 0 for step in reversed(range(self.rewards.shape[0])): if self._use_popart or self._use_valuenorm: delta = self.rewards[step] + self.gamma * value_normalizer.denormalize(self.value_preds[step + 1]) * self.masks[step + 1] - value_normalizer.denormalize(self.value_preds[step]) gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae self.returns[step] = gae + value_normalizer.denormalize(self.value_preds[step]) else: delta = self.rewards[step] + self.gamma * self.value_preds[step + 1] * self.masks[step + 1] - self.value_preds[step] gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae self.returns[step] = gae + self.value_preds[step] else: self.returns[-1] = next_value for step in reversed(range(self.rewards.shape[0])): self.returns[step] = self.returns[step + 1] * self.gamma * self.masks[step + 1] + self.rewards[step] def compute_cost_returns(self, next_cost, value_normalizer=None): if self._use_proper_time_limits: if self._use_gae: self.cost_preds[-1] = next_cost gae = 0 for step in reversed(range(self.costs.shape[0])): if self._use_popart or self._use_valuenorm: delta = self.costs[step] + self.gamma * value_normalizer.denormalize(self.cost_preds[step + 1]) * self.masks[step + 1] - value_normalizer.denormalize(self.cost_preds[step]) gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae gae = gae * self.bad_masks[step + 1] self.cost_returns[step] = gae + value_normalizer.denormalize(self.cost_preds[step]) else: delta = self.costs[step] + self.gamma * self.cost_preds[step + 1] * self.masks[step + 1] - self.cost_preds[step] gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae gae = gae * self.bad_masks[step + 1] self.cost_returns[step] = gae + self.cost_preds[step] else: self.cost_returns[-1] = next_cost for step in reversed(range(self.costs.shape[0])): if self._use_popart: self.cost_returns[step] = 
(self.cost_returns[step + 1] * self.gamma * self.masks[step + 1] + self.costs[step]) * self.bad_masks[step + 1] \ + (1 - self.bad_masks[step + 1]) * value_normalizer.denormalize(self.cost_preds[step]) else: self.cost_returns[step] = (self.cost_returns[step + 1] * self.gamma * self.masks[step + 1] + self.costs[step]) * self.bad_masks[step + 1] \ + (1 - self.bad_masks[step + 1]) * self.cost_preds[step] else: if self._use_gae: self.cost_preds[-1] = next_cost gae = 0 for step in reversed(range(self.costs.shape[0])): if self._use_popart or self._use_valuenorm: delta = self.costs[step] + self.gamma * value_normalizer.denormalize(self.cost_preds[step + 1]) * self.masks[step + 1] - value_normalizer.denormalize(self.cost_preds[step]) gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae self.cost_returns[step] = gae + value_normalizer.denormalize(self.cost_preds[step]) else: delta = self.costs[step] + self.gamma * self.cost_preds[step + 1] * self.masks[step + 1] - self.cost_preds[step] gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae self.cost_returns[step] = gae + self.cost_preds[step] else: self.cost_returns[-1] = next_cost for step in reversed(range(self.costs.shape[0])): self.cost_returns[step] = self.cost_returns[step + 1] * self.gamma * self.masks[step + 1] + self.costs[step] def feed_forward_generator(self, advantages, num_mini_batch=None, mini_batch_size=None, cost_adv=None): episode_length, n_rollout_threads = self.rewards.shape[0:2] batch_size = n_rollout_threads * episode_length if mini_batch_size is None: assert batch_size >= num_mini_batch, ( "PPO requires the number of processes ({}) " "* number of steps ({}) = {} " "to be greater than or equal to the number of PPO mini batches ({})." "".format(n_rollout_threads, episode_length, n_rollout_threads * episode_length, num_mini_batch)) mini_batch_size = batch_size // num_mini_batch rand = torch.randperm(batch_size).numpy() sampler = [rand[i*mini_batch_size:(i+1)*mini_batch_size] for i in range(num_mini_batch)] share_obs = self.share_obs[:-1].reshape(-1, *self.share_obs.shape[2:]) obs = self.obs[:-1].reshape(-1, *self.obs.shape[2:]) rnn_states = self.rnn_states[:-1].reshape(-1, *self.rnn_states.shape[2:]) rnn_states_critic = self.rnn_states_critic[:-1].reshape(-1, *self.rnn_states_critic.shape[2:]) rnn_states_cost = self.rnn_states_cost[:-1].reshape(-1, *self.rnn_states_cost.shape[2:]) actions = self.actions.reshape(-1, self.actions.shape[-1]) if self.available_actions is not None: available_actions = self.available_actions[:-1].reshape(-1, self.available_actions.shape[-1]) value_preds = self.value_preds[:-1].reshape(-1, 1) returns = self.returns[:-1].reshape(-1, 1) cost_preds = self.cost_preds[:-1].reshape(-1, 1) cost_returns = self.cost_returns[:-1].reshape(-1, 1) masks = self.masks[:-1].reshape(-1, 1) active_masks = self.active_masks[:-1].reshape(-1, 1) action_log_probs = self.action_log_probs.reshape(-1, self.action_log_probs.shape[-1]) aver_episode_costs = self.aver_episode_costs if self.factor is not None: # factor = self.factor.reshape(-1,1) factor = self.factor.reshape(-1, self.factor.shape[-1]) advantages = advantages.reshape(-1, 1) if cost_adv is not None: cost_adv = cost_adv.reshape(-1, 1) for indices in sampler: # obs size [T+1 N Dim]-->[T N Dim]-->[T*N,Dim]-->[index,Dim] share_obs_batch = share_obs[indices] obs_batch = obs[indices] rnn_states_batch = rnn_states[indices] rnn_states_critic_batch = rnn_states_critic[indices] rnn_states_cost_batch = rnn_states_cost[indices] actions_batch = 
actions[indices] if self.available_actions is not None: available_actions_batch = available_actions[indices] else: available_actions_batch = None value_preds_batch = value_preds[indices] return_batch = returns[indices] cost_preds_batch = cost_preds[indices] cost_return_batch = cost_returns[indices] masks_batch = masks[indices] active_masks_batch = active_masks[indices] old_action_log_probs_batch = action_log_probs[indices] if advantages is None: adv_targ = None else: adv_targ = advantages[indices] if cost_adv is None: cost_adv_targ = None else: cost_adv_targ = cost_adv[indices] if self.factor is None: yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch else: if self.algo == "mappo_lagr": factor_batch = factor[indices] yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch, factor_batch, cost_preds_batch, cost_return_batch, rnn_states_cost_batch, cost_adv_targ, aver_episode_costs else: factor_batch = factor[indices] yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch, factor_batch def naive_recurrent_generator(self, advantages, num_mini_batch, cost_adv=None): n_rollout_threads = self.rewards.shape[1] assert n_rollout_threads >= num_mini_batch, ( "PPO requires the number of processes ({}) " "to be greater than or equal to the number of " "PPO mini batches ({}).".format(n_rollout_threads, num_mini_batch)) num_envs_per_batch = n_rollout_threads // num_mini_batch perm = torch.randperm(n_rollout_threads).numpy() for start_ind in range(0, n_rollout_threads, num_envs_per_batch): share_obs_batch = [] obs_batch = [] rnn_states_batch = [] rnn_states_critic_batch = [] rnn_states_cost_batch = [] actions_batch = [] available_actions_batch = [] value_preds_batch = [] cost_preds_batch = [] return_batch = [] cost_return_batch = [] masks_batch = [] active_masks_batch = [] old_action_log_probs_batch = [] adv_targ = [] cost_adv_targ = [] factor_batch = [] for offset in range(num_envs_per_batch): ind = perm[start_ind + offset] share_obs_batch.append(self.share_obs[:-1, ind]) obs_batch.append(self.obs[:-1, ind]) rnn_states_batch.append(self.rnn_states[0:1, ind]) rnn_states_critic_batch.append(self.rnn_states_critic[0:1, ind]) rnn_states_cost_batch.append(self.rnn_states_cost[0:1, ind]) actions_batch.append(self.actions[:, ind]) if self.available_actions is not None: available_actions_batch.append(self.available_actions[:-1, ind]) value_preds_batch.append(self.value_preds[:-1, ind]) cost_preds_batch.append(self.cost_preds[:-1, ind]) return_batch.append(self.returns[:-1, ind]) cost_return_batch.append(self.cost_returns[:-1, ind]) masks_batch.append(self.masks[:-1, ind]) active_masks_batch.append(self.active_masks[:-1, ind]) old_action_log_probs_batch.append(self.action_log_probs[:, ind]) adv_targ.append(advantages[:, ind]) if cost_adv is not None: cost_adv_targ.append(cost_adv[:, ind]) if self.factor is not None: factor_batch.append(self.factor[:, ind]) # [N[T, dim]] T, N = self.episode_length, num_envs_per_batch # These are all from_numpys of size (T, N, -1) share_obs_batch = np.stack(share_obs_batch, 1) obs_batch = 
np.stack(obs_batch, 1) actions_batch = np.stack(actions_batch, 1) if self.available_actions is not None: available_actions_batch = np.stack(available_actions_batch, 1) if self.factor is not None: factor_batch=np.stack(factor_batch,1) value_preds_batch = np.stack(value_preds_batch, 1) cost_preds_batch = np.stack(cost_preds_batch, 1) return_batch = np.stack(return_batch, 1) cost_return_batch = np.stack(cost_return_batch, 1) masks_batch = np.stack(masks_batch, 1) active_masks_batch = np.stack(active_masks_batch, 1) old_action_log_probs_batch = np.stack(old_action_log_probs_batch, 1) adv_targ = np.stack(adv_targ, 1) if cost_adv is not None: cost_adv_targ = np.stack(cost_adv_targ, 1) # States is just a (N, -1) from_numpy [N[1,dim]] rnn_states_batch = np.stack(rnn_states_batch, 1).reshape(N, *self.rnn_states.shape[2:]) rnn_states_critic_batch = np.stack(rnn_states_critic_batch, 1).reshape(N, *self.rnn_states_critic.shape[2:]) rnn_states_cost_batch = np.stack(rnn_states_cost_batch, 1).reshape(N, *self.rnn_states_cost.shape[2:]) # Flatten the (T, N, ...) from_numpys to (T * N, ...) share_obs_batch = _flatten(T, N, share_obs_batch) obs_batch = _flatten(T, N, obs_batch) actions_batch = _flatten(T, N, actions_batch) if self.available_actions is not None: available_actions_batch = _flatten(T, N, available_actions_batch) else: available_actions_batch = None if self.factor is not None: factor_batch=_flatten(T,N,factor_batch) value_preds_batch = _flatten(T, N, value_preds_batch) cost_preds_batch = _flatten(T, N, cost_preds_batch) return_batch = _flatten(T, N, return_batch) cost_return_batch = _flatten(T, N, cost_return_batch) masks_batch = _flatten(T, N, masks_batch) active_masks_batch = _flatten(T, N, active_masks_batch) old_action_log_probs_batch = _flatten(T, N, old_action_log_probs_batch) adv_targ = _flatten(T, N, adv_targ) if cost_adv is not None: cost_adv_targ = _flatten(T, N, cost_adv_targ) if self.factor is not None: if self.algo == "mappo_lagr": yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch, factor_batch, cost_preds_batch, cost_return_batch, rnn_states_cost_batch, cost_adv_targ # 17 value else: yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch, factor_batch # value else: yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/utils/shared_buffer.py ================================================ import torch import numpy as np from mappo_lagrangian.utils.util import get_shape_from_obs_space, get_shape_from_act_space def _flatten(T, N, x): return x.reshape(T * N, *x.shape[2:]) def _cast(x): return x.transpose(1, 2, 0, 3).reshape(-1, *x.shape[3:]) class SharedReplayBuffer(object): """ Buffer to store training data. :param args: (argparse.Namespace) arguments containing relevant model, policy, and env information. :param num_agents: (int) number of agents in the env. :param obs_space: (gym.Space) observation space of agents. :param cent_obs_space: (gym.Space) centralized observation space of agents. 
:param act_space: (gym.Space) action space for agents. """ def __init__(self, args, num_agents, obs_space, cent_obs_space, act_space): self.episode_length = args.episode_length self.n_rollout_threads = args.n_rollout_threads self.hidden_size = args.hidden_size self.recurrent_N = args.recurrent_N self.gamma = args.gamma self.gae_lambda = args.gae_lambda self._use_gae = args.use_gae self._use_popart = args.use_popart self._use_proper_time_limits = args.use_proper_time_limits obs_shape = get_shape_from_obs_space(obs_space) share_obs_shape = get_shape_from_obs_space(cent_obs_space) if not args.use_centralized_V: share_obs_shape = obs_shape if type(obs_shape[-1]) == list: obs_shape = obs_shape[:1] if type(share_obs_shape[-1]) == list: share_obs_shape = share_obs_shape[:1] self.share_obs = np.zeros((self.episode_length + 1, self.n_rollout_threads, num_agents, *share_obs_shape), dtype=np.float32) self.obs = np.zeros((self.episode_length + 1, self.n_rollout_threads, num_agents, *obs_shape), dtype=np.float32) self.rnn_states = np.zeros( (self.episode_length + 1, self.n_rollout_threads, num_agents, self.recurrent_N, self.hidden_size), dtype=np.float32) self.rnn_states_critic = np.zeros_like(self.rnn_states) self.value_preds = np.zeros( (self.episode_length + 1, self.n_rollout_threads, num_agents, 1), dtype=np.float32) self.returns = np.zeros_like(self.value_preds) if act_space.__class__.__name__ == 'Discrete': self.available_actions = np.ones((self.episode_length + 1, self.n_rollout_threads, num_agents, act_space.n), dtype=np.float32) else: self.available_actions = None act_shape = get_shape_from_act_space(act_space) self.actions = np.zeros( (self.episode_length, self.n_rollout_threads, num_agents, act_shape), dtype=np.float32) self.action_log_probs = np.zeros( (self.episode_length, self.n_rollout_threads, num_agents, act_shape), dtype=np.float32) self.rewards = np.zeros( (self.episode_length, self.n_rollout_threads, num_agents, 1), dtype=np.float32) self.masks = np.ones((self.episode_length + 1, self.n_rollout_threads, num_agents, 1), dtype=np.float32) self.bad_masks = np.ones_like(self.masks) self.active_masks = np.ones_like(self.masks) self.step = 0 def insert(self, share_obs, obs, rnn_states_actor, rnn_states_critic, actions, action_log_probs, value_preds, rewards, masks, bad_masks=None, active_masks=None, available_actions=None): """ Insert data into the buffer. :param share_obs: (argparse.Namespace) arguments containing relevant model, policy, and env information. :param obs: (np.ndarray) local agent observations. :param rnn_states_actor: (np.ndarray) RNN states for actor network. :param rnn_states_critic: (np.ndarray) RNN states for critic network. :param actions:(np.ndarray) actions taken by agents. :param action_log_probs:(np.ndarray) log probs of actions taken by agents :param value_preds: (np.ndarray) value function prediction at each step. :param rewards: (np.ndarray) reward collected at each step. :param masks: (np.ndarray) denotes whether the environment has terminated or not. :param bad_masks: (np.ndarray) action space for agents. :param active_masks: (np.ndarray) denotes whether an agent is active or dead in the env. :param available_actions: (np.ndarray) actions available to each agent. If None, all actions are available. 
""" self.share_obs[self.step + 1] = share_obs.copy() self.obs[self.step + 1] = obs.copy() self.rnn_states[self.step + 1] = rnn_states_actor.copy() self.rnn_states_critic[self.step + 1] = rnn_states_critic.copy() self.actions[self.step] = actions.copy() self.action_log_probs[self.step] = action_log_probs.copy() self.value_preds[self.step] = value_preds.copy() self.rewards[self.step] = rewards.copy() self.masks[self.step + 1] = masks.copy() if bad_masks is not None: self.bad_masks[self.step + 1] = bad_masks.copy() if active_masks is not None: self.active_masks[self.step + 1] = active_masks.copy() if available_actions is not None: self.available_actions[self.step + 1] = available_actions.copy() self.step = (self.step + 1) % self.episode_length def chooseinsert(self, share_obs, obs, rnn_states, rnn_states_critic, actions, action_log_probs, value_preds, rewards, masks, bad_masks=None, active_masks=None, available_actions=None): """ Insert data into the buffer. This insert function is used specifically for Hanabi, which is turn based. :param share_obs: (argparse.Namespace) arguments containing relevant model, policy, and env information. :param obs: (np.ndarray) local agent observations. :param rnn_states_actor: (np.ndarray) RNN states for actor network. :param rnn_states_critic: (np.ndarray) RNN states for critic network. :param actions:(np.ndarray) actions taken by agents. :param action_log_probs:(np.ndarray) log probs of actions taken by agents :param value_preds: (np.ndarray) value function prediction at each step. :param rewards: (np.ndarray) reward collected at each step. :param masks: (np.ndarray) denotes whether the environment has terminated or not. :param bad_masks: (np.ndarray) denotes indicate whether whether true terminal state or due to episode limit :param active_masks: (np.ndarray) denotes whether an agent is active or dead in the env. :param available_actions: (np.ndarray) actions available to each agent. If None, all actions are available. """ self.share_obs[self.step] = share_obs.copy() self.obs[self.step] = obs.copy() self.rnn_states[self.step + 1] = rnn_states.copy() self.rnn_states_critic[self.step + 1] = rnn_states_critic.copy() self.actions[self.step] = actions.copy() self.action_log_probs[self.step] = action_log_probs.copy() self.value_preds[self.step] = value_preds.copy() self.rewards[self.step] = rewards.copy() self.masks[self.step + 1] = masks.copy() if bad_masks is not None: self.bad_masks[self.step + 1] = bad_masks.copy() if active_masks is not None: self.active_masks[self.step] = active_masks.copy() if available_actions is not None: self.available_actions[self.step] = available_actions.copy() self.step = (self.step + 1) % self.episode_length def after_update(self): """Copy last timestep data to first index. Called after update to model.""" self.share_obs[0] = self.share_obs[-1].copy() self.obs[0] = self.obs[-1].copy() self.rnn_states[0] = self.rnn_states[-1].copy() self.rnn_states_critic[0] = self.rnn_states_critic[-1].copy() self.masks[0] = self.masks[-1].copy() self.bad_masks[0] = self.bad_masks[-1].copy() self.active_masks[0] = self.active_masks[-1].copy() if self.available_actions is not None: self.available_actions[0] = self.available_actions[-1].copy() def chooseafter_update(self): """Copy last timestep data to first index. 
This method is used for Hanabi.""" self.rnn_states[0] = self.rnn_states[-1].copy() self.rnn_states_critic[0] = self.rnn_states_critic[-1].copy() self.masks[0] = self.masks[-1].copy() self.bad_masks[0] = self.bad_masks[-1].copy() def compute_returns(self, next_value, value_normalizer=None): """ Compute returns either as discounted sum of rewards, or using GAE. :param next_value: (np.ndarray) value predictions for the step after the last episode step. :param value_normalizer: (PopArt) If not None, PopArt value normalizer instance. """ if self._use_gae: self.value_preds[-1] = next_value gae = 0 for step in reversed(range(self.rewards.shape[0])): if self._use_popart: delta = self.rewards[step] + self.gamma * value_normalizer.denormalize( self.value_preds[step + 1]) * self.masks[step + 1] \ - value_normalizer.denormalize(self.value_preds[step]) gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae self.returns[step] = gae + value_normalizer.denormalize(self.value_preds[step]) else: delta = self.rewards[step] + self.gamma * self.value_preds[step + 1] * self.masks[step + 1] - \ self.value_preds[step] gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae self.returns[step] = gae + self.value_preds[step] else: self.returns[-1] = next_value for step in reversed(range(self.rewards.shape[0])): self.returns[step] = self.returns[step + 1] * self.gamma * self.masks[step + 1] + self.rewards[step] def feed_forward_generator(self, advantages, num_mini_batch=None, mini_batch_size=None): """ Yield training data for MLP policies. :param advantages: (np.ndarray) advantage estimates. :param num_mini_batch: (int) number of minibatches to split the batch into. :param mini_batch_size: (int) number of samples in each minibatch. """ episode_length, n_rollout_threads, num_agents = self.rewards.shape[0:3] batch_size = n_rollout_threads * episode_length * num_agents if mini_batch_size is None: assert batch_size >= num_mini_batch, ( "PPO requires the number of processes ({}) " "* number of steps ({}) * number of agents ({}) = {} " "to be greater than or equal to the number of PPO mini batches ({})." 
"".format(n_rollout_threads, episode_length, num_agents, n_rollout_threads * episode_length * num_agents, num_mini_batch)) mini_batch_size = batch_size // num_mini_batch rand = torch.randperm(batch_size).numpy() sampler = [rand[i * mini_batch_size:(i + 1) * mini_batch_size] for i in range(num_mini_batch)] share_obs = self.share_obs[:-1].reshape(-1, *self.share_obs.shape[3:]) obs = self.obs[:-1].reshape(-1, *self.obs.shape[3:]) rnn_states = self.rnn_states[:-1].reshape(-1, *self.rnn_states.shape[3:]) rnn_states_critic = self.rnn_states_critic[:-1].reshape(-1, *self.rnn_states_critic.shape[3:]) actions = self.actions.reshape(-1, self.actions.shape[-1]) if self.available_actions is not None: available_actions = self.available_actions[:-1].reshape(-1, self.available_actions.shape[-1]) value_preds = self.value_preds[:-1].reshape(-1, 1) returns = self.returns[:-1].reshape(-1, 1) masks = self.masks[:-1].reshape(-1, 1) active_masks = self.active_masks[:-1].reshape(-1, 1) action_log_probs = self.action_log_probs.reshape(-1, self.action_log_probs.shape[-1]) advantages = advantages.reshape(-1, 1) for indices in sampler: # obs size [T+1 N M Dim]-->[T N M Dim]-->[T*N*M,Dim]-->[index,Dim] share_obs_batch = share_obs[indices] obs_batch = obs[indices] rnn_states_batch = rnn_states[indices] rnn_states_critic_batch = rnn_states_critic[indices] actions_batch = actions[indices] if self.available_actions is not None: available_actions_batch = available_actions[indices] else: available_actions_batch = None value_preds_batch = value_preds[indices] return_batch = returns[indices] masks_batch = masks[indices] active_masks_batch = active_masks[indices] old_action_log_probs_batch = action_log_probs[indices] if advantages is None: adv_targ = None else: adv_targ = advantages[indices] yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch,\ value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch,\ adv_targ, available_actions_batch def naive_recurrent_generator(self, advantages, num_mini_batch): """ Yield training data for non-chunked RNN training. :param advantages: (np.ndarray) advantage estimates. :param num_mini_batch: (int) number of minibatches to split the batch into. 
""" episode_length, n_rollout_threads, num_agents = self.rewards.shape[0:3] batch_size = n_rollout_threads * num_agents assert n_rollout_threads * num_agents >= num_mini_batch, ( "PPO requires the number of processes ({})* number of agents ({}) " "to be greater than or equal to the number of " "PPO mini batches ({}).".format(n_rollout_threads, num_agents, num_mini_batch)) num_envs_per_batch = batch_size // num_mini_batch perm = torch.randperm(batch_size).numpy() share_obs = self.share_obs.reshape(-1, batch_size, *self.share_obs.shape[3:]) obs = self.obs.reshape(-1, batch_size, *self.obs.shape[3:]) rnn_states = self.rnn_states.reshape(-1, batch_size, *self.rnn_states.shape[3:]) rnn_states_critic = self.rnn_states_critic.reshape(-1, batch_size, *self.rnn_states_critic.shape[3:]) actions = self.actions.reshape(-1, batch_size, self.actions.shape[-1]) if self.available_actions is not None: available_actions = self.available_actions.reshape(-1, batch_size, self.available_actions.shape[-1]) value_preds = self.value_preds.reshape(-1, batch_size, 1) returns = self.returns.reshape(-1, batch_size, 1) masks = self.masks.reshape(-1, batch_size, 1) active_masks = self.active_masks.reshape(-1, batch_size, 1) action_log_probs = self.action_log_probs.reshape(-1, batch_size, self.action_log_probs.shape[-1]) advantages = advantages.reshape(-1, batch_size, 1) for start_ind in range(0, batch_size, num_envs_per_batch): share_obs_batch = [] obs_batch = [] rnn_states_batch = [] rnn_states_critic_batch = [] actions_batch = [] available_actions_batch = [] value_preds_batch = [] return_batch = [] masks_batch = [] active_masks_batch = [] old_action_log_probs_batch = [] adv_targ = [] for offset in range(num_envs_per_batch): ind = perm[start_ind + offset] share_obs_batch.append(share_obs[:-1, ind]) obs_batch.append(obs[:-1, ind]) rnn_states_batch.append(rnn_states[0:1, ind]) rnn_states_critic_batch.append(rnn_states_critic[0:1, ind]) actions_batch.append(actions[:, ind]) if self.available_actions is not None: available_actions_batch.append(available_actions[:-1, ind]) value_preds_batch.append(value_preds[:-1, ind]) return_batch.append(returns[:-1, ind]) masks_batch.append(masks[:-1, ind]) active_masks_batch.append(active_masks[:-1, ind]) old_action_log_probs_batch.append(action_log_probs[:, ind]) adv_targ.append(advantages[:, ind]) # [N[T, dim]] T, N = self.episode_length, num_envs_per_batch # These are all from_numpys of size (T, N, -1) share_obs_batch = np.stack(share_obs_batch, 1) obs_batch = np.stack(obs_batch, 1) actions_batch = np.stack(actions_batch, 1) if self.available_actions is not None: available_actions_batch = np.stack(available_actions_batch, 1) value_preds_batch = np.stack(value_preds_batch, 1) return_batch = np.stack(return_batch, 1) masks_batch = np.stack(masks_batch, 1) active_masks_batch = np.stack(active_masks_batch, 1) old_action_log_probs_batch = np.stack(old_action_log_probs_batch, 1) adv_targ = np.stack(adv_targ, 1) # States is just a (N, dim) from_numpy [N[1,dim]] rnn_states_batch = np.stack(rnn_states_batch).reshape(N, *self.rnn_states.shape[3:]) rnn_states_critic_batch = np.stack(rnn_states_critic_batch).reshape(N, *self.rnn_states_critic.shape[3:]) # Flatten the (T, N, ...) from_numpys to (T * N, ...) 
share_obs_batch = _flatten(T, N, share_obs_batch) obs_batch = _flatten(T, N, obs_batch) actions_batch = _flatten(T, N, actions_batch) if self.available_actions is not None: available_actions_batch = _flatten(T, N, available_actions_batch) else: available_actions_batch = None value_preds_batch = _flatten(T, N, value_preds_batch) return_batch = _flatten(T, N, return_batch) masks_batch = _flatten(T, N, masks_batch) active_masks_batch = _flatten(T, N, active_masks_batch) old_action_log_probs_batch = _flatten(T, N, old_action_log_probs_batch) adv_targ = _flatten(T, N, adv_targ) yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch,\ value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch,\ adv_targ, available_actions_batch def recurrent_generator(self, advantages, num_mini_batch, data_chunk_length): """ Yield training data for chunked RNN training. :param advantages: (np.ndarray) advantage estimates. :param num_mini_batch: (int) number of minibatches to split the batch into. :param data_chunk_length: (int) length of sequence chunks with which to train RNN. """ episode_length, n_rollout_threads, num_agents = self.rewards.shape[0:3] batch_size = n_rollout_threads * episode_length * num_agents data_chunks = batch_size // data_chunk_length # [C=r*T*M/L] mini_batch_size = data_chunks // num_mini_batch rand = torch.randperm(data_chunks).numpy() sampler = [rand[i * mini_batch_size:(i + 1) * mini_batch_size] for i in range(num_mini_batch)] if len(self.share_obs.shape) > 4: share_obs = self.share_obs[:-1].transpose(1, 2, 0, 3, 4, 5).reshape(-1, *self.share_obs.shape[3:]) obs = self.obs[:-1].transpose(1, 2, 0, 3, 4, 5).reshape(-1, *self.obs.shape[3:]) else: share_obs = _cast(self.share_obs[:-1]) obs = _cast(self.obs[:-1]) actions = _cast(self.actions) action_log_probs = _cast(self.action_log_probs) advantages = _cast(advantages) value_preds = _cast(self.value_preds[:-1]) returns = _cast(self.returns[:-1]) masks = _cast(self.masks[:-1]) active_masks = _cast(self.active_masks[:-1]) # rnn_states = _cast(self.rnn_states[:-1]) # rnn_states_critic = _cast(self.rnn_states_critic[:-1]) rnn_states = self.rnn_states[:-1].transpose(1, 2, 0, 3, 4).reshape(-1, *self.rnn_states.shape[3:]) rnn_states_critic = self.rnn_states_critic[:-1].transpose(1, 2, 0, 3, 4).reshape(-1, *self.rnn_states_critic.shape[ 3:]) if self.available_actions is not None: available_actions = _cast(self.available_actions[:-1]) for indices in sampler: share_obs_batch = [] obs_batch = [] rnn_states_batch = [] rnn_states_critic_batch = [] actions_batch = [] available_actions_batch = [] value_preds_batch = [] return_batch = [] masks_batch = [] active_masks_batch = [] old_action_log_probs_batch = [] adv_targ = [] for index in indices: ind = index * data_chunk_length # size [T+1 N M Dim]-->[T N M Dim]-->[N,M,T,Dim]-->[N*M*T,Dim]-->[L,Dim] share_obs_batch.append(share_obs[ind:ind + data_chunk_length]) obs_batch.append(obs[ind:ind + data_chunk_length]) actions_batch.append(actions[ind:ind + data_chunk_length]) if self.available_actions is not None: available_actions_batch.append(available_actions[ind:ind + data_chunk_length]) value_preds_batch.append(value_preds[ind:ind + data_chunk_length]) return_batch.append(returns[ind:ind + data_chunk_length]) masks_batch.append(masks[ind:ind + data_chunk_length]) active_masks_batch.append(active_masks[ind:ind + data_chunk_length]) old_action_log_probs_batch.append(action_log_probs[ind:ind + data_chunk_length]) 
adv_targ.append(advantages[ind:ind + data_chunk_length]) # size [T+1 N M Dim]-->[T N M Dim]-->[N M T Dim]-->[N*M*T,Dim]-->[1,Dim] rnn_states_batch.append(rnn_states[ind]) rnn_states_critic_batch.append(rnn_states_critic[ind]) L, N = data_chunk_length, mini_batch_size # These are all from_numpys of size (L, N, Dim) share_obs_batch = np.stack(share_obs_batch, axis=1) obs_batch = np.stack(obs_batch, axis=1) actions_batch = np.stack(actions_batch, axis=1) if self.available_actions is not None: available_actions_batch = np.stack(available_actions_batch, axis=1) value_preds_batch = np.stack(value_preds_batch, axis=1) return_batch = np.stack(return_batch, axis=1) masks_batch = np.stack(masks_batch, axis=1) active_masks_batch = np.stack(active_masks_batch, axis=1) old_action_log_probs_batch = np.stack(old_action_log_probs_batch, axis=1) adv_targ = np.stack(adv_targ, axis=1) # States is just a (N, -1) from_numpy rnn_states_batch = np.stack(rnn_states_batch).reshape(N, *self.rnn_states.shape[3:]) rnn_states_critic_batch = np.stack(rnn_states_critic_batch).reshape(N, *self.rnn_states_critic.shape[3:]) # Flatten the (L, N, ...) from_numpys to (L * N, ...) share_obs_batch = _flatten(L, N, share_obs_batch) obs_batch = _flatten(L, N, obs_batch) actions_batch = _flatten(L, N, actions_batch) if self.available_actions is not None: available_actions_batch = _flatten(L, N, available_actions_batch) else: available_actions_batch = None value_preds_batch = _flatten(L, N, value_preds_batch) return_batch = _flatten(L, N, return_batch) masks_batch = _flatten(L, N, masks_batch) active_masks_batch = _flatten(L, N, active_masks_batch) old_action_log_probs_batch = _flatten(L, N, old_action_log_probs_batch) adv_targ = _flatten(L, N, adv_targ) yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch,\ value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch,\ adv_targ, available_actions_batch ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian/utils/util.py ================================================ import numpy as np import math import torch def check(input): if type(input) == np.ndarray: return torch.from_numpy(input) def get_gard_norm(it): sum_grad = 0 for x in it: if x.grad is None: continue sum_grad += x.grad.norm() ** 2 return math.sqrt(sum_grad) def update_linear_schedule(optimizer, epoch, total_num_epochs, initial_lr): """Decreases the learning rate linearly""" lr = initial_lr - (initial_lr * (epoch / float(total_num_epochs))) for param_group in optimizer.param_groups: param_group['lr'] = lr def huber_loss(e, d): a = (abs(e) <= d).float() b = (e > d).float() return a*e**2/2 + b*d*(abs(e)-d/2) def mse_loss(e): return e**2/2 def get_shape_from_obs_space(obs_space): if obs_space.__class__.__name__ == 'Box': obs_shape = obs_space.shape elif obs_space.__class__.__name__ == 'list': obs_shape = obs_space else: raise NotImplementedError return obs_shape def get_shape_from_act_space(act_space): if act_space.__class__.__name__ == 'Discrete': act_shape = 1 elif act_space.__class__.__name__ == "MultiDiscrete": act_shape = act_space.shape elif act_space.__class__.__name__ == "Box": act_shape = act_space.shape[0] elif act_space.__class__.__name__ == "MultiBinary": act_shape = act_space.shape[0] else: # agar act_shape = act_space[0].shape[0] + 1 return act_shape def tile_images(img_nhwc): """ Tile N images into one big PxQ image (P,Q) are chosen to be as close as possible, and if N is square, then P=Q. 
input: img_nhwc, list or array of images, ndim=4 once turned into array n = batch index, h = height, w = width, c = channel returns: bigim_HWc, ndarray with ndim=3 """ img_nhwc = np.asarray(img_nhwc) N, h, w, c = img_nhwc.shape H = int(np.ceil(np.sqrt(N))) W = int(np.ceil(float(N)/H)) img_nhwc = np.array(list(img_nhwc) + [img_nhwc[0]*0 for _ in range(N, H*W)]) img_HWhwc = img_nhwc.reshape(H, W, h, w, c) img_HhWwc = img_HWhwc.transpose(0, 2, 1, 3, 4) img_Hh_Ww_c = img_HhWwc.reshape(H*h, W*w, c) return img_Hh_Ww_c ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian.egg-info/PKG-INFO ================================================ Metadata-Version: 2.1 Name: mappo-lagrangian Version: 0.1.0 Summary: mappo_lagrangian algorithms of marlbenchmark Home-page: UNKNOWN Author: marl Author-email: marl@gmail.com License: UNKNOWN Description: # MAPPO Chao Yu*, Akash Velu*, Eugene Vinitsky, Yu Wang, Alexandre Bayen, and Yi Wu. Website: https://sites.google.com/view/mappo This repository implements MAPPO, a multi-agent variant of PPO. The implementation in this repositorory is used in the paper "The Surprising Effectiveness of MAPPO in Cooperative Multi-Agent Games" (https://arxiv.org/abs/2103.01955). This repository is heavily based on https://github.com/ikostrikov/pytorch-a2c-ppo-acktr-gail. ## Environments supported: - [StarCraftII (SMAC)](https://github.com/oxwhirl/smac) - [Hanabi](https://github.com/deepmind/hanabi-learning-environment) - [Multiagent Particle-World Environments (MPEs)](https://github.com/openai/multiagent-particle-envs) ## 1. Usage All core code is located within the onpolicy folder. The algorithms/ subfolder contains algorithm-specific code for MAPPO. * The envs/ subfolder contains environment wrapper implementations for the MPEs, SMAC, and Hanabi. * Code to perform training rollouts and policy updates are contained within the runner/ folder - there is a runner for each environment. * Executable scripts for training with default hyperparameters can be found in the scripts/ folder. The files are named in the following manner: train_algo_environment.sh. Within each file, the map name (in the case of SMAC and the MPEs) can be altered. * Python training scripts for each environment can be found in the scripts/train/ folder. * The config.py file contains relevant hyperparameter and env settings. Most hyperparameters are defaulted to the ones used in the paper; however, please refer to the appendix for a full list of hyperparameters used. ## 2. Installation Here we give an example installation on CUDA == 10.1. For non-GPU & other CUDA version installation, please refer to the [PyTorch website](https://pytorch.org/get-started/locally/). ``` Bash # create conda environment conda create -n marl python==3.6.1 conda activate marl pip install torch==1.5.1+cu101 torchvision==0.6.1+cu101 -f https://download.pytorch.org/whl/torch_stable.html ``` ``` # install on-policy package cd on-policy pip install -e . ``` Even though we provide requirement.txt, it may have redundancy. We recommend that the user try to install other required packages by running the code and finding which required package hasn't installed yet. ### 2.1 Install StarCraftII [4.10](http://blzdistsc2-a.akamaihd.net/Linux/SC2.4.10.zip) ``` Bash unzip SC2.4.10.zip # password is iagreetotheeula echo "export SC2PATH=~/StarCraftII/" > ~/.bashrc ``` * download SMAC Maps, and move it to `~/StarCraftII/Maps/`. 
* To use a stableid, copy `stableid.json` from https://github.com/Blizzard/s2client-proto.git to `~/StarCraftII/`. ### 2.2 Hanabi Environment code for Hanabi is developed from the open-source environment code, but has been slightly modified to fit the algorithms used here. To install, execute the following: ``` Bash pip install cffi cd envs/hanabi mkdir build & cd build cmake .. make -j ``` ### 2.3 Install MPE ``` Bash # install this package first pip install seaborn ``` There are 3 Cooperative scenarios in MPE: * simple_spread * simple_speaker_listener, which is 'Comm' scenario in paper * simple_reference ## 3.Train Here we use train_mpe.sh as an example: ``` cd onpolicy/scripts chmod +x ./train_mpe.sh ./train_mpe.sh ``` Local results are stored in subfold scripts/results. Note that we use Weights & Bias as the default visualization platform; to use Weights & Bias, please register and login to the platform first. More instructions for using Weights&Bias can be found in the official [documentation](https://docs.wandb.ai/). Adding the `--use_wandb` in command line or in the .sh file will use Tensorboard instead of Weights & Biases. We additionally provide `./eval_hanabi_forward.sh` for evaluating the hanabi score over 100k trials. ## 4. Publication If you find this repository useful, please cite our [paper](https://arxiv.org/abs/2103.01955): ``` @misc{yu2021surprising, title={The Surprising Effectiveness of MAPPO in Cooperative Multi-Agent Games}, author={Chao Yu and Akash Velu and Eugene Vinitsky and Yu Wang and Alexandre Bayen and Yi Wu}, year={2021}, eprint={2103.01955}, archivePrefix={arXiv}, primaryClass={cs.LG} } ``` Keywords: multi-agent reinforcement learning platform pytorch Platform: UNKNOWN Classifier: Development Status :: 3 - Alpha Classifier: Intended Audience :: Science/Research Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence Classifier: Topic :: Software Development :: Libraries :: Python Modules Classifier: Programming Language :: Python :: 3 Classifier: License :: OSI Approved :: MIT License Classifier: Operating System :: OS Independent Requires-Python: >=3.6 Description-Content-Type: text/markdown ================================================ FILE: MAPPO-Lagrangian/mappo_lagrangian.egg-info/SOURCES.txt ================================================ README.md setup.py mappo_lagrangian/__init__.py mappo_lagrangian/config.py mappo_lagrangian.egg-info/PKG-INFO mappo_lagrangian.egg-info/SOURCES.txt mappo_lagrangian.egg-info/dependency_links.txt mappo_lagrangian.egg-info/top_level.txt mappo_lagrangian/algorithms/__init__.py mappo_lagrangian/algorithms/r_mappo/__init__.py mappo_lagrangian/algorithms/r_mappo/r_mappo_lagr.py mappo_lagrangian/envs/__init__.py mappo_lagrangian/envs/env_wrappers.py mappo_lagrangian/envs/safety_ma_mujoco/__init__.py mappo_lagrangian/envs/safety_ma_mujoco/test.py mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/__init__.py mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/ant.py mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/coupled_half_cheetah.py mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/half_cheetah.py mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/hopper.py mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/humanoid.py mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/manyagent_ant.py mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/manyagent_swimmer.py 
mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/mujoco_env.py
mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/mujoco_multi.py
mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/multiagentenv.py
mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/obsk.py
mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/__init__.py
mappo_lagrangian/runner/__init__.py
mappo_lagrangian/runner/separated/__init__.py
mappo_lagrangian/runner/separated/base_runner.py
mappo_lagrangian/runner/separated/base_runner_mappo_lagr.py
mappo_lagrangian/runner/separated/mujoco_runner.py
mappo_lagrangian/runner/separated/mujoco_runner_mappo_lagr.py
mappo_lagrangian/scripts/__init__.py
mappo_lagrangian/scripts/train/__init__.py
mappo_lagrangian/scripts/train/train_mujoco.py
mappo_lagrangian/utils/__init__.py
mappo_lagrangian/utils/multi_discrete.py
mappo_lagrangian/utils/popart.py
mappo_lagrangian/utils/separated_buffer.py
mappo_lagrangian/utils/shared_buffer.py
mappo_lagrangian/utils/util.py


================================================
FILE: MAPPO-Lagrangian/mappo_lagrangian.egg-info/dependency_links.txt
================================================



================================================
FILE: MAPPO-Lagrangian/mappo_lagrangian.egg-info/top_level.txt
================================================
mappo_lagrangian


================================================
FILE: MAPPO-Lagrangian/setup.py
================================================
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
import setuptools


def get_version() -> str:
    # https://packaging.python.org/guides/single-sourcing-package-version/
    init = open(os.path.join("mappo_lagrangian", "__init__.py"), "r").read().split()
    return init[init.index("__version__") + 2][1:-1]


setup(
    name="mappo_lagrangian",  # Replace with your own username
    version=get_version(),
    description="mappo_lagrangian algorithms of marlbenchmark",
    # long_description=open("README.md", encoding="utf8").read(),
    long_description_content_type="text/markdown",
    author="marl",
    author_email="marl@gmail.com",
    packages=setuptools.find_packages(),
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Science/Research",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    keywords="multi-agent reinforcement learning platform pytorch",
    python_requires='>=3.6',
)


================================================
FILE: README.md
================================================
# Multi-Agent Constrained Policy Optimisation (MACPO)

This repository accompanies the paper **[Multi-Agent Constrained Policy Optimisation](http://arxiv.org/abs/2110.02793)**, in which we investigate the problem of safe MARL. The problem of multi-agent learning with safety constraints has not been rigorously studied: very few solutions have been proposed, nor is there a shareable testing environment or benchmark. To fill these gaps, in this work we formulate the safe multi-agent reinforcement learning problem as a constrained Markov game and solve it with trust region methods.
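Concretely, a simplified sketch of the formulation (see the paper for the full multi-agent definitions) is: the agents maximise the joint expected return while keeping each agent's expected discounted costs below fixed budgets.

```latex
\[
\max_{\pi}\; J(\pi)=\mathbb{E}_{\pi}\!\left[\sum_{t=0}^{\infty}\gamma^{t}\, r(s_t,\mathbf{a}_t)\right]
\quad\text{s.t.}\quad
J_{j}^{i}(\pi)=\mathbb{E}_{\pi}\!\left[\sum_{t=0}^{\infty}\gamma^{t}\, c_{j}^{i}(s_t,a_t^{i})\right]\le \bar{c}_{j}^{i}
\qquad \forall\, i,\, j,
\]
```

where \(i\) indexes agents, \(j\) indexes each agent's cost functions, and \(\bar{c}_{j}^{i}\) are the corresponding safety budgets. Roughly, MACPO enforces these constraints with CPO-style trust-region updates, while MAPPO-Lagrangian relaxes them into the objective with non-negative Lagrange multipliers.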
Our solutions, *Multi-Agent Constrained Policy Optimisation (MACPO)* and *MAPPO-Lagrangian*, leverage the theory of *Constrained Policy Optimisation (CPO)* and multi-agent trust region learning; critically, they enjoy theoretical guarantees of both monotonic improvement in reward and satisfaction of safety constraints at every iteration. Experimental results reveal that *MACPO* and *MAPPO-Lagrangian* significantly outperform baselines such as [MAPPO](https://arxiv.org/abs/2103.01955), [IPPO](https://arxiv.org/abs/2011.09533), and [HAPPO](https://arxiv.org/abs/2109.11251) in balancing reward performance against constraint satisfaction.

## Environments Supported:

- [Safety Multi-Agent Mujoco](https://github.com/chauncygu/Safe-Multi-Agent-Mujoco)

## 1. Installation

#### 1.1 Create Environment

``` Bash
# create conda environment
conda create -n macpo python==3.7
conda activate macpo
pip install -r requirements.txt
conda install pytorch torchvision torchaudio cudatoolkit=11.1 -c pytorch -c nvidia
```

```
cd MACPO/macpo (for the macpo algorithm) or cd MAPPO-Lagrangian/mappo_lagrangian (for the mappo_lagrangian algorithm)
pip install -e .
```

#### 1.2 Install Safety Multi-Agent Mujoco

- Install MuJoCo according to [mujoco-py](https://github.com/openai/mujoco-py) and the [MuJoCo website](https://www.roboti.us/license.html).
- Clone [Safety Multi-Agent Mujoco](https://github.com/chauncygu/Safe-Multi-Agent-Mujoco) into the environment path (in this repository, the path has already been set).

``` Bash
export LD_LIBRARY_PATH=${HOME}/.mujoco/mujoco200/bin
export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libGLEW.so
```

## 2. Train

```
cd MACPO/macpo/scripts or cd MAPPO-Lagrangian/mappo_lagrangian/scripts
chmod +x ./train_mujoco.sh
./train_mujoco.sh
```
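Before launching full training, it can help to check that the bundled environment loads at all. The snippet below is only a sketch under assumptions: it follows the `MujocoMulti` interface of the upstream multi-agent MuJoCo package this environment is derived from, and the module path, the `env_args` keys, and the exact return signature of `step` (the safety variant also reports costs) should be verified against `macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/mujoco_multi.py` and `test.py`.

```python
# Sketch only: minimal load-and-step check for the bundled Safety MA-MuJoCo env.
# Module path, env_args keys, and step() return values are assumptions based on
# the upstream multiagent_mujoco package; verify against mujoco_multi.py / test.py.
import numpy as np
from macpo.envs.safety_ma_mujoco.safety_multiagent_mujoco.mujoco_multi import MujocoMulti

env = MujocoMulti(env_args={"scenario": "Ant-v2",    # illustrative task name
                            "agent_conf": "2x4",      # two agents, four joints each
                            "agent_obsk": 1,          # neighbourhood depth for local obs
                            "episode_limit": 1000})
info = env.get_env_info()
env.reset()
actions = [np.random.uniform(-1.0, 1.0, info["n_actions"]) for _ in range(info["n_agents"])]
result = env.step(actions)  # the safety variant reports per-step costs as well; inspect the tuple
print("step() returned:", result)
```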
## 3. Results

Performance comparisons on the Safe ManyAgent Ant, Safe Ant, and Safe HalfCheetah tasks in terms of cost (first row) and reward (second row). The safety constraint values are 1 for ManyAgent Ant, 0.2 for Ant, and 5 for HalfCheetah. Our methods consistently achieve almost zero cost on all tasks, thus satisfying the safety constraints. In terms of reward, our methods outperform IPPO and MAPPO on some tasks but underperform HAPPO, which, like IPPO and MAPPO, does not enforce safety constraints.
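The near-zero costs follow from the constrained update itself. As an illustration only (a generic Lagrangian-relaxation step, not the exact code in `mappo_lagrangian/algorithms/r_mappo/r_mappo_lagr.py`), the multiplier that scales the cost penalty grows whenever the measured cost exceeds its budget and decays towards zero otherwise:

```python
# Illustrative sketch of a Lagrange-multiplier update for a cost constraint.
# The actual implementation in r_mappo_lagr.py may use a different rule.
def update_lagrange_multiplier(multiplier, mean_episode_cost, cost_limit, lr=0.01):
    """Increase the multiplier while the constraint is violated, decrease it
    otherwise, and keep it non-negative."""
    multiplier += lr * (mean_episode_cost - cost_limit)
    return max(0.0, multiplier)
```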
## 4. Demos

**Ant Task**: the corridor formed by the two walls is 10 m wide. The environment emits a cost of 1 for an agent if the distance between the robot and a wall is less than 1.8 m, or if the robot topples over.

|![](https://github.com/chauncygu/Multi-Agent-Constrained-Policy-Optimisation/blob/main/figures/202111071600-unsafe-end%2000_00_00-00_00_30.gif)|![](https://github.com/chauncygu/Multi-Agent-Constrained-Policy-Optimisation/blob/main/figures/202111140948-safe-end1%2000_00_00-00_00_30.gif)|
| :---: | :---: |
| Unsafe behaviour of HAPPO on the Ant-2x4 task. | Safe behaviour of MAPPO-Lagrangian on the Ant-2x4 task. |

**HalfCheetah Task**: In this task, the agents move inside a corridor (which constrains their movement but does not induce costs). Bombs also move inside the corridor. If an agent gets too close to a bomb (closer than 9 m), a cost of 1 is emitted and the bomb turns blood red.

|![](https://github.com/chauncygu/Multi-Agent-Constrained-Policy-Optimisation/blob/main/figures/202111140948-halfcheetah-unsafe-end%2000_00_00-00_00_30.gif)|![](https://github.com/chauncygu/Multi-Agent-Constrained-Policy-Optimisation/blob/main/figures/202111140948-halfcheetah-safe-end%2000_00_00-00_00_30.gif)|
| :---: | :---: |
| Unsafe behaviour of HAPPO on the HalfCheetah-2x3 task. | Safe behaviour of MAPPO-Lagrangian on the HalfCheetah-2x3 task. |

**ManyAgent Ant Task One**: In this ManyAgent Ant task, the corridor formed by the two walls is 9 m wide. The environment emits a cost of 1 for an agent if the distance between the robot and a wall is less than 1.8 m, or if the robot topples over.

|![](https://github.com/chauncygu/Multi-Agent-Constrained-Policy-Optimisation/blob/main/figures/sadppo-manyagent-ant--unsafe-end-have-word%2000_00_00-00_00_30.gif)|![](https://github.com/chauncygu/Multi-Agent-Constrained-Policy-Optimisation/blob/main/figures/safe-mappo-manyagent-ant--safe-end-have-word-01%2000_00_00-00_00_30.gif)|
| :---: | :---: |
| Unsafe behaviour of HAPPO on the ManyAgent Ant-2x3 task. | Safe behaviour of MAPPO-Lagrangian on the ManyAgent Ant-2x3 task. |

**ManyAgent Ant Task Two**: In this ManyAgent Ant task, the corridor is 12 m wide and its walls fold at an angle of 30 degrees. The environment emits a cost of 1 for an agent if the distance between the robot and a wall is less than 1.8 m, or if the robot topples over.

|![](https://github.com/chauncygu/Multi-Agent-Constrained-Policy-Optimisation/blob/main/figures/two-wall-sadppo-unsafe-end-have-word-manyagent-ant%2000_00_00-00_00_30.gif)|![](https://github.com/chauncygu/Multi-Agent-Constrained-Policy-Optimisation/blob/main/figures/two-wall-safe-mappo--manyagent-ant-safe-end-have-word-manyagent-ant%2000_00_00-00_00_30.gif)|![](https://github.com/chauncygu/Multi-Agent-Constrained-Policy-Optimisation/blob/main/figures/two-wall-macpo-manyagent-ant--safe-end-have-word%2000_00_00-00_00_30.gif)|
| :---: | :---: | :---: |
| Unsafe behaviour of HAPPO on the ManyAgent Ant-2x3 task. | Unsafe behaviour of MAPPO-Lagrangian on the ManyAgent Ant-2x3 task. | Safe behaviour of MACPO on the ManyAgent Ant-2x3 task. |

## 5.
Publication If you find the repository useful, please cite the [paper](https://arxiv.org/abs/2110.02793): ``` @article{gu2023safe, title={Safe Multi-Agent Reinforcement Learning for Multi-Robot Control}, author={Gu, Shangding and Kuba, Jakub Grudzien and Chen, Yuanpei and Du, Yali and Yang, Long and Knoll, Alois and Yang, Yaodong}, journal={Artificial Intelligence}, pages={103905}, year={2023}, publisher={Elsevier} } ``` ## Acknowledgments We thank the list of contributors from the following open source repositories: [MAPPO](https://github.com/marlbenchmark/on-policy), [HAPPO](https://github.com/cyanrain7/Trust-Region-Policy-Optimisation-in-Multi-Agent-Reinforcement-Learning), [safety-starter-agents](https://github.com/openai/safety-starter-agents), [CMBPO](https://github.com/anyboby/Constrained-Model-Based-Policy-Optimization). ================================================ FILE: environment.yaml ================================================ name: marl channels: - defaults dependencies: - _libgcc_mutex=0.1=main - _tflow_select=2.1.0=gpu - absl-py=0.9.0=py36_0 - astor=0.8.0=py36_0 - blas=1.0=mkl - c-ares=1.15.0=h7b6447c_1001 - ca-certificates=2020.1.1=0 - certifi=2020.4.5.2=py36_0 - cudatoolkit=10.0.130=0 - cudnn=7.6.5=cuda10.0_0 - cupti=10.0.130=0 - gast=0.2.2=py36_0 - google-pasta=0.2.0=py_0 - grpcio=1.14.1=py36h9ba97e2_0 - h5py=2.10.0=py36h7918eee_0 - hdf5=1.10.4=hb1b8bf9_0 - intel-openmp=2020.1=217 - keras-applications=1.0.8=py_0 - keras-preprocessing=1.1.0=py_1 - libedit=3.1=heed3624_0 - libffi=3.2.1=hd88cf55_4 - libgcc-ng=9.1.0=hdf63c60_0 - libgfortran-ng=7.3.0=hdf63c60_0 - libprotobuf=3.12.3=hd408876_0 - libstdcxx-ng=9.1.0=hdf63c60_0 - markdown=3.1.1=py36_0 - mkl=2020.1=217 - mkl-service=2.3.0=py36he904b0f_0 - mkl_fft=1.1.0=py36h23d657b_0 - mkl_random=1.1.1=py36h0573a6f_0 - ncurses=6.0=h9df7e31_2 - numpy=1.18.1=py36h4f9e942_0 - numpy-base=1.18.1=py36hde5b4d6_1 - openssl=1.0.2u=h7b6447c_0 - opt_einsum=3.1.0=py_0 - pip=20.1.1=py36_1 - protobuf=3.12.3=py36he6710b0_0 - python=3.6.2=hca45abc_19 - readline=7.0=ha6073c6_4 - scipy=1.4.1=py36h0b6359f_0 - setuptools=47.3.0=py36_0 - six=1.15.0=py_0 - sqlite=3.23.1=he433501_0 - tensorboard=2.0.0=pyhb38c66f_1 - tensorflow=2.0.0=gpu_py36h6b29c10_0 - tensorflow-base=2.0.0=gpu_py36h0ec5d1f_0 - tensorflow-estimator=2.0.0=pyh2649769_0 - tensorflow-gpu=2.0.0=h0d30ee6_0 - termcolor=1.1.0=py36_1 - tk=8.6.8=hbc83047_0 - werkzeug=0.16.1=py_0 - wheel=0.34.2=py36_0 - wrapt=1.12.1=py36h7b6447c_1 - xz=5.2.5=h7b6447c_0 - zlib=1.2.11=h7b6447c_3 - pip: - aiohttp==3.6.2 - aioredis==1.3.1 - astunparse==1.6.3 - async-timeout==3.0.1 - atari-py==0.2.6 - atomicwrites==1.2.1 - attrs==18.2.0 - beautifulsoup4==4.9.1 - blessings==1.7 - cachetools==4.1.1 - cffi==1.14.1 - chardet==3.0.4 - click==7.1.2 - cloudpickle==1.3.0 - colorama==0.4.3 - colorful==0.5.4 - configparser==5.0.1 - contextvars==2.4 - cycler==0.10.0 - cython==0.29.21 - deepdiff==4.3.2 - dill==0.3.2 - docker-pycreds==0.4.0 - docopt==0.6.2 - fasteners==0.15 - filelock==3.0.12 - funcsigs==1.0.2 - future==0.16.0 - gin==0.1.6 - gin-config==0.3.0 - gitdb==4.0.5 - gitpython==3.1.9 - glfw==1.12.0 - google==3.0.0 - google-api-core==1.22.1 - google-auth==1.21.0 - google-auth-oauthlib==0.4.1 - googleapis-common-protos==1.52.0 - gpustat==0.6.0 - gql==0.2.0 - graphql-core==1.1 - gym==0.17.2 - hiredis==1.1.0 - idna==2.7 - idna-ssl==1.1.0 - imageio==2.4.1 - immutables==0.14 - importlib-metadata==1.7.0 - joblib==0.16.0 - jsonnet==0.16.0 - jsonpickle==0.9.6 - jsonschema==3.2.0 - kiwisolver==1.0.1 - lockfile==0.12.2 - 
mappo==0.0.1 - matplotlib==3.0.0 - mock==2.0.0 - monotonic==1.5 - more-itertools==4.3.0 - mpi4py==3.0.3 - mpyq==0.2.5 - msgpack==1.0.0 - mujoco-py==2.0.2.13 - mujoco-worldgen==0.0.0 - multidict==4.7.6 - munch==2.3.2 - nvidia-ml-py3==7.352.0 - oauthlib==3.1.0 - opencensus==0.7.10 - opencensus-context==0.1.1 - opencv-python==4.2.0.34 - ordered-set==4.0.2 - packaging==20.4 - pandas==1.1.1 - pathlib2==2.3.2 - pathtools==0.1.2 - pbr==4.3.0 - pillow==5.3.0 - pluggy==0.7.1 - portpicker==1.2.0 - probscale==0.2.3 - progressbar2==3.53.1 - prometheus-client==0.8.0 - promise==2.3 - psutil==5.7.2 - py==1.6.0 - py-spy==0.3.3 - pyasn1==0.4.8 - pyasn1-modules==0.2.8 - pycparser==2.20 - pygame==1.9.4 - pyglet==1.5.0 - pyopengl==3.1.5 - pyopengl-accelerate==3.1.5 - pyparsing==2.2.2 - pyrsistent==0.16.0 - pysc2==3.0.0 - pytest==3.8.2 - python-dateutil==2.7.3 - python-utils==2.4.0 - pytz==2020.1 - pyyaml==3.13 - pyzmq==19.0.2 - ray==0.8.0 - redis==3.4.1 - requests==2.24.0 - requests-oauthlib==1.3.0 - rsa==4.6 - s2clientprotocol==4.10.1.75800.0 - s2protocol==4.11.4.78285.0 - sacred==0.7.2 - seaborn==0.10.1 - sentry-sdk==0.18.0 - shortuuid==1.0.1 - sk-video==1.1.10 - smmap==3.0.4 - snakeviz==1.0.0 - soupsieve==2.0.1 - subprocess32==3.5.4 - tabulate==0.8.7 - tensorboard-logger==0.1.0 - tensorboard-plugin-wit==1.7.0 - tensorboardx==2.0 - torch==1.5.1+cu101 - torchvision==0.6.1+cu101 - tornado==5.1.1 - tqdm==4.48.2 - typing-extensions==3.7.4.3 - urllib3==1.23 - wandb==0.10.5 - watchdog==0.10.3 - websocket-client==0.53.0 - whichcraft==0.5.2 - xmltodict==0.12.0 - yarl==1.5.1 - zipp==3.1.0 - zmq==0.0.0 ================================================ FILE: requirements.txt ================================================ absl-py==0.9.0 aiohttp==3.6.2 aioredis==1.3.1 astor==0.8.0 astunparse==1.6.3 async-timeout==3.0.1 atari-py==0.2.6 atomicwrites==1.2.1 attrs==18.2.0 beautifulsoup4==4.9.1 blessings==1.7 cachetools==4.1.1 certifi==2020.4.5.2 cffi==1.14.1 chardet==3.0.4 click==7.1.2 cloudpickle==1.3.0 colorama==0.4.3 colorful==0.5.4 configparser==5.0.1 contextvars==2.4 cycler==0.10.0 Cython==0.29.21 deepdiff==4.3.2 dill==0.3.2 docker-pycreds==0.4.0 docopt==0.6.2 fasteners==0.15 filelock==3.0.12 funcsigs==1.0.2 future==0.16.0 gast==0.2.2 gin==0.1.6 gin-config==0.3.0 gitdb==4.0.5 GitPython==3.1.9 glfw==1.12.0 google==3.0.0 google-api-core==1.22.1 google-auth==1.21.0 google-auth-oauthlib==0.4.1 google-pasta==0.2.0 googleapis-common-protos==1.52.0 gpustat==0.6.0 gql==0.2.0 graphql-core==1.1 grpcio==1.31.0 gym==0.17.2 h5py==2.10.0 hiredis==1.1.0 idna==2.7 idna-ssl==1.1.0 imageio==2.4.1 immutables==0.14 importlib-metadata==1.7.0 joblib==0.16.0 jsonnet==0.16.0 jsonpickle==0.9.6 jsonschema==3.2.0 Keras-Applications==1.0.8 Keras-Preprocessing==1.1.2 kiwisolver==1.0.1 lockfile==0.12.2 Markdown==3.1.1 matplotlib==3.0.0 mkl-fft==1.2.0 mkl-random==1.2.0 mkl-service==2.3.0 mock==2.0.0 monotonic==1.5 more-itertools==4.3.0 mpi4py==3.0.3 mpyq==0.2.5 msgpack==1.0.0 mujoco-py==2.0.2.8 multidict==4.7.6 munch==2.3.2 numpy nvidia-ml-py3==7.352.0 oauthlib==3.1.0 opencensus==0.7.10 opencensus-context==0.1.1 opencv-python==4.2.0.34 opt-einsum==3.1.0 ordered-set==4.0.2 packaging==20.4 pandas==1.1.1 pathlib2==2.3.2 pathtools==0.1.2 pbr==4.3.0 Pillow==5.3.0 pluggy==0.7.1 portpicker==1.2.0 probscale==0.2.3 progressbar2==3.53.1 prometheus-client==0.8.0 promise==2.3 protobuf==3.12.4 psutil==5.7.2 py==1.6.0 py-spy==0.3.3 pyasn1==0.4.8 pyasn1-modules==0.2.8 pycparser==2.20 pygame==1.9.4 pyglet==1.5.0 PyOpenGL==3.1.5 PyOpenGL-accelerate==3.1.5 
pyparsing==2.2.2 pyrsistent==0.16.0 PySC2==3.0.0 pytest==3.8.2 python-dateutil==2.7.3 python-utils==2.4.0 pytz==2020.1 PyYAML==3.13 pyzmq==19.0.2 redis==3.4.1 requests==2.24.0 requests-oauthlib==1.3.0 rsa==4.6 s2clientprotocol==4.10.1.75800.0 s2protocol==4.11.4.78285.0 sacred==0.7.2 scipy==1.4.1 seaborn==0.10.1 sentry-sdk==0.18.0 setproctitle==1.1.10 shortuuid==1.0.1 six==1.15.0 sk-video==1.1.10 smmap==3.0.4 snakeviz==1.0.0 soupsieve==2.0.1 subprocess32==3.5.4 tabulate==0.8.7 tensorboard==2.0.2 tensorboard-logger==0.1.0 tensorboard-plugin-wit==1.7.0 tensorboardX==2.0 tensorflow==2.0.0 tensorflow-estimator==2.0.0 termcolor==1.1.0 torch torchvision tornado tqdm==4.48.2 typing-extensions==3.7.4.3 urllib3==1.23 wandb==0.10.5 watchdog==0.10.3 websocket-client==0.53.0 Werkzeug==0.16.1 whichcraft==0.5.2 wrapt==1.12.1 xmltodict==0.12.0 yarl==1.5.1 zipp==3.1.0 zmq==0.0.0