[
  {
    "path": "LICENSE",
    "content": "MIT License\n\n<<<<<<< HEAD\nCopyright (c) 2021 anybodyany\n=======\nCopyright (c) 2020 Tianshou contributors\n>>>>>>> upload macpo code\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n<<<<<<< HEAD\nSOFTWARE.\n=======\nSOFTWARE.\n>>>>>>> upload macpo code\n"
  },
  {
    "path": "MACPO/.gitignore",
    "content": "/.idea/\n*/__pycache__/\n"
  },
  {
    "path": "MACPO/environment.yaml",
    "content": "name: marl\nchannels:\n  - defaults\ndependencies:\n  - _libgcc_mutex=0.1=main\n  - _tflow_select=2.1.0=gpu\n  - absl-py=0.9.0=py36_0\n  - astor=0.8.0=py36_0\n  - blas=1.0=mkl\n  - c-ares=1.15.0=h7b6447c_1001\n  - ca-certificates=2020.1.1=0\n  - certifi=2020.4.5.2=py36_0\n  - cudatoolkit=10.0.130=0\n  - cudnn=7.6.5=cuda10.0_0\n  - cupti=10.0.130=0\n  - gast=0.2.2=py36_0\n  - google-pasta=0.2.0=py_0\n  - grpcio=1.14.1=py36h9ba97e2_0\n  - h5py=2.10.0=py36h7918eee_0\n  - hdf5=1.10.4=hb1b8bf9_0\n  - intel-openmp=2020.1=217\n  - keras-applications=1.0.8=py_0\n  - keras-preprocessing=1.1.0=py_1\n  - libedit=3.1=heed3624_0\n  - libffi=3.2.1=hd88cf55_4\n  - libgcc-ng=9.1.0=hdf63c60_0\n  - libgfortran-ng=7.3.0=hdf63c60_0\n  - libprotobuf=3.12.3=hd408876_0\n  - libstdcxx-ng=9.1.0=hdf63c60_0\n  - markdown=3.1.1=py36_0\n  - mkl=2020.1=217\n  - mkl-service=2.3.0=py36he904b0f_0\n  - mkl_fft=1.1.0=py36h23d657b_0\n  - mkl_random=1.1.1=py36h0573a6f_0\n  - ncurses=6.0=h9df7e31_2\n  - numpy=1.18.1=py36h4f9e942_0\n  - numpy-base=1.18.1=py36hde5b4d6_1\n  - openssl=1.0.2u=h7b6447c_0\n  - opt_einsum=3.1.0=py_0\n  - pip=20.1.1=py36_1\n  - protobuf=3.12.3=py36he6710b0_0\n  - python=3.6.2=hca45abc_19\n  - readline=7.0=ha6073c6_4\n  - scipy=1.4.1=py36h0b6359f_0\n  - setuptools=47.3.0=py36_0\n  - six=1.15.0=py_0\n  - sqlite=3.23.1=he433501_0\n  - tensorboard=2.0.0=pyhb38c66f_1\n  - tensorflow=2.0.0=gpu_py36h6b29c10_0\n  - tensorflow-base=2.0.0=gpu_py36h0ec5d1f_0\n  - tensorflow-estimator=2.0.0=pyh2649769_0\n  - tensorflow-gpu=2.0.0=h0d30ee6_0\n  - termcolor=1.1.0=py36_1\n  - tk=8.6.8=hbc83047_0\n  - werkzeug=0.16.1=py_0\n  - wheel=0.34.2=py36_0\n  - wrapt=1.12.1=py36h7b6447c_1\n  - xz=5.2.5=h7b6447c_0\n  - zlib=1.2.11=h7b6447c_3\n  - pip:\n      - aiohttp==3.6.2\n      - aioredis==1.3.1\n      - astunparse==1.6.3\n      - async-timeout==3.0.1\n      - atari-py==0.2.6\n      - atomicwrites==1.2.1\n      - attrs==18.2.0\n      - beautifulsoup4==4.9.1\n      - blessings==1.7\n      - cachetools==4.1.1\n      - cffi==1.14.1\n      - chardet==3.0.4\n      - click==7.1.2\n      - cloudpickle==1.3.0\n      - colorama==0.4.3\n      - colorful==0.5.4\n      - configparser==5.0.1\n      - contextvars==2.4\n      - cycler==0.10.0\n      - cython==0.29.21\n      - deepdiff==4.3.2\n      - dill==0.3.2\n      - docker-pycreds==0.4.0\n      - docopt==0.6.2\n      - fasteners==0.15\n      - filelock==3.0.12\n      - funcsigs==1.0.2\n      - future==0.16.0\n      - gin==0.1.6\n      - gin-config==0.3.0\n      - gitdb==4.0.5\n      - gitpython==3.1.9\n      - glfw==1.12.0\n      - google==3.0.0\n      - google-api-core==1.22.1\n      - google-auth==1.21.0\n      - google-auth-oauthlib==0.4.1\n      - googleapis-common-protos==1.52.0\n      - gpustat==0.6.0\n      - gql==0.2.0\n      - graphql-core==1.1\n      - gym==0.17.2\n      - hiredis==1.1.0\n      - idna==2.7\n      - idna-ssl==1.1.0\n      - imageio==2.4.1\n      - immutables==0.14\n      - importlib-metadata==1.7.0\n      - joblib==0.16.0\n      - jsonnet==0.16.0\n      - jsonpickle==0.9.6\n      - jsonschema==3.2.0\n      - kiwisolver==1.0.1\n      - lockfile==0.12.2\n      - mappo==0.0.1\n      - matplotlib==3.0.0\n      - mock==2.0.0\n      - monotonic==1.5\n      - more-itertools==4.3.0\n      - mpi4py==3.0.3\n      - mpyq==0.2.5\n      - msgpack==1.0.0\n      - mujoco-py==2.0.2.13\n      - mujoco-worldgen==0.0.0\n      - multidict==4.7.6\n      - munch==2.3.2\n      - nvidia-ml-py3==7.352.0\n      - oauthlib==3.1.0\n      - opencensus==0.7.10\n      - opencensus-context==0.1.1\n      - opencv-python==4.2.0.34\n      - ordered-set==4.0.2\n      - packaging==20.4\n      - pandas==1.1.1\n      - pathlib2==2.3.2\n      - pathtools==0.1.2\n      - pbr==4.3.0\n      - pillow==5.3.0\n      - pluggy==0.7.1\n      - portpicker==1.2.0\n      - probscale==0.2.3\n      - progressbar2==3.53.1\n      - prometheus-client==0.8.0\n      - promise==2.3\n      - psutil==5.7.2\n      - py==1.6.0\n      - py-spy==0.3.3\n      - pyasn1==0.4.8\n      - pyasn1-modules==0.2.8\n      - pycparser==2.20\n      - pygame==1.9.4\n      - pyglet==1.5.0\n      - pyopengl==3.1.5\n      - pyopengl-accelerate==3.1.5\n      - pyparsing==2.2.2\n      - pyrsistent==0.16.0\n      - pysc2==3.0.0\n      - pytest==3.8.2\n      - python-dateutil==2.7.3\n      - python-utils==2.4.0\n      - pytz==2020.1\n      - pyyaml==3.13\n      - pyzmq==19.0.2\n      - ray==0.8.0\n      - redis==3.4.1\n      - requests==2.24.0\n      - requests-oauthlib==1.3.0\n      - rsa==4.6\n      - s2clientprotocol==4.10.1.75800.0\n      - s2protocol==4.11.4.78285.0\n      - sacred==0.7.2\n      - seaborn==0.10.1\n      - sentry-sdk==0.18.0\n      - shortuuid==1.0.1\n      - sk-video==1.1.10\n      - smmap==3.0.4\n      - snakeviz==1.0.0\n      - soupsieve==2.0.1\n      - subprocess32==3.5.4\n      - tabulate==0.8.7\n      - tensorboard-logger==0.1.0\n      - tensorboard-plugin-wit==1.7.0\n      - tensorboardx==2.0\n      - torch==1.5.1+cu101\n      - torchvision==0.6.1+cu101\n      - tornado==5.1.1\n      - tqdm==4.48.2\n      - typing-extensions==3.7.4.3\n      - urllib3==1.23\n      - wandb==0.10.5\n      - watchdog==0.10.3\n      - websocket-client==0.53.0\n      - whichcraft==0.5.2\n      - xmltodict==0.12.0\n      - yarl==1.5.1\n      - zipp==3.1.0\n      - zmq==0.0.0\n"
  },
  {
    "path": "MACPO/macpo/__init__.py",
    "content": "from macpo import algorithms, envs, runner, scripts, utils, config\n\n\n__version__ = \"0.1.0\"\n\n__all__ = [\n    \"algorithms\",\n    \"envs\",\n    \"runner\",\n    \"scripts\",\n    \"utils\",\n    \"config\",\n]"
  },
  {
    "path": "MACPO/macpo/algorithms/__init__.py",
    "content": ""
  },
  {
    "path": "MACPO/macpo/algorithms/r_mappo/__init__.py",
    "content": "def cost_trpo_macppo():\n    return None"
  },
  {
    "path": "MACPO/macpo/algorithms/r_mappo/algorithm/MACPPOPolicy.py",
    "content": "import torch\nfrom macpo.algorithms.r_mappo.algorithm.r_actor_critic import R_Actor, R_Critic\nfrom macpo.utils.util import update_linear_schedule\n\n\nclass MACPPOPolicy:\n    \"\"\"\n    MACPO Policy  class. Wraps actor and critic networks to compute actions and value function predictions.\n\n    :param args: (argparse.Namespace) arguments containing relevant model and policy information.\n    :param obs_space: (gym.Space) observation space.\n    :param cent_obs_space: (gym.Space) value function input space (centralized input for MAPPO, decentralized for IPPO).\n    :param action_space: (gym.Space) action space.\n    :param device: (torch.device) specifies the device to run on (cpu/gpu).\n    \"\"\"\n\n    def __init__(self, args, obs_space, cent_obs_space, act_space, device=torch.device(\"cpu\")):\n        self.args = args\n        self.device = device\n        self.lr = args.lr\n        self.critic_lr = args.critic_lr\n        self.opti_eps = args.opti_eps\n        self.weight_decay = args.weight_decay\n\n        self.obs_space = obs_space\n        self.share_obs_space = cent_obs_space\n        self.act_space = act_space\n\n        self.actor = R_Actor(args, self.obs_space, self.act_space, self.device)\n        self.critic = R_Critic(args, self.share_obs_space, self.device)\n        self.cost_critic = R_Critic(args, self.share_obs_space, self.device)\n\n        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(),\n                                                lr=self.lr, eps=self.opti_eps,\n                                                weight_decay=self.weight_decay)\n        self.critic_optimizer = torch.optim.Adam(self.critic.parameters(),\n                                                 lr=self.critic_lr,\n                                                 eps=self.opti_eps,\n                                                 weight_decay=self.weight_decay)\n        self.cost_optimizer = torch.optim.Adam(self.cost_critic.parameters(),\n                                               lr=self.critic_lr,\n                                               eps=self.opti_eps,\n                                               weight_decay=self.weight_decay)\n\n    def lr_decay(self, episode, episodes):\n        \"\"\"\n        Decay the actor and critic learning rates.\n        :param episode: (int) current training episode.\n        :param episodes: (int) total number of training episodes.\n        \"\"\"\n        update_linear_schedule(self.actor_optimizer, episode, episodes, self.lr)\n        update_linear_schedule(self.critic_optimizer, episode, episodes, self.critic_lr)\n        update_linear_schedule(self.cost_optimizer, episode, episodes, self.critic_lr)\n\n    def get_actions(self, cent_obs, obs, rnn_states_actor, rnn_states_critic, masks, available_actions=None,\n                    deterministic=False, rnn_states_cost=None):\n        \"\"\"\n        Compute actions and value function predictions for the given inputs.\n        :param cent_obs (np.ndarray): centralized input to the critic.\n        :param obs (np.ndarray): local agent inputs to the actor.\n        :param rnn_states_actor: (np.ndarray) if actor is RNN, RNN states for actor.\n        :param rnn_states_critic: (np.ndarray) if critic is RNN, RNN states for critic.\n        :param masks: (np.ndarray) denotes points at which RNN states should be reset.\n        :param available_actions: (np.ndarray) denotes which actions are available to agent\n                                  (if None, all actions available)\n        :param deterministic: (bool) whether the action should be mode of distribution or should be sampled.\n\n        :return values: (torch.Tensor) value function predictions.\n        :return actions: (torch.Tensor) actions to take.\n        :return action_log_probs: (torch.Tensor) log probabilities of chosen actions.\n        :return rnn_states_actor: (torch.Tensor) updated actor network RNN states.\n        :return rnn_states_critic: (torch.Tensor) updated critic network RNN states.\n        \"\"\"\n        actions, action_log_probs, rnn_states_actor = self.actor(obs,\n                                                                 rnn_states_actor,\n                                                                 masks,\n                                                                 available_actions,\n                                                                 deterministic)\n\n        values, rnn_states_critic = self.critic(cent_obs, rnn_states_critic, masks)\n        if rnn_states_cost is None:\n            return values, actions, action_log_probs, rnn_states_actor, rnn_states_critic\n        else:\n            cost_preds, rnn_states_cost = self.cost_critic(cent_obs, rnn_states_cost, masks)\n            return values, actions, action_log_probs, rnn_states_actor, rnn_states_critic, cost_preds, rnn_states_cost\n\n\n    def get_values(self, cent_obs, rnn_states_critic, masks):\n        \"\"\"\n        Get value function predictions.\n        :param cent_obs (np.ndarray): centralized input to the critic.\n        :param rnn_states_critic: (np.ndarray) if critic is RNN, RNN states for critic.\n        :param masks: (np.ndarray) denotes points at which RNN states should be reset.\n\n        :return values: (torch.Tensor) value function predictions.\n        \"\"\"\n        values, _ = self.critic(cent_obs, rnn_states_critic, masks)\n        return values\n\n    def get_cost_values(self, cent_obs, rnn_states_cost, masks):\n        \"\"\"\n        Get constraint cost predictions.\n        :param cent_obs (np.ndarray): centralized input to the critic.\n        :param rnn_states_critic: (np.ndarray) if critic is RNN, RNN states for critic.\n        :param masks: (np.ndarray) denotes points at which RNN states should be reset.\n\n        :return values: (torch.Tensor) value function predictions.\n        \"\"\"\n        cost_preds, _ = self.cost_critic(cent_obs, rnn_states_cost, masks)\n        return cost_preds\n\n    def evaluate_actions(self, cent_obs, obs, rnn_states_actor, rnn_states_critic, action, masks,\n                         available_actions=None, active_masks=None, rnn_states_cost=None):\n        \"\"\"\n        Get action logprobs / entropy and value function predictions for actor update.\n        :param cent_obs (np.ndarray): centralized input to the critic.\n        :param obs (np.ndarray): local agent inputs to the actor.\n        :param rnn_states_actor: (np.ndarray) if actor is RNN, RNN states for actor.\n        :param rnn_states_critic: (np.ndarray) if critic is RNN, RNN states for critic.\n        :param action: (np.ndarray) actions whose log probabilites and entropy to compute.\n        :param masks: (np.ndarray) denotes points at which RNN states should be reset.\n        :param available_actions: (np.ndarray) denotes which actions are available to agent\n                                  (if None, all actions available)\n        :param active_masks: (torch.Tensor) denotes whether an agent is active or dead.\n\n        :return values: (torch.Tensor) value function predictions.\n        :return action_log_probs: (torch.Tensor) log probabilities of the input actions.\n        :return dist_entropy: (torch.Tensor) action distribution entropy for the given inputs.\n        \"\"\"\n\n        \n\n        if self.args.algorithm_name == \"macpo\": # todo: for mactrpo\n            action_log_probs, dist_entropy, action_mu, action_std = self.actor.evaluate_actions(obs,\n                                                                                                rnn_states_actor,\n                                                                                                action,\n                                                                                                masks,\n                                                                                                available_actions,\n                                                                                                active_masks)\n            values, _ = self.critic(cent_obs, rnn_states_critic, masks)\n            cost_values, _ = self.cost_critic(cent_obs, rnn_states_cost, masks)\n            values, _ = self.critic(cent_obs, rnn_states_critic, masks)\n            return values, action_log_probs, dist_entropy, cost_values, action_mu, action_std\n        else: # todo: for lagrangrian\n            action_log_probs, dist_entropy = self.actor.evaluate_actions(obs,\n                                                                         rnn_states_actor,\n                                                                         action,\n                                                                         masks,\n                                                                         available_actions,\n                                                                         active_masks)\n\n            values, _ = self.critic(cent_obs, rnn_states_critic, masks)\n            cost_values, _ = self.cost_critic(cent_obs, rnn_states_cost, masks)\n            return values, action_log_probs, dist_entropy, cost_values\n\n\n    def act(self, obs, rnn_states_actor, masks, available_actions=None, deterministic=False):\n        \"\"\"\n        Compute actions using the given inputs.\n        :param obs (np.ndarray): local agent inputs to the actor.\n        :param rnn_states_actor: (np.ndarray) if actor is RNN, RNN states for actor.\n        :param masks: (np.ndarray) denotes points at which RNN states should be reset.\n        :param available_actions: (np.ndarray) denotes which actions are available to agent\n                                  (if None, all actions available)\n        :param deterministic: (bool) whether the action should be mode of distribution or should be sampled.\n        \"\"\"\n        actions, _, rnn_states_actor = self.actor(obs, rnn_states_actor, masks, available_actions, deterministic)\n        return actions, rnn_states_actor\n"
  },
  {
    "path": "MACPO/macpo/algorithms/r_mappo/algorithm/rMAPPOPolicy.py",
    "content": "import torch\nfrom macpo.algorithms.r_mappo.algorithm.r_actor_critic import R_Actor, R_Critic\nfrom macpo.utils.util import update_linear_schedule\n\n\nclass R_MAPPOPolicy:\n    \"\"\"\n    MAPPO Policy  class. Wraps actor and critic networks to compute actions and value function predictions.\n\n    :param args: (argparse.Namespace) arguments containing relevant model and policy information.\n    :param obs_space: (gym.Space) observation space.\n    :param cent_obs_space: (gym.Space) value function input space (centralized input for MAPPO, decentralized for IPPO).\n    :param action_space: (gym.Space) action space.\n    :param device: (torch.device) specifies the device to run on (cpu/gpu).\n    \"\"\"\n\n    def __init__(self, args, obs_space, cent_obs_space, act_space, device=torch.device(\"cpu\")):\n        self.device = device\n        self.lr = args.lr\n        self.critic_lr = args.critic_lr\n        self.opti_eps = args.opti_eps\n        self.weight_decay = args.weight_decay\n\n        self.obs_space = obs_space\n        self.share_obs_space = cent_obs_space\n        self.act_space = act_space\n\n        self.actor = R_Actor(args, self.obs_space, self.act_space, self.device)\n        self.critic = R_Critic(args, self.share_obs_space, self.device)\n\n        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(),\n                                                lr=self.lr, eps=self.opti_eps,\n                                                weight_decay=self.weight_decay)\n        self.critic_optimizer = torch.optim.Adam(self.critic.parameters(),\n                                                 lr=self.critic_lr,\n                                                 eps=self.opti_eps,\n                                                 weight_decay=self.weight_decay)\n\n    def lr_decay(self, episode, episodes):\n        \"\"\"\n        Decay the actor and critic learning rates.\n        :param episode: (int) current training episode.\n        :param episodes: (int) total number of training episodes.\n        \"\"\"\n        update_linear_schedule(self.actor_optimizer, episode, episodes, self.lr)\n        update_linear_schedule(self.critic_optimizer, episode, episodes, self.critic_lr)\n\n    def get_actions(self, cent_obs, obs, rnn_states_actor, rnn_states_critic, masks, available_actions=None,\n                    deterministic=False):\n        \"\"\"\n        Compute actions and value function predictions for the given inputs.\n        :param cent_obs (np.ndarray): centralized input to the critic.\n        :param obs (np.ndarray): local agent inputs to the actor.\n        :param rnn_states_actor: (np.ndarray) if actor is RNN, RNN states for actor.\n        :param rnn_states_critic: (np.ndarray) if critic is RNN, RNN states for critic.\n        :param masks: (np.ndarray) denotes points at which RNN states should be reset.\n        :param available_actions: (np.ndarray) denotes which actions are available to agent\n                                  (if None, all actions available)\n        :param deterministic: (bool) whether the action should be mode of distribution or should be sampled.\n\n        :return values: (torch.Tensor) value function predictions.\n        :return actions: (torch.Tensor) actions to take.\n        :return action_log_probs: (torch.Tensor) log probabilities of chosen actions.\n        :return rnn_states_actor: (torch.Tensor) updated actor network RNN states.\n        :return rnn_states_critic: (torch.Tensor) updated critic network RNN states.\n        \"\"\"\n        actions, action_log_probs, rnn_states_actor = self.actor(obs,\n                                                                 rnn_states_actor,\n                                                                 masks,\n                                                                 available_actions,\n                                                                 deterministic)\n\n        values, rnn_states_critic = self.critic(cent_obs, rnn_states_critic, masks)\n        return values, actions, action_log_probs, rnn_states_actor, rnn_states_critic\n\n    def get_values(self, cent_obs, rnn_states_critic, masks):\n        \"\"\"\n        Get value function predictions.\n        :param cent_obs (np.ndarray): centralized input to the critic.\n        :param rnn_states_critic: (np.ndarray) if critic is RNN, RNN states for critic.\n        :param masks: (np.ndarray) denotes points at which RNN states should be reset.\n\n        :return values: (torch.Tensor) value function predictions.\n        \"\"\"\n        values, _ = self.critic(cent_obs, rnn_states_critic, masks)\n        return values\n\n    def evaluate_actions(self, cent_obs, obs, rnn_states_actor, rnn_states_critic, action, masks,\n                         available_actions=None, active_masks=None):\n        \"\"\"\n        Get action logprobs / entropy and value function predictions for actor update.\n        :param cent_obs (np.ndarray): centralized input to the critic.\n        :param obs (np.ndarray): local agent inputs to the actor.\n        :param rnn_states_actor: (np.ndarray) if actor is RNN, RNN states for actor.\n        :param rnn_states_critic: (np.ndarray) if critic is RNN, RNN states for critic.\n        :param action: (np.ndarray) actions whose log probabilites and entropy to compute.\n        :param masks: (np.ndarray) denotes points at which RNN states should be reset.\n        :param available_actions: (np.ndarray) denotes which actions are available to agent\n                                  (if None, all actions available)\n        :param active_masks: (torch.Tensor) denotes whether an agent is active or dead.\n\n        :return values: (torch.Tensor) value function predictions.\n        :return action_log_probs: (torch.Tensor) log probabilities of the input actions.\n        :return dist_entropy: (torch.Tensor) action distribution entropy for the given inputs.\n        \"\"\"\n        action_log_probs, dist_entropy = self.actor.evaluate_actions(obs,\n                                                                     rnn_states_actor,\n                                                                     action,\n                                                                     masks,\n                                                                     available_actions,\n                                                                     active_masks)\n\n        values, _ = self.critic(cent_obs, rnn_states_critic, masks)\n        return values, action_log_probs, dist_entropy\n\n    def act(self, obs, rnn_states_actor, masks, available_actions=None, deterministic=False):\n        \"\"\"\n        Compute actions using the given inputs.\n        :param obs (np.ndarray): local agent inputs to the actor.\n        :param rnn_states_actor: (np.ndarray) if actor is RNN, RNN states for actor.\n        :param masks: (np.ndarray) denotes points at which RNN states should be reset.\n        :param available_actions: (np.ndarray) denotes which actions are available to agent\n                                  (if None, all actions available)\n        :param deterministic: (bool) whether the action should be mode of distribution or should be sampled.\n        \"\"\"\n        actions, _, rnn_states_actor = self.actor(obs, rnn_states_actor, masks, available_actions, deterministic)\n        return actions, rnn_states_actor\n"
  },
  {
    "path": "MACPO/macpo/algorithms/r_mappo/algorithm/r_actor_critic.py",
    "content": "import torch\nimport torch.nn as nn\nfrom macpo.algorithms.utils.util import init, check\nfrom macpo.algorithms.utils.cnn import CNNBase\nfrom macpo.algorithms.utils.mlp import MLPBase\nfrom macpo.algorithms.utils.rnn import RNNLayer\nfrom macpo.algorithms.utils.act import ACTLayer\nfrom macpo.utils.util import get_shape_from_obs_space\n\n\nclass R_Actor(nn.Module):\n    \"\"\"\n    Actor network class for MACPO. Outputs actions given observations.\n    :param args: (argparse.Namespace) arguments containing relevant model information.\n    :param obs_space: (gym.Space) observation space.\n    :param action_space: (gym.Space) action space.\n    :param device: (torch.device) specifies the device to run on (cpu/gpu).\n    \"\"\"\n    def __init__(self, args, obs_space, action_space, device=torch.device(\"cpu\")):\n        super(R_Actor, self).__init__()\n        self.args = args\n        self.hidden_size = args.hidden_size\n\n        self._gain = args.gain\n        self._use_orthogonal = args.use_orthogonal\n        self._use_policy_active_masks = args.use_policy_active_masks\n        self._use_naive_recurrent_policy = args.use_naive_recurrent_policy\n        self._use_recurrent_policy = args.use_recurrent_policy\n        self._recurrent_N = args.recurrent_N\n        self.tpdv = dict(dtype=torch.float32, device=device)\n\n        obs_shape = get_shape_from_obs_space(obs_space)\n        base = CNNBase if len(obs_shape) == 3 else MLPBase\n        self.base = base(args, obs_shape)\n\n        if self._use_naive_recurrent_policy or self._use_recurrent_policy:\n            self.rnn = RNNLayer(self.hidden_size, self.hidden_size, self._recurrent_N, self._use_orthogonal)\n\n        self.act = ACTLayer(action_space, self.hidden_size, self._use_orthogonal, self._gain, args)\n\n        self.to(device)\n\n    def forward(self, obs, rnn_states, masks, available_actions=None, deterministic=False):\n        \"\"\"\n        Compute actions from the given inputs.\n        :param obs: (np.ndarray / torch.Tensor) observation inputs into network.\n        :param rnn_states: (np.ndarray / torch.Tensor) if RNN network, hidden states for RNN.\n        :param masks: (np.ndarray / torch.Tensor) mask tensor denoting if hidden states should be reinitialized to zeros.\n        :param available_actions: (np.ndarray / torch.Tensor) denotes which actions are available to agent\n                                                              (if None, all actions available)\n        :param deterministic: (bool) whether to sample from action distribution or return the mode.\n\n        :return actions: (torch.Tensor) actions to take.\n        :return action_log_probs: (torch.Tensor) log probabilities of taken actions.\n        :return rnn_states: (torch.Tensor) updated RNN hidden states.\n        \"\"\"\n        obs = check(obs).to(**self.tpdv)\n        rnn_states = check(rnn_states).to(**self.tpdv)\n        masks = check(masks).to(**self.tpdv)\n        if available_actions is not None:\n            available_actions = check(available_actions).to(**self.tpdv)\n\n        actor_features = self.base(obs)\n\n        if self._use_naive_recurrent_policy or self._use_recurrent_policy:\n            actor_features, rnn_states = self.rnn(actor_features, rnn_states, masks)\n\n        actions, action_log_probs = self.act(actor_features, available_actions, deterministic)\n\n        return actions, action_log_probs, rnn_states\n\n    def evaluate_actions(self, obs, rnn_states, action, masks, available_actions=None, active_masks=None):\n        \"\"\"\n        Compute log probability and entropy of given actions.\n        :param obs: (torch.Tensor) observation inputs into network.\n        :param action: (torch.Tensor) actions whose entropy and log probability to evaluate.\n        :param rnn_states: (torch.Tensor) if RNN network, hidden states for RNN.\n        :param masks: (torch.Tensor) mask tensor denoting if hidden states should be reinitialized to zeros.\n        :param available_actions: (torch.Tensor) denotes which actions are available to agent\n                                                              (if None, all actions available)\n        :param active_masks: (torch.Tensor) denotes whether an agent is active or dead.\n\n        :return action_log_probs: (torch.Tensor) log probabilities of the input actions.\n        :return dist_entropy: (torch.Tensor) action distribution entropy for the given inputs.\n        \"\"\"\n        obs = check(obs).to(**self.tpdv)\n        rnn_states = check(rnn_states).to(**self.tpdv)\n        action = check(action).to(**self.tpdv)\n        masks = check(masks).to(**self.tpdv)\n        if available_actions is not None:\n            available_actions = check(available_actions).to(**self.tpdv)\n\n        if active_masks is not None:\n            active_masks = check(active_masks).to(**self.tpdv)\n\n        actor_features = self.base(obs)\n\n        if self._use_naive_recurrent_policy or self._use_recurrent_policy:\n            actor_features, rnn_states = self.rnn(actor_features, rnn_states, masks)\n\n        if self.args.algorithm_name == \"macpo\":\n            action_log_probs, dist_entropy, action_mu, action_std = self.act.evaluate_actions_trpo(actor_features,\n                                                                                                   action,\n                                                                                                   available_actions,\n                                                                                                   active_masks=\n                                                                                                   active_masks if self._use_policy_active_masks\n                                                                                                   else None)\n            # print(\"action_log_probs\", action_log_probs)\n            # print(\"action_std\", action_std)\n            return action_log_probs, dist_entropy, action_mu, action_std\n        else:\n            action_log_probs, dist_entropy = self.act.evaluate_actions(actor_features,\n                                                                       action, available_actions,\n                                                                       active_masks=\n                                                                       active_masks if self._use_policy_active_masks\n                                                                       else None)\n            return action_log_probs, dist_entropy\n\n\n\n\n\n\n\nclass R_Critic(nn.Module):\n    \"\"\"\n    Critic network class for MAPPO. Outputs value function predictions given centralized input (MAPPO) or\n                            local observations (IPPO).\n    :param args: (argparse.Namespace) arguments containing relevant model information.\n    :param cent_obs_space: (gym.Space) (centralized) observation space.\n    :param device: (torch.device) specifies the device to run on (cpu/gpu).\n    \"\"\"\n    def __init__(self, args, cent_obs_space, device=torch.device(\"cpu\")):\n        super(R_Critic, self).__init__()\n        self.hidden_size = args.hidden_size\n        self._use_orthogonal = args.use_orthogonal\n        self._use_naive_recurrent_policy = args.use_naive_recurrent_policy\n        self._use_recurrent_policy = args.use_recurrent_policy\n        self._recurrent_N = args.recurrent_N\n        self.tpdv = dict(dtype=torch.float32, device=device)\n        init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][self._use_orthogonal]\n\n        cent_obs_shape = get_shape_from_obs_space(cent_obs_space)\n        base = CNNBase if len(cent_obs_shape) == 3 else MLPBase\n        self.base = base(args, cent_obs_shape)\n\n        if self._use_naive_recurrent_policy or self._use_recurrent_policy:\n            self.rnn = RNNLayer(self.hidden_size, self.hidden_size, self._recurrent_N, self._use_orthogonal)\n\n        def init_(m):\n            return init(m, init_method, lambda x: nn.init.constant_(x, 0))\n\n        self.v_out = init_(nn.Linear(self.hidden_size, 1))\n\n        self.to(device)\n\n    def forward(self, cent_obs, rnn_states, masks):\n        \"\"\"\n        Compute actions from the given inputs.\n        :param cent_obs: (np.ndarray / torch.Tensor) observation inputs into network.\n        :param rnn_states: (np.ndarray / torch.Tensor) if RNN network, hidden states for RNN.\n        :param masks: (np.ndarray / torch.Tensor) mask tensor denoting if RNN states should be reinitialized to zeros.\n\n        :return values: (torch.Tensor) value function predictions.\n        :return rnn_states: (torch.Tensor) updated RNN hidden states.\n        \"\"\"\n        cent_obs = check(cent_obs).to(**self.tpdv)\n        rnn_states = check(rnn_states).to(**self.tpdv)\n        masks = check(masks).to(**self.tpdv)\n\n        critic_features = self.base(cent_obs)\n        if self._use_naive_recurrent_policy or self._use_recurrent_policy:\n            critic_features, rnn_states = self.rnn(critic_features, rnn_states, masks)\n        values = self.v_out(critic_features)\n\n        return values, rnn_states\n"
  },
  {
    "path": "MACPO/macpo/algorithms/r_mappo/r_macpo.py",
    "content": "import numpy as np\nimport torch\nimport torch.nn as nn\nfrom macpo.utils.util import get_gard_norm, huber_loss, mse_loss\nfrom macpo.utils.popart import PopArt\nfrom macpo.algorithms.utils.util import check\nfrom macpo.algorithms.r_mappo.algorithm.r_actor_critic import R_Actor\nfrom torch.nn.utils import clip_grad_norm\nimport copy\n\n\n# EPS = 1e-8\n\nclass R_MACTRPO_CPO():\n    \"\"\"\n    Trainer class for MACPO to update policies.\n    :param args: (argparse.Namespace) arguments containing relevant model, policy, and env information.\n    :param policy: (R_MAPPO_Policy) policy to update.\n    :param device: (torch.device) specifies the device to run on (cpu/gpu).\n    \"\"\"\n\n    def __init__(self,\n                 args,\n                 policy, attempt_feasible_recovery=False,\n                 attempt_infeasible_recovery=False, revert_to_last_safe_point=False, delta_bound=0.011,\n                 safety_bound=0.1,\n                 _backtrack_ratio=0.8, _max_backtracks=15, _constraint_name_1=\"trust_region\",\n                 _constraint_name_2=\"safety_region\", linesearch_infeasible_recovery=True, accept_violation=False,\n                 learn_margin=False,\n                 device=torch.device(\"cpu\")):\n\n        self.device = device\n        self.tpdv = dict(dtype=torch.float32, device=device)\n        self.policy = policy\n\n        self.clip_param = args.clip_param\n        self.ppo_epoch = args.ppo_epoch\n        self.num_mini_batch = args.num_mini_batch\n        self.data_chunk_length = args.data_chunk_length\n        self.value_loss_coef = args.value_loss_coef\n        self.entropy_coef = args.entropy_coef\n        self.max_grad_norm = args.max_grad_norm\n        self.huber_delta = args.huber_delta\n        self.episode_length = args.episode_length\n\n        self.kl_threshold = args.kl_threshold\n        self.safety_bound = args.safety_bound\n        self.ls_step = args.ls_step\n        self.accept_ratio = args.accept_ratio\n        self.EPS = args.EPS\n        self.gamma = args.gamma\n        self.safety_gamma = args.safety_gamma\n        self.line_search_fraction = args.line_search_fraction\n        self.g_step_dir_coef = args.g_step_dir_coef\n        self.b_step_dir_coef = args.b_step_dir_coef\n        self.fraction_coef = args.fraction_coef\n\n        self._use_recurrent_policy = args.use_recurrent_policy\n        self._use_naive_recurrent = args.use_naive_recurrent_policy\n        self._use_max_grad_norm = args.use_max_grad_norm\n        self._use_clipped_value_loss = args.use_clipped_value_loss\n        self._use_huber_loss = args.use_huber_loss\n        self._use_popart = args.use_popart\n        self._use_value_active_masks = args.use_value_active_masks\n        self._use_policy_active_masks = args.use_policy_active_masks\n\n        # todo:  my args-start\n        self.args = args\n        self.device = device\n        self.tpdv = dict(dtype=torch.float32, device=device)\n        self.policy = policy\n        self._damping = 0.0001\n        self._delta = 0.01\n        self._max_backtracks = 10\n        self._backtrack_coeff = 0.5\n\n        self.clip_param = args.clip_param\n        self.ppo_epoch = args.ppo_epoch\n        self.num_mini_batch = args.num_mini_batch\n        self.data_chunk_length = args.data_chunk_length\n        self.value_loss_coef = args.value_loss_coef\n        self.entropy_coef = args.entropy_coef\n        self.max_grad_norm = args.max_grad_norm\n        self.huber_delta = args.huber_delta\n\n        self._use_recurrent_policy = args.use_recurrent_policy\n        self._use_naive_recurrent = args.use_naive_recurrent_policy\n        self._use_max_grad_norm = args.use_max_grad_norm\n        self._use_clipped_value_loss = args.use_clipped_value_loss\n        self._use_huber_loss = args.use_huber_loss\n        self._use_popart = args.use_popart\n        self._use_value_active_masks = args.use_value_active_masks\n        self._use_policy_active_masks = args.use_policy_active_masks\n\n        self.attempt_feasible_recovery = attempt_feasible_recovery\n        self.attempt_infeasible_recovery = attempt_infeasible_recovery\n        self.revert_to_last_safe_point = revert_to_last_safe_point\n        self._max_quad_constraint_val = args.kl_threshold  # delta_bound\n        self._max_lin_constraint_val = args.safety_bound\n        self._backtrack_ratio = _backtrack_ratio\n        self._max_backtracks = _max_backtracks\n        self._constraint_name_1 = _constraint_name_1\n        self._constraint_name_2 = _constraint_name_2\n        self._linesearch_infeasible_recovery = linesearch_infeasible_recovery\n        self._accept_violation = accept_violation\n\n        hvp_approach = None\n        num_slices = 1\n        self.lamda_coef = 0\n        self.lamda_coef_a_star = 0\n        self.lamda_coef_b_star = 0\n\n        self.margin = 0\n        self.margin_lr = 0.05\n        self.learn_margin = learn_margin\n        self.n_rollout_threads = args.n_rollout_threads\n\n\n        if self._use_popart:\n            self.value_normalizer = PopArt(1, device=self.device)\n        else:\n            self.value_normalizer = None\n\n    def cal_value_loss(self, values, value_preds_batch, return_batch, active_masks_batch):\n        \"\"\"\n        Calculate value function loss.\n        :param values: (torch.Tensor) value function predictions.\n        :param value_preds_batch: (torch.Tensor) \"old\" value  predictions from data batch (used for value clip loss)\n        :param return_batch: (torch.Tensor) reward to go returns.\n        :param active_masks_batch: (torch.Tensor) denotes if agent is active or dead at a given timesep.\n\n        :return value_loss: (torch.Tensor) value function loss.\n        \"\"\"\n        if self._use_popart:\n            value_pred_clipped = value_preds_batch + (values - value_preds_batch).clamp(-self.clip_param,\n                                                                                        self.clip_param)\n            error_clipped = self.value_normalizer(return_batch) - value_pred_clipped\n            error_original = self.value_normalizer(return_batch) - values\n        else:\n            value_pred_clipped = value_preds_batch + (values - value_preds_batch).clamp(-self.clip_param,\n                                                                                        self.clip_param)\n            error_clipped = return_batch - value_pred_clipped\n            error_original = return_batch - values\n\n        if self._use_huber_loss:\n            value_loss_clipped = huber_loss(error_clipped, self.huber_delta)\n            value_loss_original = huber_loss(error_original, self.huber_delta)\n        else:\n            value_loss_clipped = mse_loss(error_clipped)\n            value_loss_original = mse_loss(error_original)\n\n        if self._use_clipped_value_loss:\n            value_loss = torch.max(value_loss_original, value_loss_clipped)\n        else:\n            value_loss = value_loss_original\n\n        if self._use_value_active_masks:\n            value_loss = (value_loss * active_masks_batch).sum() / active_masks_batch.sum()\n        else:\n            value_loss = value_loss.mean()\n\n        return value_loss\n\n    def flat_grad(self, grads):\n        grad_flatten = []\n        for grad in grads:\n            if grad is None:\n                continue\n            grad_flatten.append(grad.view(-1))\n        grad_flatten = torch.cat(grad_flatten)\n        return grad_flatten\n\n    def flat_hessian(self, hessians):\n        hessians_flatten = []\n        for hessian in hessians:\n            if hessian is None:\n                continue\n            hessians_flatten.append(hessian.contiguous().view(-1))\n        hessians_flatten = torch.cat(hessians_flatten).data\n        return hessians_flatten\n\n    def flat_params(self, model):\n        params = []\n        for param in model.parameters():\n            params.append(param.data.view(-1))\n        params_flatten = torch.cat(params)\n        return params_flatten\n\n    def update_model(self, model, new_params):\n        index = 0\n        for params in model.parameters():\n            params_length = len(params.view(-1))\n            new_param = new_params[index: index + params_length]\n            new_param = new_param.view(params.size())\n            params.data.copy_(new_param)\n            index += params_length\n\n    def kl_divergence(self, obs, rnn_states, action, masks, available_actions, active_masks, new_actor, old_actor):\n\n        _, _, mu, std = new_actor.evaluate_actions(obs, rnn_states, action, masks, available_actions, active_masks)\n        _, _, mu_old, std_old = old_actor.evaluate_actions(obs, rnn_states, action, masks, available_actions,\n                                                           active_masks)\n        logstd = torch.log(std)\n        mu_old = mu_old.detach()\n        std_old = std_old.detach()\n        logstd_old = torch.log(std_old)\n\n        # kl divergence between old policy and new policy : D( pi_old || pi_new )\n        # pi_old -> mu0, logstd0, std0 / pi_new -> mu, logstd, std\n        # be careful of calculating KL-divergence. It is not symmetric metric\n        kl = logstd_old - logstd + (std_old.pow(2) + (mu_old - mu).pow(2)) / \\\n             (self.EPS + 2.0 * std.pow(2)) - 0.5\n\n        return kl.sum(1, keepdim=True)\n\n    # from openai baseline code\n    # https://github.com/openai/baselines/blob/master/baselines/common/cg.py\n    def conjugate_gradient(self, actor, obs, rnn_states, action, masks, available_actions, active_masks, b, nsteps,\n                           residual_tol=1e-10):\n        x = torch.zeros(b.size()).to(device=self.device)\n        r = b.clone()\n        p = b.clone()\n        rdotr = torch.dot(r, r)\n        for i in range(nsteps):\n            _Avp = self.fisher_vector_product(actor, obs, rnn_states, action, masks, available_actions, active_masks, p)\n            alpha = rdotr / torch.dot(p, _Avp)\n            x += alpha * p\n            r -= alpha * _Avp\n            new_rdotr = torch.dot(r, r)\n            betta = new_rdotr / rdotr\n            p = r + betta * p\n            rdotr = new_rdotr\n            if rdotr < residual_tol:\n                break\n        return x\n\n    def fisher_vector_product(self, actor, obs, rnn_states, action, masks, available_actions, active_masks, p):\n        p.detach()\n        kl = self.kl_divergence(obs, rnn_states, action, masks, available_actions, active_masks, new_actor=actor,\n                                old_actor=actor)\n        kl = kl.mean()\n        kl_grad = torch.autograd.grad(kl, actor.parameters(), create_graph=True, allow_unused=True)\n        kl_grad = self.flat_grad(kl_grad)  # check kl_grad == 0\n\n        kl_grad_p = (kl_grad * p).sum()\n        kl_hessian_p = torch.autograd.grad(kl_grad_p, actor.parameters(), allow_unused=True)\n        kl_hessian_p = self.flat_hessian(kl_hessian_p)\n\n        return kl_hessian_p + 0.1 * p\n\n    def _get_flat_grad(self, y, model, retain_graph=None, create_graph=False):\n        grads = torch.autograd.grad(y, model.parameters(), retain_graph=retain_graph,\n                                    create_graph=create_graph, allow_unused=True)\n        _grads = []\n        for val, p in zip(grads, model.parameters()):\n            if val is not None:\n                _grads.append(val)\n            else:\n                _grads.append(torch.zeros_like(p.data, requires_grad=create_graph))\n        return torch.cat([grad.reshape(-1) for grad in _grads])\n\n    def _flat_grad_(self, f, model, retain_graph=None, create_graph=False):\n        return self.flat_grad(torch.autograd.grad(f, model.parameters(), retain_graph=retain_graph,\n                                                  create_graph=create_graph, allow_unused=True))\n\n    def hessian_vector_product(self, f, model):\n        # for H = grad**2 f, compute Hx\n        g = self._flat_grad_(f, model)\n        # g = self._get_flat_grad(f, model)\n        # x = torch.placeholder(torch.float32, shape=g.shape)\n        x = torch.FloatTensor(g.shape)\n        return x, self._flat_grad_(torch.sum(g * x), model)\n\n    def cg(self, Ax, b, cg_iters=10):\n        x = np.zeros_like(b)\n        r = b.clone()  # Note: should be 'b - Ax(x)', but for x=0, Ax(x)=0. Change if doing warm start.\n        p = r.clone()\n        r_dot_old = torch.dot(r, r)\n        for _ in range(cg_iters):\n            z = Ax(p)\n            alpha = r_dot_old / (torch.dot(p, z) + self.EPS)\n            x += alpha * p\n            r -= alpha * z\n            r_dot_new = torch.dot(r, r)\n            p = r + (r_dot_new / r_dot_old) * p\n            r_dot_old = r_dot_new\n        return x\n\n    def trpo_update(self, sample, update_actor=True):\n        \"\"\"\n        Update actor and critic networks.\n        :param sample: (Tuple) contains data batch with which to update networks.\n        :update_actor: (bool) whether to update actor network.\n\n        :return value_loss: (torch.Tensor) value function loss.\n        :return critic_grad_norm: (torch.Tensor) gradient norm from critic update.\n        ;return policy_loss: (torch.Tensor) actor(policy) loss value.\n        :return dist_entropy: (torch.Tensor) action entropies.\n        :return actor_grad_norm: (torch.Tensor) gradient norm from actor update.\n        :return imp_weights: (torch.Tensor) importance sampling weights.\n        \"\"\"\n        share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, \\\n        value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, \\\n        adv_targ, available_actions_batch, factor_batch, cost_preds_batch, cost_returns_barch, rnn_states_cost_batch, \\\n        cost_adv_targ, aver_episode_costs = sample\n\n        old_action_log_probs_batch = check(old_action_log_probs_batch).to(**self.tpdv)\n        adv_targ = check(adv_targ).to(**self.tpdv)\n        cost_adv_targ = check(cost_adv_targ).to(**self.tpdv)\n\n        value_preds_batch = check(value_preds_batch).to(**self.tpdv)\n        return_batch = check(return_batch).to(**self.tpdv)\n        active_masks_batch = check(active_masks_batch).to(**self.tpdv)\n        factor_batch = check(factor_batch).to(**self.tpdv)\n        cost_returns_barch = check(cost_returns_barch).to(**self.tpdv)\n        cost_preds_batch = check(cost_preds_batch).to(**self.tpdv)\n        # Reshape to do in a single forward pass for all steps\n        # values, action_log_probs, dist_entropy, cost_values, action_mu, action_std\n\n        values, action_log_probs, dist_entropy, cost_values, action_mu, action_std = self.policy.evaluate_actions(\n            share_obs_batch,\n            obs_batch,\n            rnn_states_batch,\n            rnn_states_critic_batch,\n            actions_batch,\n            masks_batch,\n            available_actions_batch,\n            active_masks_batch,\n            rnn_states_cost_batch)\n\n        # todo: reward critic update\n        value_loss = self.cal_value_loss(values, value_preds_batch, return_batch, active_masks_batch)\n        self.policy.critic_optimizer.zero_grad()\n        (value_loss * self.value_loss_coef).backward()\n        if self._use_max_grad_norm:\n            critic_grad_norm = nn.utils.clip_grad_norm_(self.policy.critic.parameters(), self.max_grad_norm)\n        else:\n            critic_grad_norm = get_gard_norm(self.policy.critic.parameters())\n        self.policy.critic_optimizer.step()\n\n        # todo: cost critic update\n        cost_loss = self.cal_value_loss(cost_values, cost_preds_batch, cost_returns_barch, active_masks_batch)\n        self.policy.cost_optimizer.zero_grad()\n        (cost_loss * self.value_loss_coef).backward()\n        if self._use_max_grad_norm:\n            cost_grad_norm = nn.utils.clip_grad_norm_(self.policy.cost_critic.parameters(), self.max_grad_norm)\n        else:\n            cost_grad_norm = get_gard_norm(self.policy.cost_critic.parameters())\n        self.policy.cost_optimizer.step()\n\n        # todo: actor update\n\n        rescale_constraint_val = (aver_episode_costs.mean() - self._max_lin_constraint_val) * (1 - self.gamma)\n\n        if rescale_constraint_val == 0:\n            rescale_constraint_val = self.EPS\n\n        # todo:reward-g\n        ratio = torch.exp(action_log_probs - old_action_log_probs_batch)\n        if self._use_policy_active_masks:\n            reward_loss = (torch.sum(ratio * factor_batch * adv_targ, dim=-1, keepdim=True) *\n                           active_masks_batch).sum() / active_masks_batch.sum()\n        else:\n            reward_loss = torch.sum(ratio * factor_batch * adv_targ, dim=-1, keepdim=True).mean()\n        reward_loss = - reward_loss  # todo:\n        reward_loss_grad = torch.autograd.grad(reward_loss, self.policy.actor.parameters(), retain_graph=True,\n                                               allow_unused=True)\n        reward_loss_grad = self.flat_grad(reward_loss_grad)\n\n        # todo:cost-b\n        if self._use_policy_active_masks:\n            cost_loss = (torch.sum(ratio * factor_batch * (cost_adv_targ), dim=-1, keepdim=True) *\n                         active_masks_batch).sum() / active_masks_batch.sum()\n        else:\n            cost_loss = torch.sum(ratio * factor_batch * (cost_adv_targ), dim=-1, keepdim=True).mean()\n        cost_loss_grad = torch.autograd.grad(cost_loss, self.policy.actor.parameters(), retain_graph=True,\n                                             allow_unused=True)\n        cost_loss_grad = self.flat_grad(cost_loss_grad)\n        B_cost_loss_grad = cost_loss_grad.unsqueeze(0)\n        B_cost_loss_grad = self.flat_grad(B_cost_loss_grad)\n\n        # todo: compute lamda_coef and v_coef\n        g_step_dir = self.conjugate_gradient(self.policy.actor,\n                                             obs_batch,\n                                             rnn_states_batch,\n                                             actions_batch,\n                                             masks_batch,\n                                             available_actions_batch,\n                                             active_masks_batch,\n                                             reward_loss_grad.data,\n                                             nsteps=10)  # todo: compute H^{-1} g\n        b_step_dir = self.conjugate_gradient(self.policy.actor,\n                                             obs_batch,\n                                             rnn_states_batch,\n                                             actions_batch,\n                                             masks_batch,\n                                             available_actions_batch,\n                                             active_masks_batch,\n                                             B_cost_loss_grad.data,\n                                             nsteps=10)  # todo: compute H^{-1} b\n\n        q_coef = (reward_loss_grad * g_step_dir).sum(0, keepdim=True)  # todo: compute q_coef: = g^T H^{-1} g\n        r_coef = (reward_loss_grad * b_step_dir).sum(0, keepdim=True)  # todo: compute r_coef: = g^T H^{-1} b\n        s_coef = (cost_loss_grad * b_step_dir).sum(0, keepdim=True)  # todo: compute s_coef: = b^T H^{-1} b\n\n        fraction = self.line_search_fraction #0.5 # 0.5  # line search step size\n        loss_improve = 0  # initialization\n\n        \"\"\"self._max_lin_constraint_val = c, B_cost_loss_grad = c in cpo\"\"\"\n\n        B_cost_loss_grad_dot = torch.dot(B_cost_loss_grad, B_cost_loss_grad)\n        # torch.dot(B_cost_loss_grad, B_cost_loss_grad) # B_cost_loss_grad.mean() * B_cost_loss_grad.mean()\n        if (torch.dot(B_cost_loss_grad, B_cost_loss_grad)) <= self.EPS and rescale_constraint_val < 0:\n            # feasible and cost grad is zero---shortcut to pure TRPO update!\n            # w, r, s, A, B = 0, 0, 0, 0, 0\n            # g_step_dir = torch.tensor(0)\n            b_step_dir = torch.tensor(0)\n            r_coef = torch.tensor(0)\n            s_coef = torch.tensor(0)\n            positive_Cauchy_value = torch.tensor(0)\n            whether_recover_policy_value = torch.tensor(0)\n            optim_case = 4\n          \n        else:\n            # cost grad is nonzero: CPO update!\n            r_coef = (reward_loss_grad * b_step_dir).sum(0, keepdim=True)  # todo: compute r_coef: = g^T H^{-1} b\n            s_coef = (cost_loss_grad * b_step_dir).sum(0, keepdim=True)  # todo: compute s_coef: = b^T H^{-1} b\n            if r_coef == 0:\n                r_coef = self.EPS\n            if s_coef == 0:\n                s_coef = self.EPS\n            positive_Cauchy_value = (\n                        q_coef - (r_coef ** 2) / (self.EPS + s_coef))  # should be always positive (Cauchy-Shwarz)\n            whether_recover_policy_value = 2 * self._max_quad_constraint_val - (\n                    rescale_constraint_val ** 2) / (\n                                                       self.EPS + s_coef)  # does safety boundary intersect trust region? (positive = yes)\n            if rescale_constraint_val < 0 and whether_recover_policy_value < 0:\n                # point in trust region is feasible and safety boundary doesn't intersect\n                # ==> entire trust region is feasible\n                optim_case = 3\n              \n            elif rescale_constraint_val < 0 and whether_recover_policy_value >= 0:\n                # x = 0 is feasible and safety boundary intersects\n                # ==> most of trust region is feasible\n                optim_case = 2\n                \n            elif rescale_constraint_val >= 0 and whether_recover_policy_value >= 0:\n                # x = 0 is infeasible and safety boundary intersects\n                # ==> part of trust region is feasible, recovery possible\n                optim_case = 1\n               \n            else:\n                # x = 0 infeasible, and safety halfspace is outside trust region\n                # ==> whole trust region is infeasible, try to fail gracefully\n                optim_case = 0\n                \n        if whether_recover_policy_value == 0:\n            whether_recover_policy_value = self.EPS\n        \n        if optim_case in [3, 4]:\n            lam = torch.sqrt(\n                (q_coef / (2 * self._max_quad_constraint_val)))  # self.lamda_coef = lam = np.sqrt(q / (2 * target_kl))\n            nu = torch.tensor(0)  # v_coef = 0\n        elif optim_case in [1, 2]:\n            LA, LB = [0, r_coef / rescale_constraint_val], [r_coef / rescale_constraint_val, np.inf]\n            LA, LB = (LA, LB) if rescale_constraint_val < 0 else (LB, LA)\n            proj = lambda x, L: max(L[0], min(L[1], x))\n            lam_a = proj(torch.sqrt(positive_Cauchy_value / whether_recover_policy_value), LA)\n            lam_b = proj(torch.sqrt(q_coef / (torch.tensor(2 * self._max_quad_constraint_val))), LB)\n\n            f_a = lambda lam: -0.5 * (positive_Cauchy_value / (\n                        self.EPS + lam) + whether_recover_policy_value * lam) - r_coef * rescale_constraint_val / (\n                                          self.EPS + s_coef)\n            f_b = lambda lam: -0.5 * (q_coef / (self.EPS + lam) + 2 * self._max_quad_constraint_val * lam)\n            lam = lam_a if f_a(lam_a) >= f_b(lam_b) else lam_b\n            nu = max(0, lam * rescale_constraint_val - r_coef) / (self.EPS + s_coef)\n        else:\n            lam = torch.tensor(0)\n            nu = torch.sqrt(torch.tensor(2 * self._max_quad_constraint_val) / (self.EPS + s_coef))\n\n        x_a = (1. / (lam + self.EPS)) * (g_step_dir + nu * b_step_dir)\n        x_b = (nu * b_step_dir)\n        x = x_a if optim_case > 0 else x_b\n\n        # todo: update actor and learning\n        reward_loss = reward_loss.data.cpu().numpy()\n        cost_loss = cost_loss.data.cpu().numpy()\n        params = self.flat_params(self.policy.actor)\n\n        old_actor = R_Actor(self.policy.args,\n                            self.policy.obs_space,\n                            self.policy.act_space,\n                            self.device)\n        self.update_model(old_actor, params)\n\n        expected_improve = -torch.dot(x, reward_loss_grad).sum(0, keepdim=True)\n        expected_improve = expected_improve.data.cpu().numpy()\n\n        # line search\n        flag = False\n        fraction_coef = self.fraction_coef\n        # print(\"fraction_coef\", fraction_coef)\n        for i in range(self.ls_step):\n            x_norm = torch.norm(x)\n            if x_norm > 0.5:\n                x = x * 0.5 / x_norm\n\n            new_params = params - fraction_coef * (fraction**i) * x\n            self.update_model(self.policy.actor, new_params)\n            values, action_log_probs, dist_entropy, new_cost_values, action_mu, action_std = self.policy.evaluate_actions(\n                share_obs_batch,\n                obs_batch,\n                rnn_states_batch,\n                rnn_states_critic_batch,\n                actions_batch,\n                masks_batch,\n                available_actions_batch,\n                active_masks_batch,\n                rnn_states_cost_batch)\n\n            ratio = torch.exp(action_log_probs - old_action_log_probs_batch)\n            if self._use_policy_active_masks:\n                new_reward_loss = (torch.sum(ratio * factor_batch * adv_targ, dim=-1, keepdim=True) *\n                                   active_masks_batch).sum() / active_masks_batch.sum()\n            else:\n                new_reward_loss = torch.sum(ratio * factor_batch * adv_targ, dim=-1, keepdim=True).mean()\n\n            if self._use_policy_active_masks:\n                new_cost_loss = (torch.sum(ratio * factor_batch * cost_adv_targ, dim=-1, keepdim=True) *\n                                 active_masks_batch).sum() / active_masks_batch.sum()\n            else:\n                new_cost_loss = torch.sum(ratio * factor_batch * cost_adv_targ, dim=-1, keepdim=True).mean()\n\n            new_reward_loss = new_reward_loss.data.cpu().numpy()\n            new_reward_loss = -new_reward_loss\n            new_cost_loss = new_cost_loss.data.cpu().numpy()\n            loss_improve = new_reward_loss - reward_loss\n\n            kl = self.kl_divergence(obs_batch,\n                                    rnn_states_batch,\n                                    actions_batch,\n                                    masks_batch,\n                                    available_actions_batch,\n                                    active_masks_batch,\n                                    new_actor=self.policy.actor,\n                                    old_actor=old_actor)\n            kl = kl.mean()\n\n            # see https: // en.wikipedia.org / wiki / Backtracking_line_search\n            if ((kl < self.kl_threshold) and (loss_improve < 0 if optim_case > 1 else True)\n                    and (new_cost_loss.mean() - cost_loss.mean() <= max(-rescale_constraint_val, 0))):\n                flag = True\n                # print(\"line search successful\")\n                break\n            expected_improve *= fraction\n\n        if not flag:\n            # line search failed\n            print(\"line search failed\")\n            params = self.flat_params(old_actor)\n            self.update_model(self.policy.actor, params)\n\n        return value_loss, critic_grad_norm, kl, loss_improve, expected_improve, dist_entropy, ratio, cost_loss, cost_grad_norm, whether_recover_policy_value, cost_preds_batch, cost_returns_barch, B_cost_loss_grad, lam, nu, g_step_dir, b_step_dir, x, action_mu, action_std, B_cost_loss_grad_dot\n\n    def train(self, buffer, shared_buffer=None, update_actor=True):\n        \"\"\"\n        Perform a training update using minibatch GD.\n        :param buffer: (SharedReplayBuffer) buffer containing training data.\n        :param update_actor: (bool) whether to update actor network.\n\n        :return train_info: (dict) contains information regarding training update (e.g. loss, grad norms, etc).\n        \"\"\"\n        if self._use_popart:\n            advantages = buffer.returns[:-1] - self.value_normalizer.denormalize(buffer.value_preds[:-1])\n        else:\n            advantages = buffer.returns[:-1] - buffer.value_preds[:-1]\n        advantages_copy = advantages.copy()\n        advantages_copy[buffer.active_masks[:-1] == 0.0] = np.nan\n        mean_advantages = np.nanmean(advantages_copy)\n        std_advantages = np.nanstd(advantages_copy)\n        advantages = (advantages - mean_advantages) / (std_advantages + 1e-5)\n\n        if self._use_popart:\n            cost_adv = buffer.cost_returns[:-1] - self.value_normalizer.denormalize(buffer.cost_preds[:-1])\n        else:\n            cost_adv = buffer.cost_returns[:-1] - buffer.cost_preds[:-1]\n        cost_adv_copy = cost_adv.copy()\n        cost_adv_copy[buffer.active_masks[:-1] == 0.0] = np.nan\n        mean_cost_adv = np.nanmean(cost_adv_copy)\n        std_cost_adv = np.nanstd(cost_adv_copy)\n        cost_adv = (cost_adv - mean_cost_adv) / (std_cost_adv + 1e-5)\n\n        train_info = {}\n\n        train_info['value_loss'] = 0\n        train_info['kl'] = 0\n        train_info['dist_entropy'] = 0\n        train_info['loss_improve'] = 0\n        train_info['expected_improve'] = 0\n        train_info['critic_grad_norm'] = 0\n        train_info['ratio'] = 0\n        train_info['cost_loss'] = 0\n        train_info['cost_grad_norm'] = 0\n        train_info['whether_recover_policy_value'] = 0\n        train_info['cost_preds_batch'] = 0\n        train_info['cost_returns_barch'] = 0\n        train_info['B_cost_loss_grad'] = 0\n        train_info['lam'] = 0\n        train_info['nu'] = 0\n        train_info['g_step_dir'] = 0\n        train_info['b_step_dir'] = 0\n        train_info['x'] = 0\n        train_info['action_mu'] = 0\n        train_info['action_std'] = 0\n        train_info['B_cost_loss_grad_dot'] = 0\n\n        if self._use_recurrent_policy:\n            data_generator = buffer.recurrent_generator(advantages, self.num_mini_batch, self.data_chunk_length,\n                                                        cost_adv=cost_adv)\n        elif self._use_naive_recurrent:\n            data_generator = buffer.naive_recurrent_generator(advantages, self.num_mini_batch, cost_adv=cost_adv)\n        else:\n            data_generator = buffer.feed_forward_generator(advantages, self.num_mini_batch, cost_adv=cost_adv)\n        # old_actor = copy.deepcopy(self.policy.actor)\n        for sample in data_generator:\n            value_loss, critic_grad_norm, kl, loss_improve, expected_improve, dist_entropy, imp_weights, cost_loss, cost_grad_norm, whether_recover_policy_value, cost_preds_batch, cost_returns_barch, B_cost_loss_grad, lam, nu, g_step_dir, b_step_dir, x, action_mu, action_std, B_cost_loss_grad_dot \\\n                = self.trpo_update(sample, update_actor)\n\n            train_info['value_loss'] += value_loss.item()\n            train_info['kl'] += kl\n            train_info['loss_improve'] += loss_improve\n            train_info['expected_improve'] += expected_improve\n            train_info['dist_entropy'] += dist_entropy.item()\n            train_info['critic_grad_norm'] += critic_grad_norm\n            train_info['ratio'] += imp_weights.mean()\n            train_info['cost_loss'] += value_loss.item()\n            train_info['cost_grad_norm'] += cost_grad_norm\n            train_info['whether_recover_policy_value'] += whether_recover_policy_value\n            train_info['cost_preds_batch'] += cost_preds_batch.mean()\n            train_info['cost_returns_barch'] += cost_returns_barch.mean()\n            train_info['B_cost_loss_grad'] += B_cost_loss_grad.mean()\n\n            train_info['g_step_dir'] += g_step_dir.float().mean()\n            train_info['b_step_dir'] += b_step_dir.float().mean()\n            train_info['x'] = x.float().mean()\n            train_info['action_mu'] += action_mu.float().mean()\n            train_info['action_std'] += action_std.float().mean()\n            train_info['B_cost_loss_grad_dot'] += B_cost_loss_grad_dot.item()\n\n        num_updates = self.ppo_epoch * self.num_mini_batch\n\n        for k in train_info.keys():\n            train_info[k] /= num_updates\n\n        return train_info\n\n    def prep_training(self):\n        self.policy.actor.train()\n        self.policy.critic.train()\n\n    def prep_rollout(self):\n        self.policy.actor.eval()\n        self.policy.critic.eval()\n\n   \n\n"
  },
  {
    "path": "MACPO/macpo/algorithms/utils/act.py",
    "content": "from .distributions import Bernoulli, Categorical, DiagGaussian\nimport torch\nimport torch.nn as nn\n\nclass ACTLayer(nn.Module):\n    \"\"\"\n    MLP Module to compute actions.\n    :param action_space: (gym.Space) action space.\n    :param inputs_dim: (int) dimension of network input.\n    :param use_orthogonal: (bool) whether to use orthogonal initialization.\n    :param gain: (float) gain of the output layer of the network.\n    \"\"\"\n    def __init__(self, action_space, inputs_dim, use_orthogonal, gain, args=None):\n        super(ACTLayer, self).__init__()\n        self.mixed_action = False\n        self.multi_discrete = False\n        # print(\"action_space.__class__.__name__\", action_space.__class__.__name__)\n        if action_space.__class__.__name__ == \"Discrete\":\n            action_dim = action_space.n\n            self.action_out = Categorical(inputs_dim, action_dim, use_orthogonal, gain)\n        elif action_space.__class__.__name__ == \"Box\":\n            action_dim = action_space.shape[0]\n            self.action_out = DiagGaussian(inputs_dim, action_dim, use_orthogonal, gain, args)\n        elif action_space.__class__.__name__ == \"MultiBinary\":\n            action_dim = action_space.shape[0]\n            self.action_out = Bernoulli(inputs_dim, action_dim, use_orthogonal, gain)\n        elif action_space.__class__.__name__ == \"MultiDiscrete\":\n            self.multi_discrete = True\n            action_dims = action_space.high - action_space.low + 1\n            self.action_outs = []\n            for action_dim in action_dims:\n                self.action_outs.append(Categorical(inputs_dim, action_dim, use_orthogonal, gain))\n            self.action_outs = nn.ModuleList(self.action_outs)\n        else:  # discrete + continous\n            self.mixed_action = True\n            continous_dim = action_space[0].shape[0]\n            discrete_dim = action_space[1].n\n            self.action_outs = nn.ModuleList([DiagGaussian(inputs_dim, continous_dim, use_orthogonal, gain, args),\n                                              Categorical(inputs_dim, discrete_dim, use_orthogonal, gain)])\n\n    def forward(self, x, available_actions=None, deterministic=False):\n        \"\"\"\n        Compute actions and action logprobs from given input.\n        :param x: (torch.Tensor) input to network.\n        :param available_actions: (torch.Tensor) denotes which actions are available to agent\n                                  (if None, all actions available)\n        :param deterministic: (bool) whether to sample from action distribution or return the mode.\n\n        :return actions: (torch.Tensor) actions to take.\n        :return action_log_probs: (torch.Tensor) log probabilities of taken actions.\n        \"\"\"\n        if self.mixed_action :\n            actions = []\n            action_log_probs = []\n            for action_out in self.action_outs:\n                action_logit = action_out(x)\n                action = action_logit.mode() if deterministic else action_logit.sample()\n                action_log_prob = action_logit.log_probs(action)\n                actions.append(action.float())\n                action_log_probs.append(action_log_prob)\n\n            actions = torch.cat(actions, -1)\n            action_log_probs = torch.sum(torch.cat(action_log_probs, -1), -1, keepdim=True)\n\n        elif self.multi_discrete:\n            actions = []\n            action_log_probs = []\n            for action_out in self.action_outs:\n                action_logit = action_out(x)\n                action = action_logit.mode() if deterministic else action_logit.sample()\n                action_log_prob = action_logit.log_probs(action)\n                actions.append(action)\n                action_log_probs.append(action_log_prob)\n\n            actions = torch.cat(actions, -1)\n            action_log_probs = torch.cat(action_log_probs, -1)\n\n        else:\n            action_logits = self.action_out(x, available_actions)\n            actions = action_logits.mode() if deterministic else action_logits.sample()\n            action_log_probs = action_logits.log_probs(actions)\n\n        return actions, action_log_probs\n\n    def get_probs(self, x, available_actions=None):\n        \"\"\"\n        Compute action probabilities from inputs.\n        :param x: (torch.Tensor) input to network.\n        :param available_actions: (torch.Tensor) denotes which actions are available to agent\n                                  (if None, all actions available)\n\n        :return action_probs: (torch.Tensor)\n        \"\"\"\n        if self.mixed_action or self.multi_discrete:\n            action_probs = []\n            for action_out in self.action_outs:\n                action_logit = action_out(x)\n                action_prob = action_logit.probs\n                action_probs.append(action_prob)\n            action_probs = torch.cat(action_probs, -1)\n        else:\n            action_logits = self.action_out(x, available_actions)\n            action_probs = action_logits.probs\n\n        return action_probs\n\n    def evaluate_actions(self, x, action, available_actions=None, active_masks=None):\n        \"\"\"\n        Compute log probability and entropy of given actions.\n        :param x: (torch.Tensor) input to network.\n        :param action: (torch.Tensor) actions whose entropy and log probability to evaluate.\n        :param available_actions: (torch.Tensor) denotes which actions are available to agent\n                                                              (if None, all actions available)\n        :param active_masks: (torch.Tensor) denotes whether an agent is active or dead.\n\n        :return action_log_probs: (torch.Tensor) log probabilities of the input actions.\n        :return dist_entropy: (torch.Tensor) action distribution entropy for the given inputs.\n        \"\"\"\n        if self.mixed_action:\n            a, b = action.split((2, 1), -1)\n            b = b.long()\n            action = [a, b]\n            action_log_probs = []\n            dist_entropy = []\n            for action_out, act in zip(self.action_outs, action):\n                action_logit = action_out(x)\n                action_log_probs.append(action_logit.log_probs(act))\n                if active_masks is not None:\n                    if len(action_logit.entropy().shape) == len(active_masks.shape):\n                        dist_entropy.append((action_logit.entropy() * active_masks).sum()/active_masks.sum())\n                    else:\n                        dist_entropy.append((action_logit.entropy() * active_masks.squeeze(-1)).sum()/active_masks.sum())\n                else:\n                    dist_entropy.append(action_logit.entropy().mean())\n\n            action_log_probs = torch.sum(torch.cat(action_log_probs, -1), -1, keepdim=True)\n            dist_entropy = dist_entropy[0] / 2.0 + dist_entropy[1] / 0.98 #! dosen't make sense\n\n        elif self.multi_discrete:\n            action = torch.transpose(action, 0, 1)\n            action_log_probs = []\n            dist_entropy = []\n            for action_out, act in zip(self.action_outs, action):\n                action_logit = action_out(x)\n                action_log_probs.append(action_logit.log_probs(act))\n                if active_masks is not None:\n                    dist_entropy.append((action_logit.entropy()*active_masks.squeeze(-1)).sum()/active_masks.sum())\n                else:\n                    dist_entropy.append(action_logit.entropy().mean())\n\n            action_log_probs = torch.cat(action_log_probs, -1) # ! could be wrong\n            dist_entropy = torch.tensor(dist_entropy).mean()\n\n        else:\n            action_logits = self.action_out(x, available_actions)\n            action_log_probs = action_logits.log_probs(action)\n            if active_masks is not None:\n                dist_entropy = (action_logits.entropy()*active_masks).sum()/active_masks.sum()\n                # dist_entropy = (action_logits.entropy()*active_masks.squeeze(-1)).sum()/active_masks.sum()\n            else:\n                dist_entropy = action_logits.entropy().mean()\n\n        return action_log_probs, dist_entropy\n\n    def evaluate_actions_trpo(self, x, action, available_actions=None, active_masks=None):\n        \"\"\"\n        Compute log probability and entropy of given actions.\n        :param x: (torch.Tensor) input to network.\n        :param action: (torch.Tensor) actions whose entropy and log probability to evaluate.\n        :param available_actions: (torch.Tensor) denotes which actions are available to agent\n                                                              (if None, all actions available)\n        :param active_masks: (torch.Tensor) denotes whether an agent is active or dead.\n        :return action_log_probs: (torch.Tensor) log probabilities of the input actions.\n        :return dist_entropy: (torch.Tensor) action distribution entropy for the given inputs.\n        \"\"\"\n        if self.mixed_action:\n            a, b = action.split((2, 1), -1)\n            b = b.long()\n            action = [a, b]\n            action_log_probs = []\n            dist_entropy = []\n            for action_out, act in zip(self.action_outs, action):\n                action_logit = action_out(x)\n                action_log_probs.append(action_logit.log_probs(act))\n                if active_masks is not None:\n                    if len(action_logit.entropy().shape) == len(active_masks.shape):\n                        dist_entropy.append((action_logit.entropy() * active_masks).sum() / active_masks.sum())\n                    else:\n                        dist_entropy.append(\n                            (action_logit.entropy() * active_masks.squeeze(-1)).sum() / active_masks.sum())\n                else:\n                    dist_entropy.append(action_logit.entropy().mean())\n\n            action_log_probs = torch.sum(torch.cat(action_log_probs, -1), -1, keepdim=True)\n            dist_entropy = dist_entropy[0] / 2.0 + dist_entropy[1] / 0.98  # ! dosen't make sense\n\n        elif self.multi_discrete:\n            action = torch.transpose(action, 0, 1)\n            action_log_probs = []\n            dist_entropy = []\n            for action_out, act in zip(self.action_outs, action):\n                action_logit = action_out(x)\n                action_log_probs.append(action_logit.log_probs(act))\n                if active_masks is not None:\n                    dist_entropy.append((action_logit.entropy() * active_masks.squeeze(-1)).sum() / active_masks.sum())\n                else:\n                    dist_entropy.append(action_logit.entropy().mean())\n\n            action_log_probs = torch.cat(action_log_probs, -1)  # ! could be wrong\n            dist_entropy = torch.tensor(dist_entropy).mean()\n\n        else:\n            action_logits = self.action_out(x, available_actions)\n            # print(\"action_logits.mean-macppo-act.py\", action_logits.mean)\n            action_mu = action_logits.mean\n            action_std = action_logits.stddev\n            action_log_probs = action_logits.log_probs(action)\n            # print(\"action_log_probs-act.py\", action_log_probs)\n            if active_masks is not None:\n                dist_entropy = (action_logits.entropy() * active_masks).sum() / active_masks.sum()\n                # dist_entropy = (action_logits.entropy()*active_masks.squeeze(-1)).sum()/active_masks.sum()\n            else:\n                dist_entropy = action_logits.entropy().mean()\n            # print(\"action_logits-act.py\", action_logits)\n            # print(\"action_mu-act.py\", action_mu)\n\n        return action_log_probs, dist_entropy, action_mu, action_std\n\n\n\n"
  },
  {
    "path": "MACPO/macpo/algorithms/utils/cnn.py",
    "content": "import torch.nn as nn\nfrom .util import init\n\n\"\"\"CNN Modules and utils.\"\"\"\n\nclass Flatten(nn.Module):\n    def forward(self, x):\n        return x.view(x.size(0), -1)\n\n\nclass CNNLayer(nn.Module):\n    def __init__(self, obs_shape, hidden_size, use_orthogonal, use_ReLU, kernel_size=3, stride=1):\n        super(CNNLayer, self).__init__()\n\n        active_func = [nn.Tanh(), nn.ReLU()][use_ReLU]\n        init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal]\n        gain = nn.init.calculate_gain(['tanh', 'relu'][use_ReLU])\n\n        def init_(m):\n            return init(m, init_method, lambda x: nn.init.constant_(x, 0), gain=gain)\n\n        input_channel = obs_shape[0]\n        input_width = obs_shape[1]\n        input_height = obs_shape[2]\n\n        self.cnn = nn.Sequential(\n            init_(nn.Conv2d(in_channels=input_channel,\n                            out_channels=hidden_size // 2,\n                            kernel_size=kernel_size,\n                            stride=stride)\n                  ),\n            active_func,\n            Flatten(),\n            init_(nn.Linear(hidden_size // 2 * (input_width - kernel_size + stride) * (input_height - kernel_size + stride),\n                            hidden_size)\n                  ),\n            active_func,\n            init_(nn.Linear(hidden_size, hidden_size)), active_func)\n\n    def forward(self, x):\n        x = x / 255.0\n        x = self.cnn(x)\n        return x\n\n\nclass CNNBase(nn.Module):\n    def __init__(self, args, obs_shape):\n        super(CNNBase, self).__init__()\n\n        self._use_orthogonal = args.use_orthogonal\n        self._use_ReLU = args.use_ReLU\n        self.hidden_size = args.hidden_size\n\n        self.cnn = CNNLayer(obs_shape, self.hidden_size, self._use_orthogonal, self._use_ReLU)\n\n    def forward(self, x):\n        x = self.cnn(x)\n        return x\n"
  },
  {
    "path": "MACPO/macpo/algorithms/utils/distributions.py",
    "content": "import torch\nimport torch.nn as nn\nfrom .util import init\n\n\"\"\"\nModify standard PyTorch distributions so they to make compatible with this codebase. \n\"\"\"\n\n#\n# Standardize distribution interfaces\n#\n\n# Categorical\nclass FixedCategorical(torch.distributions.Categorical):\n    def sample(self):\n        return super().sample().unsqueeze(-1)\n\n    def log_probs(self, actions):\n        return (\n            super()\n            .log_prob(actions.squeeze(-1))\n            .view(actions.size(0), -1)\n            .sum(-1)\n            .unsqueeze(-1)\n        )\n\n    def mode(self):\n        return self.probs.argmax(dim=-1, keepdim=True)\n\n\n# Normal\nclass FixedNormal(torch.distributions.Normal):\n    def log_probs(self, actions):\n        return super().log_prob(actions)\n        # return super().log_prob(actions).sum(-1, keepdim=True)\n\n    def entrop(self):\n        return super.entropy().sum(-1)\n\n    def mode(self):\n        return self.mean\n\n\n# Bernoulli\nclass FixedBernoulli(torch.distributions.Bernoulli):\n    def log_probs(self, actions):\n        return super.log_prob(actions).view(actions.size(0), -1).sum(-1).unsqueeze(-1)\n\n    def entropy(self):\n        return super().entropy().sum(-1)\n\n    def mode(self):\n        return torch.gt(self.probs, 0.5).float()\n\n\nclass Categorical(nn.Module):\n    def __init__(self, num_inputs, num_outputs, use_orthogonal=True, gain=0.01):\n        super(Categorical, self).__init__()\n        init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal]\n        def init_(m): \n            return init(m, init_method, lambda x: nn.init.constant_(x, 0), gain)\n\n        self.linear = init_(nn.Linear(num_inputs, num_outputs))\n\n    def forward(self, x, available_actions=None):\n        x = self.linear(x)\n        if available_actions is not None:\n            x[available_actions == 0] = -1e10\n        return FixedCategorical(logits=x)\n\n\n# class DiagGaussian(nn.Module):\n#     def __init__(self, num_inputs, num_outputs, use_orthogonal=True, gain=0.01):\n#         super(DiagGaussian, self).__init__()\n#\n#         init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal]\n#         def init_(m):\n#             return init(m, init_method, lambda x: nn.init.constant_(x, 0), gain)\n#\n#         self.fc_mean = init_(nn.Linear(num_inputs, num_outputs))\n#         self.logstd = AddBias(torch.zeros(num_outputs))\n#\n#     def forward(self, x, available_actions=None):\n#         action_mean = self.fc_mean(x)\n#\n#         #  An ugly hack for my KFAC implementation.\n#         zeros = torch.zeros(action_mean.size())\n#         if x.is_cuda:\n#             zeros = zeros.cuda()\n#\n#         action_logstd = self.logstd(zeros)\n#         return FixedNormal(action_mean, action_logstd.exp())\n\nclass DiagGaussian(nn.Module):\n    def __init__(self, num_inputs, num_outputs, use_orthogonal=True, gain=0.01, args=None):\n        super(DiagGaussian, self).__init__()\n\n        init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal]\n\n        def init_(m):\n            return init(m, init_method, lambda x: nn.init.constant_(x, 0), gain)\n\n        if args is not None:\n            self.std_x_coef = args.std_x_coef\n            self.std_y_coef = args.std_y_coef\n        else:\n            self.std_x_coef = 1.\n            self.std_y_coef = 0.5\n        self.fc_mean = init_(nn.Linear(num_inputs, num_outputs))\n        log_std = torch.ones(num_outputs) * self.std_x_coef\n        self.log_std = torch.nn.Parameter(log_std)\n\n    def forward(self, x, available_actions=None):\n        action_mean = self.fc_mean(x)\n        action_std = torch.sigmoid(self.log_std / self.std_x_coef) * self.std_y_coef\n        # print(\"self.log_std\", self.log_std)\n        # print(\"action_mean\", action_mean)\n        # print(\"_action_std\", action_std)\n        # action_std = torch.zeros_like(_action_std)\n        # print(\"action_std\", action_std)\n        # action_std = torch.where(torch.isnan(action_std), torch.full_like(action_std, 1e-8), action_std)\n        # torch.where((action_std == torch.tensor(0)), torch.tensor(1e-8), action_std)\n        return FixedNormal(action_mean, action_std)\n\nclass Bernoulli(nn.Module):\n    def __init__(self, num_inputs, num_outputs, use_orthogonal=True, gain=0.01):\n        super(Bernoulli, self).__init__()\n        init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal]\n        def init_(m): \n            return init(m, init_method, lambda x: nn.init.constant_(x, 0), gain)\n        \n        self.linear = init_(nn.Linear(num_inputs, num_outputs))\n\n    def forward(self, x):\n        x = self.linear(x)\n        return FixedBernoulli(logits=x)\n\nclass AddBias(nn.Module):\n    def __init__(self, bias):\n        super(AddBias, self).__init__()\n        self._bias = nn.Parameter(bias.unsqueeze(1))\n\n    def forward(self, x):\n        if x.dim() == 2:\n            bias = self._bias.t().view(1, -1)\n        else:\n            bias = self._bias.t().view(1, -1, 1, 1)\n\n        return x + bias\n"
  },
  {
    "path": "MACPO/macpo/algorithms/utils/mlp.py",
    "content": "import torch.nn as nn\nfrom .util import init, get_clones\n\n\"\"\"MLP modules.\"\"\"\n\nclass MLPLayer(nn.Module):\n    def __init__(self, input_dim, hidden_size, layer_N, use_orthogonal, use_ReLU):\n        super(MLPLayer, self).__init__()\n        self._layer_N = layer_N\n\n        active_func = [nn.Tanh(), nn.ReLU()][use_ReLU]\n        init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal]\n        gain = nn.init.calculate_gain(['tanh', 'relu'][use_ReLU])\n\n        def init_(m):\n            return init(m, init_method, lambda x: nn.init.constant_(x, 0), gain=gain)\n\n        self.fc1 = nn.Sequential(\n            init_(nn.Linear(input_dim, hidden_size)), active_func, nn.LayerNorm(hidden_size))\n        # self.fc_h = nn.Sequential(init_(\n        #     nn.Linear(hidden_size, hidden_size)), active_func, nn.LayerNorm(hidden_size))\n        # self.fc2 = get_clones(self.fc_h, self._layer_N)\n        self.fc2 = nn.ModuleList([nn.Sequential(init_(\n            nn.Linear(hidden_size, hidden_size)), active_func, nn.LayerNorm(hidden_size)) for i in\n            range(self._layer_N)])\n\n    def forward(self, x):\n        x = self.fc1(x)\n        for i in range(self._layer_N):\n            x = self.fc2[i](x)\n        return x\n\n\nclass MLPBase(nn.Module):\n    def __init__(self, args, obs_shape, cat_self=True, attn_internal=False):\n        super(MLPBase, self).__init__()\n\n        self._use_feature_normalization = args.use_feature_normalization\n        self._use_orthogonal = args.use_orthogonal\n        self._use_ReLU = args.use_ReLU\n        self._stacked_frames = args.stacked_frames\n        self._layer_N = args.layer_N\n        self.hidden_size = args.hidden_size\n\n        obs_dim = obs_shape[0]\n\n        if self._use_feature_normalization:\n            self.feature_norm = nn.LayerNorm(obs_dim)\n\n        self.mlp = MLPLayer(obs_dim, self.hidden_size,\n                              self._layer_N, self._use_orthogonal, self._use_ReLU)\n\n    def forward(self, x):\n        if self._use_feature_normalization:\n            x = self.feature_norm(x)\n\n        x = self.mlp(x)\n\n        return x"
  },
  {
    "path": "MACPO/macpo/algorithms/utils/rnn.py",
    "content": "import torch\nimport torch.nn as nn\n\n\"\"\"RNN modules.\"\"\"\n\n\nclass RNNLayer(nn.Module):\n    def __init__(self, inputs_dim, outputs_dim, recurrent_N, use_orthogonal):\n        super(RNNLayer, self).__init__()\n        self._recurrent_N = recurrent_N\n        self._use_orthogonal = use_orthogonal\n\n        self.rnn = nn.GRU(inputs_dim, outputs_dim, num_layers=self._recurrent_N)\n        for name, param in self.rnn.named_parameters():\n            if 'bias' in name:\n                nn.init.constant_(param, 0)\n            elif 'weight' in name:\n                if self._use_orthogonal:\n                    nn.init.orthogonal_(param)\n                else:\n                    nn.init.xavier_uniform_(param)\n        self.norm = nn.LayerNorm(outputs_dim)\n\n    def forward(self, x, hxs, masks):\n        if x.size(0) == hxs.size(0):\n            x, hxs = self.rnn(x.unsqueeze(0),\n                              (hxs * masks.repeat(1, self._recurrent_N).unsqueeze(-1)).transpose(0, 1).contiguous())\n            x = x.squeeze(0)\n            hxs = hxs.transpose(0, 1)\n        else:\n            # x is a (T, N, -1) tensor that has been flatten to (T * N, -1)\n            N = hxs.size(0)\n            T = int(x.size(0) / N)\n\n            # unflatten\n            x = x.view(T, N, x.size(1))\n\n            # Same deal with masks\n            masks = masks.view(T, N)\n\n            # Let's figure out which steps in the sequence have a zero for any agent\n            # We will always assume t=0 has a zero in it as that makes the logic cleaner\n            has_zeros = ((masks[1:] == 0.0)\n                         .any(dim=-1)\n                         .nonzero()\n                         .squeeze()\n                         .cpu())\n\n            # +1 to correct the masks[1:]\n            if has_zeros.dim() == 0:\n                # Deal with scalar\n                has_zeros = [has_zeros.item() + 1]\n            else:\n                has_zeros = (has_zeros + 1).numpy().tolist()\n\n            # add t=0 and t=T to the list\n            has_zeros = [0] + has_zeros + [T]\n\n            hxs = hxs.transpose(0, 1)\n\n            outputs = []\n            for i in range(len(has_zeros) - 1):\n                # We can now process steps that don't have any zeros in masks together!\n                # This is much faster\n                start_idx = has_zeros[i]\n                end_idx = has_zeros[i + 1]\n                temp = (hxs * masks[start_idx].view(1, -1, 1).repeat(self._recurrent_N, 1, 1)).contiguous()\n                rnn_scores, hxs = self.rnn(x[start_idx:end_idx], temp)\n                outputs.append(rnn_scores)\n\n            # assert len(outputs) == T\n            # x is a (T, N, -1) tensor\n            x = torch.cat(outputs, dim=0)\n\n            # flatten\n            x = x.reshape(T * N, -1)\n            hxs = hxs.transpose(0, 1)\n\n        x = self.norm(x)\n        return x, hxs\n"
  },
  {
    "path": "MACPO/macpo/algorithms/utils/util.py",
    "content": "import copy\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\n\ndef init(module, weight_init, bias_init, gain=1):\n    weight_init(module.weight.data, gain=gain)\n    bias_init(module.bias.data)\n    return module\n\ndef get_clones(module, N):\n    return nn.ModuleList([copy.deepcopy(module) for i in range(N)])\n\ndef check(input):\n    output = torch.from_numpy(input) if type(input) == np.ndarray else input\n    return output\n"
  },
  {
    "path": "MACPO/macpo/config.py",
    "content": "import argparse\n\n\ndef get_config():\n    \"\"\"\n    The configuration parser for common hyperparameters of all environment. \n    Please reach each `scripts/train/<env>_runner.py` file to find private hyperparameters\n    only used in <env>.\n\n    Prepare parameters:\n        --algorithm_name <algorithm_name>\n            specifiy the algorithm, including `[\"rmappo\", \"mappo\", \"rmappg\", \"mappg\", \"trpo\"]`\n        --experiment_name <str>\n            an identifier to distinguish different experiment.\n        --seed <int>\n            set seed for numpy and torch \n        --cuda\n            by default True, will use GPU to train; or else will use CPU; \n        --cuda_deterministic\n            by default, make sure random seed effective. if set, bypass such function.\n        --n_training_threads <int>\n            number of training threads working in parallel. by default 1\n        --n_rollout_threads <int>\n            number of parallel envs for training rollout. by default 32\n        --n_eval_rollout_threads <int>\n            number of parallel envs for evaluating rollout. by default 1\n        --n_render_rollout_threads <int>\n            number of parallel envs for rendering, could only be set as 1 for some environments.\n        --num_env_steps <int>\n            number of env steps to train (default: 10e6)\n        --user_name <str>\n            [for wandb usage], to specify user's name for simply collecting training data.\n        --use_wandb\n            [for wandb usage], by default True, will log date to wandb server. or else will use tensorboard to log data.\n    \n    Env parameters:\n        --env_name <str>\n            specify the name of environment\n        --use_obs_instead_of_state\n            [only for some env] by default False, will use global state; or else will use concatenated local obs.\n    \n    Replay Buffer parameters:\n        --episode_length <int>\n            the max length of episode in the buffer. \n    \n    Network parameters:\n        --share_policy\n            by default True, all agents will share the same network; set to make training agents use different policies. \n        --use_centralized_V\n            by default True, use centralized training mode; or else will decentralized training mode.\n        --stacked_frames <int>\n            Number of input frames which should be stack together.\n        --hidden_size <int>\n            Dimension of hidden layers for actor/critic networks\n        --layer_N <int>\n            Number of layers for actor/critic networks\n        --use_ReLU\n            by default True, will use ReLU. or else will use Tanh.\n        --use_popart\n            by default True, use running mean and std to normalize rewards. \n        --use_feature_normalization\n            by default True, apply layernorm to normalize inputs. \n        --use_orthogonal\n            by default True, use Orthogonal initialization for weights and 0 initialization for biases. or else, will use xavier uniform inilialization.\n        --gain\n            by default 0.01, use the gain # of last action layer\n        --use_naive_recurrent_policy\n            by default False, use the whole trajectory to calculate hidden states.\n        --use_recurrent_policy\n            by default, use Recurrent Policy. If set, do not use.\n        --recurrent_N <int>\n            The number of recurrent layers ( default 1).\n        --data_chunk_length <int>\n            Time length of chunks used to train a recurrent_policy, default 10.\n    \n    Optimizer parameters:\n        --lr <float>\n            learning rate parameter,  (default: 5e-4, fixed).\n        --critic_lr <float>\n            learning rate of critic  (default: 5e-4, fixed)\n        --opti_eps <float>\n            RMSprop optimizer epsilon (default: 1e-5)\n        --weight_decay <float>\n            coefficience of weight decay (default: 0)\n    \n    PPO parameters:\n        --ppo_epoch <int>\n            number of ppo epochs (default: 15)\n        --use_clipped_value_loss \n            by default, clip loss value. If set, do not clip loss value.\n        --clip_param <float>\n            ppo clip parameter (default: 0.2)\n        --num_mini_batch <int>\n            number of batches for ppo (default: 1)\n        --entropy_coef <float>\n            entropy term coefficient (default: 0.01)\n        --use_max_grad_norm \n            by default, use max norm of gradients. If set, do not use.\n        --max_grad_norm <float>\n            max norm of gradients (default: 0.5)\n        --use_gae\n            by default, use generalized advantage estimation. If set, do not use gae.\n        --gamma <float>\n            discount factor for rewards (default: 0.99)\n        --gae_lambda <float>\n            gae lambda parameter (default: 0.95)\n        --use_proper_time_limits\n            by default, the return value does consider limits of time. If set, compute returns with considering time limits factor.\n        --use_huber_loss\n            by default, use huber loss. If set, do not use huber loss.\n        --use_value_active_masks\n            by default True, whether to mask useless data in value loss.  \n        --huber_delta <float>\n            coefficient of huber loss.  \n    \n    PPG parameters:\n        --aux_epoch <int>\n            number of auxiliary epochs. (default: 4)\n        --clone_coef <float>\n            clone term coefficient (default: 0.01)\n    \n    Run parameters：\n        --use_linear_lr_decay\n            by default, do not apply linear decay to learning rate. If set, use a linear schedule on the learning rate\n    \n    Save & Log parameters:\n        --save_interval <int>\n            time duration between contiunous twice models saving.\n        --log_interval <int>\n            time duration between contiunous twice log printing.\n    \n    Eval parameters:\n        --use_eval\n            by default, do not start evaluation. If set`, start evaluation alongside with training.\n        --eval_interval <int>\n            time duration between contiunous twice evaluation progress.\n        --eval_episodes <int>\n            number of episodes of a single evaluation.\n    \n    Render parameters:\n        --save_gifs\n            by default, do not save render video. If set, save video.\n        --use_render\n            by default, do not render the env during training. If set, start render. Note: something, the environment has internal render process which is not controlled by this hyperparam.\n        --render_episodes <int>\n            the number of episodes to render a given env\n        --ifi <float>\n            the play interval of each rendered image in saved video.\n    \n    Pretrained parameters:\n        --model_dir <str>\n            by default None. set the path to pretrained model.\n    \"\"\"\n    parser = argparse.ArgumentParser(\n        description='macpo', formatter_class=argparse.RawDescriptionHelpFormatter)\n\n    # prepare parameters\n    parser.add_argument(\"--algorithm_name\", type=str,\n                        default=' ', choices=[\"macpo\"])\n\n    parser.add_argument(\"--experiment_name\", type=str, default=\"check\", help=\"an identifier to distinguish different experiment.\")\n    parser.add_argument(\"--seed\", type=int, default=1, help=\"Random seed for numpy/torch\")\n    parser.add_argument(\"--cuda\", action='store_false', default=False, help=\"by default True, will use GPU to train; or else will use CPU;\")\n    parser.add_argument(\"--cuda_deterministic\",\n                        action='store_false', default=True, help=\"by default, make sure random seed effective. if set, bypass such function.\")\n    parser.add_argument(\"--n_training_threads\", type=int,\n                        default=1, help=\"Number of torch threads for training\")\n    parser.add_argument(\"--n_rollout_threads\", type=int, default=32,\n                        help=\"Number of parallel envs for training rollouts\")\n    parser.add_argument(\"--n_eval_rollout_threads\", type=int, default=1,\n                        help=\"Number of parallel envs for evaluating rollouts\")\n    parser.add_argument(\"--n_render_rollout_threads\", type=int, default=1,\n                        help=\"Number of parallel envs for rendering rollouts\")\n    parser.add_argument(\"--num_env_steps\", type=int, default=10e6,\n                        help='Number of environment steps to train (default: 10e6)')\n    parser.add_argument(\"--user_name\", type=str, default='marl',help=\"[for wandb usage], to specify user's name for simply collecting training data.\")\n    parser.add_argument(\"--use_wandb\", action='store_false', default=False, help=\"[for wandb usage], by default True, will log date to wandb server. or else will use tensorboard to log data.\")\n\n    # env parameters\n    parser.add_argument(\"--env_name\", type=str, default='StarCraft2', help=\"specify the name of environment\")\n    parser.add_argument(\"--use_obs_instead_of_state\", action='store_true',\n                        default=False, help=\"Whether to use global state or concatenated obs\")\n\n    # replay buffer parameters\n    parser.add_argument(\"--episode_length\", type=int,\n                        default=200, help=\"Max length for any episode\")\n\n    # network parameters\n    parser.add_argument(\"--share_policy\", action='store_false',\n                        default=True, help='Whether agent share the same policy')\n    parser.add_argument(\"--use_centralized_V\", action='store_false',\n                        default=True, help=\"Whether to use centralized V function\")\n    parser.add_argument(\"--stacked_frames\", type=int, default=1,\n                        help=\"Dimension of hidden layers for actor/critic networks\")\n    parser.add_argument(\"--use_stacked_frames\", action='store_true',\n                        default=False, help=\"Whether to use stacked_frames\")\n    parser.add_argument(\"--hidden_size\", type=int, default=64,\n                        help=\"Dimension of hidden layers for actor/critic networks\") \n    parser.add_argument(\"--layer_N\", type=int, default=1,\n                        help=\"Number of layers for actor/critic networks\")\n    parser.add_argument(\"--use_ReLU\", action='store_false',\n                        default=True, help=\"Whether to use ReLU\")\n    parser.add_argument(\"--use_popart\", action='store_false', default=True, help=\"by default True, use running mean and std to normalize rewards.\")\n    parser.add_argument(\"--use_valuenorm\", action='store_false', default=True, help=\"by default True, use running mean and std to normalize rewards.\")\n    parser.add_argument(\"--use_feature_normalization\", action='store_false',\n                        default=True, help=\"Whether to apply layernorm to the inputs\")\n    parser.add_argument(\"--use_orthogonal\", action='store_false', default=True,\n                        help=\"Whether to use Orthogonal initialization for weights and 0 initialization for biases\")\n    parser.add_argument(\"--gain\", type=float, default=0.01,\n                        help=\"The gain # of last action layer\")\n\n    # recurrent parameters\n    parser.add_argument(\"--use_naive_recurrent_policy\", action='store_true',\n                        default=False, help='Whether to use a naive recurrent policy')\n    parser.add_argument(\"--use_recurrent_policy\", action='store_true',\n                        default=False, help='use a recurrent policy')\n    parser.add_argument(\"--recurrent_N\", type=int, default=1, help=\"The number of recurrent layers.\")\n    parser.add_argument(\"--data_chunk_length\", type=int, default=10,\n                        help=\"Time length of chunks used to train a recurrent_policy\")\n    \n    # optimizer parameters\n    parser.add_argument(\"--lr\", type=float, default=5e-4,\n                        help='learning rate (default: 5e-4)')\n    parser.add_argument(\"--critic_lr\", type=float, default=5e-4,\n                        help='critic learning rate (default: 5e-4)')\n    parser.add_argument(\"--opti_eps\", type=float, default=1e-5,\n                        help='RMSprop optimizer epsilon (default: 1e-5)')\n    parser.add_argument(\"--weight_decay\", type=float, default=0)\n    parser.add_argument(\"--std_x_coef\", type=float, default=1)\n    parser.add_argument(\"--std_y_coef\", type=float, default=0.5)\n\n    # trpo parameters\n    parser.add_argument(\"--kl_threshold\", type=float, default=0.01,\n                        help='the threshold of kl-divergence (default: 0.01)')\n    parser.add_argument(\"--safety_bound\", type=float, default=0.1,\n                        help='safety')\n\n    parser.add_argument(\"--ls_step\", type=int, default=10,\n                        help='number of line search (default: 10)')\n    parser.add_argument(\"--accept_ratio\", type=float, default=0.5,\n                        help='accept ratio of loss improve (default: 0.5)')\n    parser.add_argument(\"--EPS\", type=float, default=1e-8,\n                        help='hyper parameter, close to zero')\n\n    # ppo parameters\n    parser.add_argument(\"--ppo_epoch\", type=int, default=15,\n                        help='number of ppo epochs (default: 15)')\n    parser.add_argument(\"--use_clipped_value_loss\",\n                        action='store_false', default=True, help=\"by default, clip loss value. If set, do not clip loss value.\")\n    parser.add_argument(\"--clip_param\", type=float, default=0.2,\n                        help='ppo clip parameter (default: 0.2)')\n    parser.add_argument(\"--num_mini_batch\", type=int, default=1,\n                        help='number of batches for ppo (default: 1)')\n    parser.add_argument(\"--entropy_coef\", type=float, default=0.01,\n                        help='entropy term coefficient (default: 0.01)')\n    # todo: lagrangian_coef is the lagrangian coefficient for mappo_lagrangian\n    parser.add_argument(\"--lagrangian_coef\", type=float, default=0.01,\n                        help='entropy term coefficient (default: 0.01)')\n    parser.add_argument(\"--value_loss_coef\", type=float,\n                        default=1, help='value loss coefficient (default: 0.5)')\n    parser.add_argument(\"--use_max_grad_norm\",\n                        action='store_false', default=True, help=\"by default, use max norm of gradients. If set, do not use.\")\n    parser.add_argument(\"--max_grad_norm\", type=float, default=10.0,\n                        help='max norm of gradients (default: 0.5)')\n    parser.add_argument(\"--use_gae\", action='store_false',\n                        default=True, help='use generalized advantage estimation')\n    parser.add_argument(\"--gamma\", type=float, default=0.99,\n                        help='discount factor for rewards (default: 0.99)')\n    parser.add_argument(\"--safety_gamma\", type=float, default=0.2,\n                        help='discount factor for rewards (default: 0.2)')\n    parser.add_argument(\"--gae_lambda\", type=float, default=0.95,\n                        help='gae lambda parameter (default: 0.95)')\n    parser.add_argument(\"--use_proper_time_limits\", action='store_true',\n                        default=False, help='compute returns taking into account time limits')\n    parser.add_argument(\"--use_huber_loss\", action='store_false', default=True, help=\"by default, use huber loss. If set, do not use huber loss.\")\n    parser.add_argument(\"--use_value_active_masks\",\n                        action='store_false', default=True, help=\"by default True, whether to mask useless data in value loss.\")\n    parser.add_argument(\"--use_policy_active_masks\",\n                        action='store_false', default=True, help=\"by default True, whether to mask useless data in policy loss.\")\n    parser.add_argument(\"--huber_delta\", type=float, default=10.0, help=\" coefficience of huber loss.\")\n\n    # run parameters\n    parser.add_argument(\"--use_linear_lr_decay\", action='store_true',\n                        default=False, help='use a linear schedule on the learning rate')\n    # save parameters\n    parser.add_argument(\"--save_interval\", type=int, default=1, help=\"time duration between contiunous twice models saving.\")\n\n    # log parameters\n    parser.add_argument(\"--log_interval\", type=int, default=5, help=\"time duration between contiunous twice log printing.\")\n\n    # eval parameters\n    parser.add_argument(\"--use_eval\", action='store_true', default=False, help=\"by default, do not start evaluation. If set`, start evaluation alongside with training.\")\n    parser.add_argument(\"--eval_interval\", type=int, default=25, help=\"time duration between contiunous twice evaluation progress.\")\n    parser.add_argument(\"--eval_episodes\", type=int, default=32, help=\"number of episodes of a single evaluation.\")\n\n    # render parameters\n    parser.add_argument(\"--save_gifs\", action='store_true', default=False, help=\"by default, do not save render video. If set, save video.\")\n    parser.add_argument(\"--use_render\", action='store_true', default=False, help=\"by default, do not render the env during training. If set, start render. Note: something, the environment has internal render process which is not controlled by this hyperparam.\")\n    parser.add_argument(\"--render_episodes\", type=int, default=5, help=\"the number of episodes to render a given env\")\n    parser.add_argument(\"--ifi\", type=float, default=0.1, help=\"the play interval of each rendered image in saved video.\")\n\n    # pretrained parameters\n    parser.add_argument(\"--model_dir\", type=str, default=None, help=\"by default None. set the path to pretrained model.\")\n\n    # safe parameters fraction\n    parser.add_argument(\"--safty_bound\", type=float, default=0.1, help=\" \")\n    parser.add_argument(\"--line_search_fraction\", type=float, default=0.5, help=\"line search step size\")\n    parser.add_argument(\"--g_step_dir_coef\", type=float, default=0.1, help=\"rescale g\")\n    parser.add_argument(\"--b_step_dir_coef\", type=float, default=0.1, help=\"rescale b\")\n    parser.add_argument(\"--fraction_coef\", type=float, default=0.1, help=\"the coef of line search step size\")\n\n    return parser\n"
  },
  {
    "path": "MACPO/macpo/envs/__init__.py",
    "content": "\r\nimport socket\r\nfrom absl import flags\r\nFLAGS = flags.FLAGS\r\nFLAGS(['train_sc.py'])\r\n\r\n\r\n"
  },
  {
    "path": "MACPO/macpo/envs/env_wrappers.py",
    "content": "\"\"\"\nModified from OpenAI Baselines code to work with multi-agent envs\n\"\"\"\nimport numpy as np\nimport torch\nfrom multiprocessing import Process, Pipe\nfrom abc import ABC, abstractmethod\nfrom macpo.utils.util import tile_images\n\nclass CloudpickleWrapper(object):\n    \"\"\"\n    Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)\n    \"\"\"\n\n    def __init__(self, x):\n        self.x = x\n\n    def __getstate__(self):\n        import cloudpickle\n        return cloudpickle.dumps(self.x)\n\n    def __setstate__(self, ob):\n        import pickle\n        self.x = pickle.loads(ob)\n\n\nclass ShareVecEnv(ABC):\n    \"\"\"\n    An abstract asynchronous, vectorized environment.\n    Used to batch data from multiple copies of an environment, so that\n    each observation becomes an batch of observations, and expected action is a batch of actions to\n    be applied per-environment.\n    \"\"\"\n    closed = False\n    viewer = None\n\n    metadata = {\n        'render.modes': ['human', 'rgb_array']\n    }\n\n    def __init__(self, num_envs, observation_space, share_observation_space, action_space):\n        self.num_envs = num_envs\n        self.observation_space = observation_space\n        self.share_observation_space = share_observation_space\n        self.action_space = action_space\n\n    @abstractmethod\n    def reset(self):\n        \"\"\"\n        Reset all the environments and return an array of\n        observations, or a dict of observation arrays.\n\n        If step_async is still doing work, that work will\n        be cancelled and step_wait() should not be called\n        until step_async() is invoked again.\n        \"\"\"\n        pass\n\n    @abstractmethod\n    def step_async(self, actions):\n        \"\"\"\n        Tell all the environments to start taking a step\n        with the given actions.\n        Call step_wait() to get the results of the step.\n\n        You should not call this if a step_async run is\n        already pending.\n        \"\"\"\n        pass\n\n    @abstractmethod\n    def step_wait(self):\n        \"\"\"\n        Wait for the step taken with step_async().\n\n        Returns (obs, rews, cos, dones, infos):\n         - obs: an array of observations, or a dict of\n                arrays of observations.\n         - rews: an array of rewards\n         - cos: an array of costs\n         - dones: an array of \"episode done\" booleans\n         - infos: a sequence of info objects\n        \"\"\"\n        pass\n\n    def close_extras(self):\n        \"\"\"\n        Clean up the  extra resources, beyond what's in this base class.\n        Only runs when not self.closed.\n        \"\"\"\n        pass\n\n    def close(self):\n        if self.closed:\n            return\n        if self.viewer is not None:\n            self.viewer.close()\n        self.close_extras()\n        self.closed = True\n\n    def step(self, actions):\n        \"\"\"\n        Step the environments synchronously.\n\n        This is available for backwards compatibility.\n        \"\"\"\n        self.step_async(actions)\n        return self.step_wait()\n\n    def render(self, mode='human'):\n        imgs = self.get_images()\n        bigimg = tile_images(imgs)\n        if mode == 'human':\n            self.get_viewer().imshow(bigimg)\n            return self.get_viewer().isopen\n        elif mode == 'rgb_array':\n            return bigimg\n        else:\n            raise NotImplementedError\n\n    def get_images(self):\n        \"\"\"\n        Return RGB images from each environment\n        \"\"\"\n        raise NotImplementedError\n\n    @property\n    def unwrapped(self):\n        if isinstance(self, VecEnvWrapper):\n            return self.venv.unwrapped\n        else:\n            return self\n\n    def get_viewer(self):\n        if self.viewer is None:\n            from gym.envs.classic_control import rendering\n            self.viewer = rendering.SimpleImageViewer()\n        return self.viewer\n\n\ndef worker(remote, parent_remote, env_fn_wrapper):\n    parent_remote.close()\n    env = env_fn_wrapper.x()\n    while True:\n        cmd, data = remote.recv()\n        if cmd == 'step':\n            ob, reward, done, info = env.step(data)\n            if 'bool' in done.__class__.__name__:\n                if done:\n                    ob = env.reset()\n            else:\n                if np.all(done):\n                    ob = env.reset()\n\n            remote.send((ob, reward, info[\"cost\"], done, info))\n        elif cmd == 'reset':\n            ob = env.reset()\n            remote.send((ob))\n        elif cmd == 'render':\n            if data == \"rgb_array\":\n                fr = env.render(mode=data)\n                remote.send(fr)\n            elif data == \"human\":\n                env.render(mode=data)\n        elif cmd == 'reset_task':\n            ob = env.reset_task()\n            remote.send(ob)\n        elif cmd == 'close':\n            env.close()\n            remote.close()\n            break\n        elif cmd == 'get_spaces':\n            remote.send((env.observation_space, env.share_observation_space, env.action_space))\n        else:\n            raise NotImplementedError\n\n\nclass GuardSubprocVecEnv(ShareVecEnv):\n    def __init__(self, env_fns, spaces=None):\n        \"\"\"\n        envs: list of gym environments to run in subprocesses\n        \"\"\"\n        self.waiting = False\n        self.closed = False\n        nenvs = len(env_fns)\n        self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])\n        self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))\n                   for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]\n        for p in self.ps:\n            p.daemon = False  # could cause zombie process\n            p.start()\n        for remote in self.work_remotes:\n            remote.close()\n\n        self.remotes[0].send(('get_spaces', None))\n        observation_space, share_observation_space, action_space = self.remotes[0].recv()\n        ShareVecEnv.__init__(self, len(env_fns), observation_space,\n                             share_observation_space, action_space)\n\n    def step_async(self, actions):\n\n        for remote, action in zip(self.remotes, actions):\n            remote.send(('step', action))\n        self.waiting = True\n\n    def step_wait(self):\n        results = [remote.recv() for remote in self.remotes]\n        self.waiting = False\n        obs, rews, cos, dones, infos = zip(*results)\n        return np.stack(obs), np.stack(rews), np.stack(cos), np.stack(dones), infos\n\n    def reset(self):\n        for remote in self.remotes:\n            remote.send(('reset', None))\n        obs = [remote.recv() for remote in self.remotes]\n        return np.stack(obs)\n\n    def reset_task(self):\n        for remote in self.remotes:\n            remote.send(('reset_task', None))\n        return np.stack([remote.recv() for remote in self.remotes])\n\n    def close(self):\n        if self.closed:\n            return\n        if self.waiting:\n            for remote in self.remotes:\n                remote.recv()\n        for remote in self.remotes:\n            remote.send(('close', None))\n        for p in self.ps:\n            p.join()\n        self.closed = True\n\n\nclass SubprocVecEnv(ShareVecEnv):\n    def __init__(self, env_fns, spaces=None):\n        \"\"\"\n        envs: list of gym environments to run in subprocesses\n        \"\"\"\n        self.waiting = False\n        self.closed = False\n        nenvs = len(env_fns)\n        self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])\n        self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))\n                   for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]\n        for p in self.ps:\n            p.daemon = True  # if the main process crashes, we should not cause things to hang\n            p.start()\n        for remote in self.work_remotes:\n            remote.close()\n\n        self.remotes[0].send(('get_spaces', None))\n        observation_space, share_observation_space, action_space = self.remotes[0].recv()\n        ShareVecEnv.__init__(self, len(env_fns), observation_space,\n                             share_observation_space, action_space)\n\n    def step_async(self, actions):\n        for remote, action in zip(self.remotes, actions):\n            remote.send(('step', action))\n        self.waiting = True\n\n    def step_wait(self):\n        results = [remote.recv() for remote in self.remotes]\n        self.waiting = False\n        obs, rews, cos, dones, infos = zip(*results)\n        return np.stack(obs), np.stack(rews), np.stack(cos), np.stack(dones), infos\n\n    def reset(self):\n        for remote in self.remotes:\n            remote.send(('reset', None))\n        obs = [remote.recv() for remote in self.remotes]\n        return np.stack(obs)\n\n\n    def reset_task(self):\n        for remote in self.remotes:\n            remote.send(('reset_task', None))\n        return np.stack([remote.recv() for remote in self.remotes])\n\n    def close(self):\n        if self.closed:\n            return\n        if self.waiting:\n            for remote in self.remotes:\n                remote.recv()\n        for remote in self.remotes:\n            remote.send(('close', None))\n        for p in self.ps:\n            p.join()\n        self.closed = True\n\n    def render(self, mode=\"rgb_array\"):\n        for remote in self.remotes:\n            remote.send(('render', mode))\n        if mode == \"rgb_array\":   \n            frame = [remote.recv() for remote in self.remotes]\n            return np.stack(frame) \n\n\ndef shareworker(remote, parent_remote, env_fn_wrapper):\n    parent_remote.close()\n    env = env_fn_wrapper.x()\n    while True:\n        cmd, data = remote.recv()\n        if cmd == 'step':\n            ob, s_ob, reward, done, info, available_actions = env.step(data)\n            if 'bool' in done.__class__.__name__:\n                if done:\n                    ob, s_ob, available_actions = env.reset()\n            else:\n                if np.all(done):\n                    ob, s_ob, available_actions = env.reset()\n\n            remote.send((ob, s_ob, reward, done, info, available_actions))\n        elif cmd == 'reset':\n            ob, s_ob, available_actions = env.reset()\n            remote.send((ob, s_ob, available_actions))\n        elif cmd == 'reset_task':\n            ob = env.reset_task()\n            remote.send(ob)\n        elif cmd == 'render':\n            if data == \"rgb_array\":\n                fr = env.render(mode=data)\n                remote.send(fr)\n            elif data == \"human\":\n                env.render(mode=data)\n        elif cmd == 'close':\n            env.close()\n            remote.close()\n            break\n        elif cmd == 'get_spaces':\n            remote.send(\n                (env.observation_space, env.share_observation_space, env.action_space))\n        elif cmd == 'render_vulnerability':\n            fr = env.render_vulnerability(data)\n            remote.send((fr))\n        elif cmd == 'get_num_agents':\n            remote.send((env.n_agents))\n        else:\n            raise NotImplementedError\n\n\nclass ShareSubprocVecEnv(ShareVecEnv):\n    def __init__(self, env_fns, spaces=None):\n        \"\"\"\n        envs: list of gym environments to run in subprocesses\n        \"\"\"\n        self.waiting = False\n        self.closed = False\n        nenvs = len(env_fns)\n        self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])\n        self.ps = [Process(target=shareworker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))\n                   for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]\n        for p in self.ps:\n            p.daemon = True  # if the main process crashes, we should not cause things to hang\n            p.start()\n        for remote in self.work_remotes:\n            remote.close()\n        self.remotes[0].send(('get_num_agents', None))\n        self.n_agents = self.remotes[0].recv()\n        self.remotes[0].send(('get_spaces', None))\n        observation_space, share_observation_space, action_space = self.remotes[0].recv(\n        )\n        # print(\"wrapper:\", share_observation_space)\n        ShareVecEnv.__init__(self, len(env_fns), observation_space,\n                             share_observation_space, action_space)\n\n    def step_async(self, actions):\n        for remote, action in zip(self.remotes, actions):\n            remote.send(('step', action))\n        self.waiting = True\n\n    def step_wait(self):\n        results = [remote.recv() for remote in self.remotes]\n        self.waiting = False\n        obs, share_obs, rews, dones, infos, available_actions = zip(*results)\n\n        cost_x= np.array([item[0]['cost'] for item in infos])\n        # print(\"=====cost_x=====: \", cost_x.sum())\n        # print(\"=====np.stack(dones)=====: \", np.stack(dones))\n        return np.stack(obs), np.stack(share_obs), np.stack(rews), np.stack(cost_x), np.stack(dones), infos, np.stack(available_actions)\n\n    def reset(self):\n        for remote in self.remotes:\n            remote.send(('reset', None))\n        results = [remote.recv() for remote in self.remotes]\n        obs, share_obs, available_actions = zip(*results)\n        return np.stack(obs), np.stack(share_obs), np.stack(available_actions)\n\n    def reset_task(self):\n        for remote in self.remotes:\n            remote.send(('reset_task', None))\n        return np.stack([remote.recv() for remote in self.remotes])\n\n    def close(self):\n        if self.closed:\n            return\n        if self.waiting:\n            for remote in self.remotes:\n                remote.recv()\n        for remote in self.remotes:\n            remote.send(('close', None))\n        for p in self.ps:\n            p.join()\n        self.closed = True\n\n\ndef choosesimpleworker(remote, parent_remote, env_fn_wrapper):\n    parent_remote.close()\n    env = env_fn_wrapper.x()\n    while True:\n        cmd, data = remote.recv()\n        if cmd == 'step':\n            ob, reward, done, info = env.step(data)\n            remote.send((ob, reward, info[\"cost\"], done, info))\n        elif cmd == 'reset':\n            ob = env.reset(data)\n            remote.send((ob))\n        elif cmd == 'reset_task':\n            ob = env.reset_task()\n            remote.send(ob)\n        elif cmd == 'close':\n            env.close()\n            remote.close()\n            break\n        elif cmd == 'render':\n            if data == \"rgb_array\":\n                fr = env.render(mode=data)\n                remote.send(fr)\n            elif data == \"human\":\n                env.render(mode=data)\n        elif cmd == 'get_spaces':\n            remote.send(\n                (env.observation_space, env.share_observation_space, env.action_space))\n        else:\n            raise NotImplementedError\n\n\nclass ChooseSimpleSubprocVecEnv(ShareVecEnv):\n    def __init__(self, env_fns, spaces=None):\n        \"\"\"\n        envs: list of gym environments to run in subprocesses\n        \"\"\"\n        self.waiting = False\n        self.closed = False\n        nenvs = len(env_fns)\n        self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])\n        self.ps = [Process(target=choosesimpleworker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))\n                   for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]\n        for p in self.ps:\n            p.daemon = True  # if the main process crashes, we should not cause things to hang\n            p.start()\n        for remote in self.work_remotes:\n            remote.close()\n        self.remotes[0].send(('get_spaces', None))\n        observation_space, share_observation_space, action_space = self.remotes[0].recv()\n        ShareVecEnv.__init__(self, len(env_fns), observation_space,\n                             share_observation_space, action_space)\n\n    def step_async(self, actions):\n        for remote, action in zip(self.remotes, actions):\n            remote.send(('step', action))\n        self.waiting = True\n\n    def step_wait(self):\n        results = [remote.recv() for remote in self.remotes]\n        self.waiting = False\n        obs, rews, cos, dones, infos = zip(*results)\n        return np.stack(obs), np.stack(rews), np.stack(cos), np.stack(dones), infos\n\n    def reset(self, reset_choose):\n        for remote, choose in zip(self.remotes, reset_choose):\n            remote.send(('reset', choose))\n        obs = [remote.recv() for remote in self.remotes]\n        return np.stack(obs)\n\n    def render(self, mode=\"rgb_array\"):\n        for remote in self.remotes:\n            remote.send(('render', mode))\n        if mode == \"rgb_array\":   \n            frame = [remote.recv() for remote in self.remotes]\n            return np.stack(frame)\n\n    def reset_task(self):\n        for remote in self.remotes:\n            remote.send(('reset_task', None))\n        return np.stack([remote.recv() for remote in self.remotes])\n\n    def close(self):\n        if self.closed:\n            return\n        if self.waiting:\n            for remote in self.remotes:\n                remote.recv()\n        for remote in self.remotes:\n            remote.send(('close', None))\n        for p in self.ps:\n            p.join()\n        self.closed = True\n\n\ndef chooseworker(remote, parent_remote, env_fn_wrapper):\n    parent_remote.close()\n    env = env_fn_wrapper.x()\n    while True:\n        cmd, data = remote.recv()\n        if cmd == 'step':\n            ob, s_ob, reward, done, info, available_actions = env.step(data)\n            remote.send((ob, s_ob, reward, info[\"cost\"], done, info, available_actions))\n        elif cmd == 'reset':\n            ob, s_ob, available_actions = env.reset(data)\n            remote.send((ob, s_ob, available_actions))\n        elif cmd == 'reset_task':\n            ob = env.reset_task()\n            remote.send(ob)\n        elif cmd == 'close':\n            env.close()\n            remote.close()\n            break\n        elif cmd == 'render':\n            remote.send(env.render(mode='rgb_array'))\n        elif cmd == 'get_spaces':\n            remote.send(\n                (env.observation_space, env.share_observation_space, env.action_space))\n        else:\n            raise NotImplementedError\n\n\nclass ChooseSubprocVecEnv(ShareVecEnv):\n    def __init__(self, env_fns, spaces=None):\n        \"\"\"\n        envs: list of gym environments to run in subprocesses\n        \"\"\"\n        self.waiting = False\n        self.closed = False\n        nenvs = len(env_fns)\n        self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])\n        self.ps = [Process(target=chooseworker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))\n                   for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]\n        for p in self.ps:\n            p.daemon = True  # if the main process crashes, we should not cause things to hang\n            p.start()\n        for remote in self.work_remotes:\n            remote.close()\n        self.remotes[0].send(('get_spaces', None))\n        observation_space, share_observation_space, action_space = self.remotes[0].recv(\n        )\n        ShareVecEnv.__init__(self, len(env_fns), observation_space,\n                             share_observation_space, action_space)\n\n    def step_async(self, actions):\n        for remote, action in zip(self.remotes, actions):\n            remote.send(('step', action))\n        self.waiting = True\n\n    def step_wait(self):\n        results = [remote.recv() for remote in self.remotes]\n        self.waiting = False\n        obs, share_obs, rews, cos, dones, infos, available_actions = zip(*results)\n        return np.stack(obs), np.stack(share_obs), np.stack(rews), np.stack(cos), np.stack(dones), infos, np.stack(available_actions)\n\n    def reset(self, reset_choose):\n        for remote, choose in zip(self.remotes, reset_choose):\n            remote.send(('reset', choose))\n        results = [remote.recv() for remote in self.remotes]\n        obs, share_obs, available_actions = zip(*results)\n        return np.stack(obs), np.stack(share_obs), np.stack(available_actions)\n\n    def reset_task(self):\n        for remote in self.remotes:\n            remote.send(('reset_task', None))\n        return np.stack([remote.recv() for remote in self.remotes])\n\n    def close(self):\n        if self.closed:\n            return\n        if self.waiting:\n            for remote in self.remotes:\n                remote.recv()\n        for remote in self.remotes:\n            remote.send(('close', None))\n        for p in self.ps:\n            p.join()\n        self.closed = True\n\n\ndef chooseguardworker(remote, parent_remote, env_fn_wrapper):\n    parent_remote.close()\n    env = env_fn_wrapper.x()\n    while True:\n        cmd, data = remote.recv()\n        if cmd == 'step':\n            ob, reward, done, info = env.step(data)\n            remote.send((ob, reward, info[\"cost\"], done, info))\n        elif cmd == 'reset':\n            ob = env.reset(data)\n            remote.send((ob))\n        elif cmd == 'reset_task':\n            ob = env.reset_task()\n            remote.send(ob)\n        elif cmd == 'close':\n            env.close()\n            remote.close()\n            break\n        elif cmd == 'get_spaces':\n            remote.send(\n                (env.observation_space, env.share_observation_space, env.action_space))\n        else:\n            raise NotImplementedError\n\n\nclass ChooseGuardSubprocVecEnv(ShareVecEnv):\n    def __init__(self, env_fns, spaces=None):\n        \"\"\"\n        envs: list of gym environments to run in subprocesses\n        \"\"\"\n        self.waiting = False\n        self.closed = False\n        nenvs = len(env_fns)\n        self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])\n        self.ps = [Process(target=chooseguardworker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))\n                   for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]\n        for p in self.ps:\n            p.daemon = False  # if the main process crashes, we should not cause things to hang\n            p.start()\n        for remote in self.work_remotes:\n            remote.close()\n        self.remotes[0].send(('get_spaces', None))\n        observation_space, share_observation_space, action_space = self.remotes[0].recv(\n        )\n        ShareVecEnv.__init__(self, len(env_fns), observation_space,\n                             share_observation_space, action_space)\n\n    def step_async(self, actions):\n        for remote, action in zip(self.remotes, actions):\n            remote.send(('step', action))\n        self.waiting = True\n\n    def step_wait(self):\n        results = [remote.recv() for remote in self.remotes]\n        self.waiting = False\n        obs, rews, cos, dones, infos = zip(*results)\n        return np.stack(obs), np.stack(rews), np.stack(cos), np.stack(dones), infos\n\n    def reset(self, reset_choose):\n        for remote, choose in zip(self.remotes, reset_choose):\n            remote.send(('reset', choose))\n        obs = [remote.recv() for remote in self.remotes]\n        return np.stack(obs)\n\n    def reset_task(self):\n        for remote in self.remotes:\n            remote.send(('reset_task', None))\n        return np.stack([remote.recv() for remote in self.remotes])\n\n    def close(self):\n        if self.closed:\n            return\n        if self.waiting:\n            for remote in self.remotes:\n                remote.recv()\n        for remote in self.remotes:\n            remote.send(('close', None))\n        for p in self.ps:\n            p.join()\n        self.closed = True\n\n\n# single env\nclass DummyVecEnv(ShareVecEnv):\n    def __init__(self, env_fns):\n        self.envs = [fn() for fn in env_fns]\n        env = self.envs[0]\n        ShareVecEnv.__init__(self, len(\n            env_fns), env.observation_space, env.share_observation_space, env.action_space)\n        self.actions = None\n\n    def step_async(self, actions):\n        self.actions = actions\n\n    def step_wait(self):\n        results = [env.step(a) for (a, env) in zip(self.actions, self.envs)]\n        obs, rews, cos, dones, infos = map(np.array, zip(*results))\n\n        for (i, done) in enumerate(dones):\n            if 'bool' in done.__class__.__name__:\n                if done:\n                    obs[i] = self.envs[i].reset()\n            else:\n                if np.all(done):\n                    obs[i] = self.envs[i].reset()\n\n        self.actions = None\n        return obs, rews, cos, dones, infos\n\n    def reset(self):\n        obs = [env.reset() for env in self.envs]\n        return np.array(obs)\n\n    def close(self):\n        for env in self.envs:\n            env.close()\n\n    def render(self, mode=\"human\"):\n        if mode == \"rgb_array\":\n            return np.array([env.render(mode=mode) for env in self.envs])\n        elif mode == \"human\":\n            for env in self.envs:\n                env.render(mode=mode)\n        else:\n            raise NotImplementedError\n\n\n\nclass ShareDummyVecEnv(ShareVecEnv):\n    def __init__(self, env_fns):\n        self.envs = [fn() for fn in env_fns]\n        env = self.envs[0]\n        ShareVecEnv.__init__(self, len(\n            env_fns), env.observation_space, env.share_observation_space, env.action_space)\n        self.actions = None\n\n    def step_async(self, actions):\n        self.actions = actions\n\n    def step_wait(self):\n        results = [env.step(a) for (a, env) in zip(self.actions, self.envs)]\n        obs, share_obs, rews, cos, dones, infos, available_actions = map(\n            np.array, zip(*results))\n\n        for (i, done) in enumerate(dones):\n            if 'bool' in done.__class__.__name__:\n                if done:\n                    obs[i], share_obs[i], available_actions[i] = self.envs[i].reset()\n            else:\n                if np.all(done):\n                    obs[i], share_obs[i], available_actions[i] = self.envs[i].reset()\n        self.actions = None\n\n        return obs, share_obs, rews, cos, dones, infos, available_actions\n\n    def reset(self):\n        results = [env.reset() for env in self.envs]\n        obs, share_obs, available_actions = map(np.array, zip(*results))\n        return obs, share_obs, available_actions\n\n    def close(self):\n        for env in self.envs:\n            env.close()\n    \n    def render(self, mode=\"human\"):\n        if mode == \"rgb_array\":\n            return np.array([env.render(mode=mode) for env in self.envs])\n        elif mode == \"human\":\n            for env in self.envs:\n                env.render(mode=mode)\n        else:\n            raise NotImplementedError\n\n\nclass ChooseDummyVecEnv(ShareVecEnv):\n    def __init__(self, env_fns):\n        self.envs = [fn() for fn in env_fns]\n        env = self.envs[0]\n        ShareVecEnv.__init__(self, len(\n            env_fns), env.observation_space, env.share_observation_space, env.action_space)\n        self.actions = None\n\n    def step_async(self, actions):\n        self.actions = actions\n\n    def step_wait(self):\n        results = [env.step(a) for (a, env) in zip(self.actions, self.envs)]\n        obs, share_obs, rews, cos, dones, infos, available_actions = map(\n            np.array, zip(*results))\n        self.actions = None\n        return obs, share_obs, rews, cos, dones, infos, available_actions\n\n    def reset(self, reset_choose):\n        results = [env.reset(choose)\n                   for (env, choose) in zip(self.envs, reset_choose)]\n        obs, share_obs, available_actions = map(np.array, zip(*results))\n        return obs, share_obs, available_actions\n\n    def close(self):\n        for env in self.envs:\n            env.close()\n\n    def render(self, mode=\"human\"):\n        if mode == \"rgb_array\":\n            return np.array([env.render(mode=mode) for env in self.envs])\n        elif mode == \"human\":\n            for env in self.envs:\n                env.render(mode=mode)\n        else:\n            raise NotImplementedError\n\nclass ChooseSimpleDummyVecEnv(ShareVecEnv):\n    def __init__(self, env_fns):\n        self.envs = [fn() for fn in env_fns]\n        env = self.envs[0]\n        ShareVecEnv.__init__(self, len(\n            env_fns), env.observation_space, env.share_observation_space, env.action_space)\n        self.actions = None\n\n    def step_async(self, actions):\n        self.actions = actions\n\n    def step_wait(self):\n        results = [env.step(a) for (a, env) in zip(self.actions, self.envs)]\n        obs, rews, cos, dones, infos = map(np.array, zip(*results))\n        self.actions = None\n        return obs, rews, cos, dones, infos\n\n    def reset(self, reset_choose):\n        obs = [env.reset(choose)\n                   for (env, choose) in zip(self.envs, reset_choose)]\n        return np.array(obs)\n\n    def close(self):\n        for env in self.envs:\n            env.close()\n\n    def render(self, mode=\"human\"):\n        if mode == \"rgb_array\":\n            return np.array([env.render(mode=mode) for env in self.envs])\n        elif mode == \"human\":\n            for env in self.envs:\n                env.render(mode=mode)\n        else:\n            raise NotImplementedError\n"
  },
  {
    "path": "MACPO/macpo/envs/safety_ma_mujoco/MUJOCO_LOG.TXT",
    "content": "Sun Aug 29 11:16:41 2021\nERROR: Expired activation key\n\n"
  },
  {
    "path": "MACPO/macpo/envs/safety_ma_mujoco/README.md",
    "content": "#### Safety Multi-agent Mujoco \n\n\n## 1. Sate Many Agent Ant\n\nAccording to Zanger's work, \n\nThe reward function is equal to the rewards in the common Ant-v2 environment and comprises the torso velocity in global x-direction, a negative control reward on exerted torque, a negative contact reward and a constant positive reward for survival, which results in\n\n<img src=\"https://latex.codecogs.com/png.image?\\dpi{110}&space;r=\\frac{x_{\\text&space;{torso&space;},&space;t&plus;1}-x_{\\text&space;{torso&space;},&space;t}}{d&space;t}-\\frac{1}{2}\\left\\|\\boldsymbol{a}_{t}\\right\\|_{2}^{2}-\\frac{1}{2&space;*&space;10^{3}}&space;\\|&space;\\text&space;{&space;contact&space;}_{t}&space;\\|_{2}^{2}&plus;1\" title=\"r=\\frac{x_{\\text {torso }, t+1}-x_{\\text {torso }, t}}{d t}-\\frac{1}{2}\\left\\|\\boldsymbol{a}_{t}\\right\\|_{2}^{2}-\\frac{1}{2 * 10^{3}} \\| \\text { contact }_{t} \\|_{2}^{2}+1\" />\n\n```python\nxposafter = self.get_body_com(\"torso_0\")[0]\nforward_reward = (xposafter - xposbefore)/self.dt\nctrl_cost = .5 * np.square(a).sum()\ncontact_cost = 0.5 * 1e-3 * np.sum(np.square(np.clip(self.sim.data.cfrc_ext, -1, 1)))\nsurvive_reward = 1.0\n        \nreward = forward_reward - ctrl_cost - contact_cost + survive_reward\n```\n\nAnd the cost,\n\n\n<img src=\"https://latex.codecogs.com/png.image?\\dpi{110}&space;c=&space;\\begin{cases}0,&space;&&space;\\text&space;{&space;for&space;}&space;\\quad&space;0.2&space;\\leq&space;z_{\\text&space;{torso&space;},&space;t&plus;1}&space;\\leq&space;1.0&space;\\\\&space;&&space;\\text&space;{&space;and&space;}\\left\\|\\boldsymbol{x}_{\\text&space;{torso&space;},&space;t&plus;1}-\\boldsymbol{x}_{\\text&space;{wall&space;}}\\right\\|_{2}&space;\\geq&space;1.8&space;\\\\&space;1,&space;&&space;\\text&space;{&space;else&space;}\\end{cases}\" title=\"c= \\begin{cases}0, & \\text { for } \\quad 0.2 \\leq z_{\\text {torso }, t+1} \\leq 1.0 \\\\ & \\text { and }\\left\\|\\boldsymbol{x}_{\\text {torso }, t+1}-\\boldsymbol{x}_{\\text {wall }}\\right\\|_{2} \\geq 1.8 \\\\ 1, & \\text { else }\\end{cases}\" />\n\n\n\n```python\nyposafter = self.get_body_com(\"torso_0\")[1]\nywall = np.array([-5, 5])\nif xposafter < 20:\n  y_walldist = yposafter - xposafter * np.tan(30 / 360 * 2 * np.pi) + ywall\nelif xposafter>20 and xposafter<60:\n  y_walldist = yposafter + (xposafter-40)*np.tan(30/360*2*np.pi) - ywall\nelif xposafter>60 and xposafter<100:\n  y_walldist = yposafter - (xposafter-80)*np.tan(30/360*2*np.pi) + ywall\nelse:\n  y_walldist = yposafter - 20*np.tan(30/360*2*np.pi) + ywall\nobj_cost = (abs(y_walldist) < 1.8).any() * 1.0\n\nbody_quat = self.data.get_body_xquat('torso_0')\n z_rot = 1-2*(body_quat[1]**2+body_quat[2]**2)  ### normally xx-rotation, not sure what axes mujoco uses\n\nstate = self.state_vector()\nnotdone = np.isfinite(state).all() \\\n                      and state[2] >= 0.2 and state[2] <= 1.0\\\n                      and z_rot>=-0.7 #ADDED\ndone = not notdone\ndone_cost = done * 1.0\n\ncost = np.clip(obj_cost + done_cost, 0, 1)\n```\n\n\n[1] Zanger, Moritz A., Karam Daaboul, and J. Marius Zöllner. 2021. “Safe Continuous Control with Constrained Model-Based Policy Optimization.” arXiv [cs.LG]. arXiv. http://arxiv.org/abs/2104.06922.\n"
  },
  {
    "path": "MACPO/macpo/envs/safety_ma_mujoco/__init__.py",
    "content": ""
  },
  {
    "path": "MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/__init__.py",
    "content": "from .mujoco_multi import MujocoMulti\nfrom .coupled_half_cheetah import CoupledHalfCheetah\nfrom .manyagent_swimmer import ManyAgentSwimmerEnv\nfrom .manyagent_ant import ManyAgentAntEnv\n"
  },
  {
    "path": "MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/ant.py",
    "content": "import numpy as np\n# from mujoco_safety_gym.envs import mujoco_env\nfrom macpo.envs.safety_ma_mujoco.safety_multiagent_mujoco import mujoco_env\nfrom gym import utils\nimport mujoco_py as mjp\n\n\nclass AntEnv(mujoco_env.MujocoEnv, utils.EzPickle):\n    def __init__(self, **kwargs):\n        mujoco_env.MujocoEnv.__init__(self, 'ant.xml', 5)\n        utils.EzPickle.__init__(self)\n\n    def step(self, a):\n        xposbefore = self.get_body_com(\"torso\")[0]\n        self.do_simulation(a, self.frame_skip)\n        mjp.functions.mj_rnePostConstraint(self.sim.model,\n                                           self.sim.data)  #### calc contacts, this is a mujoco py version mismatch issue with mujoco200\n        xposafter = self.get_body_com(\"torso\")[0]\n        forward_reward = (xposafter - xposbefore) / self.dt\n        ctrl_cost = .5 * np.square(a).sum()\n        contact_cost = 0.5 * 1e-3 * np.sum(\n            np.square(np.clip(self.sim.data.cfrc_ext, -1, 1)))\n        survive_reward = 1.0\n\n        ### safety stuff\n        yposafter = self.get_body_com(\"torso\")[1]\n        ywall = np.array([-5, 5])\n        if xposafter < 20:\n            y_walldist = yposafter - xposafter * np.tan(30 / 360 * 2 * np.pi) + ywall\n        elif xposafter > 20 and xposafter < 60:\n            y_walldist = yposafter + (xposafter - 40) * np.tan(30 / 360 * 2 * np.pi) - ywall\n        elif xposafter > 60 and xposafter < 100:\n            y_walldist = yposafter - (xposafter - 80) * np.tan(30 / 360 * 2 * np.pi) + ywall\n        else:\n            y_walldist = yposafter - 20 * np.tan(30 / 360 * 2 * np.pi) + ywall\n\n        obj_cost = (abs(y_walldist) < 1.8).any() * 1.0\n        reward = forward_reward - ctrl_cost - contact_cost + survive_reward\n\n        body_quat = self.data.get_body_xquat('torso')\n        z_rot = 1 - 2 * (\n                    body_quat[1] ** 2 + body_quat[2] ** 2)  ### normally xx-rotation, not sure what axes mujoco uses\n        state = self.state_vector()\n        notdone = np.isfinite(state).all() \\\n                  and state[2] >= 0.2 and state[2] <= 1.0 \\\n                  and z_rot >= -0.7\n        done = not notdone\n        done_cost = done * 1.0\n        cost = np.clip(obj_cost + done_cost, 0, 1)\n        ob = self._get_obs()\n        return ob, reward, done, dict(\n            reward_forward=forward_reward,\n            reward_ctrl=-ctrl_cost,\n            reward_contact=-contact_cost,\n            reward_survive=survive_reward,\n            cost_obj=obj_cost,\n            cost_done=done_cost,\n            cost=cost,\n        )\n\n    def _get_obs(self):\n        x = self.sim.data.qpos.flat[0]\n        y = self.sim.data.qpos.flat[1]\n        if x < 20:\n            y_off = y - x * np.tan(30 / 360 * 2 * np.pi)\n        elif x > 20 and x < 60:\n            y_off = y + (x - 40) * np.tan(30 / 360 * 2 * np.pi)\n        elif x > 60 and x < 100:\n            y_off = y - (x - 80) * np.tan(30 / 360 * 2 * np.pi)\n        else:\n            y_off = y - 20 * np.tan(30 / 360 * 2 * np.pi)\n\n        return np.concatenate([\n            self.sim.data.qpos.flat[2:-42],\n            self.sim.data.qvel.flat[:-36],\n            [x / 5],\n            [y_off],\n            # np.clip(self.sim.data.cfrc_ext, -1, 1).flat,\n        ])\n\n    def reset_model(self):\n        qpos = self.init_qpos + self.np_random.uniform(size=self.model.nq, low=-.1, high=.1)\n        qpos[-42:] = self.init_qpos[-42:]\n        qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1\n        qvel[-36:] = self.init_qvel[-36:]\n        self.set_state(qpos, qvel)\n        return self._get_obs()\n\n    def viewer_setup(self):\n        self.viewer.cam.distance = self.model.stat.extent * 0.5"
  },
  {
    "path": "MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/.gitignore",
    "content": "*.auto.xml\n"
  },
  {
    "path": "MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/__init__.py",
    "content": ""
  },
  {
    "path": "MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/ant.xml",
    "content": "<mujoco model=\"ant\">\n  <compiler angle=\"degree\" coordinate=\"local\" inertiafromgeom=\"true\"/>\n  <option integrator=\"RK4\" timestep=\"0.01\"/>\n  <custom>\n    <numeric data=\"0.0 0.0 0.55 1.0 0.0 0.0 0.0 0.0 1.0 0.0 -1.0 0.0 -1.0 0.0 1.0\" name=\"init_qpos\"/>\n  </custom>\n  <default>\n    <joint armature=\"1\" damping=\"1\" limited=\"true\"/>\n    <geom conaffinity=\"0\" condim=\"3\" density=\"5.0\" friction=\"1 0.5 0.5\" margin=\"0.01\" rgba=\"0.8 0.6 0.4 1\"/>\n  </default>\n  <asset>\n    <texture builtin=\"gradient\" height=\"100\" rgb1=\"1 1 1\" rgb2=\"0 0 0\" type=\"skybox\" width=\"100\"/>\n    <texture builtin=\"flat\" height=\"1278\" mark=\"cross\" markrgb=\"1 1 1\" name=\"texgeom\" random=\"0.01\" rgb1=\"0.8 0.6 0.4\" rgb2=\"0.8 0.6 0.4\" type=\"cube\" width=\"127\"/>\n    <texture builtin=\"checker\" height=\"100\" name=\"texplane\" rgb1=\"#2c5987\" rgb2=\"#1f4060\" type=\"2d\" width=\"100\"/>\n    <material name=\"MatPlane\" reflectance=\"0.5\" shininess=\"1\" specular=\"1\" texrepeat=\"60 60\" texture=\"texplane\"/>\n    <texture builtin=\"checker\" height=\"100\" name=\"texbox\" rgb1=\"#ff66ff\" rgb2=\"#ff66ff\" type=\"2d\" width=\"100\"/>\n    <material name=\"BoxMat\" reflectance=\"0.5\" shininess=\"1\" specular=\"1\" texrepeat=\"60 60\" texture=\"texbox\"/>\n\n    <material name=\"geom\" texture=\"texgeom\" texuniform=\"true\"/>\n  </asset>\n  <worldbody>\n    <light cutoff=\"100\" diffuse=\"1 1 1\" dir=\"-0 0 -1.3\" directional=\"true\" exponent=\"1\" pos=\"0 0 1.3\" specular=\".1 .1 .1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" material=\"MatPlane\" name=\"floor\" pos=\"30 0 0\" rgba=\"0.2 0.2 0.2 1\" size=\"70 25 40\" type=\"plane\"/>\n    <!-- <geom conaffinity=\"1\" condim=\"3\" name=\"obj11\" type=\"box\" material=\"BoxMat\" size=\"0.5 0.5 0.5\" pos=\"10  0 .5\"    rgba=\"#ff66ff\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"obj12\" type=\"box\" material=\"BoxMat\" size=\"0.5 0.5 0.5\" pos=\"10 -10 .5\"    rgba=\"#ff66ff\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"obj13\" type=\"box\" material=\"BoxMat\" size=\"0.5 0.5 0.5\" pos=\"10  10 .5\"    rgba=\"#ff66ff\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"obj21\" type=\"box\" material=\"BoxMat\" size=\"0.5 0.5 0.5\" pos=\"20 -4 .5\"    rgba=\"#ff66ff\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"obj22\" type=\"box\" material=\"BoxMat\" size=\"0.5 0.5 0.5\" pos=\"20  4 .5\"    rgba=\"#ff66ff\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"obj23\" type=\"box\" material=\"BoxMat\" size=\"0.5 0.5 0.5\" pos=\"20 -14 .5\"    rgba=\"#ff66ff\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"obj24\" type=\"box\" material=\"BoxMat\" size=\"0.5 0.5 0.5\" pos=\"20  14 .5\"    rgba=\"#ff66ff\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"obj31\" type=\"box\" material=\"BoxMat\" size=\"0.5 0.5 0.5\" pos=\"30  0 .5\"    rgba=\"#ff66ff\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"obj32\" type=\"box\" material=\"BoxMat\" size=\"0.5 0.5 0.5\" pos=\"30 -9 .5\"    rgba=\"#ff66ff\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"obj33\" type=\"box\" material=\"BoxMat\" size=\"0.5 0.5 0.5\" pos=\"30  11 .5\"    rgba=\"#ff66ff\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"obj34\" type=\"box\" material=\"BoxMat\" size=\"0.5 0.5 0.5\" pos=\"30 -16 .5\"    rgba=\"#ff66ff\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"obj35\" type=\"box\" material=\"BoxMat\" size=\"0.5 0.5 0.5\" pos=\"30  19 .5\"    rgba=\"#ff66ff\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall1\" type=\"box\" material=\"BoxMat\" size=\"0.1 14 1.0\" pos=\"-14  0 1\"    rgba=\"#ff66ff\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall2\" type=\"box\" material=\"BoxMat\" size=\"14 .1 1.0\" pos=\"0  14 1\"    rgba=\"#ff66ff\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall3\" type=\"box\" material=\"BoxMat\" size=\"14 0.1 1.0\" pos=\"0  -14 1.0\"    rgba=\"#ff66ff\"/> -->\n    <!-- <geom conaffinity=\"1\" condim=\"3\" name=\"wall2\" type=\"box\" density=\".01\" size=\"20 0.1 1.0\" pos=\"0   6 1.0\"   euler='0 0 30'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall3\" type=\"box\" density=\".01\" size=\"20 0.1 1.0\" pos=\"40 -6 1.0\"  euler='0 0 -30'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall4\" type=\"box\" density=\".01\" size=\"20 0.1 1.0\" pos=\"40  6 1.0\"  euler='0 0 -30'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall5\" type=\"box\" density=\".01\" size=\"20 0.1 1.0\" pos=\"80 -6 1.0\"   euler='0 0 30'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall6\" type=\"box\" density=\".01\" size=\"20 0.1 1.0\" pos=\"80  6 1.0\"   euler='0 0 30'  rgba=\"1 0.5 0.5 1\"/> -->\n    <body name=\"torso\" pos=\"0 0 0.75\">\n      <camera name=\"track\" mode=\"trackcom\" pos=\"0 -10 -10\" xyaxes=\".8 .4 0 0 .4 .6\"/>\n      <geom name=\"torso_geom\" pos=\"0 0 0\" size=\"0.25\" type=\"sphere\"/>\n      <joint armature=\"0\" damping=\"0\" limited=\"false\" margin=\"0.01\" name=\"root\" pos=\"0 0 0\" type=\"free\"/>\n      <body name=\"front_left_leg\" pos=\"0 0 0\">\n        <geom fromto=\"0.0 0.0 0.0 0.2 0.2 0.0\" name=\"aux_1_geom\" size=\"0.08\" type=\"capsule\"/>\n        <body name=\"aux_1\" pos=\"0.2 0.2 0\">\n          <joint axis=\"0 0 1\" name=\"hip_1\" pos=\"0.0 0.0 0.0\" range=\"-30 30\" type=\"hinge\"/>\n          <geom fromto=\"0.0 0.0 0.0 0.2 0.2 0.0\" name=\"left_leg_geom\" size=\"0.08\" type=\"capsule\"/>\n          <body pos=\"0.2 0.2 0\">\n            <joint axis=\"-1 1 0\" name=\"ankle_1\" pos=\"0.0 0.0 0.0\" range=\"30 70\" type=\"hinge\"/>\n            <geom fromto=\"0.0 0.0 0.0 0.4 0.4 0.0\" name=\"left_ankle_geom\" size=\"0.08\" type=\"capsule\"/>\n          </body>\n        </body>\n      </body>\n      <body name=\"front_right_leg\" pos=\"0 0 0\">\n        <geom fromto=\"0.0 0.0 0.0 -0.2 0.2 0.0\" name=\"aux_2_geom\" size=\"0.08\" type=\"capsule\"/>\n        <body name=\"aux_2\" pos=\"-0.2 0.2 0\">\n          <joint axis=\"0 0 1\" name=\"hip_2\" pos=\"0.0 0.0 0.0\" range=\"-30 30\" type=\"hinge\"/>\n          <geom fromto=\"0.0 0.0 0.0 -0.2 0.2 0.0\" name=\"right_leg_geom\" size=\"0.08\" type=\"capsule\"/>\n          <body pos=\"-0.2 0.2 0\">\n            <joint axis=\"1 1 0\" name=\"ankle_2\" pos=\"0.0 0.0 0.0\" range=\"-70 -30\" type=\"hinge\"/>\n            <geom fromto=\"0.0 0.0 0.0 -0.4 0.4 0.0\" name=\"right_ankle_geom\" size=\"0.08\" type=\"capsule\"/>\n          </body>\n        </body>\n      </body>\n      <body name=\"back_leg\" pos=\"0 0 0\">\n        <geom fromto=\"0.0 0.0 0.0 -0.2 -0.2 0.0\" name=\"aux_3_geom\" size=\"0.08\" type=\"capsule\"/>\n        <body name=\"aux_3\" pos=\"-0.2 -0.2 0\">\n          <joint axis=\"0 0 1\" name=\"hip_3\" pos=\"0.0 0.0 0.0\" range=\"-30 30\" type=\"hinge\"/>\n          <geom fromto=\"0.0 0.0 0.0 -0.2 -0.2 0.0\" name=\"back_leg_geom\" size=\"0.08\" type=\"capsule\"/>\n          <body pos=\"-0.2 -0.2 0\">\n            <joint axis=\"-1 1 0\" name=\"ankle_3\" pos=\"0.0 0.0 0.0\" range=\"-70 -30\" type=\"hinge\"/>\n            <geom fromto=\"0.0 0.0 0.0 -0.4 -0.4 0.0\" name=\"third_ankle_geom\" size=\"0.08\" type=\"capsule\"/>\n          </body>\n        </body>\n      </body>\n      <body name=\"right_back_leg\" pos=\"0 0 0\">\n        <geom fromto=\"0.0 0.0 0.0 0.2 -0.2 0.0\" name=\"aux_4_geom\" size=\"0.08\" type=\"capsule\"/>\n        <body name=\"aux_4\" pos=\"0.2 -0.2 0\">\n          <joint axis=\"0 0 1\" name=\"hip_4\" pos=\"0.0 0.0 0.0\" range=\"-30 30\" type=\"hinge\"/>\n          <geom fromto=\"0.0 0.0 0.0 0.2 -0.2 0.0\" name=\"rightback_leg_geom\" size=\"0.08\" type=\"capsule\"/>\n          <body pos=\"0.2 -0.2 0\">\n            <joint axis=\"1 1 0\" name=\"ankle_4\" pos=\"0.0 0.0 0.0\" range=\"30 70\" type=\"hinge\"/>\n            <geom fromto=\"0.0 0.0 0.0 0.4 -0.4 0.0\" name=\"fourth_ankle_geom\" size=\"0.08\" type=\"capsule\"/>\n          </body>\n        </body>\n      </body>\n    </body>\n    <body name='b1' pos=\"0 5 1\" euler='0 0 30'>\n      <freejoint name=\"b1_fj\"/>\n      <geom conaffinity=\"1\" condim=\"3\" name=\"wall1\" type=\"box\" density=\".000001\" size=\"20 0.01 .7\"    rgba=\"1 0.5 0.5 1\"/>\n    </body>\n    <body name='b2' pos=\"0 -5 1\" euler='0 0 30'>\n      <freejoint name=\"b2_fj\"/>\n      <geom conaffinity=\"1\" condim=\"3\" name=\"wall2\" type=\"box\" density=\".000001\" size=\"20 0.01 .7\"    rgba=\"1 0.5 0.5 1\"/>\n    </body>\n    <body name='b3' pos=\"40 5 1\" euler='0 0 -30'>\n      <freejoint name=\"b3_fj\"/>\n      <geom conaffinity=\"1\" condim=\"3\" name=\"wall3\" type=\"box\" density=\".000001\" size=\"20 0.01 .7\"    rgba=\"1 0.5 0.5 1\"/>\n    </body>\n    <body name='b4' pos=\"40 -5 1\" euler='0 0 -30'>\n      <freejoint name=\"b4_fj\"/>\n      <geom conaffinity=\"1\" condim=\"3\" name=\"wall4\" type=\"box\" density=\".000001\" size=\"20 0.01 .7\"    rgba=\"1 0.5 0.5 1\"/>\n    </body>\n    <body name='b5' pos=\"80 5 1\" euler='0 0 30'>\n      <freejoint name=\"b5_fj\"/>\n      <geom conaffinity=\"1\" condim=\"3\" name=\"wall5\" type=\"box\" density=\".000001\" size=\"20 0.01 .7\"    rgba=\"1 0.5 0.5 1\"/>\n    </body>\n    <body name='b6' pos=\"80 -5 1\" euler='0 0 30'>\n      <freejoint name=\"b6_fj\"/>\n      <geom conaffinity=\"1\" condim=\"3\" name=\"wall6\" type=\"box\" density=\".000001\" size=\"20 0.01 .7\"    rgba=\"1 0.5 0.5 1\"/>\n    </body>\n  </worldbody>\n  <actuator>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"hip_4\" gear=\"150\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"ankle_4\" gear=\"150\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"hip_1\" gear=\"150\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"ankle_1\" gear=\"150\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"hip_2\" gear=\"150\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"ankle_2\" gear=\"150\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"hip_3\" gear=\"150\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"ankle_3\" gear=\"150\"/>\n  </actuator>\n</mujoco>"
  },
  {
    "path": "MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/coupled_half_cheetah.xml",
    "content": "<!-- Cheetah Model\n    The state space is populated with joints in the order that they are\n    defined in this file. The actuators also operate on joints.\n    State-Space (name/joint/parameter):\n        - rootx     slider      position (m)\n        - rootz     slider      position (m)\n        - rooty     hinge       angle (rad)\n        - bthigh    hinge       angle (rad)\n        - bshin     hinge       angle (rad)\n        - bfoot     hinge       angle (rad)\n        - fthigh    hinge       angle (rad)\n        - fshin     hinge       angle (rad)\n        - ffoot     hinge       angle (rad)\n        - rootx     slider      velocity (m/s)\n        - rootz     slider      velocity (m/s)\n        - rooty     hinge       angular velocity (rad/s)\n        - bthigh    hinge       angular velocity (rad/s)\n        - bshin     hinge       angular velocity (rad/s)\n        - bfoot     hinge       angular velocity (rad/s)\n        - fthigh    hinge       angular velocity (rad/s)\n        - fshin     hinge       angular velocity (rad/s)\n        - ffoot     hinge       angular velocity (rad/s)\n    Actuators (name/actuator/parameter):\n        - bthigh    hinge       torque (N m)\n        - bshin     hinge       torque (N m)\n        - bfoot     hinge       torque (N m)\n        - fthigh    hinge       torque (N m)\n        - fshin     hinge       torque (N m)\n        - ffoot     hinge       torque (N m)\n-->\n<mujoco model=\"cheetah\">\n  <compiler angle=\"radian\" coordinate=\"local\" inertiafromgeom=\"true\" settotalmass=\"14\"/>\n  <default>\n    <joint armature=\".1\" damping=\".01\" limited=\"true\" solimplimit=\"0 .8 .03\" solreflimit=\".02 1\" stiffness=\"8\"/>\n    <geom conaffinity=\"0\" condim=\"3\" contype=\"1\" friction=\".4 .1 .1\" rgba=\"0.8 0.6 .4 1\" solimp=\"0.0 0.8 0.01\" solref=\"0.02 1\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1 1\"/>\n  </default>\n  <size nstack=\"300000\" nuser_geom=\"1\"/>\n  <option gravity=\"0 0 -9.81\" timestep=\"0.01\"/>\n  <asset>\n    <texture builtin=\"gradient\" height=\"100\" rgb1=\"1 1 1\" rgb2=\"0 0 0\" type=\"skybox\" width=\"100\"/>\n    <texture builtin=\"flat\" height=\"1278\" mark=\"cross\" markrgb=\"1 1 1\" name=\"texgeom\" random=\"0.01\" rgb1=\"0.8 0.6 0.4\" rgb2=\"0.8 0.6 0.4\" type=\"cube\" width=\"127\"/>\n    <texture builtin=\"checker\" height=\"100\" name=\"texplane\" rgb1=\"0 0 0\" rgb2=\"0.8 0.8 0.8\" type=\"2d\" width=\"100\"/>\n    <material name=\"MatPlane\" reflectance=\"0.5\" shininess=\"1\" specular=\"1\" texrepeat=\"60 60\" texture=\"texplane\"/>\n    <material name=\"geom\" texture=\"texgeom\" texuniform=\"true\"/>\n  </asset>\n  <worldbody>\n    <light cutoff=\"100\" diffuse=\"1 1 1\" dir=\"-0 0 -1.3\" directional=\"true\" exponent=\"1\" pos=\"0 0 1.3\" specular=\".1 .1 .1\"/>\n    <light cutoff=\"100\" diffuse=\"1 1 1\" dir=\"-0 0 -1.3\" directional=\"true\" exponent=\"1\" pos=\"0 0 1.3\" specular=\".1 .1 .1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" material=\"MatPlane\" name=\"floor\" pos=\"65 0 0\" rgba=\"0.2 0.2 0.2 1\" size=\"150 40 40\" type=\"plane\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall1\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"0  -7.3 1.0\"  euler='0 0 0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall2\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"0   7.3 1.0\"  euler='0 0 0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall3\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"50 -4 1.0\"  euler='0 0 -0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall4\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"50  4 1.0\"  euler='0 0 -0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall5\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"100 -4 1.0\"  euler='0 0 0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall6\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"100  4 1.0\"  euler='0 0 0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall7\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"150 -4 1.0\"  euler='0 0 0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall8\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"150  4 1.0\"  euler='0 0 0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall9\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"-50  4 1.0\"  euler='0 0 0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall10\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"-50  -4 1.0\"  euler='0 0 0'  rgba=\"1 0.5 0.5 1\"/>\n\n    <body name=\"obj1\" pos=\"-39 0 .7\">\n        <geom conaffinity=\"1\" condim=\"3\" name=\"obj_geom\" pos='5 0 .7' density=\"0.0001\" type=\"box\" size=\".1 2.3 1.3\"  rgba=\"1 0.5 0.5 .8\"/>\n          <!--<joint axis=\"1 0 0\" damping=\".2\" name=\"wall_joint\" pos=\"5 0 .7\" range=\"-10000 10000\" stiffness=\".0\" type=\"slide\"/>-->\n          <joint axis=\"1 0 0\" damping=\".2\" name=\"wall_joint\" pos=\"2 0 .7\" range=\"-30 30\" stiffness=\".0\" type=\"slide\"/>\n          <!--<joint axis=\"1 0 0\" damping=\".2\" name=\"wall_joint1\" pos=\"2 0 .7\" range=\"10 20\" stiffness=\".0\" type=\"slide\"/>-->\n    </body>\n\n<!--    <body name=\"obj2\" pos=\"5 0 .7\">-->\n<!--        <geom conaffinity=\"1\" condim=\"3\" name=\"obj_geom1\" pos='5 0 .7' density=\"0.0001\" type=\"box\" size=\".1 2.3 1.3\"  rgba=\"1 0.5 0.5 .8\"/>-->\n<!--          &lt;!&ndash;<joint axis=\"1 0 0\" damping=\".2\" name=\"wall_joint\" pos=\"5 0 .7\" range=\"-10000 10000\" stiffness=\".0\" type=\"slide\"/>&ndash;&gt;-->\n<!--          <joint axis=\"1 0 0\" damping=\".2\" name=\"wall_joint1\" pos=\"2 0 .7\" range=\"-10000 10000\" stiffness=\".0\" type=\"slide\"/>-->\n<!--          &lt;!&ndash;<joint axis=\"1 0 0\" damping=\".2\" name=\"wall_joint1\" pos=\"2 0 .7\" range=\"10 20\" stiffness=\".0\" type=\"slide\"/>&ndash;&gt;-->\n<!--    </body>-->\nwallpos1\n    <!--<geom conaffinity=\"1\" condim=\"3\" material=\"MatPlane\" name=\"floor\" pos=\"0 0 0\" rgba=\"0.8 0.9 0.8 1\" size=\"40 40 40\" type=\"plane\"/>-->\n    <body name=\"torso\" pos=\"0 -1 .7\">\n      <site name=\"t1\" pos=\"0.0 0 0\" size=\"0.1\"/>\n      <camera name=\"track\" mode=\"trackcom\" pos=\"0 -3 0.3\" xyaxes=\"1 0 0 0 0 1\"/>\n      <joint armature=\"0\" axis=\"1 0 0\" damping=\"0\" limited=\"false\" name=\"rootx\" pos=\"0 0 0\" stiffness=\"0\" type=\"slide\"/>\n      <joint armature=\"0\" axis=\"0 0 1\" damping=\"0\" limited=\"false\" name=\"rootz\" pos=\"0 0 0\" stiffness=\"0\" type=\"slide\"/>\n      <joint armature=\"0\" axis=\"0 1 0\" damping=\"0\" limited=\"false\" name=\"rooty\" pos=\"0 0 0\" stiffness=\"0\" type=\"hinge\"/>\n      <geom fromto=\"-.5 0 0 .5 0 0\" name=\"torso\" size=\"0.046\" type=\"capsule\"/>\n      <geom axisangle=\"0 1 0 .87\" name=\"head\" pos=\".6 0 .1\" size=\"0.046 .15\" type=\"capsule\"/>\n      <!-- <site name='tip'  pos='.15 0 .11'/>-->\n      <body name=\"bthigh\" pos=\"-.5 0 0\">\n        <joint axis=\"0 1 0\" damping=\"6\" name=\"bthigh\" pos=\"0 0 0\" range=\"-.52 1.05\" stiffness=\"240\" type=\"hinge\"/>\n        <geom axisangle=\"0 1 0 -3.8\" name=\"bthigh\" pos=\".1 0 -.13\" size=\"0.046 .145\" type=\"capsule\"/>\n        <body name=\"bshin\" pos=\".16 0 -.25\">\n          <joint axis=\"0 1 0\" damping=\"4.5\" name=\"bshin\" pos=\"0 0 0\" range=\"-.785 .785\" stiffness=\"180\" type=\"hinge\"/>\n          <geom axisangle=\"0 1 0 -2.03\" name=\"bshin\" pos=\"-.14 0 -.07\" rgba=\"0.9 0.6 0.6 1\" size=\"0.046 .15\" type=\"capsule\"/>\n          <body name=\"bfoot\" pos=\"-.28 0 -.14\">\n            <joint axis=\"0 1 0\" damping=\"3\" name=\"bfoot\" pos=\"0 0 0\" range=\"-.4 .785\" stiffness=\"120\" type=\"hinge\"/>\n            <geom axisangle=\"0 1 0 -.27\" name=\"bfoot\" pos=\".03 0 -.097\" rgba=\"0.9 0.6 0.6 1\" size=\"0.046 .094\" type=\"capsule\"/>\n          </body>\n        </body>\n      </body>\n      <body name=\"fthigh\" pos=\".5 0 0\">\n        <joint axis=\"0 1 0\" damping=\"4.5\" name=\"fthigh\" pos=\"0 0 0\" range=\"-1 .7\" stiffness=\"180\" type=\"hinge\"/>\n        <geom axisangle=\"0 1 0 .52\" name=\"fthigh\" pos=\"-.07 0 -.12\" size=\"0.046 .133\" type=\"capsule\"/>\n        <body name=\"fshin\" pos=\"-.14 0 -.24\">\n          <joint axis=\"0 1 0\" damping=\"3\" name=\"fshin\" pos=\"0 0 0\" range=\"-1.2 .87\" stiffness=\"120\" type=\"hinge\"/>\n          <geom axisangle=\"0 1 0 -.6\" name=\"fshin\" pos=\".065 0 -.09\" rgba=\"0.9 0.6 0.6 1\" size=\"0.046 .106\" type=\"capsule\"/>\n          <body name=\"ffoot\" pos=\".13 0 -.18\">\n            <joint axis=\"0 1 0\" damping=\"1.5\" name=\"ffoot\" pos=\"0 0 0\" range=\"-.5 .5\" stiffness=\"60\" type=\"hinge\"/>\n            <geom axisangle=\"0 1 0 -.6\" name=\"ffoot\" pos=\".045 0 -.07\" rgba=\"0.9 0.6 0.6 1\" size=\"0.046 .07\" type=\"capsule\"/>\n          </body>\n        </body>\n      </body>\n    </body>\n    <!-- second cheetah definition -->\n    <body name=\"torso2\" pos=\"0 1 .7\">\n      <site name=\"t2\" pos=\"0 0 0\" size=\"0.1\"/>\n      <camera name=\"track2\" mode=\"trackcom\" pos=\"0 -3 0.3\" xyaxes=\"1 0 0 0 0 1\"/>\n      <joint armature=\"0\" axis=\"1 0 0\" damping=\"0\" limited=\"false\" name=\"rootx2\" pos=\"0 0 0\" stiffness=\"0\" type=\"slide\"/>\n      <joint armature=\"0\" axis=\"0 0 1\" damping=\"0\" limited=\"false\" name=\"rootz2\" pos=\"0 0 0\" stiffness=\"0\" type=\"slide\"/>\n      <joint armature=\"0\" axis=\"0 1 0\" damping=\"0\" limited=\"false\" name=\"rooty2\" pos=\"0 0 0\" stiffness=\"0\" type=\"hinge\"/>\n      <geom fromto=\"-.5 0 0 .5 0 0\" name=\"torso2\" size=\"0.046\" type=\"capsule\"/>\n      <geom axisangle=\"0 1 0 .87\" name=\"head2\" pos=\".6 0 .1\" size=\"0.046 .15\" type=\"capsule\"/>\n      <!-- <site name='tip'  pos='.15 0 .11'/>-->\n      <body name=\"bthigh2\" pos=\"-.5 0 0\">\n        <joint axis=\"0 1 0\" damping=\"6\" name=\"bthigh2\" pos=\"0 0 0\" range=\"-.52 1.05\" stiffness=\"240\" type=\"hinge\"/>\n        <geom axisangle=\"0 1 0 -3.8\" name=\"bthigh2\" pos=\".1 0 -.13\" size=\"0.046 .145\" type=\"capsule\"/>\n        <body name=\"bshin2\" pos=\".16 0 -.25\">\n          <joint axis=\"0 1 0\" damping=\"4.5\" name=\"bshin2\" pos=\"0 0 0\" range=\"-.785 .785\" stiffness=\"180\" type=\"hinge\"/>\n          <geom axisangle=\"0 1 0 -2.03\" name=\"bshin2\" pos=\"-.14 0 -.07\" rgba=\"0.9 0.6 0.6 1\" size=\"0.046 .15\" type=\"capsule\"/>\n          <body name=\"bfoot2\" pos=\"-.28 0 -.14\">\n            <joint axis=\"0 1 0\" damping=\"3\" name=\"bfoot2\" pos=\"0 0 0\" range=\"-.4 .785\" stiffness=\"120\" type=\"hinge\"/>\n            <geom axisangle=\"0 1 0 -.27\" name=\"bfoot2\" pos=\".03 0 -.097\" rgba=\"0.9 0.6 0.6 1\" size=\"0.046 .094\" type=\"capsule\"/>\n          </body>\n        </body>\n      </body>\n      <body name=\"fthigh2\" pos=\".5 0 0\">\n        <joint axis=\"0 1 0\" damping=\"4.5\" name=\"fthigh2\" pos=\"0 0 0\" range=\"-1 .7\" stiffness=\"180\" type=\"hinge\"/>\n        <geom axisangle=\"0 1 0 .52\" name=\"fthigh2\" pos=\"-.07 0 -.12\" size=\"0.046 .133\" type=\"capsule\"/>\n        <body name=\"fshin2\" pos=\"-.14 0 -.24\">\n          <joint axis=\"0 1 0\" damping=\"3\" name=\"fshin2\" pos=\"0 0 0\" range=\"-1.2 .87\" stiffness=\"120\" type=\"hinge\"/>\n          <geom axisangle=\"0 1 0 -.6\" name=\"fshin2\" pos=\".065 0 -.09\" rgba=\"0.9 0.6 0.6 1\" size=\"0.046 .106\" type=\"capsule\"/>\n          <body name=\"ffoot2\" pos=\".13 0 -.18\">\n            <joint axis=\"0 1 0\" damping=\"1.5\" name=\"ffoot2\" pos=\"0 0 0\" range=\"-.5 .5\" stiffness=\"60\" type=\"hinge\"/>\n            <geom axisangle=\"0 1 0 -.6\" name=\"ffoot2\" pos=\".045 0 -.07\" rgba=\"0.9 0.6 0.6 1\" size=\"0.046 .07\" type=\"capsule\"/>\n          </body>\n        </body>\n      </body>\n    </body>\n\n  </worldbody>\n  <tendon>\n    <spatial name=\"tendon1\" width=\"0.05\" rgba=\".95 .3 .3 1\" limited=\"true\" range=\"1.5 3.5\" stiffness=\"0.1\">\n        <site site=\"t1\"/>\n        <site site=\"t2\"/>\n    </spatial>\n  </tendon>-\n  <actuator>\n    <motor gear=\"120\" joint=\"bthigh\" name=\"bthigh\"/>\n    <motor gear=\"90\" joint=\"bshin\" name=\"bshin\"/>\n    <motor gear=\"60\" joint=\"bfoot\" name=\"bfoot\"/>\n    <motor gear=\"120\" joint=\"fthigh\" name=\"fthigh\"/>\n    <motor gear=\"60\" joint=\"fshin\" name=\"fshin\"/>\n    <motor gear=\"30\" joint=\"ffoot\" name=\"ffoot\"/>\n    <motor gear=\"120\" joint=\"bthigh2\" name=\"bthigh2\"/>\n    <motor gear=\"90\" joint=\"bshin2\" name=\"bshin2\"/>\n    <motor gear=\"60\" joint=\"bfoot2\" name=\"bfoot2\"/>\n    <motor gear=\"120\" joint=\"fthigh2\" name=\"fthigh2\"/>\n    <motor gear=\"60\" joint=\"fshin2\" name=\"fshin2\"/>\n    <motor gear=\"30\" joint=\"ffoot2\" name=\"ffoot2\"/>\n    <motor gear=\"120\" joint=\"wall_joint\" name=\"wall_joint_ac\"/>\n    <!--<motor gear=\"120\" joint=\"wall_joint1\" name=\"wall_joint_ac1\"/>-->\n  </actuator>\n</mujoco>"
  },
  {
    "path": "MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/half_cheetah.xml",
    "content": "<!-- Cheetah Model\n    The state space is populated with joints in the order that they are\n    defined in this file. The actuators also operate on joints.\n    State-Space (name/joint/parameter):\n        - rootx     slider      position (m)\n        - rootz     slider      position (m)\n        - rooty     hinge       angle (rad)\n        - bthigh    hinge       angle (rad)\n        - bshin     hinge       angle (rad)\n        - bfoot     hinge       angle (rad)\n        - fthigh    hinge       angle (rad)\n        - fshin     hinge       angle (rad)\n        - ffoot     hinge       angle (rad)\n        - rootx     slider      velocity (m/s)\n        - rootz     slider      velocity (m/s)\n        - rooty     hinge       angular velocity (rad/s)\n        - bthigh    hinge       angular velocity (rad/s)\n        - bshin     hinge       angular velocity (rad/s)\n        - bfoot     hinge       angular velocity (rad/s)\n        - fthigh    hinge       angular velocity (rad/s)\n        - fshin     hinge       angular velocity (rad/s)\n        - ffoot     hinge       angular velocity (rad/s)\n    Actuators (name/actuator/parameter):\n        - bthigh    hinge       torque (N m)\n        - bshin     hinge       torque (N m)\n        - bfoot     hinge       torque (N m)\n        - fthigh    hinge       torque (N m)\n        - fshin     hinge       torque (N m)\n        - ffoot     hinge       torque (N m)\n-->\n<mujoco model=\"cheetah\">\n  <compiler angle=\"radian\" coordinate=\"local\" inertiafromgeom=\"true\" settotalmass=\"14\"/>\n  <default>\n    <joint armature=\".1\" damping=\".01\" limited=\"true\" solimplimit=\"0 .8 .03\" solreflimit=\".02 1\" stiffness=\"8\"/>\n    <geom conaffinity=\"0\" condim=\"3\" contype=\"1\" friction=\".4 .1 .1\" rgba=\"0.8 0.6 .4 1\" solimp=\"0.0 0.8 0.01\" solref=\"0.02 1\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1 1\"/>\n  </default>\n  <size nstack=\"300000\" nuser_geom=\"1\"/>\n  <option gravity=\"0 0 -9.81\" timestep=\"0.01\"/>\n  <asset>\n    <texture builtin=\"gradient\" height=\"100\" rgb1=\"1 1 1\" rgb2=\"0 0 0\" type=\"skybox\" width=\"100\"/>\n    <texture builtin=\"flat\" height=\"1278\" mark=\"cross\" markrgb=\"1 1 1\" name=\"texgeom\" random=\"0.01\" rgb1=\"0.8 0.6 0.4\" rgb2=\"0.8 0.6 0.4\" type=\"cube\" width=\"127\"/>\n    <texture builtin=\"checker\" height=\"100\" name=\"texplane\" rgb1=\"#2c5987\" rgb2=\"#1f4060\" type=\"2d\" width=\"100\"/>\n    <material name=\"MatPlane\" reflectance=\"0.5\" shininess=\"1\" specular=\"1\" texrepeat=\"60 60\" texture=\"texplane\"/>\n    <material name=\"geom\" texture=\"texgeom\" texuniform=\"true\"/>\n  </asset>\n  <worldbody>\n    <light cutoff=\"100\" diffuse=\"1 1 1\" dir=\"-0 0 -1.3\" directional=\"true\" exponent=\"1\" pos=\"0 0 1.3\" specular=\".1 .1 .1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" material=\"MatPlane\" name=\"floor\" pos=\"65 0 0\" rgba=\"0.2 0.2 0.2 1\" size=\"150 40 40\" type=\"plane\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall1\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"0  -4 1.0\"  euler='0 0 0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall2\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"0   4 1.0\"  euler='0 0 0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall3\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"50 -4 1.0\"  euler='0 0 -0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall4\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"50  4 1.0\"  euler='0 0 -0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall5\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"100 -4 1.0\"  euler='0 0 0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall6\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"100  4 1.0\"  euler='0 0 0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall7\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"150 -4 1.0\"  euler='0 0 0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall8\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"150  4 1.0\"  euler='0 0 0'  rgba=\"1 0.5 0.5 1\"/>\n\n<!--    <body name=\"obj1\" pos=\"5 0 .7\">-->\n<!--        <geom conaffinity=\"1\" condim=\"3\" name=\"obj_geom\" pos='5 0 .7' density=\"0.0001\" type=\"box\" size=\".1 2.3 1.3\"  rgba=\"1 0.5 0.5 .8\"/>-->\n<!--          <joint axis=\"1 0 0\" damping=\".2\" name=\"wall_joint\" pos=\"5 0 .7\" range=\"-10000 10000\" stiffness=\".0\" type=\"slide\"/>-->\n<!--    </body>-->\n<!--    <body name=\"obj1\" pos=\"-39 0 .7\">-->\n<!--        <geom conaffinity=\"1\" condim=\"3\" name=\"obj_geom\" pos='5 0 .7' density=\"0.0001\" type=\"box\" size=\".1 2.3 1.3\"  rgba=\"1 0.5 0.5 .8\"/>-->\n<!--          <joint axis=\"1 0 0\" damping=\".2\" name=\"wall_joint\" pos=\"2 0 .7\" range=\"-30 30\" stiffness=\".0\" type=\"slide\"/>-->\n<!--    </body>-->\n    <body name=\"obj1\" pos=\"5 0 .7\">\n        <geom conaffinity=\"1\" condim=\"3\" name=\"obj_geom\" pos='5 0 .7' density=\"0.0001\" type=\"box\" size=\".1 2.3 1.3\"  rgba=\"1 0.5 0.5 .8\"/>\n          <joint axis=\"1 0 0\" damping=\".2\" name=\"wall_joint\" pos=\"5 0 .7\" range=\"-5000 5000\" stiffness=\".0\" type=\"slide\"/>\n    </body>\n\n    <body name=\"torso\" pos=\"0 0 .7\">\n      <camera name=\"track\" mode=\"trackcom\" pos=\"0 -3 0.3\" xyaxes=\"1 0 0 0 0 1\"/>\n      <joint armature=\"0\" axis=\"1 0 0\" damping=\"0\" limited=\"false\" name=\"rootx\" pos=\"0 0 0\" stiffness=\"0\" type=\"slide\"/>\n      <joint armature=\"0\" axis=\"0 0 1\" damping=\"0\" limited=\"false\" name=\"rootz\" pos=\"0 0 0\" stiffness=\"0\" type=\"slide\"/>\n      <joint armature=\"0\" axis=\"0 1 0\" damping=\"0\" limited=\"false\" name=\"rooty\" pos=\"0 0 0\" stiffness=\"0\" type=\"hinge\"/>\n      <geom fromto=\"-.5 0 0 .5 0 0\" name=\"torso\" size=\"0.046\" type=\"capsule\"/>\n      <geom axisangle=\"0 1 0 .87\" name=\"head\" pos=\".6 0 .1\" size=\"0.046 .15\" type=\"capsule\"/>\n      <!-- <site name='tip'  pos='.15 0 .11'/>-->\n      <body name=\"bthigh\" pos=\"-.5 0 0\">\n        <joint axis=\"0 1 0\" damping=\"6\" name=\"bthigh\" pos=\"0 0 0\" range=\"-.52 1.05\" stiffness=\"240\" type=\"hinge\"/>\n        <geom axisangle=\"0 1 0 -3.8\" name=\"bthigh\" pos=\".1 0 -.13\" size=\"0.046 .145\" type=\"capsule\"/>\n        <body name=\"bshin\" pos=\".16 0 -.25\">\n          <joint axis=\"0 1 0\" damping=\"4.5\" name=\"bshin\" pos=\"0 0 0\" range=\"-.785 .785\" stiffness=\"180\" type=\"hinge\"/>\n          <geom axisangle=\"0 1 0 -2.03\" name=\"bshin\" pos=\"-.14 0 -.07\" rgba=\"0.9 0.6 0.6 1\" size=\"0.046 .15\" type=\"capsule\"/>\n          <body name=\"bfoot\" pos=\"-.28 0 -.14\">\n            <joint axis=\"0 1 0\" damping=\"3\" name=\"bfoot\" pos=\"0 0 0\" range=\"-.4 .785\" stiffness=\"120\" type=\"hinge\"/>\n            <geom axisangle=\"0 1 0 -.27\" name=\"bfoot\" pos=\".03 0 -.097\" rgba=\"0.9 0.6 0.6 1\" size=\"0.046 .094\" type=\"capsule\"/>\n          </body>\n        </body>\n      </body>\n      <body name=\"fthigh\" pos=\".5 0 0\">\n        <joint axis=\"0 1 0\" damping=\"4.5\" name=\"fthigh\" pos=\"0 0 0\" range=\"-1 .7\" stiffness=\"180\" type=\"hinge\"/>\n        <geom axisangle=\"0 1 0 .52\" name=\"fthigh\" pos=\"-.07 0 -.12\" size=\"0.046 .133\" type=\"capsule\"/>\n        <body name=\"fshin\" pos=\"-.14 0 -.24\">\n          <joint axis=\"0 1 0\" damping=\"3\" name=\"fshin\" pos=\"0 0 0\" range=\"-1.2 .87\" stiffness=\"120\" type=\"hinge\"/>\n          <geom axisangle=\"0 1 0 -.6\" name=\"fshin\" pos=\".065 0 -.09\" rgba=\"0.9 0.6 0.6 1\" size=\"0.046 .106\" type=\"capsule\"/>\n          <body name=\"ffoot\" pos=\".13 0 -.18\">\n            <joint axis=\"0 1 0\" damping=\"1.5\" name=\"ffoot\" pos=\"0 0 0\" range=\"-.5 .5\" stiffness=\"60\" type=\"hinge\"/>\n            <geom axisangle=\"0 1 0 -.6\" name=\"ffoot\" pos=\".045 0 -.07\" rgba=\"0.9 0.6 0.6 1\" size=\"0.046 .07\" type=\"capsule\"/>\n          </body>\n        </body>\n      </body>\n    </body>\n  </worldbody>\n  <!-- <equality>\n    <weld name=\"weld1\" body1=\"mocap1\" body2=\"obj1\" solref=\".02 2.5\"/>\n  </equality> -->\n  <actuator>\n    <motor gear=\"120\" joint=\"bthigh\" name=\"bthigh\"/>\n    <motor gear=\"90\" joint=\"bshin\" name=\"bshin\"/>\n    <motor gear=\"60\" joint=\"bfoot\" name=\"bfoot\"/>\n    <motor gear=\"120\" joint=\"fthigh\" name=\"fthigh\"/>\n    <motor gear=\"60\" joint=\"fshin\" name=\"fshin\"/>\n    <motor gear=\"30\" joint=\"ffoot\" name=\"ffoot\"/>\n    <motor gear=\"120\" joint=\"wall_joint\" name=\"wall_joint_ac\"/>\n  </actuator>\n</mujoco>"
  },
  {
    "path": "MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/hopper.xml",
    "content": "<mujoco model=\"hopper\">\n  <compiler angle=\"degree\" coordinate=\"global\" inertiafromgeom=\"true\"/>\n  <default>\n    <joint armature=\"1\" damping=\"1\" limited=\"true\"/>\n    <geom conaffinity=\"1\" condim=\"1\" contype=\"1\" margin=\"0.001\" material=\"geom\" rgba=\"0.8 0.6 .4 1\" solimp=\".8 .8 .01\" solref=\".02 1\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-.4 .4\"/>\n  </default>\n  <option integrator=\"RK4\" timestep=\"0.002\"/>\n  <visual>\n    <map znear=\"0.02\"/>\n  </visual>\n  <worldbody>\n    <light cutoff=\"100\" diffuse=\"1 1 1\" dir=\"-0 0 -1.3\" directional=\"true\" exponent=\"1\" pos=\"0 0 1.3\" specular=\".1 .1 .1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"floor\" pos=\"40 0 0\" rgba=\"0.2 0.2 0.2 1\" size=\"100 25 .125\" type=\"plane\" material=\"MatPlane\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall1\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"0  -4 1.0\"  euler='0 0 0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall2\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"0   4 1.0\"  euler='0 0 0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall3\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"50 -4 1.0\"  euler='0 0 -0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall4\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"50  4 1.0\"  euler='0 0 -0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall5\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"100 -4 1.0\"  euler='0 0 0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall6\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"100  4 1.0\"  euler='0 0 0'  rgba=\"1 0.5 0.5 1\"/>\n    <body name=\"mocap1\" pos=\"5 0 .5\" mocap=\"true\">\n        <geom conaffinity=\"0\" condim=\"3\" name=\"mocap_geom\" pos='5 0 .5' type=\"box\" size=\".3 0.3 0.3\"  rgba=\"1 0.5 0.5 0\"/>\n    </body>\n    <body name=\"obj1\" pos=\"5 0 .5\">\n        <freejoint name=\"obj1_fj\"/>\n        <geom conaffinity=\"1\" condim=\"3\" name=\"obj_geom\" pos='5 0 .5' type=\"box\" size=\".3 0.3 0.3\"  rgba=\"1 0.5 0.5 1\"/>\n    </body>\n    <body name=\"torso\" pos=\"0 1 1.25\">\n      <camera name=\"track\" mode=\"trackcom\" pos=\"0 -3 1\" xyaxes=\"1 0 0 0 0 1\"/>\n      <joint armature=\"0\" axis=\"1 0 0\" damping=\"0\" limited=\"false\" name=\"rootx\" pos=\"0 0 0\" stiffness=\"0\" type=\"slide\"/>\n      <joint armature=\"0\" axis=\"0 0 1\" damping=\"0\" limited=\"false\" name=\"rootz\" pos=\"0 0 0\" ref=\"1.25\" stiffness=\"0\" type=\"slide\"/>\n      <joint armature=\"0\" axis=\"0 1 0\" damping=\"0\" limited=\"false\" name=\"rooty\" pos=\"0 0 1.25\" stiffness=\"0\" type=\"hinge\"/>\n      <geom friction=\"0.9\" fromto=\"0 0 1.45 0 0 1.05\" name=\"torso_geom\" size=\"0.05\" type=\"capsule\"/>\n      <body name=\"thigh\" pos=\"0 0 1.05\">\n        <joint axis=\"0 -1 0\" name=\"thigh_joint\" pos=\"0 0 1.05\" range=\"-150 0\" type=\"hinge\"/>\n        <geom friction=\"0.9\" fromto=\"0 0 1.05 0 0 0.6\" name=\"thigh_geom\" size=\"0.05\" type=\"capsule\"/>\n        <body name=\"leg\" pos=\"0 0 0.35\">\n          <joint axis=\"0 -1 0\" name=\"leg_joint\" pos=\"0 0 0.6\" range=\"-150 0\" type=\"hinge\"/>\n          <geom friction=\"0.9\" fromto=\"0 0 0.6 0 0 0.1\" name=\"leg_geom\" size=\"0.04\" type=\"capsule\"/>\n          <body name=\"foot\" pos=\"0.13/2 0 0.1\">\n            <joint axis=\"0 -1 0\" name=\"foot_joint\" pos=\"0 0 0.1\" range=\"-45 45\" type=\"hinge\"/>\n            <geom friction=\"2.0\" fromto=\"-0.13 0 0.1 0.26 0 0.1\" name=\"foot_geom\" size=\"0.06\" type=\"capsule\"/>\n          </body>\n        </body>\n      </body>\n    </body>\n  </worldbody>\n  <equality>\n    <weld name=\"weld1\" body1=\"mocap1\" body2=\"obj1\" solref=\".02 .5\"/>\n  </equality>\n  <actuator>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" gear=\"200.0\" joint=\"thigh_joint\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" gear=\"200.0\" joint=\"leg_joint\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" gear=\"200.0\" joint=\"foot_joint\"/>\n  </actuator>\n    <asset>\n        <texture type=\"skybox\" builtin=\"gradient\" rgb1=\".4 .5 .6\" rgb2=\"0 0 0\"\n            width=\"100\" height=\"100\"/>\n        <texture builtin=\"flat\" height=\"1278\" mark=\"cross\" markrgb=\"1 1 1\" name=\"texgeom\" random=\"0.01\" rgb1=\"0.8 0.6 0.4\" rgb2=\"0.8 0.6 0.4\" type=\"cube\" width=\"127\"/>\n        <texture builtin=\"checker\" height=\"100\" name=\"texplane\" rgb1=\"#2c5987\" rgb2=\"#1f4060\" type=\"2d\" width=\"100\"/>\n        <material name=\"MatPlane\" reflectance=\"0.5\" shininess=\"1\" specular=\"1\" texrepeat=\"60 60\" texture=\"texplane\"/>\n        <material name=\"geom\" texture=\"texgeom\" texuniform=\"true\"/>\n    </asset>\n</mujoco>"
  },
  {
    "path": "MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/humanoid.xml",
    "content": "<mujoco model=\"humanoid\">\n    <compiler angle=\"degree\" inertiafromgeom=\"true\"/>\n    <default>\n        <joint armature=\"1\" damping=\"1\" limited=\"true\"/>\n        <geom conaffinity=\"1\" condim=\"1\" contype=\"1\" margin=\"0.001\" material=\"geom\" rgba=\"0.8 0.6 .4 1\"/>\n        <motor ctrllimited=\"true\" ctrlrange=\"-.4 .4\"/>\n    </default>\n    <option integrator=\"RK4\" iterations=\"50\" solver=\"PGS\" timestep=\"0.003\">\n        <!-- <flags solverstat=\"enable\" energy=\"enable\"/>-->\n    </option>\n    <size nkey=\"5\" nuser_geom=\"1\"/>\n    <visual>\n        <map fogend=\"5\" fogstart=\"3\"/>\n    </visual>\n    <asset>\n        <texture builtin=\"gradient\" height=\"100\" rgb1=\".4 .5 .6\" rgb2=\"0 0 0\" type=\"skybox\" width=\"100\"/>\n        <!-- <texture builtin=\"gradient\" height=\"100\" rgb1=\"1 1 1\" rgb2=\"0 0 0\" type=\"skybox\" width=\"100\"/>-->\n        <texture builtin=\"flat\" height=\"1278\" mark=\"cross\" markrgb=\"1 1 1\" name=\"texgeom\" random=\"0.01\" rgb1=\"0.8 0.6 0.4\" rgb2=\"0.8 0.6 0.4\" type=\"cube\" width=\"127\"/>\n        <material name=\"geom\" texture=\"texgeom\" texuniform=\"true\"/>\n\n        <texture builtin=\"checker\" height=\"100\" name=\"texplane\" rgb1=\"#2c5987\" rgb2=\"#1f4060\" type=\"2d\" width=\"100\"/>\n        <material name=\"MatPlane\" reflectance=\"0.5\" shininess=\"1\" specular=\"1\" texrepeat=\"60 60\" texture=\"texplane\"/>\n\n    </asset>\n    <worldbody>\n        <light cutoff=\"100\" diffuse=\"1 1 1\" dir=\"-0 0 -1.3\" directional=\"true\" exponent=\"1\" pos=\"0 0 1.3\" specular=\".1 .1 .1\"/>\n        <geom condim=\"3\" friction=\"1 .1 .1\" material=\"MatPlane\" name=\"floor\" pos=\"30 0 0\" rgba=\"0.2 0.2 0.2 1\" size=\"70 25 0.125\" type=\"plane\"/>\n\n        <!-- <geom condim=\"3\" material=\"MatPlane\" name=\"floor\" pos=\"0 0 0\" size=\"10 10 0.125\" type=\"plane\"/>-->\n        <body name=\"torso\" pos=\"0 0 1.4\">\n            <camera name=\"track\" mode=\"trackcom\" pos=\"0 -4 0\" xyaxes=\"1 0 0 0 0 1\"/>\n            <joint armature=\"0\" damping=\"0\" limited=\"false\" name=\"root\" pos=\"0 0 0\" stiffness=\"0\" type=\"free\"/>\n            <geom fromto=\"0 -.07 0 0 .07 0\" name=\"torso1\" size=\"0.07\" type=\"capsule\"/>\n            <geom name=\"head\" pos=\"0 0 .19\" size=\".09\" type=\"sphere\" user=\"258\"/>\n            <geom fromto=\"-.01 -.06 -.12 -.01 .06 -.12\" name=\"uwaist\" size=\"0.06\" type=\"capsule\"/>\n            <body name=\"lwaist\" pos=\"-.01 0 -0.260\" quat=\"1.000 0 -0.002 0\">\n                <geom fromto=\"0 -.06 0 0 .06 0\" name=\"lwaist\" size=\"0.06\" type=\"capsule\"/>\n                <joint armature=\"0.02\" axis=\"0 0 1\" damping=\"5\" name=\"abdomen_z\" pos=\"0 0 0.065\" range=\"-45 45\" stiffness=\"20\" type=\"hinge\"/>\n                <joint armature=\"0.02\" axis=\"0 1 0\" damping=\"5\" name=\"abdomen_y\" pos=\"0 0 0.065\" range=\"-75 30\" stiffness=\"10\" type=\"hinge\"/>\n                <body name=\"pelvis\" pos=\"0 0 -0.165\" quat=\"1.000 0 -0.002 0\">\n                    <joint armature=\"0.02\" axis=\"1 0 0\" damping=\"5\" name=\"abdomen_x\" pos=\"0 0 0.1\" range=\"-35 35\" stiffness=\"10\" type=\"hinge\"/>\n                    <geom fromto=\"-.02 -.07 0 -.02 .07 0\" name=\"butt\" size=\"0.09\" type=\"capsule\"/>\n                    <body name=\"right_thigh\" pos=\"0 -0.1 -0.04\">\n                        <joint armature=\"0.01\" axis=\"1 0 0\" damping=\"5\" name=\"right_hip_x\" pos=\"0 0 0\" range=\"-25 5\" stiffness=\"10\" type=\"hinge\"/>\n                        <joint armature=\"0.01\" axis=\"0 0 1\" damping=\"5\" name=\"right_hip_z\" pos=\"0 0 0\" range=\"-60 35\" stiffness=\"10\" type=\"hinge\"/>\n                        <joint armature=\"0.0080\" axis=\"0 1 0\" damping=\"5\" name=\"right_hip_y\" pos=\"0 0 0\" range=\"-110 20\" stiffness=\"20\" type=\"hinge\"/>\n                        <geom fromto=\"0 0 0 0 0.01 -.34\" name=\"right_thigh1\" size=\"0.06\" type=\"capsule\"/>\n                        <body name=\"right_shin\" pos=\"0 0.01 -0.403\">\n                            <joint armature=\"0.0060\" axis=\"0 -1 0\" name=\"right_knee\" pos=\"0 0 .02\" range=\"-160 -2\" type=\"hinge\"/>\n                            <geom fromto=\"0 0 0 0 0 -.3\" name=\"right_shin1\" size=\"0.049\" type=\"capsule\"/>\n                            <body name=\"right_foot\" pos=\"0 0 -0.45\">\n                                <geom name=\"right_foot\" pos=\"0 0 0.1\" size=\"0.075\" type=\"sphere\" user=\"0\"/>\n                            </body>\n                        </body>\n                    </body>\n                    <body name=\"left_thigh\" pos=\"0 0.1 -0.04\">\n                        <joint armature=\"0.01\" axis=\"-1 0 0\" damping=\"5\" name=\"left_hip_x\" pos=\"0 0 0\" range=\"-25 5\" stiffness=\"10\" type=\"hinge\"/>\n                        <joint armature=\"0.01\" axis=\"0 0 -1\" damping=\"5\" name=\"left_hip_z\" pos=\"0 0 0\" range=\"-60 35\" stiffness=\"10\" type=\"hinge\"/>\n                        <joint armature=\"0.01\" axis=\"0 1 0\" damping=\"5\" name=\"left_hip_y\" pos=\"0 0 0\" range=\"-110 20\" stiffness=\"20\" type=\"hinge\"/>\n                        <geom fromto=\"0 0 0 0 -0.01 -.34\" name=\"left_thigh1\" size=\"0.06\" type=\"capsule\"/>\n                        <body name=\"left_shin\" pos=\"0 -0.01 -0.403\">\n                            <joint armature=\"0.0060\" axis=\"0 -1 0\" name=\"left_knee\" pos=\"0 0 .02\" range=\"-160 -2\" stiffness=\"1\" type=\"hinge\"/>\n                            <geom fromto=\"0 0 0 0 0 -.3\" name=\"left_shin1\" size=\"0.049\" type=\"capsule\"/>\n                            <body name=\"left_foot\" pos=\"0 0 -0.45\">\n                                <geom name=\"left_foot\" type=\"sphere\" size=\"0.075\" pos=\"0 0 0.1\" user=\"0\" />\n                            </body>\n                        </body>\n                    </body>\n                </body>\n            </body>\n            <body name=\"right_upper_arm\" pos=\"0 -0.17 0.06\">\n                <joint armature=\"0.0068\" axis=\"2 1 1\" name=\"right_shoulder1\" pos=\"0 0 0\" range=\"-85 60\" stiffness=\"1\" type=\"hinge\"/>\n                <joint armature=\"0.0051\" axis=\"0 -1 1\" name=\"right_shoulder2\" pos=\"0 0 0\" range=\"-85 60\" stiffness=\"1\" type=\"hinge\"/>\n                <geom fromto=\"0 0 0 .16 -.16 -.16\" name=\"right_uarm1\" size=\"0.04 0.16\" type=\"capsule\"/>\n                <body name=\"right_lower_arm\" pos=\".18 -.18 -.18\">\n                    <joint armature=\"0.0028\" axis=\"0 -1 1\" name=\"right_elbow\" pos=\"0 0 0\" range=\"-90 50\" stiffness=\"0\" type=\"hinge\"/>\n                    <geom fromto=\"0.01 0.01 0.01 .17 .17 .17\" name=\"right_larm\" size=\"0.031\" type=\"capsule\"/>\n                    <geom name=\"right_hand\" pos=\".18 .18 .18\" size=\"0.04\" type=\"sphere\"/>\n                    <camera pos=\"0 0 0\"/>\n                </body>\n            </body>\n            <body name=\"left_upper_arm\" pos=\"0 0.17 0.06\">\n                <joint armature=\"0.0068\" axis=\"2 -1 1\" name=\"left_shoulder1\" pos=\"0 0 0\" range=\"-60 85\" stiffness=\"1\" type=\"hinge\"/>\n                <joint armature=\"0.0051\" axis=\"0 1 1\" name=\"left_shoulder2\" pos=\"0 0 0\" range=\"-60 85\" stiffness=\"1\" type=\"hinge\"/>\n                <geom fromto=\"0 0 0 .16 .16 -.16\" name=\"left_uarm1\" size=\"0.04 0.16\" type=\"capsule\"/>\n                <body name=\"left_lower_arm\" pos=\".18 .18 -.18\">\n                    <joint armature=\"0.0028\" axis=\"0 -1 -1\" name=\"left_elbow\" pos=\"0 0 0\" range=\"-90 50\" stiffness=\"0\" type=\"hinge\"/>\n                    <geom fromto=\"0.01 -0.01 0.01 .17 -.17 .17\" name=\"left_larm\" size=\"0.031\" type=\"capsule\"/>\n                    <geom name=\"left_hand\" pos=\".18 -.18 .18\" size=\"0.04\" type=\"sphere\"/>\n                </body>\n            </body>\n        </body>\n        <body name='b1' pos=\"0 2.3 1\" euler='0 0 30'>\n            <freejoint name=\"b1_fj\"/>\n            <geom conaffinity=\"1\" condim=\"3\" name=\"wall1\" type=\"box\" density=\".000001\" size=\"20 0.01 .7\"    rgba=\"1 0.5 0.5 1\"/>\n        </body>\n        <body name='b2' pos=\"0 -2.3 1\" euler='0 0 30'>\n            <freejoint name=\"b2_fj\"/>\n            <geom conaffinity=\"1\" condim=\"3\" name=\"wall2\" type=\"box\" density=\".000001\" size=\"20 0.01 .7\"    rgba=\"1 0.5 0.5 1\"/>\n        </body>\n        <body name='b3' pos=\"40 2.3 1\" euler='0 0 -30'>\n            <freejoint name=\"b3_fj\"/>\n            <geom conaffinity=\"1\" condim=\"3\" name=\"wall3\" type=\"box\" density=\".000001\" size=\"20 0.01 .7\"    rgba=\"1 0.5 0.5 1\"/>\n        </body>\n        <body name='b4' pos=\"40 -2.3 1\" euler='0 0 -30'>\n            <freejoint name=\"b4_fj\"/>\n            <geom conaffinity=\"1\" condim=\"3\" name=\"wall4\" type=\"box\" density=\".000001\" size=\"20 0.01 .7\"    rgba=\"1 0.5 0.5 1\"/>\n        </body>\n        <body name='b5' pos=\"80 2.3 1\" euler='0 0 30'>\n            <freejoint name=\"b5_fj\"/>\n            <geom conaffinity=\"1\" condim=\"3\" name=\"wall5\" type=\"box\" density=\".000001\" size=\"20 0.01 .7\"    rgba=\"1 0.5 0.5 1\"/>\n        </body>\n        <body name='b6' pos=\"80 -2.3 1\" euler='0 0 30'>\n            <freejoint name=\"b6_fj\"/>\n            <geom conaffinity=\"1\" condim=\"3\" name=\"wall6\" type=\"box\" density=\".000001\" size=\"20 0.01 .7\"    rgba=\"1 0.5 0.5 1\"/>\n        </body>\n    </worldbody>\n    <tendon>\n        <fixed name=\"left_hipknee\">\n            <joint coef=\"-1\" joint=\"left_hip_y\"/>\n            <joint coef=\"1\" joint=\"left_knee\"/>\n        </fixed>\n        <fixed name=\"right_hipknee\">\n            <joint coef=\"-1\" joint=\"right_hip_y\"/>\n            <joint coef=\"1\" joint=\"right_knee\"/>\n        </fixed>\n    </tendon>\n\n    <actuator>\n        <motor gear=\"100\" joint=\"abdomen_y\" name=\"abdomen_y\"/>\n        <motor gear=\"100\" joint=\"abdomen_z\" name=\"abdomen_z\"/>\n        <motor gear=\"100\" joint=\"abdomen_x\" name=\"abdomen_x\"/>\n        <motor gear=\"100\" joint=\"right_hip_x\" name=\"right_hip_x\"/>\n        <motor gear=\"100\" joint=\"right_hip_z\" name=\"right_hip_z\"/>\n        <motor gear=\"300\" joint=\"right_hip_y\" name=\"right_hip_y\"/>\n        <motor gear=\"200\" joint=\"right_knee\" name=\"right_knee\"/>\n        <motor gear=\"100\" joint=\"left_hip_x\" name=\"left_hip_x\"/>\n        <motor gear=\"100\" joint=\"left_hip_z\" name=\"left_hip_z\"/>\n        <motor gear=\"300\" joint=\"left_hip_y\" name=\"left_hip_y\"/>\n        <motor gear=\"200\" joint=\"left_knee\" name=\"left_knee\"/>\n        <motor gear=\"25\" joint=\"right_shoulder1\" name=\"right_shoulder1\"/>\n        <motor gear=\"25\" joint=\"right_shoulder2\" name=\"right_shoulder2\"/>\n        <motor gear=\"25\" joint=\"right_elbow\" name=\"right_elbow\"/>\n        <motor gear=\"25\" joint=\"left_shoulder1\" name=\"left_shoulder1\"/>\n        <motor gear=\"25\" joint=\"left_shoulder2\" name=\"left_shoulder2\"/>\n        <motor gear=\"25\" joint=\"left_elbow\" name=\"left_elbow\"/>\n    </actuator>\n</mujoco>"
  },
  {
    "path": "MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/manyagent_ant.xml",
    "content": "<mujoco model=\"ant\">\n  <size nconmax=\"200\"/>\n  <compiler angle=\"degree\" coordinate=\"local\" inertiafromgeom=\"true\"/>\n  <option integrator=\"RK4\" timestep=\"0.01\"/>\n  <custom>\n    <numeric data=\"0.0 0.0 0.55 1.0 0.0 0.0 0.0 0.0 1.0 0.0 -1.0 0.0 -1.0 0.0 1.0\" name=\"init_qpos\"/>\n  </custom>\n  <default>\n    <joint armature=\"1\" damping=\"1\" limited=\"true\"/>\n    <geom conaffinity=\"0\" condim=\"3\" density=\"5.0\" friction=\"1 0.5 0.5\" margin=\"0.01\" rgba=\"0.8 0.6 0.4 1\"/>\n  </default>\n  <asset>\n    <texture builtin=\"gradient\" height=\"100\" rgb1=\"1 1 1\" rgb2=\"0 0 0\" type=\"skybox\" width=\"100\"/>\n    <texture builtin=\"flat\" height=\"1278\" mark=\"cross\" markrgb=\"1 1 1\" name=\"texgeom\" random=\"0.01\" rgb1=\"0.8 0.6 0.4\" rgb2=\"0.8 0.6 0.4\" type=\"cube\" width=\"127\"/>\n    <texture builtin=\"checker\" height=\"100\" name=\"texplane\" rgb1=\"0 0 0\" rgb2=\"0.8 0.8 0.8\" type=\"2d\" width=\"100\"/>\n    <material name=\"MatPlane\" reflectance=\"0.5\" shininess=\"1\" specular=\"1\" texrepeat=\"60 60\" texture=\"texplane\"/>\n\n    <texture builtin=\"checker\" height=\"100\" name=\"texbox\" rgb1=\"#ff66ff\" rgb2=\"#ff66ff\" type=\"2d\" width=\"100\"/>\n    <material name=\"BoxMat\" reflectance=\"0.5\" shininess=\"1\" specular=\"1\" texrepeat=\"60 60\" texture=\"texbox\"/>\n\n\n    <material name=\"geom\" texture=\"texgeom\" texuniform=\"true\"/>\n  </asset>\n  <worldbody>\n    <light cutoff=\"100\" diffuse=\"1 1 1\" dir=\"-0 0 -1.3\" directional=\"true\" exponent=\"1\" pos=\"0 0 1.3\" specular=\".1 .1 .1\"/>\n<!--    <geom conaffinity=\"1\" condim=\"3\" material=\"MatPlane\" name=\"floor\" pos=\"0 0 0\" rgba=\"0.8 0.9 0.8 1\" size=\"40 40 40\" type=\"plane\"/>-->\n\n    <geom conaffinity=\"1\" condim=\"3\" material=\"MatPlane\" name=\"floor\" pos=\"30 0 0\" rgba=\"0.2 0.2 0.2 1\" size=\"70 25 40\" type=\"plane\"/>\n\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall1\" type=\"box\" material=\"BoxMat\" size=\"0.1 14 1.0\" pos=\"-14  0 1\"    rgba=\"#ff66ff\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall2\" type=\"box\" material=\"BoxMat\" size=\"14 .1 1.0\" pos=\"0  14 1\"    rgba=\"#ff66ff\"/>\n\n    <body name=\"torso\" pos=\"0 0 0.75\">\n<!--      <camera name=\"track\" mode=\"trackcom\" pos=\"0 -3 0.3\" xyaxes=\"1 0 0 0 0 1\"/>-->\n\n      <camera name=\"track\" mode=\"trackcom\" pos=\"0 -10 -10\" xyaxes=\".8 .4 0 0 .4 .6\"/>\n\n      <geom name=\"torso_geom\" pos=\"0 0 0\" size=\"0.25\" type=\"sphere\"/>\n      <joint armature=\"0\" damping=\"0\" limited=\"false\" margin=\"0.01\" name=\"root\" pos=\"0 0 0\" type=\"free\"/>\n      <body name=\"front_left_leg\" pos=\"0 0 0\">\n        <geom fromto=\"0.0 0.0 0.0 0.2 0.2 0.0\" name=\"aux_1_geom\" size=\"0.08\" type=\"capsule\"/>\n        <body name=\"aux_1\" pos=\"0.2 0.2 0\">\n          <joint axis=\"0 0 1\" name=\"hip_1\" pos=\"0.0 0.0 0.0\" range=\"-30 30\" type=\"hinge\"/>\n          <geom fromto=\"0.0 0.0 0.0 0.2 0.2 0.0\" name=\"left_leg_geom\" size=\"0.08\" type=\"capsule\"/>\n          <body pos=\"0.2 0.2 0\">\n            <joint axis=\"-1 1 0\" name=\"ankle_1\" pos=\"0.0 0.0 0.0\" range=\"30 70\" type=\"hinge\"/>\n            <geom fromto=\"0.0 0.0 0.0 0.4 0.4 0.0\" name=\"left_ankle_geom\" size=\"0.08\" type=\"capsule\"/>\n          </body>\n        </body>\n      </body>\n      <body name=\"right_back_leg\" pos=\"0 0 0\">\n        <geom fromto=\"0.0 0.0 0.0 0.2 -0.2 0.0\" name=\"aux_4_geom\" size=\"0.08\" type=\"capsule\"/>\n        <body name=\"aux_4\" pos=\"0.2 -0.2 0\">\n          <joint axis=\"0 0 1\" name=\"hip_4\" pos=\"0.0 0.0 0.0\" range=\"-30 30\" type=\"hinge\"/>\n          <geom fromto=\"0.0 0.0 0.0 0.2 -0.2 0.0\" name=\"rightback_leg_geom\" size=\"0.08\" type=\"capsule\"/>\n          <body pos=\"0.2 -0.2 0\">\n            <joint axis=\"1 1 0\" name=\"ankle_4\" pos=\"0.0 0.0 0.0\" range=\"30 70\" type=\"hinge\"/>\n            <geom fromto=\"0.0 0.0 0.0 0.4 -0.4 0.0\" name=\"fourth_ankle_geom\" size=\"0.08\" type=\"capsule\"/>\n          </body>\n        </body>\n      </body>\n      <body name=\"midx\" pos=\"0.0 0 0\">\n        <geom density=\"1000\" fromto=\"0 0 0 -1 0 0\" size=\"0.1\" type=\"capsule\"/>\n        <!--<joint axis=\"0 0 1\" limited=\"true\" name=\"rot2\" pos=\"0 0 0\" range=\"-100 100\" type=\"hinge\"/>-->\n        <body name=\"front_right_legx\" pos=\"-1 0 0\">\n          <geom fromto=\"0.0 0.0 0.0 0.0 0.2 0.0\" name=\"aux_2_geomx\" size=\"0.08\" type=\"capsule\"/>\n          <body name=\"aux_2x\" pos=\"0.0 0.2 0\">\n            <joint axis=\"0 0 1\" name=\"hip_2x\" pos=\"0.0 0.0 0.0\" range=\"-30 30\" type=\"hinge\"/>\n            <geom fromto=\"0.0 0.0 0.0 -0.2 0.2 0.0\" name=\"right_leg_geomx\" size=\"0.08\" type=\"capsule\"/>\n            <body pos=\"-0.2 0.2 0\">\n              <joint axis=\"1 1 0\" name=\"ankle_2x\" pos=\"0.0 0.0 0.0\" range=\"-70 -30\" type=\"hinge\"/>\n              <geom fromto=\"0.0 0.0 0.0 -0.4 0.4 0.0\" name=\"right_ankle_geomx\" size=\"0.08\" type=\"capsule\"/>\n            </body>\n          </body>\n        </body>\n        <body name=\"back_legx\" pos=\"-1 0 0\">\n          <geom fromto=\"0.0 0.0 0.0 0.0 -0.2 0.0\" name=\"aux_3_geomx\" size=\"0.08\" type=\"capsule\"/>\n          <body name=\"aux_3x\" pos=\"0.0 -0.2 0\">\n            <joint axis=\"0 0 1\" name=\"hip_3x\" pos=\"0.0 0.0 0.0\" range=\"-30 30\" type=\"hinge\"/>\n            <geom fromto=\"0.0 0.0 0.0 -0.2 -0.2 0.0\" name=\"back_leg_geomx\" size=\"0.08\" type=\"capsule\"/>\n            <body pos=\"-0.2 -0.2 0\">\n              <joint axis=\"-1 1 0\" name=\"ankle_3x\" pos=\"0.0 0.0 0.0\" range=\"-70 -30\" type=\"hinge\"/>\n              <geom fromto=\"0.0 0.0 0.0 -0.4 -0.4 0.0\" name=\"third_ankle_geomx\" size=\"0.08\" type=\"capsule\"/>\n            </body>\n          </body>\n        </body>\n        <body name=\"mid\" pos=\"-1 0 0\">\n          <geom density=\"1000\" fromto=\"0 0 0 -1 0 0\" size=\"0.1\" type=\"capsule\"/>\n          <!--<joint axis=\"0 0 1\" limited=\"true\" name=\"rot2\" pos=\"0 0 0\" range=\"-100 100\" type=\"hinge\"/>-->\n          <!--<body name=\"front_right_leg\" pos=\"-1 0 0\">\n            <geom fromto=\"0.0 0.0 0.0 -0.2 0.2 0.0\" name=\"aux_2_geom\" size=\"0.08\" type=\"capsule\"/>\n            <body name=\"aux_2\" pos=\"-0.2 0.2 0\">\n              <joint axis=\"0 0 1\" name=\"hip_2\" pos=\"0.0 0.0 0.0\" range=\"-30 30\" type=\"hinge\"/>\n              <geom fromto=\"0.0 0.0 0.0 -0.2 0.2 0.0\" name=\"right_leg_geom\" size=\"0.08\" type=\"capsule\"/>\n              <body pos=\"-0.2 0.2 0\">\n                <joint axis=\"1 1 0\" name=\"ankle_2\" pos=\"0.0 0.0 0.0\" range=\"-70 -30\" type=\"hinge\"/>\n                <geom fromto=\"0.0 0.0 0.0 -0.4 0.4 0.0\" name=\"right_ankle_geom\" size=\"0.08\" type=\"capsule\"/>\n              </body>\n            </body>\n          </body>\n          <body name=\"back_leg\" pos=\"-1 0 0\">\n            <geom fromto=\"0.0 0.0 0.0 -0.2 -0.2 0.0\" name=\"aux_3_geom\" size=\"0.08\" type=\"capsule\"/>\n            <body name=\"aux_3\" pos=\"-0.2 -0.2 0\">\n              <joint axis=\"0 0 1\" name=\"hip_3\" pos=\"0.0 0.0 0.0\" range=\"-30 30\" type=\"hinge\"/>\n              <geom fromto=\"0.0 0.0 0.0 -0.2 -0.2 0.0\" name=\"back_leg_geom\" size=\"0.08\" type=\"capsule\"/>\n              <body pos=\"-0.2 -0.2 0\">\n                <joint axis=\"-1 1 0\" name=\"ankle_3\" pos=\"0.0 0.0 0.0\" range=\"-70 -30\" type=\"hinge\"/>\n                <geom fromto=\"0.0 0.0 0.0 -0.4 -0.4 0.0\" name=\"third_ankle_geom\" size=\"0.08\" type=\"capsule\"/>\n              </body>\n            </body>\n          </body>-->\n          <body name=\"front_right_leg\" pos=\"-1 0 0\">\n            <geom fromto=\"0.0 0.0 0.0 0.0 0.2 0.0\" name=\"aux_2_geom\" size=\"0.08\" type=\"capsule\"/>\n            <body name=\"aux_2\" pos=\"0.0 0.2 0\">\n              <joint axis=\"0 0 1\" name=\"hip_2\" pos=\"0.0 0.0 0.0\" range=\"-30 30\" type=\"hinge\"/>\n              <geom fromto=\"0.0 0.0 0.0 -0.2 0.2 0.0\" name=\"right_leg_geom\" size=\"0.08\" type=\"capsule\"/>\n              <body pos=\"-0.2 0.2 0\">\n                <joint axis=\"1 1 0\" name=\"ankle_2\" pos=\"0.0 0.0 0.0\" range=\"-70 -30\" type=\"hinge\"/>\n                <geom fromto=\"0.0 0.0 0.0 -0.4 0.4 0.0\" name=\"right_ankle_geom\" size=\"0.08\" type=\"capsule\"/>\n              </body>\n            </body>\n          </body>\n          <body name=\"back_leg\" pos=\"-1 0 0\">\n            <geom fromto=\"0.0 0.0 0.0 0.0 -0.2 0.0\" name=\"aux_3_geom\" size=\"0.08\" type=\"capsule\"/>\n            <body name=\"aux_3\" pos=\"0.0 -0.2 0\">\n              <joint axis=\"0 0 1\" name=\"hip_3\" pos=\"0.0 0.0 0.0\" range=\"-30 30\" type=\"hinge\"/>\n              <geom fromto=\"0.0 0.0 0.0 -0.2 -0.2 0.0\" name=\"back_leg_geom\" size=\"0.08\" type=\"capsule\"/>\n              <body pos=\"-0.2 -0.2 0\">\n                <joint axis=\"-1 1 0\" name=\"ankle_3\" pos=\"0.0 0.0 0.0\" range=\"-70 -30\" type=\"hinge\"/>\n                <geom fromto=\"0.0 0.0 0.0 -0.4 -0.4 0.0\" name=\"third_ankle_geom\" size=\"0.08\" type=\"capsule\"/>\n              </body>\n            </body>\n          </body>\n        </body>\n      </body>\n    </body>\n        <body name='b1' pos=\"0 5 1\" euler='0 0 30'>\n      <freejoint name=\"b1_fj\"/>\n      <geom conaffinity=\"1\" condim=\"3\" name=\"wall1\" type=\"box\" density=\".000001\" size=\"20 0.01 .7\"    rgba=\"1 0.5 0.5 1\"/>\n    </body>\n    <body name='b2' pos=\"0 -5 1\" euler='0 0 30'>\n      <freejoint name=\"b2_fj\"/>\n      <geom conaffinity=\"1\" condim=\"3\" name=\"wall2\" type=\"box\" density=\".000001\" size=\"20 0.01 .7\"    rgba=\"1 0.5 0.5 1\"/>\n    </body>\n    <body name='b3' pos=\"40 5 1\" euler='0 0 -30'>\n      <freejoint name=\"b3_fj\"/>\n      <geom conaffinity=\"1\" condim=\"3\" name=\"wall3\" type=\"box\" density=\".000001\" size=\"20 0.01 .7\"    rgba=\"1 0.5 0.5 1\"/>\n    </body>\n    <body name='b4' pos=\"40 -5 1\" euler='0 0 -30'>\n      <freejoint name=\"b4_fj\"/>\n      <geom conaffinity=\"1\" condim=\"3\" name=\"wall4\" type=\"box\" density=\".000001\" size=\"20 0.01 .7\"    rgba=\"1 0.5 0.5 1\"/>\n    </body>\n    <body name='b5' pos=\"80 5 1\" euler='0 0 30'>\n      <freejoint name=\"b5_fj\"/>\n      <geom conaffinity=\"1\" condim=\"3\" name=\"wall5\" type=\"box\" density=\".000001\" size=\"20 0.01 .7\"    rgba=\"1 0.5 0.5 1\"/>\n    </body>\n    <body name='b6' pos=\"80 -5 1\" euler='0 0 30'>\n      <freejoint name=\"b6_fj\"/>\n      <geom conaffinity=\"1\" condim=\"3\" name=\"wall6\" type=\"box\" density=\".000001\" size=\"20 0.01 .7\"    rgba=\"1 0.5 0.5 1\"/>\n    </body>\n  </worldbody>\n  <actuator>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"hip_4\" gear=\"150\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"ankle_4\" gear=\"150\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"hip_1\" gear=\"150\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"ankle_1\" gear=\"150\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"hip_2\" gear=\"150\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"ankle_2\" gear=\"150\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"hip_3\" gear=\"150\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"ankle_3\" gear=\"150\"/>\n  </actuator>\n</mujoco>"
  },
  {
    "path": "MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/manyagent_ant.xml.template",
    "content": "<mujoco model=\"ant\">\n  <size nconmax=\"200\"/>\n  <compiler angle=\"degree\" coordinate=\"local\" inertiafromgeom=\"true\"/>\n  <option integrator=\"RK4\" timestep=\"0.005\"/>\n  <custom>\n    <numeric data=\"0.0 0.0 0.55 1.0 0.0 0.0 0.0 0.0 1.0 0.0 -1.0 0.0 -1.0 0.0 1.0\" name=\"init_qpos\"/>\n  </custom>\n  <default>\n    <joint armature=\"1\" damping=\"1\" limited=\"true\"/>\n    <geom conaffinity=\"0\" condim=\"3\" density=\"5.0\" friction=\"1 0.5 0.5\" margin=\"0.01\" rgba=\"0.8 0.6 0.4 1\"/>\n  </default>\n  <asset>\n    <texture builtin=\"gradient\" height=\"100\" rgb1=\"1 1 1\" rgb2=\"0 0 0\" type=\"skybox\" width=\"100\"/>\n    <texture builtin=\"flat\" height=\"1278\" mark=\"cross\" markrgb=\"1 1 1\" name=\"texgeom\" random=\"0.01\" rgb1=\"1.0 1.0 1.0\" rgb2=\"0.8 0.6 0.4\" type=\"cube\" width=\"127\"/>\n    <texture builtin=\"checker\" height=\"100\" name=\"texplane\" rgb1=\"0 0 0\" rgb2=\"0.8 0.8 0.8\" type=\"2d\" width=\"100\"/>\n    <material name=\"MatPlane\" reflectance=\"0.5\" shininess=\"1\" specular=\"1\" texrepeat=\"60 60\" texture=\"texplane\"/>\n    <texture builtin=\"checker\" height=\"100\" name=\"texbox\" rgb1=\"#ff66ff\" rgb2=\"#ff66ff\" type=\"2d\" width=\"100\"/>\n    <material name=\"BoxMat\" reflectance=\"0.5\" shininess=\"1\" specular=\"1\" texrepeat=\"60 60\" texture=\"texbox\"/>\n    <material name=\"geom\" texture=\"texgeom\" texuniform=\"true\"/>\n  </asset>\n  <worldbody>\n    <light cutoff=\"100\" diffuse=\"1 1 1\" dir=\"-0 0 -1.3\" directional=\"true\" exponent=\"1\" pos=\"0 0 1.3\" specular=\".1 .1 .1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" material=\"MatPlane\" name=\"floor\" pos=\"30 0 0\" rgba=\"0.2 0.2 0.2 1\" size=\"70 25 40\" type=\"plane\"/>\n    <body name=\"torso_0\" pos=\"0 0 0.75\">\n      <camera name=\"track\" mode=\"trackcom\" pos=\"0 -10 -10\" xyaxes=\".8 .4 0 0 .4 .6\"/>\n      <!--<geom density=\"1000\" fromto=\"0 0 0 -1 0 0\" size=\"0.1\" type=\"capsule\"/>-->\n      <joint armature=\"0\" damping=\"0\" limited=\"false\" margin=\"0.01\" name=\"root\" pos=\"0 0 0\" type=\"free\"/>\n      <body name=\"front_left_leg_0\" pos=\"0 0 0\">\n        <geom fromto=\"0.0 0.0 0.0 0.2 0.2 0.0\" name=\"aux1_geom_0\" size=\"0.08\" type=\"capsule\"/>\n        <body name=\"aux1_0\" pos=\"0.2 0.2 0\">\n          <joint axis=\"0 0 1\" name=\"hip1_0\" pos=\"0.0 0.0 0.0\" range=\"-30 30\" type=\"hinge\"/>\n          <geom fromto=\"0.0 0.0 0.0 0.2 0.2 0.0\" name=\"left_leg_geom_0\" size=\"0.08\" type=\"capsule\"/>\n          <body pos=\"0.2 0.2 0\">\n            <joint axis=\"-1 1 0\" name=\"ankle1_0\" pos=\"0.0 0.0 0.0\" range=\"30 70\" type=\"hinge\"/>\n            <geom fromto=\"0.0 0.0 0.0 0.4 0.4 0.0\" name=\"left_ankle_geom_0\" size=\"0.08\" type=\"capsule\"/>\n          </body>\n        </body>\n      </body>\n      <body name=\"right_back_leg_0\" pos=\"0 0 0\">\n        <geom fromto=\"0.0 0.0 0.0 0.2 -0.2 0.0\" name=\"aux2_geom_0\" size=\"0.08\" type=\"capsule\"/>\n        <body name=\"aux2_0\" pos=\"0.2 -0.2 0\">\n          <joint axis=\"0 0 1\" name=\"hip2_0\" pos=\"0.0 0.0 0.0\" range=\"-30 30\" type=\"hinge\"/>\n          <geom fromto=\"0.0 0.0 0.0 0.2 -0.2 0.0\" name=\"rightback_leg_geom_0\" size=\"0.08\" type=\"capsule\"/>\n          <body pos=\"0.2 -0.2 0\">\n            <joint axis=\"1 1 0\" name=\"ankle2_0\" pos=\"0.0 0.0 0.0\" range=\"30 70\" type=\"hinge\"/>\n            <geom fromto=\"0.0 0.0 0.0 0.4 -0.4 0.0\" name=\"second_ankle_geom_0\" size=\"0.08\" type=\"capsule\"/>\n          </body>\n        </body>\n      </body>\n      {{ body }}\n    </body>\n    <body name='b1' pos=\"0 4.5 1\" euler='0 0 30'>\n      <freejoint name=\"b1_fj\"/>\n      <geom conaffinity=\"1\" condim=\"3\" name=\"wall1\" type=\"box\" density=\".000001\" size=\"20 0.01 .7\"    rgba=\"1 0.5 0.5 1\"/>\n    </body>\n    <body name='b2' pos=\"0 -4.5 1\" euler='0 0 30'>\n      <freejoint name=\"b2_fj\"/>\n      <geom conaffinity=\"1\" condim=\"3\" name=\"wall2\" type=\"box\" density=\".000001\" size=\"20 0.01 .7\"    rgba=\"1 0.5 0.5 1\"/>\n    </body>\n  </worldbody>\n  <actuator>\n    {{ actuators }}\n  </actuator>\n</mujoco>\n"
  },
  {
    "path": "MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/manyagent_ant__stage1.xml",
    "content": "<mujoco model=\"ant\">\n  <compiler angle=\"degree\" coordinate=\"local\" inertiafromgeom=\"true\"/>\n  <option integrator=\"RK4\" timestep=\"0.01\"/>\n  <custom>\n    <numeric data=\"0.0 0.0 0.55 1.0 0.0 0.0 0.0 0.0 1.0 0.0 -1.0 0.0 -1.0 0.0 1.0\" name=\"init_qpos\"/>\n  </custom>\n  <default>\n    <joint armature=\"1\" damping=\"1\" limited=\"true\"/>\n    <geom conaffinity=\"0\" condim=\"3\" density=\"5.0\" friction=\"1 0.5 0.5\" margin=\"0.01\" rgba=\"0.8 0.6 0.4 1\"/>\n  </default>\n  <asset>\n    <texture builtin=\"gradient\" height=\"100\" rgb1=\"1 1 1\" rgb2=\"0 0 0\" type=\"skybox\" width=\"100\"/>\n    <texture builtin=\"flat\" height=\"1278\" mark=\"cross\" markrgb=\"1 1 1\" name=\"texgeom\" random=\"0.01\" rgb1=\"0.8 0.6 0.4\" rgb2=\"0.8 0.6 0.4\" type=\"cube\" width=\"127\"/>\n    <texture builtin=\"checker\" height=\"100\" name=\"texplane\" rgb1=\"0 0 0\" rgb2=\"0.8 0.8 0.8\" type=\"2d\" width=\"100\"/>\n    <material name=\"MatPlane\" reflectance=\"0.5\" shininess=\"1\" specular=\"1\" texrepeat=\"60 60\" texture=\"texplane\"/>\n    <material name=\"geom\" texture=\"texgeom\" texuniform=\"true\"/>\n  </asset>\n  <worldbody>\n    <light cutoff=\"100\" diffuse=\"1 1 1\" dir=\"-0 0 -1.3\" directional=\"true\" exponent=\"1\" pos=\"0 0 1.3\" specular=\".1 .1 .1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" material=\"MatPlane\" name=\"floor\" pos=\"0 0 0\" rgba=\"0.8 0.9 0.8 1\" size=\"40 40 40\" type=\"plane\"/>\n    <body name=\"torso\" pos=\" 0 0.75\">\n      <camera name=\"track\" mode=\"trackcom\" pos=\"0 -3 0.3\" xyaxes=\"1 0 0 0 0 1\"/>\n      <!--<geom name=\"torso_geom\" pos=\"0 0 0\" size=\"0.25\" type=\"sphere\"/>-->\n      <joint armature=\"0\" damping=\"0\" limited=\"false\" margin=\"0.01\" name=\"root\" pos=\"0 0 0\" type=\"free\"/>\n      <body name=\"front_left_leg\" pos=\"0 0 0\">\n        <geom fromto=\"0.0 0.0 0.0 0.2 0.2 0.0\" name=\"aux_1_geom\" size=\"0.08\" type=\"capsule\"/>\n        <body name=\"aux_1\" pos=\"0.2 0.2 0\">\n          <joint axis=\"0 0 1\" name=\"hip_1\" pos=\"0.0 0.0 0.0\" range=\"-30 30\" type=\"hinge\"/>\n          <geom fromto=\"0.0 0.0 0.0 0.2 0.2 0.0\" name=\"left_leg_geom\" size=\"0.08\" type=\"capsule\"/>\n          <body pos=\"0.2 0.2 0\">\n            <joint axis=\"-1 1 0\" name=\"ankle_1\" pos=\"0.0 0.0 0.0\" range=\"30 70\" type=\"hinge\"/>\n            <geom fromto=\"0.0 0.0 0.0 0.4 0.4 0.0\" name=\"left_ankle_geom\" size=\"0.08\" type=\"capsule\"/>\n          </body>\n        </body>\n      </body>\n      <body name=\"right_back_leg\" pos=\"0 0 0\">\n        <geom fromto=\"0.0 0.0 0.0 0.2 -0.2 0.0\" name=\"aux_4_geom\" size=\"0.08\" type=\"capsule\"/>\n        <body name=\"aux_4\" pos=\"0.2 -0.2 0\">\n          <joint axis=\"0 0 1\" name=\"hip_4\" pos=\"0.0 0.0 0.0\" range=\"-30 30\" type=\"hinge\"/>\n          <geom fromto=\"0.0 0.0 0.0 0.2 -0.2 0.0\" name=\"rightback_leg_geom\" size=\"0.08\" type=\"capsule\"/>\n          <body pos=\"0.2 -0.2 0\">\n            <joint axis=\"1 1 0\" name=\"ankle_4\" pos=\"0.0 0.0 0.0\" range=\"30 70\" type=\"hinge\"/>\n            <geom fromto=\"0.0 0.0 0.0 0.4 -0.4 0.0\" name=\"fourth_ankle_geom\" size=\"0.08\" type=\"capsule\"/>\n          </body>\n        </body>\n      </body>\n      <body name=\"mid\" pos=\"0.0 0 0\">\n        <geom density=\"1000\" fromto=\"0 0 0 -1 0 0\" size=\"0.1\" type=\"capsule\"/>\n        <joint axis=\"0 0 1\" limited=\"true\" name=\"rot2\" pos=\"0 0 0\" range=\"-100 100\" type=\"hinge\"/>\n        <body name=\"front_right_leg\" pos=\"-1 0 0\">\n          <geom fromto=\"0.0 0.0 0.0 -0.2 0.2 0.0\" name=\"aux_2_geom\" size=\"0.08\" type=\"capsule\"/>\n          <body name=\"aux_2\" pos=\"-0.2 0.2 0\">\n            <joint axis=\"0 0 1\" name=\"hip_2\" pos=\"0.0 0.0 0.0\" range=\"-30 30\" type=\"hinge\"/>\n            <geom fromto=\"0.0 0.0 0.0 -0.2 0.2 0.0\" name=\"right_leg_geom\" size=\"0.08\" type=\"capsule\"/>\n            <body pos=\"-0.2 0.2 0\">\n              <joint axis=\"1 1 0\" name=\"ankle_2\" pos=\"0.0 0.0 0.0\" range=\"-70 -30\" type=\"hinge\"/>\n              <geom fromto=\"0.0 0.0 0.0 -0.4 0.4 0.0\" name=\"right_ankle_geom\" size=\"0.08\" type=\"capsule\"/>\n            </body>\n          </body>\n        </body>\n        <body name=\"back_leg\" pos=\"-1 0 0\">\n          <geom fromto=\"0.0 0.0 0.0 -0.2 -0.2 0.0\" name=\"aux_3_geom\" size=\"0.08\" type=\"capsule\"/>\n          <body name=\"aux_3\" pos=\"-0.2 -0.2 0\">\n            <joint axis=\"0 0 1\" name=\"hip_3\" pos=\"0.0 0.0 0.0\" range=\"-30 30\" type=\"hinge\"/>\n            <geom fromto=\"0.0 0.0 0.0 -0.2 -0.2 0.0\" name=\"back_leg_geom\" size=\"0.08\" type=\"capsule\"/>\n            <body pos=\"-0.2 -0.2 0\">\n              <joint axis=\"-1 1 0\" name=\"ankle_3\" pos=\"0.0 0.0 0.0\" range=\"-70 -30\" type=\"hinge\"/>\n              <geom fromto=\"0.0 0.0 0.0 -0.4 -0.4 0.0\" name=\"third_ankle_geom\" size=\"0.08\" type=\"capsule\"/>\n            </body>\n          </body>\n        </body>\n      </body>\n    </body>\n  </worldbody>\n  <actuator>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"hip_4\" gear=\"150\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"ankle_4\" gear=\"150\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"hip_1\" gear=\"150\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"ankle_1\" gear=\"150\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"hip_2\" gear=\"150\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"ankle_2\" gear=\"150\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"hip_3\" gear=\"150\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"ankle_3\" gear=\"150\"/>\n  </actuator>\n</mujoco>"
  },
  {
    "path": "MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/manyagent_swimmer.xml.template",
    "content": "<mujoco model=\"swimmer\">\n  <compiler angle=\"degree\" coordinate=\"local\" inertiafromgeom=\"true\"/>\n  <option collision=\"predefined\" density=\"4000\" integrator=\"RK4\" timestep=\"0.005\" viscosity=\"0.1\"/>\n  <default>\n    <geom conaffinity=\"1\" condim=\"1\" contype=\"1\" material=\"geom\" rgba=\"0.8 0.6 .4 1\"/>\n    <joint armature='0.1'  />\n  </default>\n  <asset>\n    <texture builtin=\"gradient\" height=\"100\" rgb1=\"1 1 1\" rgb2=\"0 0 0\" type=\"skybox\" width=\"100\"/>\n    <texture builtin=\"flat\" height=\"1278\" mark=\"cross\" markrgb=\"1 1 1\" name=\"texgeom\" random=\"0.01\" rgb1=\"0.8 0.6 0.4\" rgb2=\"0.8 0.6 0.4\" type=\"cube\" width=\"127\"/>\n    <texture builtin=\"checker\" height=\"100\" name=\"texplane\" rgb1=\"0 0 0\" rgb2=\"0.8 0.8 0.8\" type=\"2d\" width=\"100\"/>\n    <material name=\"MatPlane\" reflectance=\"0.5\" shininess=\"1\" specular=\"1\" texrepeat=\"30 30\" texture=\"texplane\"/>\n    <material name=\"geom\" texture=\"texgeom\" texuniform=\"true\"/>\n  </asset>\n  <worldbody>\n\n    <light cutoff=\"100\" diffuse=\"1 1 1\" dir=\"-0 0 -1.3\" directional=\"true\" exponent=\"1\" pos=\"0 0 1.3\" specular=\".1 .1 .1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" material=\"MatPlane\" name=\"floor\" pos=\"0 0 -0.1\" rgba=\"0.8 0.9 0.8 1\" size=\"40 40 0.1\" type=\"plane\"/>\n    <!--  ================= SWIMMER ================= /-->\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall1\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"0  -2.3 1.0\"  euler='0 0 0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall2\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"0   2.3 1.0\"  euler='0 0 0'  rgba=\"1 0.5 0.5 1\"/>\n\n    <body name=\"torso\" pos=\"0 0 0\">\n      <geom density=\"1000\" fromto=\"1.5 0 0 0.5 0 0\" size=\"0.1\" type=\"capsule\"/>\n      <joint axis=\"1 0 0\" name=\"slider1\" pos=\"0 0 0\" type=\"slide\"/>\n      <joint axis=\"0 1 0\" name=\"slider2\" pos=\"0 0 0\" type=\"slide\"/>\n      <joint axis=\"0 0 1\" name=\"rot\" pos=\"0 0 0\" type=\"hinge\"/>\n      <body name=\"mid0\" pos=\"0.5 0 0\">\n        <geom density=\"1000\" fromto=\"0 0 0 -1 0 0\" size=\"0.1\" type=\"capsule\"/>\n        <joint axis=\"0 0 1\" limited=\"true\" name=\"rot0\" pos=\"0 0 0\" range=\"-100 100\" type=\"hinge\"/>\n        {{ body }}\n      </body>\n    </body>\n  </worldbody>\n  <actuator>\n{{ actuators }}\n  </actuator>\n</mujoco>"
  },
  {
    "path": "MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/manyagent_swimmer__bckp2.xml",
    "content": "<mujoco model=\"swimmer\">\n  <compiler angle=\"degree\" coordinate=\"local\" inertiafromgeom=\"true\"/>\n  <option collision=\"predefined\" density=\"4000\" integrator=\"RK4\" timestep=\"0.01\" viscosity=\"0.1\"/>\n  <default>\n    <geom conaffinity=\"1\" condim=\"1\" contype=\"1\" material=\"geom\" rgba=\"0.8 0.6 .4 1\"/>\n    <joint armature='0.1'  />\n  </default>\n  <asset>\n    <texture builtin=\"gradient\" height=\"100\" rgb1=\"1 1 1\" rgb2=\"0 0 0\" type=\"skybox\" width=\"100\"/>\n    <texture builtin=\"flat\" height=\"1278\" mark=\"cross\" markrgb=\"1 1 1\" name=\"texgeom\" random=\"0.01\" rgb1=\"0.8 0.6 0.4\" rgb2=\"0.8 0.6 0.4\" type=\"cube\" width=\"127\"/>\n    <texture builtin=\"checker\" height=\"100\" name=\"texplane\" rgb1=\"0 0 0\" rgb2=\"0.8 0.8 0.8\" type=\"2d\" width=\"100\"/>\n    <material name=\"MatPlane\" reflectance=\"0.5\" shininess=\"1\" specular=\"1\" texrepeat=\"30 30\" texture=\"texplane\"/>\n    <material name=\"geom\" texture=\"texgeom\" texuniform=\"true\"/>\n  </asset>\n  <worldbody>\n    <light cutoff=\"100\" diffuse=\"1 1 1\" dir=\"-0 0 -1.3\" directional=\"true\" exponent=\"1\" pos=\"0 0 1.3\" specular=\".1 .1 .1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" material=\"MatPlane\" name=\"floor\" pos=\"0 0 -0.1\" rgba=\"0.8 0.9 0.8 1\" size=\"40 40 0.1\" type=\"plane\"/>\n    <!--  ================= SWIMMER ================= /-->\n    <body name=\"torso\" pos=\"0 0 0\">\n      <geom density=\"1000\" fromto=\"1.5 0 0 0.5 0 0\" size=\"0.1\" type=\"capsule\"/>\n      <joint axis=\"1 0 0\" name=\"slider1\" pos=\"0 0 0\" type=\"slide\"/>\n      <joint axis=\"0 1 0\" name=\"slider2\" pos=\"0 0 0\" type=\"slide\"/>\n      <joint axis=\"0 0 1\" name=\"rot\" pos=\"0 0 0\" type=\"hinge\"/>\n      <body name=\"mid1\" pos=\"0.5 0 0\">\n        <geom density=\"1000\" fromto=\"0 0 0 -1 0 0\" size=\"0.1\" type=\"capsule\"/>\n        <joint axis=\"0 0 1\" limited=\"true\" name=\"rot0\" pos=\"0 0 0\" range=\"-100 100\" type=\"hinge\"/>\n        <body name=\"mid2\" pos=\"-1 0 0\">\n          <geom density=\"1000\" fromto=\"0 0 0 -1 0 0\" size=\"0.1\" type=\"capsule\"/>\n          <joint axis=\"0 0 -1\" limited=\"true\" name=\"rot1\" pos=\"0 0 0\" range=\"-100 100\" type=\"hinge\"/>\n          <body name=\"mid3\" pos=\"-1 0 0\">\n            <geom density=\"1000\" fromto=\"0 0 0 -1 0 0\" size=\"0.1\" type=\"capsule\"/>\n            <joint axis=\"0 0 1\" limited=\"true\" name=\"rot2\" pos=\"0 0 0\" range=\"-100 100\" type=\"hinge\"/>\n            <body name=\"back\" pos=\"-1 0 0\">\n              <geom density=\"1000\" fromto=\"0 0 0 -1 0 0\" size=\"0.1\" type=\"capsule\"/>\n              <joint axis=\"0 0 1\" limited=\"true\" name=\"rot3\" pos=\"0 0 0\" range=\"-100 100\" type=\"hinge\"/>\n            </body>\n          </body>\n        </body>\n      </body>\n    </body>\n  </worldbody>\n  <actuator>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1 1\" gear=\"150.0\" joint=\"rot0\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1 1\" gear=\"150.0\" joint=\"rot1\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1 1\" gear=\"150.0\" joint=\"rot2\"/>\n     <motor ctrllimited=\"true\" ctrlrange=\"-1 1\" gear=\"150.0\" joint=\"rot3\"/>\n  </actuator>\n</mujoco>"
  },
  {
    "path": "MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/manyagent_swimmer_bckp.xml",
    "content": "<mujoco model=\"swimmer\">\n  <compiler angle=\"degree\" coordinate=\"local\" inertiafromgeom=\"true\"/>\n  <option collision=\"predefined\" density=\"4000\" integrator=\"RK4\" timestep=\"0.01\" viscosity=\"0.1\"/>\n  <default>\n    <geom conaffinity=\"1\" condim=\"1\" contype=\"1\" material=\"geom\" rgba=\"0.8 0.6 .4 1\"/>\n    <joint armature='0.1'  />\n  </default>\n  <asset>\n    <texture builtin=\"gradient\" height=\"100\" rgb1=\"1 1 1\" rgb2=\"0 0 0\" type=\"skybox\" width=\"100\"/>\n    <texture builtin=\"flat\" height=\"1278\" mark=\"cross\" markrgb=\"1 1 1\" name=\"texgeom\" random=\"0.01\" rgb1=\"0.8 0.6 0.4\" rgb2=\"0.8 0.6 0.4\" type=\"cube\" width=\"127\"/>\n    <texture builtin=\"checker\" height=\"100\" name=\"texplane\" rgb1=\"0 0 0\" rgb2=\"0.8 0.8 0.8\" type=\"2d\" width=\"100\"/>\n    <material name=\"MatPlane\" reflectance=\"0.5\" shininess=\"1\" specular=\"1\" texrepeat=\"30 30\" texture=\"texplane\"/>\n    <material name=\"geom\" texture=\"texgeom\" texuniform=\"true\"/>\n  </asset>\n  <worldbody>\n    <light cutoff=\"100\" diffuse=\"1 1 1\" dir=\"-0 0 -1.3\" directional=\"true\" exponent=\"1\" pos=\"0 0 1.3\" specular=\".1 .1 .1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" material=\"MatPlane\" name=\"floor\" pos=\"0 0 -0.1\" rgba=\"0.8 0.9 0.8 1\" size=\"40 40 0.1\" type=\"plane\"/>\n    <!--  ================= SWIMMER ================= /-->\n    <body name=\"torso\" pos=\"0 0 0\">\n      <geom density=\"1000\" fromto=\"1.5 0 0 0.5 0 0\" size=\"0.1\" type=\"capsule\"/>\n      <joint axis=\"1 0 0\" name=\"slider1\" pos=\"0 0 0\" type=\"slide\"/>\n      <joint axis=\"0 1 0\" name=\"slider2\" pos=\"0 0 0\" type=\"slide\"/>\n      <joint axis=\"0 0 1\" name=\"rot\" pos=\"0 0 0\" type=\"hinge\"/>\n      <body name=\"mid1\" pos=\"0.5 0 0\">\n        <geom density=\"1000\" fromto=\"0 0 0 -1 0 0\" size=\"0.1\" type=\"capsule\"/>\n        <joint axis=\"0 0 1\" limited=\"true\" name=\"rot0\" pos=\"0 0 0\" range=\"-100 100\" type=\"hinge\"/>\n        <body name=\"mid2\" pos=\"-1 0 0\">\n          <geom density=\"1000\" fromto=\"0 0 0 -1 0 0\" size=\"0.1\" type=\"capsule\"/>\n          <joint axis=\"0 0 -1\" limited=\"true\" name=\"rot1\" pos=\"0 0 0\" range=\"-100 100\" type=\"hinge\"/>\n          <body name=\"back\" pos=\"-1 0 0\">\n            <geom density=\"1000\" fromto=\"0 0 0 -1 0 0\" size=\"0.1\" type=\"capsule\"/>\n            <joint axis=\"0 0 1\" limited=\"true\" name=\"rot2\" pos=\"0 0 0\" range=\"-100 100\" type=\"hinge\"/>\n          </body>\n        </body>\n      </body>\n    </body>\n  </worldbody>\n  <actuator>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1 1\" gear=\"150.0\" joint=\"rot0\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1 1\" gear=\"150.0\" joint=\"rot1\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1 1\" gear=\"150.0\" joint=\"rot2\"/>\n  </actuator>\n</mujoco>"
  },
  {
    "path": "MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/coupled_half_cheetah.py",
    "content": "import numpy as np\nfrom gym import utils\nfrom gym.envs.mujoco import mujoco_env\nfrom macpo.envs.safety_ma_mujoco.safety_multiagent_mujoco import mujoco_env\nimport os\nimport mujoco_py as mjp\nfrom gym import error, spaces\n\nclass CoupledHalfCheetah(mujoco_env.MujocoEnv, utils.EzPickle):\n    def __init__(self, **kwargs):\n        mujoco_env.MujocoEnv.__init__(self, os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets', 'coupled_half_cheetah.xml'), 5)\n        utils.EzPickle.__init__(self)\n\n    def step(self, action):\n\n        #ADDED\n        # xposbefore = self.sim.data.qpos[1]\n        # t = self.data.time\n        # wall_act = .02 * np.sin(t / 3) ** 2 - .004\n        # mjp.functions.mj_rnePostConstraint(self.sim.model,\n        #                                    self.sim.data)  #### calc contacts, this is a mujoco py version mismatch issue with mujoco200\n        # action_p_wall = np.concatenate((np.squeeze(action), [wall_act]))\n        # self.do_simulation(action_p_wall, self.frame_skip)\n        # xposafter = self.sim.data.qpos[1]\n        # wallpos = self.data.get_geom_xpos(\"obj_geom\")[0]\n        # wallvel = self.data.get_body_xvelp(\"obj1\")[0]\n        # xdist = wallpos - xposafter\n        # obj_cost = int(np.abs(xdist) < 2)\n        # if obj_cost > 0:\n        #     self.model.geom_rgba[9] = [1.0, 0, 0, 1.0]\n        # else:\n        #     self.model.geom_rgba[9] = [1.0, 0.5, 0.5, .8]\n        # ob = self._get_obs()\n        # reward_ctrl = - 0.1 * np.square(action).sum()\n        # reward_run = (xposafter - xposbefore) / self.dt\n        # reward = reward_ctrl + reward_run\n        # done = False\n\n\n\n\n        # xposbefore1 = self.sim.data.qpos[0]\n        # xposbefore2 = self.sim.data.qpos[len(self.sim.data.qpos) // 2]\n        # print(\"self.sim.data.qpos\", self.sim.data.qpos)\n\n        xposbefore1 = self.get_body_com(\"torso\")[0]\n        xposbefore2 = self.get_body_com(\"torso2\")[0]\n\n        yposbefore1 = self.get_body_com(\"torso\")[1]\n        yposbefore2 = self.get_body_com(\"torso2\")[1]\n\n        # ADDED\n        t = self.data.time\n        wall_act = .02 * np.sin(t / 3) ** 2 - .004\n        mjp.functions.mj_rnePostConstraint(self.sim.model,\n                                           self.sim.data)  #### calc contacts, this is a mujoco py version mismatch issue with mujoco200\n        action_p_wall = np.concatenate((np.squeeze(action), [wall_act]))\n        # print(\"action_p_wall\", np.array(action_p_wall).shape)\n        # print(\"action\", np.array(action).shape)\n        # print(\"self.frame_skip\", self.frame_skip)\n        self.do_simulation(action_p_wall, self.frame_skip)\n\n        # self.do_simulation(action, self.frame_skip)\n        # xposafter1 = self.sim.data.qpos[0]\n        # xposafter2 = self.sim.data.qpos[len(self.sim.data.qpos)//2]\n        xposafter1 = self.get_body_com(\"torso\")[0]\n        xposafter2 = self.get_body_com(\"torso2\")[0]\n\n        yposafter1 = self.get_body_com(\"torso\")[1]\n        yposafter2 = self.get_body_com(\"torso2\")[1]\n\n        # ADDED\n        wallpos = self.data.get_geom_xpos(\"obj_geom\")[0]\n        # wallpos1 = self.data.get_geom_xpos(\"obj_geom1\")[0]\n        y_wallpos1 = self.data.get_geom_xpos(\"wall1\")[1]\n        y_wallpos2 = self.data.get_geom_xpos(\"wall2\")[1]\n        # print(\"x_wallpos1 = self.data.get_geom_xpos\", x_wallpos1)\n        # print(\"x_wallpos2 = self.data.get_geom_xpos\", x_wallpos2)\n        wallvel = self.data.get_body_xvelp(\"obj1\")[0]\n        xdist = np.abs(wallpos - xposafter1)  #+ np.abs(wallpos - xposafter2) #+ (wallpos1 - xposafter1)  + (wallpos1 - xposafter2)\n        obj_cost = 0 # or int(np.abs(wallpos1 - xposafter2) < 5) or int(np.abs(wallpos1 - xposafter2) < 5)\\\n        #\n        if int(np.abs(wallpos - xposafter1) < 5) or int(np.abs(wallpos - xposafter2) < 5) \\\n                or int(np.abs(y_wallpos1 - yposafter1) < 5) or int(np.abs(y_wallpos2 - yposafter2) < 5):\n            obj_cost = 1\n\n        # obj_cost = int(np.abs(xdist) < 5)\n        # print(\"xposbefore1\", xposbefore1)\n        # print(\"xposbefore2\", xposbefore2)\n        # print(\"yposafter1\", yposafter1)\n        # print(\"yposafter2\", yposafter2)\n        # print(\"np.abs(x_wallpos1 - yposafter1)\", np.abs(x_wallpos1 - yposafter1))\n        # print(\"xposafter1\", xposafter1)\n        # print(\"xposafter2\", xposafter2)\n        # print(\"wallpos\", wallpos)\n        # print(\"wallpos1\", wallpos1)\n        # print(\"xdist\", xdist)\n        # print(\"(wallpos1 - xposafter2)\", (wallpos1 - xposafter2))\n        # print(\"(wallpos - xposafter1)\", (wallpos - xposafter1))\n        # print(\"(wallpos - xposafter2)\", (wallpos - xposafter2))\n        if obj_cost > 0:\n            self.model.geom_rgba[9] = [1.0, 0, 0, 1.0]\n        else:\n            self.model.geom_rgba[9] = [1.0, 0.5, 0.5, .8]\n        ob = self._get_obs()\n\n        ob = self._get_obs()\n        reward_ctrl1 = - 0.1 * np.square(action[0:len(action)//2]).sum()\n        reward_ctrl2 = - 0.1 * np.square(action[len(action)//2:]).sum()\n        reward_run1 = (xposafter1 - xposbefore1)/self.dt\n        reward_run2 = (xposafter2 - xposbefore2) / self.dt\n        reward = (reward_ctrl1 + reward_ctrl2)/2.0 + (reward_run1 + reward_run2)/2.0\n        done = False\n        return ob, reward, done, dict(cost=obj_cost, reward_run1=reward_run1, reward_ctrl1=reward_ctrl1,\n                                      reward_run2=reward_run2, reward_ctrl2=reward_ctrl2)\n\n    def _get_obs(self):\n\n        #AADED\n        wallvel = self.data.get_body_xvelp(\"obj1\")[0]\n        wall_f = .02 * np.sin(self.data.time / 3) ** 2 - .004\n        xdist = (self.data.get_geom_xpos(\"obj_geom\")[0] - self.sim.data.qpos[1]) / 10\n\n        return np.concatenate([\n            self.sim.data.qpos.flat[2:],\n            self.sim.data.qvel.flat[1:],\n            [wallvel],\n            [wall_f],\n            np.clip([xdist], -5, 5),\n        ])\n\n        # return np.concatenate([\n        #     self.sim.data.qpos.flat[1:],\n        #     self.sim.data.qvel.flat,\n        # ])\n\n    def reset_model(self):\n        qpos = self.init_qpos + self.np_random.uniform(low=-.1, high=.1, size=self.model.nq)\n        qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1\n        self.set_state(qpos, qvel)\n        return self._get_obs()\n\n    def viewer_setup(self):\n        self.viewer.cam.distance = self.model.stat.extent * 0.5\n\n    def get_env_info(self):\n        return {\"episode_limit\": self.episode_limit}\n\n    def _set_action_space(self):\n        bounds = self.model.actuator_ctrlrange.copy().astype(np.float32)\n        low, high = bounds.T\n        low, high = low[:-1], high[:-1]\n        self.action_space = spaces.Box(low=low, high=high, dtype=np.float32)\n        return self.action_space"
  },
  {
    "path": "MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/half_cheetah.py",
    "content": "import numpy as np\nfrom gym import utils\n# from mujoco_safety_gym.envs import mujoco_env\n# from gym.envs.mujoco import mujoco_env\nfrom macpo.envs.safety_ma_mujoco.safety_multiagent_mujoco import mujoco_env\nimport mujoco_py as mjp\nfrom gym import error, spaces\n\n\nclass HalfCheetahEnv(mujoco_env.MujocoEnv, utils.EzPickle):\n    def __init__(self, **kwargs):\n        # print(\"half_aaaa\")\n        mujoco_env.MujocoEnv.__init__(self, 'half_cheetah.xml', 5)\n        utils.EzPickle.__init__(self)\n\n    def step(self, action):\n        xposbefore = self.sim.data.qpos[1]\n\n        t = self.data.time\n        wall_act = .02 * np.sin(t / 3) ** 2 - .004\n        mjp.functions.mj_rnePostConstraint(self.sim.model,\n                                           self.sim.data)  #### calc contacts, this is a mujoco py version mismatch issue with mujoco200\n        action_p_wall = np.concatenate((np.squeeze(action), [wall_act]))\n\n        self.do_simulation(action_p_wall, self.frame_skip)\n        xposafter = self.sim.data.qpos[1]\n\n        wallpos = self.data.get_geom_xpos(\"obj_geom\")[0]\n        wallvel = self.data.get_body_xvelp(\"obj1\")[0]\n        xdist = wallpos - xposafter\n        # print(\"wallpos\", wallpos)\n        # print(\"xposafter\", xposafter)\n        # print(\"xdist\", xdist)\n        obj_cost = int(np.abs(xdist) < 9)\n        if obj_cost > 0:\n            self.model.geom_rgba[9] = [1.0, 0, 0, 1.0]\n        else:\n            self.model.geom_rgba[9] = [1.0, 0.5, 0.5, .8]\n        ob = self._get_obs()\n        reward_ctrl = - 0.1 * np.square(action).sum()\n        reward_run = (xposafter - xposbefore) / self.dt\n        reward = reward_ctrl + reward_run\n        cost = obj_cost\n        # print(\"cost1\", cost)\n        done = False\n        return ob, reward, done, dict(cost=cost, reward_run=reward_run, reward_ctrl=reward_ctrl)\n\n    def _get_obs(self):\n        wallvel = self.data.get_body_xvelp(\"obj1\")[0]\n        wall_f = .02 * np.sin(self.data.time / 3) ** 2 - .004\n        xdist = (self.data.get_geom_xpos(\"obj_geom\")[0] - self.sim.data.qpos[1]) / 10\n\n        return np.concatenate([\n            self.sim.data.qpos.flat[2:],\n            self.sim.data.qvel.flat[1:],\n            [wallvel],\n            [wall_f],\n            np.clip([xdist], -5, 5),\n        ])\n\n    def reset_model(self):\n        qpos = self.init_qpos + self.np_random.uniform(low=-.1, high=.1, size=self.model.nq)\n        qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1\n        self.set_state(qpos, qvel)\n        return self._get_obs()\n\n    def viewer_setup(self):\n        self.viewer.cam.distance = self.model.stat.extent * 0.5\n\n    def _set_action_space(self):\n        bounds = self.model.actuator_ctrlrange.copy().astype(np.float32)\n        low, high = bounds.T\n        low, high = low[:-1], high[:-1]\n        self.action_space = spaces.Box(low=low, high=high, dtype=np.float32)\n        return self.action_space"
  },
  {
    "path": "MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/hopper.py",
    "content": "import numpy as np\nfrom macpo.envs.safety_ma_mujoco.safety_multiagent_mujoco import mujoco_env\nfrom gym import utils\nimport mujoco_py as mjp\n\n\nclass HopperEnv(mujoco_env.MujocoEnv, utils.EzPickle):\n    def __init__(self, **kwargs):\n        mujoco_env.MujocoEnv.__init__(self, 'hopper.xml', 4)\n        utils.EzPickle.__init__(self)\n        self.last_mocx = 5  #### vel readings are super noisy for mocap weld\n\n    def step(self, a):\n        posbefore = self.sim.data.qpos[3]\n        t = self.data.time\n        pos = (t + np.sin(t)) + 3\n        self.data.set_mocap_pos('mocap1', [pos, 0, 0.5])\n\n        mjp.functions.mj_rnePostConstraint(self.sim.model,\n                                           self.sim.data)  #### calc contacts, this is a mujoco py version mismatch issue with mujoco200\n        self.do_simulation(a, self.frame_skip)\n        posafter, height, ang = self.sim.data.qpos[3:6]\n        alive_bonus = 1.0\n\n        mocapx = self.sim.data.qpos[0]\n        xdist = mocapx - posafter\n        cost = int(np.abs(xdist) < 1)\n\n        reward = (posafter - posbefore) / self.dt\n        reward += alive_bonus\n        reward -= 1e-3 * np.square(a).sum()\n        s = self.state_vector()\n        # done = not (np.isfinite(s).all() and (np.abs(s[5:]) < 100).all() and\n        #             (height > .7) and (abs(ang) < .2))\n\n        done = not (\n                np.isfinite(s).all()\n                and (np.abs(s[2:]) < 100).all()\n                and (height > 0.7)\n                and (abs(ang) < 0.2)\n        )\n        print(\"np.isfinite(s).all()\", np.isfinite(s).all())\n        print(\"np.abs(s[5:])\", (np.abs(s[2:]) < 100).all())\n        print(\"height\", (height > 0.7))\n        print(\"abs(ang) \", (abs(ang) < 0.2))\n\n        ob = self._get_obs()\n        return ob, reward, done, dict(cost=cost)\n\n    def _get_obs(self):\n        x = self.sim.data.qpos[3]\n        mocapx = self.sim.data.qpos[0]\n        mocvel = 1 + np.cos(self.data.time)\n        mocacc = -np.sin(self.data.time)\n        return np.concatenate([\n            self.sim.data.qpos.flat[4:],\n            np.clip(self.sim.data.qvel[3:].flat, -10, 10),\n            [mocvel],\n            [mocacc],\n            [mocapx - x],\n        ])\n\n    def reset_model(self):\n        qpos = self.init_qpos + self.np_random.uniform(low=-.005, high=.005, size=self.model.nq)\n        qvel = self.init_qvel + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv)\n        self.set_state(qpos, qvel)\n        return self._get_obs()\n\n    def last_mocap_x(self):\n        return self.last_mocx\n\n    def viewer_setup(self):\n        self.viewer.cam.trackbodyid = 2\n        self.viewer.cam.distance = self.model.stat.extent * 0.75\n        self.viewer.cam.lookat[2] = 1.15\n        self.viewer.cam.elevation = -20"
  },
  {
    "path": "MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/humanoid.py",
    "content": "import numpy as np\n# from mujoco_safety_gym.envs import mujoco_env\nfrom macpo.envs.safety_ma_mujoco.safety_multiagent_mujoco import mujoco_env\nfrom gym import utils\nimport mujoco_py as mjp\n\n\ndef mass_center(model, sim):\n    mass = np.expand_dims(model.body_mass, 1)\n    xpos = sim.data.xipos\n    return (np.sum(mass * xpos, 0) / np.sum(mass))[0]\n\n\nclass HumanoidEnv(mujoco_env.MujocoEnv, utils.EzPickle):\n    def __init__(self, **kwargs):\n        mujoco_env.MujocoEnv.__init__(self, 'humanoid.xml', 5)\n        utils.EzPickle.__init__(self)\n\n    def _get_obs(self):\n        data = self.sim.data\n        x = data.qpos.flat[0]\n        y = data.qpos.flat[1]\n        if x < 20:\n            y_off = y - x * np.tan(30 / 360 * 2 * np.pi)\n        elif x > 20 and x < 60:\n            y_off = y + (x - 40) * np.tan(30 / 360 * 2 * np.pi)\n        elif x > 60 and x < 100:\n            y_off = y - (x - 80) * np.tan(30 / 360 * 2 * np.pi)\n        else:\n            y_off = y - 20 * np.tan(30 / 360 * 2 * np.pi)\n\n        return np.concatenate([data.qpos.flat[2:-42],\n                               data.qvel.flat[:-36],\n                               [x / 5],\n                               [y_off]])\n\n        # return np.concatenate([data.qpos.flat[2:],\n        #                        data.qvel.flat,\n        #                        data.cinert.flat,\n        #                        data.cvel.flat,\n        #                        data.qfrc_actuator.flat,\n        #                        data.cfrc_ext.flat])\n\n    def step(self, a):\n        pos_before = mass_center(self.model, self.sim)\n        self.do_simulation(a, self.frame_skip)\n        mjp.functions.mj_rnePostConstraint(self.sim.model,\n                                           self.sim.data)  #### calc contacts, this is a mujoco py version mismatch issue with mujoco200\n        pos_after = mass_center(self.model, self.sim)\n        alive_bonus = 5.0\n        data = self.sim.data\n        lin_vel_cost = 1.25 * (pos_after - pos_before) / self.dt\n        quad_ctrl_cost = 0.1 * np.square(data.ctrl).sum()\n        quad_impact_cost = .5e-6 * np.square(data.cfrc_ext).sum()\n        quad_impact_cost = min(quad_impact_cost, 10)\n        reward = lin_vel_cost - quad_ctrl_cost - quad_impact_cost + alive_bonus\n\n        yposafter = self.get_body_com(\"torso\")[1]\n        ywall = np.array([-2.3, 2.3])\n        if pos_after < 20:\n            y_walldist = yposafter - pos_after * np.tan(30 / 360 * 2 * np.pi) + ywall\n        elif pos_after > 20 and pos_after < 60:\n            y_walldist = yposafter + (pos_after - 40) * np.tan(30 / 360 * 2 * np.pi) - ywall\n        elif pos_after > 60 and pos_after < 100:\n            y_walldist = yposafter - (pos_after - 80) * np.tan(30 / 360 * 2 * np.pi) + ywall\n        else:\n            y_walldist = yposafter - 20 * np.tan(30 / 360 * 2 * np.pi) + ywall\n        obj_cost = (abs(y_walldist) < 1.8).any() * 1.0\n\n        qpos = self.sim.data.qpos\n        done = bool((qpos[2] < 1.0) or (qpos[2] > 2.0))\n\n        done_cost = done * 1.0\n        cost = np.clip(obj_cost + done_cost, 0, 1)\n\n        return self._get_obs(), reward, done, dict(reward_linvel=lin_vel_cost,\n                                                   reward_quadctrl=-quad_ctrl_cost,\n                                                   reward_alive=alive_bonus,\n                                                   reward_impact=-quad_impact_cost,\n                                                   cost_obj=obj_cost,\n                                                   cost_done=done_cost,\n                                                   cost=cost,\n                                                   )\n\n    def reset_model(self):\n        c = 0.01\n        # self.set_state(\n        #     self.init_qpos + self.np_random.uniform(low=-c, high=c, size=self.model.nq),\n        #     self.init_qvel + self.np_random.uniform(low=-c, high=c, size=self.model.nv,)\n        # )\n        # return self._get_obs()\n        qpos = self.init_qpos + self.np_random.uniform(low=-c, high=c, size=self.model.nq)\n        qpos[-42:] = self.init_qpos[-42:]\n        qvel = self.init_qvel + self.np_random.uniform(low=-c, high=c, size=self.model.nv, )\n        qvel[-36:] = self.init_qvel[-36:]\n        self.set_state(qpos, qvel)\n        return self._get_obs()\n\n    def viewer_setup(self):\n        self.viewer.cam.trackbodyid = 1\n        self.viewer.cam.distance = self.model.stat.extent * 1.0\n        self.viewer.cam.lookat[2] = 2.0\n        self.viewer.cam.elevation = -20"
  },
  {
    "path": "MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/manyagent_ant.py",
    "content": "import numpy as np\nfrom gym import utils\nfrom gym.envs.mujoco import mujoco_env\nfrom jinja2 import Template\n\nimport mujoco_py as mjp\n\nimport os\n\nclass ManyAgentAntEnv(mujoco_env.MujocoEnv, utils.EzPickle):\n    def __init__(self, **kwargs):\n        # Return Flag: Distinguish the mujoco and Wrapper env.\n        self.rflag = 0\n        agent_conf = kwargs.get(\"agent_conf\")\n        n_agents = int(agent_conf.split(\"x\")[0])\n        n_segs_per_agents = int(agent_conf.split(\"x\")[1])\n        n_segs = n_agents * n_segs_per_agents\n\n        # Check whether asset file exists already, otherwise create it\n        asset_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets',\n                                                  'manyagent_ant_{}_agents_each_{}_segments.auto.xml'.format(n_agents,\n                                                                                                                 n_segs_per_agents))\n        # if not os.path.exists(asset_path):\n        # print(\"Auto-Generating Manyagent Ant asset with {} segments at {}.\".format(n_segs, asset_path))\n        self._generate_asset(n_segs=n_segs, asset_path=asset_path)\n\n        #asset_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets',git p\n        #                          'manyagent_swimmer.xml')\n\n        mujoco_env.MujocoEnv.__init__(self, asset_path, 4)\n        utils.EzPickle.__init__(self)\n\n    def _generate_asset(self, n_segs, asset_path):\n        template_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets',\n                                                  'manyagent_ant.xml.template')\n        with open(template_path, \"r\") as f:\n            t = Template(f.read())\n        body_str_template = \"\"\"\n        <body name=\"torso_{:d}\" pos=\"-1 0 0\">\n           <!--<joint axis=\"0 1 0\" name=\"nnn_{:d}\" pos=\"0.0 0.0 0.0\" range=\"-1 1\" type=\"hinge\"/>-->\n            <geom density=\"100\" fromto=\"1 0 0 0 0 0\" size=\"0.1\" type=\"capsule\"/>\n            <body name=\"front_right_leg_{:d}\" pos=\"0 0 0\">\n              <geom fromto=\"0.0 0.0 0.0 0.0 0.2 0.0\" name=\"aux1_geom_{:d}\" size=\"0.08\" type=\"capsule\"/>\n              <body name=\"aux_2_{:d}\" pos=\"0.0 0.2 0\">\n                <joint axis=\"0 0 1\" name=\"hip1_{:d}\" pos=\"0.0 0.0 0.0\" range=\"-30 30\" type=\"hinge\"/>\n                <geom fromto=\"0.0 0.0 0.0 -0.2 0.2 0.0\" name=\"right_leg_geom_{:d}\" size=\"0.08\" type=\"capsule\"/>\n                <body pos=\"-0.2 0.2 0\">\n                  <joint axis=\"1 1 0\" name=\"ankle1_{:d}\" pos=\"0.0 0.0 0.0\" range=\"-70 -30\" type=\"hinge\"/>\n                  <geom fromto=\"0.0 0.0 0.0 -0.4 0.4 0.0\" name=\"right_ankle_geom_{:d}\" size=\"0.08\" type=\"capsule\"/>\n                </body>\n              </body>\n            </body>\n            <body name=\"back_leg_{:d}\" pos=\"0 0 0\">\n              <geom fromto=\"0.0 0.0 0.0 0.0 -0.2 0.0\" name=\"aux2_geom_{:d}\" size=\"0.08\" type=\"capsule\"/>\n              <body name=\"aux2_{:d}\" pos=\"0.0 -0.2 0\">\n                <joint axis=\"0 0 1\" name=\"hip2_{:d}\" pos=\"0.0 0.0 0.0\" range=\"-30 30\" type=\"hinge\"/>\n                <geom fromto=\"0.0 0.0 0.0 -0.2 -0.2 0.0\" name=\"back_leg_geom_{:d}\" size=\"0.08\" type=\"capsule\"/>\n                <body pos=\"-0.2 -0.2 0\">\n                  <joint axis=\"-1 1 0\" name=\"ankle2_{:d}\" pos=\"0.0 0.0 0.0\" range=\"-70 -30\" type=\"hinge\"/>\n                  <geom fromto=\"0.0 0.0 0.0 -0.4 -0.4 0.0\" name=\"third_ankle_geom_{:d}\" size=\"0.08\" type=\"capsule\"/>\n                </body>\n              </body>\n            </body>\n        \"\"\"\n\n        body_close_str_template =\"</body>\\n\"\n        actuator_str_template = \"\"\"\\t     <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"hip1_{:d}\" gear=\"150\"/>\n                                          <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"ankle1_{:d}\" gear=\"150\"/>\n                                          <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"hip2_{:d}\" gear=\"150\"/>\n                                          <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"ankle2_{:d}\" gear=\"150\"/>\\n\"\"\"\n\n        body_str = \"\"\n        for i in range(1,n_segs):\n            body_str += body_str_template.format(*([i]*16))\n        body_str += body_close_str_template*(n_segs-1)\n\n        actuator_str = \"\"\n        for i in range(n_segs):\n            actuator_str += actuator_str_template.format(*([i]*8))\n\n        rt = t.render(body=body_str, actuators=actuator_str)\n        with open(asset_path, \"w\") as f:\n            f.write(rt)\n        pass\n\n    def step(self, a):\n        xposbefore = self.get_body_com(\"torso_0\")[0]\n        self.do_simulation(a, self.frame_skip)\n\n        #ADDED\n        mjp.functions.mj_rnePostConstraint(self.sim.model,\n                                           self.sim.data)  #### calc contacts, this is a mujoco py version mismatch issue with mujoco200\n\n        xposafter = self.get_body_com(\"torso_0\")[0]\n        forward_reward = (xposafter - xposbefore)/self.dt\n        ctrl_cost = .5 * np.square(a).sum()\n        contact_cost = 0.5 * 1e-3 * np.sum(\n            np.square(np.clip(self.sim.data.cfrc_ext, -1, 1)))\n        survive_reward = 1.0\n\n        ### ADDED safety stuff\n        yposafter = self.get_body_com(\"torso_0\")[1]\n        ywall = np.array([-4.5, 4.5])\n        if xposafter < 20:\n            y_walldist = yposafter - xposafter * np.tan(30 / 360 * 2 * np.pi) + ywall\n        elif xposafter>20 and xposafter<60:\n            y_walldist = yposafter + (xposafter-40)*np.tan(30/360*2*np.pi) - ywall\n        elif xposafter>60 and xposafter<100:\n            y_walldist = yposafter - (xposafter-80)*np.tan(30/360*2*np.pi) + ywall\n        else:\n            y_walldist = yposafter - 20*np.tan(30/360*2*np.pi) + ywall\n        obj_cost = (abs(y_walldist) < 1.8).any() * 1.0\n\n        reward = forward_reward - ctrl_cost - contact_cost + survive_reward\n\n        #### ADDED\n        body_quat = self.data.get_body_xquat('torso_0')\n        z_rot = 1-2*(body_quat[1]**2+body_quat[2]**2)  ### normally xx-rotation, not sure what axes mujoco uses\n\n        state = self.state_vector()\n        notdone = np.isfinite(state).all() \\\n            and state[2] >= 0.2 and state[2] <= 1.0\\\n            and z_rot>=-0.7 #ADDED\n\n        done = not notdone\n        # print(\"done\", done)\n        \n        #ADDED\n        done_cost = done * 1.0\n        cost = np.clip(obj_cost + done_cost, 0, 1)\n        # print(\"reward\", reward)\n        # print(\"cost-manyagent_ant.py\",cost)\n        ob = self._get_obs()\n        if self.rflag == 0:\n            self.rflag += 1\n            return ob, reward, done, dict(\n                cost=cost,\n                reward_forward=forward_reward, #\n                reward_ctrl=-ctrl_cost,\n                reward_contact=-contact_cost,\n                reward_survive=survive_reward,\n                cost_obj=obj_cost,  # ADDED\n                cost_done=done_cost,  # ADDED\n            )\n        else:\n            return ob, reward, done, dict(\n                cost=cost,\n                reward_forward=forward_reward, # cost = cost,\n                reward_ctrl=-ctrl_cost,\n                reward_contact=-contact_cost,\n                reward_survive=survive_reward,\n                cost_obj=obj_cost, #ADDED\n                cost_done=done_cost, #ADDED\n            )\n\n    def _get_obs(self):\n        x = self.sim.data.qpos.flat[0] #ADDED\n        y = self.sim.data.qpos.flat[1] #ADDED\n\n        #ADDED\n        if x<20:\n            y_off = y - x*np.tan(30/360*2*np.pi)\n        elif x>20 and x<60:\n            y_off = y + (x-40)*np.tan(30/360*2*np.pi)\n        elif x>60 and x<100:\n            y_off = y - (x-80)*np.tan(30/360*2*np.pi)\n        else:\n            y_off = y - 20*np.tan(30/360*2*np.pi)\n        # return np.concatenate([\n        #     self.sim.data.qpos.flat[2:],\n        #     self.sim.data.qvel.flat,\n        #     # np.clip(self.sim.data.cfrc_ext, -1, 1).flat,\n        # ])\n        return np.concatenate([\n            self.sim.data.qpos.flat[2:-42], # size = 3\n            self.sim.data.qvel.flat[:-36], # size = 6\n            [x/5],\n            [y_off],\n            # np.clip(self.sim.data.cfrc_ext, -1, 1).flat,\n        ])\n\n    # def reset_model(self):\n    #     qpos = self.init_qpos + self.np_random.uniform(size=self.model.nq, low=-.1, high=.1)\n    #     qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1\n    #     self.set_state(qpos, qvel)\n    #     return self._get_obs()\n\n    def reset_model(self):\n        qpos = self.init_qpos + self.np_random.uniform(size=self.model.nq, low=-.1, high=.1)\n        qpos[-42:] = self.init_qpos[-42:]\n        qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1\n        qvel[-36:] = self.init_qvel[-36:]\n        self.set_state(qpos, qvel)\n        return self._get_obs()\n\n    def viewer_setup(self):\n        self.viewer.cam.distance = self.model.stat.extent * 0.5\n"
  },
  {
    "path": "MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/manyagent_swimmer.py",
    "content": "import numpy as np\nfrom gym import utils\nfrom gym.envs.mujoco import mujoco_env\n\nimport os\nfrom jinja2 import Template\nimport mujoco_py as mjp\n\nclass ManyAgentSwimmerEnv(mujoco_env.MujocoEnv, utils.EzPickle):\n    def __init__(self, **kwargs):\n        agent_conf = kwargs.get(\"agent_conf\")\n        n_agents = int(agent_conf.split(\"x\")[0])\n        n_segs_per_agents = int(agent_conf.split(\"x\")[1])\n        n_segs = n_agents * n_segs_per_agents\n\n        # Check whether asset file exists already, otherwise create it\n        asset_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets',\n                                                  'manyagent_swimmer_{}_agents_each_{}_segments.auto.xml'.format(n_agents,\n                                                                                                                 n_segs_per_agents))\n        # if not os.path.exists(asset_path):\n        print(\"Auto-Generating Manyagent Swimmer asset with {} segments at {}.\".format(n_segs, asset_path))\n        self._generate_asset(n_segs=n_segs, asset_path=asset_path)\n\n        #asset_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets',git p\n        #                          'manyagent_swimmer.xml')\n\n        mujoco_env.MujocoEnv.__init__(self, asset_path, 4)\n        utils.EzPickle.__init__(self)\n\n    def _generate_asset(self, n_segs, asset_path):\n        template_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets',\n                                                  'manyagent_swimmer.xml.template')\n        with open(template_path, \"r\") as f:\n            t = Template(f.read())\n        body_str_template = \"\"\"\n        <body name=\"mid{:d}\" pos=\"-1 0 0\">\n          <geom density=\"1000\" fromto=\"0 0 0 -1 0 0\" size=\"0.1\" type=\"capsule\"/>\n          <joint axis=\"0 0 {:d}\" limited=\"true\" name=\"rot{:d}\" pos=\"0 0 0\" range=\"-100 100\" type=\"hinge\"/>\n        \"\"\"\n\n        body_end_str_template = \"\"\"\n        <body name=\"back\" pos=\"-1 0 0\">\n            <geom density=\"1000\" fromto=\"0 0 0 -1 0 0\" size=\"0.1\" type=\"capsule\"/>\n            <joint axis=\"0 0 1\" limited=\"true\" name=\"rot{:d}\" pos=\"0 0 0\" range=\"-100 100\" type=\"hinge\"/>\n          </body>\n        \"\"\"\n\n        body_close_str_template =\"</body>\\n\"\n        actuator_str_template = \"\"\"\\t <motor ctrllimited=\"true\" ctrlrange=\"-1 1\" gear=\"150.0\" joint=\"rot{:d}\"/>\\n\"\"\"\n\n        body_str = \"\"\n        for i in range(1,n_segs-1):\n            body_str += body_str_template.format(i, (-1)**(i+1), i)\n        body_str += body_end_str_template.format(n_segs-1)\n        body_str += body_close_str_template*(n_segs-2)\n\n        actuator_str = \"\"\n        for i in range(n_segs):\n            actuator_str += actuator_str_template.format(i)\n\n        rt = t.render(body=body_str, actuators=actuator_str)\n        with open(asset_path, \"w\") as f:\n            f.write(rt)\n        pass\n\n    def step(self, a):\n\n        # ctrl_cost_coeff = 0.0001\n        # xposbefore = self.sim.data.qpos[0]\n        # self.do_simulation(a, self.frame_skip)\n        # xposafter = self.sim.data.qpos[0]\n        # reward_fwd = (xposafter - xposbefore) / self.dt\n        # reward_ctrl = -ctrl_cost_coeff * np.square(a).sum()\n        # reward = reward_fwd + reward_ctrl\n\n        ctrl_cost_coeff = 0.0001\n        xposbefore = self.sim.data.qpos[0]\n        # yposbefore = self.sim.data.qpos[1]\n        self.do_simulation(a, self.frame_skip)\n        # ADDED\n        mjp.functions.mj_rnePostConstraint(self.sim.model,\n                                           self.sim.data)\n        xposafter = self.sim.data.qpos[0]\n        # yposbefore = self.sim.data.qpos[1]\n        y_wallpos1 = self.data.get_geom_xpos(\"wall1\")[1]\n        y_wallpos2 = self.data.get_geom_xpos(\"wall2\")[1]\n        reward_fwd = (xposafter - xposbefore) / self.dt\n        reward_ctrl = - ctrl_cost_coeff * np.square(a).sum()\n        reward = reward_fwd + reward_ctrl\n        ### ADDED safety stuff\n        yposafter = self.get_body_com(\"torso\")[1]\n        ywall = np.array([-2.3, 2.3])\n        if xposafter < 20:\n            y_walldist = yposafter - xposafter * np.tan(30 / 360 * 2 * np.pi) + ywall\n        elif xposafter > 20 and xposafter < 60:\n            y_walldist = yposafter + (xposafter - 40) * np.tan(30 / 360 * 2 * np.pi) - ywall\n        elif xposafter > 60 and xposafter < 100:\n            y_walldist = yposafter - (xposafter - 80) * np.tan(30 / 360 * 2 * np.pi) + ywall\n        else:\n            y_walldist = yposafter - 20 * np.tan(30 / 360 * 2 * np.pi) + ywall\n        obj_cost = (abs(y_walldist) < 1.8).any() * 1.0\n\n        # print(\"y_wallpos1-yposafter\", y_wallpos1-yposafter)\n        # print(\"y_wallpos2-yposafter\", y_wallpos2-yposafter)\n        #### ADDED\n        # body_quat = self.data.get_body_xquat('torso')\n        # z_rot = 1 - 2 * (\n        #             body_quat[1] ** 2 + body_quat[2] ** 2)  ### normally xx-rotation, not sure what axes mujoco uses\n        #\n        # state = self.state_vector()\n\n        done  = False\n\n        # ADDED\n        # print(\"y_walldist\", y_walldist)\n        # print(\"obj_cost\", obj_cost)\n        # print(\"done_cost\", done_cost)\n        cost = np.clip(obj_cost, 0, 1)\n        #cost = obj_cost\n        ob = self._get_obs()\n        return ob, reward, done, dict(cost=cost, reward_fwd=reward_fwd, reward_ctrl=reward_ctrl)\n\n    def _get_obs(self):\n        qpos = self.sim.data.qpos\n        qvel = self.sim.data.qvel\n\n        #ADDED\n        x = self.sim.data.qpos.flat[0]  # ADDED\n        y = self.sim.data.qpos.flat[1]  # ADDED\n\n        # ADDED\n        if x < 20:\n            y_off = y - x * np.tan(30 / 360 * 2 * np.pi)\n        elif x > 20 and x < 60:\n            y_off = y + (x - 40) * np.tan(30 / 360 * 2 * np.pi)\n        elif x > 60 and x < 100:\n            y_off = y - (x - 80) * np.tan(30 / 360 * 2 * np.pi)\n        else:\n            y_off = y - 20 * np.tan(30 / 360 * 2 * np.pi)\n\n        return np.concatenate([qpos.flat[2:], qvel.flat, [x/5],\n            [y_off]])\n\n    def reset_model(self):\n        self.set_state(\n            self.init_qpos + self.np_random.uniform(low=-.1, high=.1, size=self.model.nq),\n            self.init_qvel + self.np_random.uniform(low=-.1, high=.1, size=self.model.nv)\n        )\n        return self._get_obs()"
  },
  {
    "path": "MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/mujoco_env.py",
    "content": "from collections import OrderedDict\nimport os\n\n\nfrom gym import error, spaces\nfrom gym.utils import seeding\nimport numpy as np\nfrom os import path\nimport gym\n\ntry:\n    import mujoco_py\nexcept ImportError as e:\n    raise error.DependencyNotInstalled(\"{}. (HINT: you need to install mujoco_py, and also perform the setup instructions here: https://github.com/openai/mujoco-py/.)\".format(e))\n\nDEFAULT_SIZE = 500\n\n\ndef convert_observation_to_space(observation):\n    if isinstance(observation, dict):\n        space = spaces.Dict(OrderedDict([\n            (key, convert_observation_to_space(value))\n            for key, value in observation.items()\n        ]))\n    elif isinstance(observation, np.ndarray):\n        low = np.full(observation.shape, -float('inf'), dtype=np.float32)\n        high = np.full(observation.shape, float('inf'), dtype=np.float32)\n        space = spaces.Box(low, high, dtype=observation.dtype)\n    else:\n        raise NotImplementedError(type(observation), observation)\n\n    return space\n\n\nclass MujocoEnv(gym.Env):\n    \"\"\"Superclass for all MuJoCo environments.\n    \"\"\"\n\n    def __init__(self, model_path, frame_skip):\n        if model_path.startswith(\"/\"):\n            fullpath = model_path\n        else:\n            fullpath = os.path.join(os.path.dirname(__file__), \"./assets\", model_path)\n        if not path.exists(fullpath):\n            raise IOError(\"File %s does not exist\" % fullpath)\n        self.frame_skip = frame_skip\n        self.model = mujoco_py.load_model_from_path(fullpath)\n        self.sim = mujoco_py.MjSim(self.model)\n        self.data = self.sim.data\n        self.viewer = None\n        self._viewers = {}\n\n        self.metadata = {\n            'render.modes': ['human', 'rgb_array', 'depth_array'],\n            'video.frames_per_second': int(np.round(1.0 / self.dt))\n        }\n\n        self.init_qpos = self.sim.data.qpos.ravel().copy()\n        self.init_qvel = self.sim.data.qvel.ravel().copy()\n\n        self._set_action_space()\n\n        action = self.action_space.sample()\n        observation, _reward, done, _info = self.step(action)\n        # assert not done\n\n        self._set_observation_space(observation)\n\n        self.seed()\n\n    def _set_action_space(self):\n        bounds = self.model.actuator_ctrlrange.copy().astype(np.float32)\n        low, high = bounds.T\n        self.action_space = spaces.Box(low=low, high=high, dtype=np.float32)\n        return self.action_space\n\n    def _set_observation_space(self, observation):\n        self.observation_space = convert_observation_to_space(observation)\n        return self.observation_space\n\n    def seed(self, seed=None):\n        self.np_random, seed = seeding.np_random(seed)\n        return [seed]\n\n    # methods to override:\n    # ----------------------------\n\n    def reset_model(self):\n        \"\"\"\n        Reset the robot degrees of freedom (qpos and qvel).\n        Implement this in each subclass.\n        \"\"\"\n        raise NotImplementedError\n\n    def viewer_setup(self):\n        \"\"\"\n        This method is called when the viewer is initialized.\n        Optionally implement this method, if you need to tinker with camera position\n        and so forth.\n        \"\"\"\n        pass\n\n    # -----------------------------\n\n    def reset(self):\n        self.sim.reset()\n        ob = self.reset_model()\n        return ob\n\n    def set_state(self, qpos, qvel):\n        assert qpos.shape == (self.model.nq,) and qvel.shape == (self.model.nv,)\n        old_state = self.sim.get_state()\n        new_state = mujoco_py.MjSimState(old_state.time, qpos, qvel,\n                                         old_state.act, old_state.udd_state)\n        self.sim.set_state(new_state)\n        self.sim.forward()\n\n    @property\n    def dt(self):\n        return self.model.opt.timestep * self.frame_skip\n\n    def do_simulation(self, ctrl, n_frames):\n        self.sim.data.ctrl[:] = ctrl\n        for _ in range(n_frames):\n            self.sim.step()\n\n    def render(self,\n               mode='human',\n               width=DEFAULT_SIZE,\n               height=DEFAULT_SIZE,\n               camera_id=None,\n               camera_name=None):\n        if mode == 'rgb_array':\n            if camera_id is not None and camera_name is not None:\n                raise ValueError(\"Both `camera_id` and `camera_name` cannot be\"\n                                 \" specified at the same time.\")\n\n            no_camera_specified = camera_name is None and camera_id is None\n            if no_camera_specified:\n                camera_name = 'track'\n\n            if camera_id is None and camera_name in self.model._camera_name2id:\n                camera_id = self.model.camera_name2id(camera_name)\n\n            self._get_viewer(mode).render(width, height, camera_id=camera_id)\n            # window size used for old mujoco-py:\n            data = self._get_viewer(mode).read_pixels(width, height, depth=False)\n            # original image is upside-down, so flip it\n            return data[::-1, :, :]\n        elif mode == 'depth_array':\n            self._get_viewer(mode).render(width, height)\n            # window size used for old mujoco-py:\n            # Extract depth part of the read_pixels() tuple\n            data = self._get_viewer(mode).read_pixels(width, height, depth=True)[1]\n            # original image is upside-down, so flip it\n            return data[::-1, :]\n        elif mode == 'human':\n            self._get_viewer(mode).render()\n\n    def close(self):\n        if self.viewer is not None:\n            # self.viewer.finish()\n            self.viewer = None\n            self._viewers = {}\n\n    def _get_viewer(self, mode):\n        self.viewer = self._viewers.get(mode)\n        if self.viewer is None:\n            if mode == 'human':\n                self.viewer = mujoco_py.MjViewer(self.sim)\n            elif mode == 'rgb_array' or mode == 'depth_array':\n                self.viewer = mujoco_py.MjRenderContextOffscreen(self.sim, -1)\n\n            self.viewer_setup()\n            self._viewers[mode] = self.viewer\n        return self.viewer\n\n    def get_body_com(self, body_name):\n        return self.data.get_body_xpos(body_name)\n\n    def state_vector(self):\n        return np.concatenate([\n            self.sim.data.qpos.flat,\n            self.sim.data.qvel.flat\n        ])\n\n    def place_random_objects(self):\n        for i in range(9):\n            random_color_array = np.append(np.random.uniform(0, 1, size=3), 1)\n            random_pos_array = np.append(np.random.uniform(-10., 10., size=2), 0.5)\n            site_id = self.sim.model.geom_name2id('obj' + str(i))\n            self.sim.model.geom_rgba[site_id] = random_color_array\n            self.sim.model.geom_pos[site_id] = random_pos_array\n"
  },
  {
    "path": "MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/mujoco_multi.py",
    "content": "from functools import partial\nimport gym\nfrom gym.spaces import Box\nfrom gym.wrappers import TimeLimit\nimport numpy as np\n\nfrom .multiagentenv import MultiAgentEnv\nfrom .manyagent_ant import ManyAgentAntEnv\nfrom .manyagent_swimmer import ManyAgentSwimmerEnv\nfrom .obsk import get_joints_at_kdist, get_parts_and_edges, build_obs\n\n\ndef env_fn(env, **kwargs) -> MultiAgentEnv:  # TODO: this may be a more complex function\n    # env_args = kwargs.get(\"env_args\", {})\n    return env(**kwargs)\n\n\n# env_REGISTRY = {}\n# env_REGISTRY[\"manyagent_ant\"] = partial(env_fn, env=ManyAgentAntEnv)\n#\n# env_REGISTRY = {}\n# env_REGISTRY[\"manyagent_swimmer\"] = partial(env_fn, env=ManyAgentSwimmerEnv)\n\n\n# using code from https://github.com/ikostrikov/pytorch-ddpg-naf\nclass NormalizedActions(gym.ActionWrapper):\n\n    def _action(self, action):\n        action = (action + 1) / 2\n        action *= (self.action_space.high - self.action_space.low)\n        action += self.action_space.low\n        return action\n\n    def action(self, action_):\n        return self._action(action_)\n\n    def _reverse_action(self, action):\n        action -= self.action_space.low\n        action /= (self.action_space.high - self.action_space.low)\n        action = action * 2 - 1\n        return action\n\n\nclass MujocoMulti(MultiAgentEnv):\n\n    def __init__(self, batch_size=None, **kwargs):\n        super().__init__(batch_size, **kwargs)\n        self.scenario = kwargs[\"env_args\"][\"scenario\"]  # e.g. Ant-v2\n        self.agent_conf = kwargs[\"env_args\"][\"agent_conf\"]  # e.g. '2x3'\n\n        self.agent_partitions, self.mujoco_edges, self.mujoco_globals = get_parts_and_edges(self.scenario,\n                                                                                            self.agent_conf)\n\n        self.n_agents = len(self.agent_partitions)\n        self.n_actions = max([len(l) for l in self.agent_partitions])\n        self.obs_add_global_pos = kwargs[\"env_args\"].get(\"obs_add_global_pos\", False)\n\n        self.agent_obsk = kwargs[\"env_args\"].get(\"agent_obsk\",\n                                                 None)  # if None, fully observable else k>=0 implies observe nearest k agents or joints\n        self.agent_obsk_agents = kwargs[\"env_args\"].get(\"agent_obsk_agents\",\n                                                        False)  # observe full k nearest agents (True) or just single joints (False)\n\n        if self.agent_obsk is not None:\n            # print(\"this is agent_obsk\")\n            self.k_categories_label = kwargs[\"env_args\"].get(\"k_categories\")\n            if self.k_categories_label is None:\n                if self.scenario in [\"Ant-v2\", \"manyagent_ant\"]:\n                    self.k_categories_label = \"qpos,qvel,cfrc_ext|qpos\"\n                    # print(\"this is agent_obsk --- ant\")\n                elif self.scenario in [\"Swimmer-v2\", \"manyagent_swimmer\"]:\n                    self.k_categories_label = \"qpos,qvel|qpos\"\n                    # print(\"this is agent_obsk --- swimmer\")\n                elif self.scenario in [\"Humanoid-v2\", \"HumanoidStandup-v2\"]:\n                    self.k_categories_label = \"qpos,qvel,cfrc_ext,cvel,cinert,qfrc_actuator|qpos\"\n                elif self.scenario in [\"Reacher-v2\"]:\n                    self.k_categories_label = \"qpos,qvel,fingertip_dist|qpos\"\n                elif self.scenario in [\"coupled_half_cheetah\"]:\n                    self.k_categories_label = \"qpos,qvel,ten_J,ten_length,ten_velocity|\"\n                else:\n                    self.k_categories_label = \"qpos,qvel|qpos\"\n\n            k_split = self.k_categories_label.split(\"|\")\n            self.k_categories = [k_split[k if k < len(k_split) else -1].split(\",\") for k in range(self.agent_obsk + 1)]\n\n            self.global_categories_label = kwargs[\"env_args\"].get(\"global_categories\")\n            self.global_categories = self.global_categories_label.split(\n                \",\") if self.global_categories_label is not None else []\n\n        if self.agent_obsk is not None:\n            self.k_dicts = [get_joints_at_kdist(agent_id,\n                                                self.agent_partitions,\n                                                self.mujoco_edges,\n                                                k=self.agent_obsk,\n                                                kagents=False, ) for agent_id in range(self.n_agents)]\n\n        # load scenario from script\n        self.episode_limit = self.args.episode_limit\n\n        self.env_version = kwargs[\"env_args\"].get(\"env_version\", 2)\n        if self.env_version == 2:\n            if self.scenario in [\"manyagent_ant\"]:\n                from .manyagent_ant import ManyAgentAntEnv as this_env\n            elif self.scenario in [\"manyagent_swimmer\"]:\n                from .manyagent_swimmer import ManyAgentSwimmerEnv as this_env\n            elif self.scenario in [\"coupled_half_cheetah\"]:\n                from .coupled_half_cheetah import CoupledHalfCheetah as this_env\n            elif self.scenario in [\"HalfCheetah-v2\"]:\n                from .half_cheetah import HalfCheetahEnv as this_env\n                # print(\"HalfCheetahEnv1111\") Hopper-v2 #\n            elif self.scenario in [\"Hopper-v2\"]:\n                from .hopper import HopperEnv as this_env\n                # print(\"Hopper-v2\")\n            elif self.scenario in [\"Humanoid-v2\"]:\n                from .humanoid import HumanoidEnv as this_env\n                # print(\"Hopper-v2\")\n            elif self.scenario in [\"Ant-v2\"]:\n                from .ant import AntEnv as this_env\n            else:\n                raise NotImplementedError('Custom env not implemented!')\n            # print(\"self.scenario\", self.scenario)\n            # aaa= this_env(**kwargs[\"env_args\"])\n            # print(\"aaa\", aaa)\n            self.wrapped_env = NormalizedActions(\n                TimeLimit(this_env(**kwargs[\"env_args\"]), max_episode_steps=self.episode_limit))\n            # try:\n            #     self.wrapped_env = NormalizedActions(gym.make(self.scenario))\n            #     print(\"this managent1\")\n            # except gym.error.Error:\n            #     if self.scenario in [\"manyagent_ant\"]:\n            #         from .manyagent_ant import ManyAgentAntEnv as this_env\n            #     elif self.scenario in [\"manyagent_swimmer\"]:\n            #         from .manyagent_swimmer import ManyAgentSwimmerEnv as this_env\n            #     elif self.scenario in [\"coupled_half_cheetah\"]:\n            #         from .coupled_half_cheetah import CoupledHalfCheetah as this_env\n            #     elif self.scenario in [\"HalfCheetah-v2\"]:\n            #         from .half_cheetah import HalfCheetahEnv as this_env\n            #         print(\"HalfCheetahEnv1111\")\n            #     else:\n            #         raise NotImplementedError('Custom env not implemented!')\n            #     self.wrapped_env = NormalizedActions(\n            #         TimeLimit(this_env(**kwargs[\"env_args\"]), max_episode_steps=self.episode_limit))\n                # if self.scenario == \"manyagent_swimmer\":\n                #     env_REGISTRY = {}\n                #     env_REGISTRY[\"manyagent_swimmer\"] = partial(env_fn, env=ManyAgentSwimmerEnv)\n                #     print(\"this is swimmer 2\")\n                # elif self.scenario == \"manyagent_ant\":\n                #     env_REGISTRY = {}\n                #     env_REGISTRY[\"manyagent_ant\"] = partial(env_fn, env=ManyAgentAntEnv)\n                #     print(\"this managent2\")\n                # self.wrapped_env = NormalizedActions(\n                #     TimeLimit(partial(env_REGISTRY[self.scenario], **kwargs[\"env_args\"])(),\n                #               max_episode_steps=self.episode_limit))\n        else:\n            assert False, \"not implemented!\"\n        self.timelimit_env = self.wrapped_env.env\n        self.timelimit_env._max_episode_steps = self.episode_limit\n        self.env = self.timelimit_env.env\n        self.timelimit_env.reset()\n        self.obs_size = self.get_obs_size()\n        self.share_obs_size = self.get_state_size()\n\n        # COMPATIBILITY\n        self.n = self.n_agents\n        # self.observation_space = [Box(low=np.array([-10]*self.n_agents), high=np.array([10]*self.n_agents)) for _ in range(self.n_agents)]\n        self.observation_space = [Box(low=-10, high=10, shape=(self.obs_size,)) for _ in range(self.n_agents)]\n        self.share_observation_space = [Box(low=-10, high=10, shape=(self.share_obs_size,)) for _ in\n                                        range(self.n_agents)]\n\n        acdims = [len(ap) for ap in self.agent_partitions]\n        self.action_space = tuple([Box(self.env.action_space.low[sum(acdims[:a]):sum(acdims[:a + 1])],\n                                       self.env.action_space.high[sum(acdims[:a]):sum(acdims[:a + 1])]) for a in\n                                   range(self.n_agents)])\n\n        pass\n\n    def step(self, actions):\n\n        # need to remove dummy actions that arise due to unequal action vector sizes across agents\n        flat_actions = np.concatenate([actions[i][:self.action_space[i].low.shape[0]] for i in range(self.n_agents)])\n        obs_n, reward_n, done_n, info_n = self.wrapped_env.step(flat_actions)\n        self.steps += 1\n\n        info = {}\n        info.update(info_n)\n\n        # if done_n:\n        #     if self.steps < self.episode_limit:\n        #         info[\"episode_limit\"] = False   # the next state will be masked out\n        #     else:\n        #         info[\"episode_limit\"] = True    # the next state will not be masked out\n        if done_n:\n            if self.steps < self.episode_limit:\n                info[\"bad_transition\"] = False  # the next state will be masked out\n            else:\n                info[\"bad_transition\"] = True  # the next state will not be masked out\n\n        # return reward_n, done_n, info\n        rewards = [[reward_n]] * self.n_agents\n        # print(\"self.n_agents\", self.n_agents)\n        info[\"cost\"] = [[info[\"cost\"]]] * self.n_agents\n        dones = [done_n] * self.n_agents\n        infos = [info for _ in range(self.n_agents)]\n        return self.get_obs(), self.get_state(), rewards, dones, infos, self.get_avail_actions()\n\n    def get_obs(self):\n        \"\"\" Returns all agent observat3ions in a list \"\"\"\n        state = self.env._get_obs()\n        obs_n = []\n        for a in range(self.n_agents):\n            agent_id_feats = np.zeros(self.n_agents, dtype=np.float32)\n            agent_id_feats[a] = 1.0\n            # obs_n.append(self.get_obs_agent(a))\n            # obs_n.append(np.concatenate([state, self.get_obs_agent(a), agent_id_feats]))\n            # obs_n.append(np.concatenate([self.get_obs_agent(a), agent_id_feats]))\n            obs_i = np.concatenate([state, agent_id_feats])\n            obs_i = (obs_i - np.mean(obs_i)) / np.std(obs_i)\n            obs_n.append(obs_i)\n        return obs_n\n\n    def get_obs_agent(self, agent_id):\n        if self.agent_obsk is None:\n            return self.env._get_obs()\n        else:\n            # return build_obs(self.env,\n            #                       self.k_dicts[agent_id],\n            #                       self.k_categories,\n            #                       self.mujoco_globals,\n            #                       self.global_categories,\n            #                       vec_len=getattr(self, \"obs_size\", None))\n            return build_obs(self.env,\n                             self.k_dicts[agent_id],\n                             self.k_categories,\n                             self.mujoco_globals,\n                             self.global_categories)\n\n    def get_obs_size(self):\n        \"\"\" Returns the shape of the observation \"\"\"\n        if self.agent_obsk is None:\n            return self.get_obs_agent(0).size\n        else:\n            return len(self.get_obs()[0])\n            # return max([len(self.get_obs_agent(agent_id)) for agent_id in range(self.n_agents)])\n\n    def get_state(self, team=None):\n        # TODO: May want global states for different teams (so cannot see what the other team is communicating e.g.)\n        state = self.env._get_obs()\n        share_obs = []\n        for a in range(self.n_agents):\n            agent_id_feats = np.zeros(self.n_agents, dtype=np.float32)\n            agent_id_feats[a] = 1.0\n            # share_obs.append(np.concatenate([state, self.get_obs_agent(a), agent_id_feats]))\n            state_i = np.concatenate([state, agent_id_feats])\n            state_i = (state_i - np.mean(state_i)) / np.std(state_i)\n            share_obs.append(state_i)\n        return share_obs\n\n    def get_state_size(self):\n        \"\"\" Returns the shape of the state\"\"\"\n        return len(self.get_state()[0])\n\n    def get_avail_actions(self):  # all actions are always available\n        return np.ones(shape=(self.n_agents, self.n_actions,))\n\n    def get_avail_agent_actions(self, agent_id):\n        \"\"\" Returns the available actions for agent_id \"\"\"\n        return np.ones(shape=(self.n_actions,))\n\n    def get_total_actions(self):\n        \"\"\" Returns the total number of actions an agent could ever take \"\"\"\n        return self.n_actions  # CAREFUL! - for continuous dims, this is action space dim rather\n        # return self.env.action_space.shape[0]\n\n    def get_stats(self):\n        return {}\n\n    # TODO: Temp hack\n    def get_agg_stats(self, stats):\n        return {}\n\n    def reset(self, **kwargs):\n        \"\"\" Returns initial observations and states\"\"\"\n        self.steps = 0\n        self.timelimit_env.reset()\n        return self.get_obs(), self.get_state(), self.get_avail_actions()\n\n    def render(self, **kwargs):\n        self.env.render(**kwargs)\n\n    def close(self):\n        pass\n\n    def seed(self, args):\n        pass\n\n    def get_env_info(self):\n\n        env_info = {\"state_shape\": self.get_state_size(),\n                    \"obs_shape\": self.get_obs_size(),\n                    \"n_actions\": self.get_total_actions(),\n                    \"n_agents\": self.n_agents,\n                    \"episode_limit\": self.episode_limit,\n                    \"action_spaces\": self.action_space,\n                    \"actions_dtype\": np.float32,\n                    \"normalise_actions\": False\n                    }\n        return env_info\n"
  },
  {
    "path": "MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/multiagentenv.py",
    "content": "from collections import namedtuple\nimport numpy as np\n\n\ndef convert(dictionary):\n    return namedtuple('GenericDict', dictionary.keys())(**dictionary)\n\nclass MultiAgentEnv(object):\n\n    def __init__(self, batch_size=None, **kwargs):\n        # Unpack arguments from sacred\n        args = kwargs[\"env_args\"]\n        if isinstance(args, dict):\n            args = convert(args)\n        self.args = args\n\n        if getattr(args, \"seed\", None) is not None:\n            self.seed = args.seed\n            self.rs = np.random.RandomState(self.seed) # initialise numpy random state\n\n    def step(self, actions):\n        \"\"\" Returns reward, terminated, info \"\"\"\n        raise NotImplementedError\n\n    def get_obs(self):\n        \"\"\" Returns all agent observations in a list \"\"\"\n        raise NotImplementedError\n\n    def get_obs_agent(self, agent_id):\n        \"\"\" Returns observation for agent_id \"\"\"\n        raise NotImplementedError\n\n    def get_obs_size(self):\n        \"\"\" Returns the shape of the observation \"\"\"\n        raise NotImplementedError\n\n    def get_state(self):\n        raise NotImplementedError\n\n    def get_state_size(self):\n        \"\"\" Returns the shape of the state\"\"\"\n        raise NotImplementedError\n\n    def get_avail_actions(self):\n        raise NotImplementedError\n\n    def get_avail_agent_actions(self, agent_id):\n        \"\"\" Returns the available actions for agent_id \"\"\"\n        raise NotImplementedError\n\n    def get_total_actions(self):\n        \"\"\" Returns the total number of actions an agent could ever take \"\"\"\n        # TODO: This is only suitable for a discrete 1 dimensional action space for each agent\n        raise NotImplementedError\n\n    def get_stats(self):\n        raise NotImplementedError\n\n    # TODO: Temp hack\n    def get_agg_stats(self, stats):\n        return {}\n\n    def reset(self):\n        \"\"\" Returns initial observations and states\"\"\"\n        raise NotImplementedError\n\n    def render(self):\n        raise NotImplementedError\n\n    def close(self):\n        raise NotImplementedError\n\n    def seed(self, seed):\n        raise NotImplementedError\n\n    def get_env_info(self):\n        env_info = {\"state_shape\": self.get_state_size(),\n                    \"obs_shape\": self.get_obs_size(),\n                    \"n_actions\": self.get_total_actions(),\n                    \"n_agents\": self.n_agents,\n                    \"episode_limit\": self.episode_limit}\n        return env_info"
  },
  {
    "path": "MACPO/macpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/obsk.py",
    "content": "import itertools\nimport numpy as np\nfrom copy import deepcopy\n\nclass Node():\n    def __init__(self, label, qpos_ids, qvel_ids, act_ids, body_fn=None, bodies=None, extra_obs=None, tendons=None):\n        self.label = label\n        self.qpos_ids = qpos_ids\n        self.qvel_ids = qvel_ids\n        self.act_ids = act_ids\n        self.bodies = bodies\n        self.extra_obs = {} if extra_obs is None else extra_obs\n        self.body_fn = body_fn\n        self.tendons = tendons\n        pass\n\n    def __str__(self):\n        return self.label\n\n    def __repr__(self):\n        return self.label\n\n\nclass HyperEdge():\n    def __init__(self, *edges):\n        self.edges = set(edges)\n\n    def __contains__(self, item):\n        return item in self.edges\n\n    def __str__(self):\n        return \"HyperEdge({})\".format(self.edges)\n\n    def __repr__(self):\n        return \"HyperEdge({})\".format(self.edges)\n\n\ndef get_joints_at_kdist(agent_id, agent_partitions, hyperedges, k=0, kagents=False,):\n    \"\"\" Identify all joints at distance <= k from agent agent_id\n\n    :param agent_id: id of agent to be considered\n    :param agent_partitions: list of joint tuples in order of agentids\n    :param edges: list of tuples (joint1, joint2)\n    :param k: kth degree\n    :param kagents: True (observe all joints of an agent if a single one is) or False (individual joint granularity)\n    :return:\n        dict with k as key, and list of joints at that distance\n    \"\"\"\n    assert not kagents, \"kagents not implemented!\"\n\n    agent_joints = agent_partitions[agent_id]\n\n    def _adjacent(lst, kagents=False):\n        # return all sets adjacent to any element in lst\n        ret = set([])\n        for l in lst:\n            ret = ret.union(set(itertools.chain(*[e.edges.difference({l}) for e in hyperedges if l in e])))\n        return ret\n\n    seen = set([])\n    new = set([])\n    k_dict = {}\n    for _k in range(k+1):\n        if not _k:\n            new = set(agent_joints)\n        else:\n            print(hyperedges)\n            new = _adjacent(new) - seen\n        seen = seen.union(new)\n        k_dict[_k] = sorted(list(new), key=lambda x:x.label)\n    return k_dict\n\n\ndef build_obs(env, k_dict, k_categories, global_dict, global_categories, vec_len=None):\n    \"\"\"Given a k_dict from get_joints_at_kdist, extract observation vector.\n\n    :param k_dict: k_dict\n    :param qpos: qpos numpy array\n    :param qvel: qvel numpy array\n    :param vec_len: if None no padding, else zero-pad to vec_len\n    :return:\n    observation vector\n    \"\"\"\n\n    # TODO: This needs to be fixed, it was designed for half-cheetah only!\n    #if add_global_pos:\n    #    obs_qpos_lst.append(global_qpos)\n    #    obs_qvel_lst.append(global_qvel)\n\n\n    body_set_dict = {}\n    obs_lst = []\n    # Add parts attributes\n    for k in sorted(list(k_dict.keys())):\n        cats = k_categories[k]\n        for _t in k_dict[k]:\n            for c in cats:\n                if c in _t.extra_obs:\n                    items = _t.extra_obs[c](env).tolist()\n                    obs_lst.extend(items if isinstance(items, list) else [items])\n                else:\n                    if c in [\"qvel\",\"qpos\"]: # this is a \"joint position/velocity\" item\n                        items = getattr(env.sim.data, c)[getattr(_t, \"{}_ids\".format(c))]\n                        obs_lst.extend(items if isinstance(items, list) else [items])\n                    elif c in [\"qfrc_actuator\"]: # this is a \"vel position\" item\n                        items = getattr(env.sim.data, c)[getattr(_t, \"{}_ids\".format(\"qvel\"))]\n                        obs_lst.extend(items if isinstance(items, list) else [items])\n                    elif c in [\"cvel\", \"cinert\", \"cfrc_ext\"]:  # this is a \"body position\" item\n                        if _t.bodies is not None:\n                            for b in _t.bodies:\n                                if c not in body_set_dict:\n                                    body_set_dict[c] = set()\n                                if b not in body_set_dict[c]:\n                                    items = getattr(env.sim.data, c)[b].tolist()\n                                    items = getattr(_t, \"body_fn\", lambda _id,x:x)(b, items)\n                                    obs_lst.extend(items if isinstance(items, list) else [items])\n                                    body_set_dict[c].add(b)\n\n    # Add global attributes\n    body_set_dict = {}\n    for c in global_categories:\n        if c in [\"qvel\", \"qpos\"]:  # this is a \"joint position\" item\n            for j in global_dict.get(\"joints\", []):\n                items = getattr(env.sim.data, c)[getattr(j, \"{}_ids\".format(c))]\n                obs_lst.extend(items if isinstance(items, list) else [items])\n        else:\n            for b in global_dict.get(\"bodies\", []):\n                if c not in body_set_dict:\n                    body_set_dict[c] = set()\n                if b not in body_set_dict[c]:\n                    obs_lst.extend(getattr(env.sim.data, c)[b].tolist())\n                    body_set_dict[c].add(b)\n\n    if vec_len is not None:\n        pad = np.array((vec_len - len(obs_lst))*[0])\n        if len(pad):\n            return np.concatenate([np.array(obs_lst), pad])\n    return np.array(obs_lst)\n\n\ndef build_actions(agent_partitions, k_dict):\n    # Composes agent actions output from networks\n    # into coherent joint action vector to be sent to the env.\n    pass\n\ndef get_parts_and_edges(label, partitioning):\n    if label in [\"half_cheetah\", \"HalfCheetah-v2\"]:\n\n        # define Mujoco graph\n        bthigh = Node(\"bthigh\", -6, -6, 0)\n        bshin = Node(\"bshin\", -5, -5, 1)\n        bfoot = Node(\"bfoot\", -4, -4, 2)\n        fthigh = Node(\"fthigh\", -3, -3, 3)\n        fshin = Node(\"fshin\", -2, -2, 4)\n        ffoot = Node(\"ffoot\", -1, -1, 5)\n\n        edges = [HyperEdge(bfoot, bshin),\n                 HyperEdge(bshin, bthigh),\n                 HyperEdge(bthigh, fthigh),\n                 HyperEdge(fthigh, fshin),\n                 HyperEdge(fshin, ffoot)]\n\n        root_x = Node(\"root_x\", 0, 0, -1,\n                      extra_obs={\"qpos\": lambda env: np.array([])})\n        root_z = Node(\"root_z\", 1, 1, -1)\n        root_y = Node(\"root_y\", 2, 2, -1)\n        globals = {\"joints\":[root_x, root_y, root_z]}\n\n        if partitioning == \"2x3\":\n            parts = [(bfoot, bshin, bthigh),\n                     (ffoot, fshin, fthigh)]\n        elif partitioning == \"6x1\":\n            parts = [(bfoot,), (bshin,), (bthigh,), (ffoot,), (fshin,), (fthigh,)]\n        elif partitioning == \"3x2\":\n            parts = [(bfoot, bshin,), (bthigh, ffoot,), (fshin, fthigh,)]\n        else:\n            raise Exception(\"UNKNOWN partitioning config: {}\".format(partitioning))\n\n        return parts, edges, globals\n\n    elif label in [\"Ant-v2\"]:\n\n        # define Mujoco graph\n        torso = 1\n        front_left_leg = 2\n        aux_1 = 3\n        ankle_1 = 4\n        front_right_leg = 5\n        aux_2 = 6\n        ankle_2 = 7\n        back_leg = 8\n        aux_3 = 9\n        ankle_3 = 10\n        right_back_leg = 11\n        aux_4 = 12\n        ankle_4 = 13\n\n        hip1 = Node(\"hip1\", -8, -8, 2, bodies=[torso, front_left_leg], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist()) #\n        ankle1 = Node(\"ankle1\", -7, -7, 3, bodies=[front_left_leg, aux_1, ankle_1], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())#,\n        hip2 = Node(\"hip2\", -6, -6, 4, bodies=[torso, front_right_leg], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())#,\n        ankle2 = Node(\"ankle2\", -5, -5, 5, bodies=[front_right_leg, aux_2, ankle_2], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())#,\n        hip3 = Node(\"hip3\", -4, -4, 6, bodies=[torso, back_leg], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())#,\n        ankle3 = Node(\"ankle3\", -3, -3, 7, bodies=[back_leg, aux_3, ankle_3], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())#,\n        hip4 = Node(\"hip4\", -2, -2, 0, bodies=[torso, right_back_leg], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())#,\n        ankle4 = Node(\"ankle4\", -1, -1, 1, bodies=[right_back_leg, aux_4, ankle_4], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())#,\n\n        edges = [HyperEdge(ankle4, hip4),\n                 HyperEdge(ankle1, hip1),\n                 HyperEdge(ankle2, hip2),\n                 HyperEdge(ankle3, hip3),\n                 HyperEdge(hip4, hip1, hip2, hip3),\n                 ]\n\n        free_joint = Node(\"free\", 0, 0, -1, extra_obs={\"qpos\": lambda env: env.sim.data.qpos[:7],\n                                                       \"qvel\": lambda env: env.sim.data.qvel[:6],\n                                                       \"cfrc_ext\": lambda env: np.clip(env.sim.data.cfrc_ext[0:1], -1, 1)})\n        globals = {\"joints\": [free_joint]}\n\n        if partitioning == \"2x4\": # neighbouring legs together\n            parts = [(hip1, ankle1, hip2, ankle2),\n                     (hip3, ankle3, hip4, ankle4)]\n        elif partitioning == \"2x4d\": # diagonal legs together\n            parts = [(hip1, ankle1, hip3, ankle3),\n                     (hip2, ankle2, hip4, ankle4)]\n        elif partitioning == \"4x2\":\n            parts = [(hip1, ankle1),\n                     (hip2, ankle2),\n                     (hip3, ankle3),\n                     (hip4, ankle4)]\n        elif partitioning == \"8x1\":\n            parts = [(hip1,), (ankle1,),\n                     (hip2,), (ankle2,),\n                     (hip3,), (ankle3,),\n                     (hip4,), (ankle4,)]\n        else:\n            raise Exception(\"UNKNOWN partitioning config: {}\".format(partitioning))\n\n        return parts, edges, globals\n\n    elif label in [\"Hopper-v2\"]:\n\n        # define Mujoco-Graph\n        thigh_joint = Node(\"thigh_joint\", -3, -3, 0,\n                           extra_obs={\"qvel\": lambda env: np.clip(np.array([env.sim.data.qvel[-3]]), -10, 10)})\n        leg_joint = Node(\"leg_joint\", -2, -2, 1,\n                         extra_obs={\"qvel\": lambda env: np.clip(np.array([env.sim.data.qvel[-2]]), -10, 10)})\n        foot_joint = Node(\"foot_joint\", -1, -1, 2,\n                          extra_obs={\"qvel\": lambda env: np.clip(np.array([env.sim.data.qvel[-1]]), -10, 10)})\n\n        edges = [HyperEdge(foot_joint, leg_joint),\n                 HyperEdge(leg_joint, thigh_joint)]\n\n        root_x = Node(\"root_x\", 0, 0, -1, extra_obs={\"qpos\": lambda env: np.array([]),\n                                                     \"qvel\": lambda env: np.clip(np.array([env.sim.data.qvel[1]]), -10, 10)})\n        root_z = Node(\"root_z\", 1, 1, -1, extra_obs={\"qvel\": lambda env: np.clip(np.array([env.sim.data.qvel[1]]), -10, 10)})\n        root_y = Node(\"root_y\", 2, 2, -1, extra_obs={\"qvel\": lambda env: np.clip(np.array([env.sim.data.qvel[2]]), -10, 10)})\n        globals = {\"joints\":[root_x, root_y, root_z]}\n\n        if partitioning == \"3x1\":\n            parts = [(thigh_joint,),\n                     (leg_joint,),\n                     (foot_joint,)]\n\n        else:\n            raise Exception(\"UNKNOWN partitioning config: {}\".format(partitioning))\n\n        return parts, edges, globals\n\n    elif label in [\"Humanoid-v2\", \"HumanoidStandup-v2\"]:\n\n        # define Mujoco-Graph\n        abdomen_y = Node(\"abdomen_y\", -16, -16, 0) # act ordering bug in env -- double check!\n        abdomen_z = Node(\"abdomen_z\", -17, -17, 1)\n        abdomen_x = Node(\"abdomen_x\", -15, -15, 2)\n        right_hip_x = Node(\"right_hip_x\", -14, -14, 3)\n        right_hip_z = Node(\"right_hip_z\", -13, -13, 4)\n        right_hip_y = Node(\"right_hip_y\", -12, -12, 5)\n        right_knee = Node(\"right_knee\", -11, -11, 6)\n        left_hip_x = Node(\"left_hip_x\", -10, -10, 7)\n        left_hip_z = Node(\"left_hip_z\", -9, -9, 8)\n        left_hip_y = Node(\"left_hip_y\", -8, -8, 9)\n        left_knee = Node(\"left_knee\", -7, -7, 10)\n        right_shoulder1 = Node(\"right_shoulder1\", -6, -6, 11)\n        right_shoulder2 = Node(\"right_shoulder2\", -5, -5, 12)\n        right_elbow = Node(\"right_elbow\", -4, -4, 13)\n        left_shoulder1 = Node(\"left_shoulder1\", -3, -3, 14)\n        left_shoulder2 = Node(\"left_shoulder2\", -2, -2, 15)\n        left_elbow = Node(\"left_elbow\", -1, -1, 16)\n\n        edges = [HyperEdge(abdomen_x, abdomen_y, abdomen_z),\n                 HyperEdge(right_hip_x, right_hip_y, right_hip_z),\n                 HyperEdge(left_hip_x, left_hip_y, left_hip_z),\n                 HyperEdge(left_elbow, left_shoulder1, left_shoulder2),\n                 HyperEdge(right_elbow, right_shoulder1, right_shoulder2),\n                 HyperEdge(left_knee, left_hip_x, left_hip_y, left_hip_z),\n                 HyperEdge(right_knee, right_hip_x, right_hip_y, right_hip_z),\n                 HyperEdge(left_shoulder1, left_shoulder2, abdomen_x, abdomen_y, abdomen_z),\n                 HyperEdge(right_shoulder1, right_shoulder2, abdomen_x, abdomen_y, abdomen_z),\n                 HyperEdge(abdomen_x, abdomen_y, abdomen_z, left_hip_x, left_hip_y, left_hip_z),\n                 HyperEdge(abdomen_x, abdomen_y, abdomen_z, right_hip_x, right_hip_y, right_hip_z),\n                 ]\n\n        globals = {}\n\n        if partitioning == \"9|8\": # 17 in total, so one action is a dummy (to be handled by pymarl)\n            # isolate upper and lower body\n            parts = [(left_shoulder1, left_shoulder2, abdomen_x, abdomen_y, abdomen_z,\n                      right_shoulder1, right_shoulder2,\n                      right_elbow, left_elbow),\n                     (left_hip_x, left_hip_y, left_hip_z,\n                      right_hip_x, right_hip_y, right_hip_z,\n                      right_knee, left_knee)]\n            # TODO: There could be tons of decompositions here\n        elif partitioning == \"17x1\": # 17 in total, so one action is a dummy (to be handled by pymarl)\n            # isolate upper and lower body\n            parts = [(left_shoulder1,), (left_shoulder2,), (abdomen_x,), (abdomen_y,), (abdomen_z,),\n                     (right_shoulder1,), (right_shoulder2,), (right_elbow,), (left_elbow,),\n                     (left_hip_x,), (left_hip_y,), (left_hip_z,), (right_hip_x,), (right_hip_y,), (right_hip_z,),\n                     (right_knee,), (left_knee,)]\n\n        else:\n            raise Exception(\"UNKNOWN partitioning config: {}\".format(partitioning))\n\n        return parts, edges, globals\n\n    elif label in [\"Reacher-v2\"]:\n\n        # define Mujoco-Graph\n        body0 = 1\n        body1 = 2\n        fingertip = 3\n        joint0 = Node(\"joint0\", -4, -4, 0,\n                      bodies=[body0, body1],\n                      extra_obs={\"qpos\":(lambda env:np.array([np.sin(env.sim.data.qpos[-4]),\n                                                              np.cos(env.sim.data.qpos[-4])]))})\n        joint1 = Node(\"joint1\", -3, -3, 1,\n                      bodies=[body1, fingertip],\n                      extra_obs={\"fingertip_dist\":(lambda env:env.get_body_com(\"fingertip\") - env.get_body_com(\"target\")),\n                                 \"qpos\":(lambda env:np.array([np.sin(env.sim.data.qpos[-3]),\n                                                              np.cos(env.sim.data.qpos[-3])]))})\n        edges = [HyperEdge(joint0, joint1)]\n\n        worldbody = 0\n        target = 4\n        target_x = Node(\"target_x\", -2, -2, -1, extra_obs={\"qvel\":(lambda env:np.array([]))})\n        target_y = Node(\"target_y\", -1, -1, -1, extra_obs={\"qvel\":(lambda env:np.array([]))})\n        globals = {\"bodies\":[worldbody, target],\n                   \"joints\":[target_x, target_y]}\n\n        if partitioning == \"2x1\":\n            # isolate upper and lower arms\n            parts = [(joint0,), (joint1,)]\n            # TODO: There could be tons of decompositions here\n\n        else:\n            raise Exception(\"UNKNOWN partitioning config: {}\".format(partitioning))\n\n        return parts, edges, globals\n\n    elif label in [\"Swimmer-v2\"]:\n\n        # define Mujoco-Graph\n        joint0 = Node(\"rot2\", -2, -2, 0) # TODO: double-check ids\n        joint1 = Node(\"rot3\", -1, -1, 1)\n\n        edges = [HyperEdge(joint0, joint1)]\n        globals = {}\n\n        if partitioning == \"2x1\":\n            # isolate upper and lower body\n            parts = [(joint0,), (joint1,)]\n            # TODO: There could be tons of decompositions here\n\n        else:\n            raise Exception(\"UNKNOWN partitioning config: {}\".format(partitioning))\n\n        return parts, edges, globals\n\n    elif label in [\"Walker2d-v2\"]:\n\n        # define Mujoco-Graph\n        thigh_joint = Node(\"thigh_joint\", -6, -6, 0)\n        leg_joint = Node(\"leg_joint\", -5, -5, 1)\n        foot_joint = Node(\"foot_joint\", -4, -4, 2)\n        thigh_left_joint = Node(\"thigh_left_joint\", -3, -3, 3)\n        leg_left_joint = Node(\"leg_left_joint\", -2, -2, 4)\n        foot_left_joint = Node(\"foot_left_joint\", -1, -1, 5)\n\n        edges = [HyperEdge(foot_joint, leg_joint),\n                 HyperEdge(leg_joint, thigh_joint),\n                 HyperEdge(foot_left_joint, leg_left_joint),\n                 HyperEdge(leg_left_joint, thigh_left_joint),\n                 HyperEdge(thigh_joint, thigh_left_joint)\n                 ]\n        globals = {}\n\n        if partitioning == \"2x3\":\n            # isolate upper and lower body\n            parts = [(foot_joint, leg_joint, thigh_joint),\n                     (foot_left_joint, leg_left_joint, thigh_left_joint,)]\n            # TODO: There could be tons of decompositions here\n\n        else:\n            raise Exception(\"UNKNOWN partitioning config: {}\".format(partitioning))\n\n        return parts, edges, globals\n\n    elif label in [\"coupled_half_cheetah\"]:\n\n        # define Mujoco graph\n        tendon = 0\n\n        bthigh = Node(\"bthigh\", -6, -6, 0,\n                     tendons=[tendon],\n                     extra_obs = {\"ten_J\": lambda env: env.sim.data.ten_J[tendon],\n                                  \"ten_length\": lambda env: env.sim.data.ten_length,\n                                  \"ten_velocity\": lambda env: env.sim.data.ten_velocity})\n        bshin = Node(\"bshin\", -5, -5, 1)\n        bfoot = Node(\"bfoot\", -4, -4, 2)\n        fthigh = Node(\"fthigh\", -3, -3, 3)\n        fshin = Node(\"fshin\", -2, -2, 4)\n        ffoot = Node(\"ffoot\", -1, -1, 5)\n\n        bthigh2 = Node(\"bthigh2\", -6, -6, 0,\n                      tendons=[tendon],\n                      extra_obs={\"ten_J\": lambda env: env.sim.data.ten_J[tendon],\n                                 \"ten_length\": lambda env: env.sim.data.ten_length,\n                                 \"ten_velocity\": lambda env: env.sim.data.ten_velocity})\n        bshin2 = Node(\"bshin2\", -5, -5, 1)\n        bfoot2 = Node(\"bfoot2\", -4, -4, 2)\n        fthigh2 = Node(\"fthigh2\", -3, -3, 3)\n        fshin2 = Node(\"fshin2\", -2, -2, 4)\n        ffoot2 = Node(\"ffoot2\", -1, -1, 5)\n\n\n        edges = [HyperEdge(bfoot, bshin),\n                 HyperEdge(bshin, bthigh),\n                 HyperEdge(bthigh, fthigh),\n                 HyperEdge(fthigh, fshin),\n                 HyperEdge(fshin, ffoot),\n                 HyperEdge(bfoot2, bshin2),\n                 HyperEdge(bshin2, bthigh2),\n                 HyperEdge(bthigh2, fthigh2),\n                 HyperEdge(fthigh2, fshin2),\n                 HyperEdge(fshin2, ffoot2)\n                 ]\n        globals = {}\n\n        root_x = Node(\"root_x\", 0, 0, -1,\n                      extra_obs={\"qpos\": lambda env: np.array([])})\n        root_z = Node(\"root_z\", 1, 1, -1)\n        root_y = Node(\"root_y\", 2, 2, -1)\n        globals = {\"joints\":[root_x, root_y, root_z]}\n\n        if partitioning == \"1p1\":\n            parts = [(bfoot, bshin, bthigh, ffoot, fshin, fthigh),\n                     (bfoot2, bshin2, bthigh2, ffoot2, fshin2, fthigh2)\n                     ]\n        else:\n            raise Exception(\"UNKNOWN partitioning config: {}\".format(partitioning))\n\n        return parts, edges, globals\n\n    elif label in [\"manyagent_swimmer\"]:\n\n        # Generate asset file\n        try:\n            n_agents = int(partitioning.split(\"x\")[0])\n            n_segs_per_agents = int(partitioning.split(\"x\")[1])\n            n_segs = n_agents * n_segs_per_agents\n        except Exception as e:\n            raise Exception(\"UNKNOWN partitioning config: {}\".format(partitioning))\n\n        # Note: Default Swimmer corresponds to n_segs = 3\n\n        # define Mujoco-Graph\n        joints = [Node(\"rot{:d}\".format(i), -n_segs + i, -n_segs + i, i) for i in range(0, n_segs)]\n        edges = [HyperEdge(joints[i], joints[i+1]) for i in range(n_segs-1)]\n        globals = {}\n\n        parts = [tuple(joints[i * n_segs_per_agents:(i + 1) * n_segs_per_agents]) for i in range(n_agents)]\n        return parts, edges, globals\n\n    elif label in [\"manyagent_ant\"]: # TODO: FIX!\n\n        # Generate asset file\n        try:\n            n_agents = int(partitioning.split(\"x\")[0])\n            n_segs_per_agents = int(partitioning.split(\"x\")[1])\n            n_segs = n_agents * n_segs_per_agents\n        except Exception as e:\n            raise Exception(\"UNKNOWN partitioning config: {}\".format(partitioning))\n\n\n        # # define Mujoco graph\n        # torso = 1\n        # front_left_leg = 2\n        # aux_1 = 3\n        # ankle_1 = 4\n        # right_back_leg = 11\n        # aux_4 = 12\n        # ankle_4 = 13\n        #\n        # off = -4*(n_segs-1)\n        # hip1 = Node(\"hip1\", -4-off, -4-off, 2, bodies=[torso, front_left_leg], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist()) #\n        # ankle1 = Node(\"ankle1\", -3-off, -3-off, 3, bodies=[front_left_leg, aux_1, ankle_1], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())#,\n        # hip4 = Node(\"hip4\", -2-off, -2-off, 0, bodies=[torso, right_back_leg], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())#,\n        # ankle4 = Node(\"ankle4\", -1-off, -1-off, 1, bodies=[right_back_leg, aux_4, ankle_4], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())#,\n        #\n        # edges = [HyperEdge(ankle4, hip4),\n        #          HyperEdge(ankle1, hip1),\n        #          HyperEdge(hip4, hip1),\n        #          ]\n\n        edges = []\n        joints = []\n        for si in range(n_segs):\n\n            torso = 1 + si*7\n            front_right_leg = 2 + si*7\n            aux1 = 3 + si*7\n            ankle1 = 4 + si*7\n            back_leg = 5 + si*7\n            aux2 = 6 + si*7\n            ankle2 = 7 + si*7\n\n            off = -4 * (n_segs - 1 - si)\n            hip1n = Node(\"hip1_{:d}\".format(si), -4-off, -4-off, 2+4*si, bodies=[torso, front_right_leg], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())\n            ankle1n = Node(\"ankle1_{:d}\".format(si), -3-off, -3-off, 3+4*si, bodies=[front_right_leg, aux1, ankle1], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())\n            hip2n = Node(\"hip2_{:d}\".format(si), -2-off, -2-off, 0+4*si, bodies=[torso, back_leg], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())\n            ankle2n = Node(\"ankle2_{:d}\".format(si), -1-off, -1-off, 1+4*si, bodies=[back_leg, aux2, ankle2], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())\n\n            edges += [HyperEdge(ankle1n, hip1n),\n                      HyperEdge(ankle2n, hip2n),\n                      HyperEdge(hip1n, hip2n)]\n            if si:\n                edges += [HyperEdge(hip1m, hip2m, hip1n, hip2n)]\n\n            hip1m = deepcopy(hip1n)\n            hip2m = deepcopy(hip2n)\n            joints.append([hip1n,\n                           ankle1n,\n                           hip2n,\n                           ankle2n])\n\n        free_joint = Node(\"free\", 0, 0, -1, extra_obs={\"qpos\": lambda env: env.sim.data.qpos[:7],\n                                                       \"qvel\": lambda env: env.sim.data.qvel[:6],\n                                                       \"cfrc_ext\": lambda env: np.clip(env.sim.data.cfrc_ext[0:1], -1, 1)})\n        globals = {\"joints\": [free_joint]}\n\n        parts =  [[x for sublist in joints[i * n_segs_per_agents:(i + 1) * n_segs_per_agents] for x in sublist] for i in range(n_agents)]\n\n        return parts, edges, globals\n"
  },
  {
    "path": "MACPO/macpo/envs/safety_ma_mujoco/test.py",
    "content": "from safety_multiagent_mujoco.mujoco_multi import MujocoMulti\nimport numpy as np\nimport time\n\n\ndef main():\n\n    # Swimmer\n    # env_args = {\"scenario\": \"manyagent_swimmer\",\n    #             \"agent_conf\": \"10x2\",\n    #             \"agent_obsk\": 1,\n    #             \"episode_limit\": 1000}\n\n    # coupled_half_cheetah\n    # env_args = {\"scenario\": \"coupled_half_cheetah\",\n    #             \"agent_conf\": \"1p1\",\n    #             \"agent_obsk\": 1,\n    #             \"episode_limit\": 1000}\n\n    # ANT 4\n    # env_args = {\"scenario\": \"manyagent_ant\",\n    #               \"agent_conf\": \"3x2\",\n    #               \"agent_obsk\": 1,\n    #               \"episode_limit\": 1000}\n\n    # env_args = {\"scenario\": \"manyagent_swimmer\",\n    #             \"agent_conf\": \"10x2\",\n    #             \"agent_obsk\": 1,\n    #             \"episode_limit\": 1000}\n\n    env_args = {\"scenario\": \"HalfCheetah-v2\",\n                \"agent_conf\": \"2x3\",\n                \"agent_obsk\": 1,\n                \"episode_limit\": 1000}\n\n    # env_args = {\"scenario\": \"Hopper-v2\",\n    #             \"agent_conf\": \"3x1\",\n    #             \"agent_obsk\": 1,\n    #             \"episode_limit\": 1000}\n\n    # env_args = {\"scenario\": \"Humanoid-v2\",\n    #             \"agent_conf\": \"9|8\",\n    #             \"agent_obsk\": 1,\n    #             \"episode_limit\": 1000}\n\n    # env_args = {\"scenario\": \"Humanoid-v2\",\n    #             \"agent_conf\": \"17x1\",\n    #             \"agent_obsk\": 1,\n    #             \"episode_limit\": 1000}\n\n    # env_args = {\"scenario\": \"Ant-v2\",\n    #             \"agent_conf\": \"2x4\",\n    #             \"agent_obsk\": 1,\n    #             \"episode_limit\": 1000}\n\n    # env_args = {\"scenario\": \"Ant-v2\",\n    #             \"agent_conf\": \"2x4d\",\n    #             \"agent_obsk\": 1,\n    #             \"episode_limit\": 1000}\n\n    # env_args = {\"scenario\": \"Ant-v2\",\n    #             \"agent_conf\": \"4x2\",\n    #             \"agent_obsk\": 1,\n    #             \"episode_limit\": 1000}\n\n    env = MujocoMulti(env_args=env_args)\n    env_info = env.get_env_info()\n    n_actions = env_info[\"n_actions\"]\n    n_agents = env_info[\"n_agents\"]\n    n_episodes = 10\n\n    for e in range(n_episodes):\n        ob=env.reset()\n        terminated = False\n        episode_reward = 0\n\n        while not terminated:\n            obs = env.get_obs()\n            state = env.get_state()\n\n            actions = []\n            for agent_id in range(n_agents):\n                avail_actions = env.get_avail_agent_actions(agent_id)\n                avail_actions_ind = np.nonzero(avail_actions)[0]\n                action = np.random.uniform(-10, 0.0, n_actions)\n                actions.append(action)\n\n            # reward, terminated, _ = env.step(actions)\n            # print(\"env.step(actions): \", env.step(actions))\n            get_obs, get_state, reward, dones, infos, get_avail_actions= env.step(actions)\n            # episode_reward += reward\n            # print(\"reward: \", reward)\n            cost_x= [[item['cost']] for item in infos]\n            print(\"cost_x:\", cost_x)\n            print(\"reward:\", reward)\n\n            # time.sleep(0.1)\n            env.render()\n\n\n        # print(\"Total reward in episode {} = {}\".format(e, episode_reward))\n\n    env.close()\n\nif __name__ == \"__main__\":\n    main()\n    \"\"\"\n    infos[cost]: [{'cost': 0.0, 'reward_forward': -0.6434413402233052, 'reward_ctrl': -4.010836585120964,\n                   'reward_contact': -1.2071856383999997e-13, 'reward_survive': 1.0, 'cost_obj': 0.0, 'cost_done': 0.0},\n                  {'cost': 0.0, 'reward_forward': -0.6434413402233052, 'reward_ctrl': -4.010836585120964,\n                   'reward_contact': -1.2071856383999997e-13, 'reward_survive': 1.0, 'cost_obj': 0.0, 'cost_done': 0.0},\n                  {'cost': 0.0, 'reward_forward': -0.6434413402233052, 'reward_ctrl': -4.010836585120964,\n                   'reward_contact': -1.2071856383999997e-13, 'reward_survive': 1.0, 'cost_obj': 0.0, 'cost_done': 0.0}]\n    \"\"\""
  },
  {
    "path": "MACPO/macpo/runner/__init__.py",
    "content": "from macpo.runner import separated\n\n__all__=[\n\n    \"separated\"\n]"
  },
  {
    "path": "MACPO/macpo/runner/separated/__init__.py",
    "content": "from macpo.runner.separated import base_runner\n\n__all__=[\n    \"base_runner\"\n]"
  },
  {
    "path": "MACPO/macpo/runner/separated/base_runner.py",
    "content": "    \nimport time\nimport wandb\nimport os\nimport numpy as np\nfrom itertools import chain\nimport torch\nfrom tensorboardX import SummaryWriter\n\nfrom macpo.utils.separated_buffer import SeparatedReplayBuffer\nfrom macpo.utils.util import update_linear_schedule\n\ndef _t2n(x):\n    return x.detach().cpu().numpy()\n\nclass Runner(object):\n    def __init__(self, config):\n\n        self.all_args = config['all_args']\n        self.envs = config['envs']\n        self.eval_envs = config['eval_envs']\n        self.device = config['device']\n        self.num_agents = config['num_agents']\n\n        # parameters\n        self.env_name = self.all_args.env_name\n        self.algorithm_name = self.all_args.algorithm_name\n        self.experiment_name = self.all_args.experiment_name\n        self.use_centralized_V = self.all_args.use_centralized_V\n        self.use_obs_instead_of_state = self.all_args.use_obs_instead_of_state\n        self.num_env_steps = self.all_args.num_env_steps\n        self.episode_length = self.all_args.episode_length\n        self.n_rollout_threads = self.all_args.n_rollout_threads\n        self.n_eval_rollout_threads = self.all_args.n_eval_rollout_threads\n        self.use_linear_lr_decay = self.all_args.use_linear_lr_decay\n        self.hidden_size = self.all_args.hidden_size\n        self.use_wandb = self.all_args.use_wandb\n        self.use_render = self.all_args.use_render\n        self.recurrent_N = self.all_args.recurrent_N\n        self.use_single_network = self.all_args.use_single_network\n        # interval\n        self.save_interval = self.all_args.save_interval\n        self.use_eval = self.all_args.use_eval\n        self.eval_interval = self.all_args.eval_interval\n        self.log_interval = self.all_args.log_interval\n\n        # dir\n        self.model_dir = self.all_args.model_dir\n\n        if self.use_render:\n            import imageio\n            self.run_dir = config[\"run_dir\"]\n            self.gif_dir = str(self.run_dir / 'gifs')\n            if not os.path.exists(self.gif_dir):\n                os.makedirs(self.gif_dir)\n        else:\n            if self.use_wandb:\n                self.save_dir = str(wandb.run.dir)\n            else:\n                self.run_dir = config[\"run_dir\"]\n                self.log_dir = str(self.run_dir / 'logs')\n                if not os.path.exists(self.log_dir):\n                    os.makedirs(self.log_dir)\n                self.writter = SummaryWriter(self.log_dir)\n                self.save_dir = str(self.run_dir / 'models')\n                if not os.path.exists(self.save_dir):\n                    os.makedirs(self.save_dir)\n\n\n        from macpo.algorithms.r_mappo.r_mappo import R_MAPPO as TrainAlgo\n        from macpo.algorithms.r_mappo.algorithm.rMAPPOPolicy import R_MAPPOPolicy as Policy\n\n        print(\"share_observation_space: \", self.envs.share_observation_space)\n        print(\"observation_space: \", self.envs.observation_space)\n        print(\"action_space: \", self.envs.action_space)\n\n        self.policy = []\n        for agent_id in range(self.num_agents):\n            share_observation_space = self.envs.share_observation_space[agent_id] if self.use_centralized_V else self.envs.observation_space[agent_id]\n            # policy network\n            po = Policy(self.all_args,\n                        self.envs.observation_space[agent_id],\n                        share_observation_space,\n                        self.envs.action_space[agent_id],\n                        device = self.device)\n            self.policy.append(po)\n\n        if self.model_dir is not None:\n            self.restore()\n\n        self.trainer = []\n        self.buffer = []\n        for agent_id in range(self.num_agents):\n            # algorithm\n            tr = TrainAlgo(self.all_args, self.policy[agent_id], device = self.device)\n            # buffer\n            share_observation_space = self.envs.share_observation_space[agent_id] if self.use_centralized_V else self.envs.observation_space[agent_id]\n            bu = SeparatedReplayBuffer(self.all_args,\n                                       self.envs.observation_space[agent_id],\n                                       share_observation_space,\n                                       self.envs.action_space[agent_id])\n            self.buffer.append(bu)\n            self.trainer.append(tr)\n            \n    def run(self):\n        raise NotImplementedError\n\n    def warmup(self):\n        raise NotImplementedError\n\n    def collect(self, step):\n        raise NotImplementedError\n\n    def insert(self, data):\n        raise NotImplementedError\n    \n    @torch.no_grad()\n    def compute(self):\n        for agent_id in range(self.num_agents):\n            self.trainer[agent_id].prep_rollout()\n            next_value = self.trainer[agent_id].policy.get_values(self.buffer[agent_id].share_obs[-1], \n                                                                self.buffer[agent_id].rnn_states_critic[-1],\n                                                                self.buffer[agent_id].masks[-1])\n            next_value = _t2n(next_value)\n            self.buffer[agent_id].compute_returns(next_value, self.trainer[agent_id].value_normalizer)\n\n    def train(self):\n        # have modified for SAD_PPO\n        train_infos = []\n        \n        for agent_id in torch.randperm(self.num_agents):\n            self.trainer[agent_id].prep_training()\n            train_info = self.trainer[agent_id].train(self.buffer[agent_id])\n            train_infos.append(train_info)       \n            self.buffer[agent_id].after_update()\n\n        return train_infos\n\n    def save(self):\n        for agent_id in range(self.num_agents):\n            if self.use_single_network:\n                policy_model = self.trainer[agent_id].policy.model\n                torch.save(policy_model.state_dict(), str(self.save_dir) + \"/model_agent\" + str(agent_id) + \".pt\")\n            else:\n                policy_actor = self.trainer[agent_id].policy.actor\n                torch.save(policy_actor.state_dict(), str(self.save_dir) + \"/actor_agent\" + str(agent_id) + \".pt\")\n                policy_critic = self.trainer[agent_id].policy.critic\n                torch.save(policy_critic.state_dict(), str(self.save_dir) + \"/critic_agent\" + str(agent_id) + \".pt\")\n\n    def restore(self):\n        for agent_id in range(self.num_agents):\n            if self.use_single_network:\n                policy_model_state_dict = torch.load(str(self.model_dir) + '/model_agent' + str(agent_id) + '.pt')\n                self.policy[agent_id].model.load_state_dict(policy_model_state_dict)\n            else:\n                policy_actor_state_dict = torch.load(str(self.model_dir) + '/actor_agent' + str(agent_id) + '.pt')\n                self.policy[agent_id].actor.load_state_dict(policy_actor_state_dict)\n                policy_critic_state_dict = torch.load(str(self.model_dir) + '/critic_agent' + str(agent_id) + '.pt')\n                self.policy[agent_id].critic.load_state_dict(policy_critic_state_dict)\n\n    def log_train(self, train_infos, total_num_steps): \n        for agent_id in range(self.num_agents):\n            for k, v in train_infos[agent_id].items():\n                agent_k = \"agent%i/\" % agent_id + k\n                if self.use_wandb:\n                    wandb.log({agent_k: v}, step=total_num_steps)\n                else:\n                    self.writter.add_scalars(agent_k, {agent_k: v}, total_num_steps)\n\n    def log_env(self, env_infos, total_num_steps):\n        for k, v in env_infos.items():\n            if len(v) > 0:\n                if self.use_wandb:\n                    wandb.log({k: np.mean(v)}, step=total_num_steps)\n                else:\n                    self.writter.add_scalars(k, {k: np.mean(v)}, total_num_steps)\n"
  },
  {
    "path": "MACPO/macpo/runner/separated/base_runner_macpo.py",
    "content": "import copy\nimport time\nimport wandb\nimport os\nimport numpy as np\nfrom itertools import chain\nimport torch\nfrom tensorboardX import SummaryWriter\n\nfrom macpo.utils.separated_buffer import SeparatedReplayBuffer\nfrom macpo.utils.util import update_linear_schedule\n\n\ndef _t2n(x):\n    return x.detach().cpu().numpy()\n\n\nclass Runner(object):\n    def __init__(self, config):\n\n        self.all_args = config['all_args']\n        self.envs = config['envs']\n        self.eval_envs = config['eval_envs']\n        self.device = config['device']\n        self.num_agents = config['num_agents']\n\n        # parameters\n        self.env_name = self.all_args.env_name\n        self.algorithm_name = self.all_args.algorithm_name\n        self.experiment_name = self.all_args.experiment_name\n        self.use_centralized_V = self.all_args.use_centralized_V\n        self.use_obs_instead_of_state = self.all_args.use_obs_instead_of_state\n        self.num_env_steps = self.all_args.num_env_steps\n        self.episode_length = self.all_args.episode_length\n        self.n_rollout_threads = self.all_args.n_rollout_threads\n        self.n_eval_rollout_threads = self.all_args.n_eval_rollout_threads\n        self.use_linear_lr_decay = self.all_args.use_linear_lr_decay\n        self.hidden_size = self.all_args.hidden_size\n        self.use_wandb = self.all_args.use_wandb\n        self.use_render = self.all_args.use_render\n        self.recurrent_N = self.all_args.recurrent_N\n        self.use_single_network = self.all_args.use_single_network\n        # interval\n        self.save_interval = self.all_args.save_interval\n        self.use_eval = self.all_args.use_eval\n        self.eval_interval = self.all_args.eval_interval\n        self.log_interval = self.all_args.log_interval\n        self.gamma = self.all_args.gamma\n        self.use_popart = self.all_args.use_popart\n\n        self.safty_bound = self.all_args.safty_bound\n\n        # dir\n        self.model_dir = self.all_args.model_dir\n\n        if self.use_render:\n            import imageio\n            self.run_dir = config[\"run_dir\"]\n            self.gif_dir = str(self.run_dir / 'gifs')\n            if not os.path.exists(self.gif_dir):\n                os.makedirs(self.gif_dir)\n        else:\n            if self.use_wandb:\n                self.save_dir = str(wandb.run.dir)\n            else:\n                self.run_dir = config[\"run_dir\"]\n                self.log_dir = str(self.run_dir / 'logs')\n                if not os.path.exists(self.log_dir):\n                    os.makedirs(self.log_dir)\n                self.writter = SummaryWriter(self.log_dir)\n                self.save_dir = str(self.run_dir / 'models')\n                if not os.path.exists(self.save_dir):\n                    os.makedirs(self.save_dir)\n\n        from macpo.algorithms.r_mappo.r_macpo import R_MACTRPO_CPO as TrainAlgo\n\n        from macpo.algorithms.r_mappo.algorithm.MACPPOPolicy import MACPPOPolicy as Policy\n\n        self.policy = []\n        for agent_id in range(self.num_agents):\n            share_observation_space = self.envs.share_observation_space[agent_id] if self.use_centralized_V else \\\n            self.envs.observation_space[agent_id]\n            # policy network\n            po = Policy(self.all_args,\n                        self.envs.observation_space[agent_id],\n                        share_observation_space,\n                        self.envs.action_space[agent_id],\n                        device=self.device)\n            self.policy.append(po)\n\n        if self.model_dir is not None:\n            self.restore()\n\n        self.trainer = []\n        self.buffer = []\n        # todo: revise this for trpo\n        for agent_id in range(self.num_agents):\n            # algorithm\n            tr = TrainAlgo(self.all_args, self.policy[agent_id], device=self.device)\n            # buffer\n            share_observation_space = self.envs.share_observation_space[agent_id] if self.use_centralized_V else \\\n            self.envs.observation_space[agent_id]\n            bu = SeparatedReplayBuffer(self.all_args,\n                                       self.envs.observation_space[agent_id],\n                                       share_observation_space,\n                                       self.envs.action_space[agent_id])\n            self.buffer.append(bu)\n            self.trainer.append(tr)\n\n    def run(self):\n        raise NotImplementedError\n\n    def warmup(self):\n        raise NotImplementedError\n\n    def collect(self, step):\n        raise NotImplementedError\n\n    def insert(self, data):\n        raise NotImplementedError\n\n    @torch.no_grad()\n    def compute(self):\n        for agent_id in range(self.num_agents):\n            self.trainer[agent_id].prep_rollout()\n            next_value = self.trainer[agent_id].policy.get_values(self.buffer[agent_id].share_obs[-1],\n                                                                  self.buffer[agent_id].rnn_states_critic[-1],\n                                                                  self.buffer[agent_id].masks[-1])\n            next_value = _t2n(next_value)\n            self.buffer[agent_id].compute_returns(next_value, self.trainer[agent_id].value_normalizer)\n\n            next_costs = self.trainer[agent_id].policy.get_cost_values(self.buffer[agent_id].share_obs[-1],\n                                                                       self.buffer[agent_id].rnn_states_cost[-1],\n                                                                       self.buffer[agent_id].masks[-1])\n            next_costs = _t2n(next_costs)\n            self.buffer[agent_id].compute_cost_returns(next_costs, self.trainer[agent_id].value_normalizer)\n\n    def train(self):\n        # have modified for SAD_PPO\n        train_infos = []\n        cost_train_infos = []\n        # random update order\n        action_dim = self.buffer[0].actions.shape[-1]\n        factor = np.ones((self.episode_length, self.n_rollout_threads, action_dim), dtype=np.float32)\n        for agent_id in torch.randperm(self.num_agents):\n            self.trainer[agent_id].prep_training()\n            self.buffer[agent_id].update_factor(factor)\n            available_actions = None if self.buffer[agent_id].available_actions is None \\\n                else self.buffer[agent_id].available_actions[:-1].reshape(-1, *self.buffer[\n                                                                                   agent_id].available_actions.shape[\n                                                                               2:])\n\n            if self.all_args.algorithm_name == \"macpo\":\n                old_actions_logprob, _, _, _ = self.trainer[agent_id].policy.actor.evaluate_actions(\n                    self.buffer[agent_id].obs[:-1].reshape(-1, *self.buffer[agent_id].obs.shape[2:]),\n                    self.buffer[agent_id].rnn_states[0:1].reshape(-1, *self.buffer[agent_id].rnn_states.shape[2:]),\n                    self.buffer[agent_id].actions.reshape(-1, *self.buffer[agent_id].actions.shape[2:]),\n                    self.buffer[agent_id].masks[:-1].reshape(-1, *self.buffer[agent_id].masks.shape[2:]),\n                    available_actions,\n                    self.buffer[agent_id].active_masks[:-1].reshape(-1, *self.buffer[agent_id].active_masks.shape[2:]))\n            else:\n                old_actions_logprob, _ = self.trainer[agent_id].policy.actor.evaluate_actions(\n                    self.buffer[agent_id].obs[:-1].reshape(-1, *self.buffer[agent_id].obs.shape[2:]),\n                    self.buffer[agent_id].rnn_states[0:1].reshape(-1, *self.buffer[agent_id].rnn_states.shape[2:]),\n                    self.buffer[agent_id].actions.reshape(-1, *self.buffer[agent_id].actions.shape[2:]),\n                    self.buffer[agent_id].masks[:-1].reshape(-1, *self.buffer[agent_id].masks.shape[2:]),\n                    available_actions,\n                    self.buffer[agent_id].active_masks[:-1].reshape(-1, *self.buffer[agent_id].active_masks.shape[2:]))\n\n            # safe_buffer, cost_adv = self.buffer_filter(agent_id)\n            # train_info = self.trainer[agent_id].train(safe_buffer, cost_adv)\n\n            train_info = self.trainer[agent_id].train(self.buffer[agent_id])\n\n            new_actions_logprob, dist_entropy, action_mu, action_std = self.trainer[agent_id].policy.actor.evaluate_actions(\n                self.buffer[agent_id].obs[:-1].reshape(-1, *self.buffer[agent_id].obs.shape[2:]),\n                self.buffer[agent_id].rnn_states[0:1].reshape(-1, *self.buffer[agent_id].rnn_states.shape[2:]),\n                self.buffer[agent_id].actions.reshape(-1, *self.buffer[agent_id].actions.shape[2:]),\n                self.buffer[agent_id].masks[:-1].reshape(-1, *self.buffer[agent_id].masks.shape[2:]),\n                available_actions,\n                self.buffer[agent_id].active_masks[:-1].reshape(-1, *self.buffer[agent_id].active_masks.shape[2:]))\n            factor = factor * _t2n(torch.exp(new_actions_logprob - old_actions_logprob).reshape(self.episode_length,\n                                                                                                self.n_rollout_threads,\n                                                                                                action_dim))\n            train_infos.append(train_info)\n\n            self.buffer[agent_id].after_update()\n\n        return train_infos, cost_train_infos\n\n    # episode length of envs is exactly equal to buffer size, that is, num_thread = num_episode\n    def buffer_filter(self, agent_id):\n        episode_length = len(self.buffer[0].rewards)\n        # J constraints for all agents, just a toy example\n        J = np.zeros((self.n_rollout_threads, 1), dtype=np.float32)\n        for t in reversed(range(episode_length)):\n            J = self.buffer[agent_id].costs[t] + self.gamma * J\n\n        factor = self.buffer[agent_id].factor\n\n        if self.use_popart:\n            cost_adv = self.buffer[agent_id].cost_returns[:-1] - \\\n                       self.trainer[agent_id].value_normalizer.denormalize(self.buffer[agent_id].cost_preds[:-1])\n        else:\n            cost_adv = self.buffer[agent_id].cost_returns[:-1] - self.buffer[agent_id].cost_preds[:-1]\n\n        expectation = np.mean(factor * cost_adv, axis=(0, 2))\n\n        constraints_value = J + np.expand_dims(expectation, -1)\n\n        del_id = []\n        for i in range(self.n_rollout_threads):\n            if constraints_value[i][0] > self.safty_bound:\n                del_id.append(i)\n\n        buffer_filterd = self.remove_episodes(agent_id, del_id)\n        return buffer_filterd, cost_adv\n\n    def remove_episodes(self, agent_id, del_ids):\n        buffer = copy.deepcopy(self.buffer[agent_id])\n        buffer.share_obs = np.delete(buffer.share_obs, del_ids, 1)\n        buffer.obs = np.delete(buffer.obs, del_ids, 1)\n        buffer.rnn_states = np.delete(buffer.rnn_states, del_ids, 1)\n        buffer.rnn_states_critic = np.delete(buffer.rnn_states_critic, del_ids, 1)\n        buffer.rnn_states_cost = np.delete(buffer.rnn_states_cost, del_ids, 1)\n        buffer.value_preds = np.delete(buffer.value_preds, del_ids, 1)\n        buffer.returns = np.delete(buffer.returns, del_ids, 1)\n        if buffer.available_actions is not None:\n            buffer.available_actions = np.delete(buffer.available_actions, del_ids, 1)\n        buffer.actions = np.delete(buffer.actions, del_ids, 1)\n        buffer.action_log_probs = np.delete(buffer.action_log_probs, del_ids, 1)\n        buffer.rewards = np.delete(buffer.rewards, del_ids, 1)\n        # todo: cost should be calculated entirely\n        buffer.costs = np.delete(buffer.costs, del_ids, 1)\n        buffer.cost_preds = np.delete(buffer.cost_preds, del_ids, 1)\n        buffer.cost_returns = np.delete(buffer.cost_returns, del_ids, 1)\n        buffer.masks = np.delete(buffer.masks, del_ids, 1)\n        buffer.bad_masks = np.delete(buffer.bad_masks, del_ids, 1)\n        buffer.active_masks = np.delete(buffer.active_masks, del_ids, 1)\n        if buffer.factor is not None:\n            buffer.factor = np.delete(buffer.factor, del_ids, 1)\n        return buffer\n\n    def save(self):\n        for agent_id in range(self.num_agents):\n            if self.use_single_network:\n                policy_model = self.trainer[agent_id].policy.model\n                torch.save(policy_model.state_dict(), str(self.save_dir) + \"/model_agent\" + str(agent_id) + \".pt\")\n            else:\n                policy_actor = self.trainer[agent_id].policy.actor\n                torch.save(policy_actor.state_dict(), str(self.save_dir) + \"/actor_agent\" + str(agent_id) + \".pt\")\n                policy_critic = self.trainer[agent_id].policy.critic\n                torch.save(policy_critic.state_dict(), str(self.save_dir) + \"/critic_agent\" + str(agent_id) + \".pt\")\n\n    def restore(self):\n        for agent_id in range(self.num_agents):\n            if self.use_single_network:\n                policy_model_state_dict = torch.load(str(self.model_dir) + '/model_agent' + str(agent_id) + '.pt')\n                self.policy[agent_id].model.load_state_dict(policy_model_state_dict)\n            else:\n                policy_actor_state_dict = torch.load(str(self.model_dir) + '/actor_agent' + str(agent_id) + '.pt')\n                self.policy[agent_id].actor.load_state_dict(policy_actor_state_dict)\n                policy_critic_state_dict = torch.load(str(self.model_dir) + '/critic_agent' + str(agent_id) + '.pt')\n                self.policy[agent_id].critic.load_state_dict(policy_critic_state_dict)\n\n    def log_train(self, train_infos, total_num_steps):\n        for agent_id in range(self.num_agents):\n            for k, v in train_infos[agent_id].items():\n                agent_k = \"agent%i/\" % agent_id + k\n                if self.use_wandb:\n                    wandb.log({agent_k: v}, step=total_num_steps)\n                else:\n                    self.writter.add_scalars(agent_k, {agent_k: v}, total_num_steps)\n\n    def log_env(self, env_infos, total_num_steps):\n        for k, v in env_infos.items():\n            if len(v) > 0:\n                if self.use_wandb:\n                    wandb.log({k: np.mean(v)}, step=total_num_steps)\n                else:\n                    self.writter.add_scalars(k, {k: np.mean(v)}, total_num_steps)\n"
  },
  {
    "path": "MACPO/macpo/runner/separated/mujoco_runner.py",
    "content": "import time\nimport wandb\nimport numpy as np\nfrom functools import reduce\nimport torch\nfrom macpo.runner.separated.base_runner import Runner\n\n\ndef _t2n(x):\n    return x.detach().cpu().numpy()\n\n\nclass MujocoRunner(Runner):\n    \"\"\"Runner class to perform training, evaluation. and data collection for SMAC. See parent class for details.\"\"\"\n\n    def __init__(self, config):\n        super(MujocoRunner, self).__init__(config)\n\n    def run(self):\n        self.warmup()\n\n        start = time.time()\n        episodes = int(self.num_env_steps) // self.episode_length // self.n_rollout_threads\n\n        train_episode_rewards = [0 for _ in range(self.n_rollout_threads)]\n\n        for episode in range(episodes):\n            if self.use_linear_lr_decay:\n                self.trainer.policy.lr_decay(episode, episodes)\n\n            done_episodes_rewards = []\n\n            for step in range(self.episode_length):\n                # Sample actions\n                values, actions, action_log_probs, rnn_states, rnn_states_critic = self.collect(step)\n\n                # Obser reward and next obs\n                obs, share_obs, rewards, dones, infos, _ = self.envs.step(actions)\n\n                dones_env = np.all(dones, axis=1)\n                reward_env = np.mean(rewards, axis=1).flatten()\n                train_episode_rewards += reward_env\n                for t in range(self.n_rollout_threads):\n                    if dones_env[t]:\n                        done_episodes_rewards.append(train_episode_rewards[t])\n                        train_episode_rewards[t] = 0\n\n                data = obs, share_obs, rewards, dones, infos, \\\n                       values, actions, action_log_probs, \\\n                       rnn_states, rnn_states_critic\n\n                # insert data into buffer\n                self.insert(data)\n\n            # compute return and update network\n            self.compute()\n            train_infos = self.train()\n\n            # post process\n            total_num_steps = (episode + 1) * self.episode_length * self.n_rollout_threads\n            # save model\n            if (episode % self.save_interval == 0 or episode == episodes - 1):\n                self.save()\n\n            # log information\n            if episode % self.log_interval == 0:\n                end = time.time()\n                print(\"\\n Scenario {} Algo {} Exp {} updates {}/{} episodes, total num timesteps {}/{}, FPS {}.\\n\"\n                      .format(self.all_args.scenario,\n                              self.algorithm_name,\n                              self.experiment_name,\n                              episode,\n                              episodes,\n                              total_num_steps,\n                              self.num_env_steps,\n                              int(total_num_steps / (end - start))))\n\n                self.log_train(train_infos, total_num_steps)\n\n                if len(done_episodes_rewards) > 0:\n                    aver_episode_rewards = np.mean(done_episodes_rewards)\n                    print(\"some episodes done, average rewards: \", aver_episode_rewards)\n                    self.writter.add_scalars(\"train_episode_rewards\", {\"aver_rewards\": aver_episode_rewards},\n                                             total_num_steps)\n\n            # eval\n            if episode % self.eval_interval == 0 and self.use_eval:\n                self.eval(total_num_steps)\n\n    def warmup(self):\n        # reset env\n        obs, share_obs, _ = self.envs.reset()\n        # replay buffer\n        if not self.use_centralized_V:\n            share_obs = obs\n\n        for agent_id in range(self.num_agents):\n            self.buffer[agent_id].share_obs[0] = share_obs[:, agent_id].copy()\n            self.buffer[agent_id].obs[0] = obs[:, agent_id].copy()\n\n    @torch.no_grad()\n    def collect(self, step):\n        value_collector = []\n        action_collector = []\n        action_log_prob_collector = []\n        rnn_state_collector = []\n        rnn_state_critic_collector = []\n        for agent_id in range(self.num_agents):\n            self.trainer[agent_id].prep_rollout()\n            value, action, action_log_prob, rnn_state, rnn_state_critic \\\n                = self.trainer[agent_id].policy.get_actions(self.buffer[agent_id].share_obs[step],\n                                                            self.buffer[agent_id].obs[step],\n                                                            self.buffer[agent_id].rnn_states[step],\n                                                            self.buffer[agent_id].rnn_states_critic[step],\n                                                            self.buffer[agent_id].masks[step])\n            value_collector.append(_t2n(value))\n            action_collector.append(_t2n(action))\n            action_log_prob_collector.append(_t2n(action_log_prob))\n            rnn_state_collector.append(_t2n(rnn_state))\n            rnn_state_critic_collector.append(_t2n(rnn_state_critic))\n        # [self.envs, agents, dim]\n        values = np.array(value_collector).transpose(1, 0, 2)\n        actions = np.array(action_collector).transpose(1, 0, 2)\n        action_log_probs = np.array(action_log_prob_collector).transpose(1, 0, 2)\n        rnn_states = np.array(rnn_state_collector).transpose(1, 0, 2, 3)\n        rnn_states_critic = np.array(rnn_state_critic_collector).transpose(1, 0, 2, 3)\n\n        return values, actions, action_log_probs, rnn_states, rnn_states_critic\n\n    def insert(self, data):\n        obs, share_obs, rewards, dones, infos, \\\n        values, actions, action_log_probs, rnn_states, rnn_states_critic = data\n\n        dones_env = np.all(dones, axis=1)\n\n        rnn_states[dones_env == True] = np.zeros(\n            ((dones_env == True).sum(), self.num_agents, self.recurrent_N, self.hidden_size), dtype=np.float32)\n        rnn_states_critic[dones_env == True] = np.zeros(\n            ((dones_env == True).sum(), self.num_agents, *self.buffer[0].rnn_states_critic.shape[2:]), dtype=np.float32)\n\n        masks = np.ones((self.n_rollout_threads, self.num_agents, 1), dtype=np.float32)\n        masks[dones_env == True] = np.zeros(((dones_env == True).sum(), self.num_agents, 1), dtype=np.float32)\n\n        active_masks = np.ones((self.n_rollout_threads, self.num_agents, 1), dtype=np.float32)\n        active_masks[dones == True] = np.zeros(((dones == True).sum(), 1), dtype=np.float32)\n        active_masks[dones_env == True] = np.ones(((dones_env == True).sum(), self.num_agents, 1), dtype=np.float32)\n\n        if not self.use_centralized_V:\n            share_obs = obs\n\n        for agent_id in range(self.num_agents):\n            self.buffer[agent_id].insert(share_obs[:, agent_id], obs[:, agent_id], rnn_states[:, agent_id],\n                                         rnn_states_critic[:, agent_id], actions[:, agent_id],\n                                         action_log_probs[:, agent_id],\n                                         values[:, agent_id], rewards[:, agent_id], masks[:, agent_id], None,\n                                         active_masks[:, agent_id], None)\n\n    def log_train(self, train_infos, total_num_steps):\n        print(\"average_step_rewards is {}.\".format(np.mean(self.buffer[0].rewards)))\n        for agent_id in range(self.num_agents):\n            train_infos[agent_id][\"average_step_rewards\"] = np.mean(self.buffer[agent_id].rewards)\n            for k, v in train_infos[agent_id].items():\n                agent_k = \"agent%i/\" % agent_id + k\n                if self.use_wandb:\n                    wandb.log({agent_k: v}, step=total_num_steps)\n                else:\n                    self.writter.add_scalars(agent_k, {agent_k: v}, total_num_steps)\n\n    @torch.no_grad()\n    def eval(self, total_num_steps):\n        eval_episode = 0\n        eval_episode_rewards = []\n        one_episode_rewards = []\n        for eval_i in range(self.n_eval_rollout_threads):\n            one_episode_rewards.append([])\n            eval_episode_rewards.append([])\n\n        eval_obs, eval_share_obs, _ = self.eval_envs.reset()\n\n        eval_rnn_states = np.zeros((self.n_eval_rollout_threads, self.num_agents, self.recurrent_N, self.hidden_size),\n                                   dtype=np.float32)\n        eval_masks = np.ones((self.n_eval_rollout_threads, self.num_agents, 1), dtype=np.float32)\n\n        while True:\n            eval_actions_collector = []\n            eval_rnn_states_collector = []\n            for agent_id in range(self.num_agents):\n                self.trainer[agent_id].prep_rollout()\n                eval_actions, temp_rnn_state = \\\n                    self.trainer[agent_id].policy.act(eval_obs[:, agent_id],\n                                                      eval_rnn_states[:, agent_id],\n                                                      eval_masks[:, agent_id],\n                                                      deterministic=True)\n                eval_rnn_states[:, agent_id] = _t2n(temp_rnn_state)\n                eval_actions_collector.append(_t2n(eval_actions))\n\n            eval_actions = np.array(eval_actions_collector).transpose(1, 0, 2)\n\n            # Obser reward and next obs\n            eval_obs, eval_share_obs, eval_rewards, eval_dones, eval_infos, _ = self.eval_envs.step(\n                eval_actions)\n            for eval_i in range(self.n_eval_rollout_threads):\n                one_episode_rewards[eval_i].append(eval_rewards[eval_i])\n\n            eval_dones_env = np.all(eval_dones, axis=1)\n\n            eval_rnn_states[eval_dones_env == True] = np.zeros(\n                ((eval_dones_env == True).sum(), self.num_agents, self.recurrent_N, self.hidden_size), dtype=np.float32)\n\n            eval_masks = np.ones((self.all_args.n_eval_rollout_threads, self.num_agents, 1), dtype=np.float32)\n            eval_masks[eval_dones_env == True] = np.zeros(((eval_dones_env == True).sum(), self.num_agents, 1),\n                                                          dtype=np.float32)\n\n            for eval_i in range(self.n_eval_rollout_threads):\n                if eval_dones_env[eval_i]:\n                    eval_episode += 1\n                    eval_episode_rewards[eval_i].append(np.sum(one_episode_rewards[eval_i], axis=0))\n                    one_episode_rewards[eval_i] = []\n\n            if eval_episode >= self.all_args.eval_episodes:\n                eval_episode_rewards = np.concatenate(eval_episode_rewards)\n                eval_env_infos = {'eval_average_episode_rewards': eval_episode_rewards,\n                                  'eval_max_episode_rewards': [np.max(eval_episode_rewards)]}\n                self.log_env(eval_env_infos, total_num_steps)\n                print(\"eval_average_episode_rewards is {}.\".format(np.mean(eval_episode_rewards)))\n                break\n"
  },
  {
    "path": "MACPO/macpo/runner/separated/mujoco_runner_macpo.py",
    "content": "import time\nfrom itertools import chain\n\nimport wandb\nimport numpy as np\nfrom functools import reduce\nimport torch\nfrom macpo.runner.separated.base_runner_macpo import Runner\n\n\ndef _t2n(x):\n    return x.detach().cpu().numpy()\n\n\nclass MujocoRunner(Runner):\n    \"\"\"Runner class to perform training, evaluation. and data collection for SMAC. See parent class for details.\"\"\"\n\n    def __init__(self, config):\n        super(MujocoRunner, self).__init__(config)\n        self.retrun_average_cost = 0\n\n    def run(self):\n        self.warmup()\n\n        start = time.time()\n        episodes = int(self.num_env_steps) // self.episode_length // self.n_rollout_threads\n\n        train_episode_rewards = [0 for _ in range(self.n_rollout_threads)]\n        train_episode_costs = [0 for _ in range(self.n_rollout_threads)]\n\n        for episode in range(episodes):\n            if self.use_linear_lr_decay:\n                self.trainer.policy.lr_decay(episode, episodes)\n\n            done_episodes_rewards = []\n            done_episodes_costs = []\n\n            for step in range(self.episode_length):\n                # Sample actions\n                values, actions, action_log_probs, rnn_states, rnn_states_critic, cost_preds, \\\n                rnn_states_cost = self.collect(step)\n\n                # Obser reward cost and next obs\n                obs, share_obs, rewards, costs, dones, infos, _ = self.envs.step(actions)\n\n                dones_env = np.all(dones, axis=1)\n                reward_env = np.mean(rewards, axis=1).flatten()\n                cost_env = np.mean(costs, axis=1).flatten()\n                train_episode_rewards += reward_env\n                train_episode_costs += cost_env\n\n                # print(\"reward_env--mujoco_runner_mappo_lagr\", reward_env)\n                # print(\"cost_env--mujoco_runner_mappo_lagr\", cost_env)\n                for t in range(self.n_rollout_threads):\n                    # print(\"dones_env--mujoco_runner_mappo_lagr\", dones_env)\n                    if dones_env[t]:\n                        done_episodes_rewards.append(train_episode_rewards[t])\n                        train_episode_rewards[t] = 0\n                        done_episodes_costs.append(train_episode_costs[t])\n                        train_episode_costs[t] = 0\n                        # print(\"done_episodes_rewards--mujoco_runner_mappo_lagr\", done_episodes_rewards)\n                        # print(\"done_episodes_costs--mujoco_runner_mappo_lagr\", done_episodes_costs)\n                done_episodes_costs_aver = np.mean(train_episode_costs)\n                # print(\"train_episode_costs_aver\",train_episode_costs_aver)\n                data = obs, share_obs, rewards, costs, dones, infos, \\\n                       values, actions, action_log_probs, \\\n                       rnn_states, rnn_states_critic,  cost_preds, rnn_states_cost, done_episodes_costs_aver  # fixme: it's important!!!\n\n                # insert data into buffer\n\n                self.insert(data)\n\n            # compute return and update network\n            self.compute()\n            train_infos = self.train()\n\n            # post process\n            total_num_steps = (episode + 1) * self.episode_length * self.n_rollout_threads\n            # save model\n            if (episode % self.save_interval == 0 or episode == episodes - 1):\n                self.save()\n\n            # log information\n            if episode % self.log_interval == 0:\n                end = time.time()\n                print(\"\\n Scenario {} Algo {} Exp {} updates {}/{} episodes, total num timesteps {}/{}, FPS {}.\\n\"\n                      .format(self.all_args.scenario,\n                              self.algorithm_name,\n                              self.experiment_name,\n                              episode,\n                              episodes,\n                              total_num_steps,\n                              self.num_env_steps,\n                              int(total_num_steps / (end - start))))\n\n                self.log_train(train_infos, total_num_steps)\n\n                if len(done_episodes_rewards) > 0:\n                    aver_episode_rewards = np.mean(done_episodes_rewards)\n                    aver_episode_costs = np.mean(done_episodes_costs)\n                    # self.retrun_average_cost = aver_episode_costs\n                    self.return_aver_cost(aver_episode_costs)\n                    # self.insert(data, aver_episode_costs=aver_episode_costs)\n                    # print(\"+++++++=aver_episode_costs++++++++=\", aver_episode_costs)\n                    # print(\"+++++++=data++++++++=\", data)\n                    print(\"some episodes done, average rewards: {}, average costs: {}\".format(aver_episode_rewards,\n                                                                                              aver_episode_costs))\n                    self.writter.add_scalars(\"train_episode_rewards\", {\"aver_rewards\": aver_episode_rewards},\n                                             total_num_steps)\n                    self.writter.add_scalars(\"train_episode_costs\", {\"aver_costs\": aver_episode_costs},\n                                             total_num_steps)\n\n\n\n            # eval\n            if episode % self.eval_interval == 0 and self.use_eval:\n                self.eval(total_num_steps)\n    def return_aver_cost(self, aver_episode_costs):\n        for agent_id in range(self.num_agents):\n            self.buffer[agent_id].return_aver_insert(aver_episode_costs)\n\n\n        # pass\n\n    def warmup(self):\n        # reset env\n        obs, share_obs, _ = self.envs.reset()\n        # replay buffer\n        if not self.use_centralized_V:\n            share_obs = obs\n\n        for agent_id in range(self.num_agents):\n            # print(share_obs[:, agent_id])\n            self.buffer[agent_id].share_obs[0] = share_obs[:, agent_id].copy()\n            self.buffer[agent_id].obs[0] = obs[:, agent_id].copy()\n\n    @torch.no_grad()\n    def collect(self, step):\n        # values, actions, action_log_probs, rnn_states, rnn_states_critic, cost_preds, \\\n        # rnn_states_cost = self.collect(step)\n\n        value_collector = []\n        action_collector = []\n        action_log_prob_collector = []\n        rnn_state_collector = []\n        rnn_state_critic_collector = []\n        cost_preds_collector = []\n        rnn_states_cost_collector = []\n\n        for agent_id in range(self.num_agents):\n            self.trainer[agent_id].prep_rollout()\n            value, action, action_log_prob, rnn_state, rnn_state_critic, cost_pred, rnn_state_cost \\\n                = self.trainer[agent_id].policy.get_actions(self.buffer[agent_id].share_obs[step],\n                                                            self.buffer[agent_id].obs[step],\n                                                            self.buffer[agent_id].rnn_states[step],\n                                                            self.buffer[agent_id].rnn_states_critic[step],\n                                                            self.buffer[agent_id].masks[step],\n                                                            rnn_states_cost=self.buffer[agent_id].rnn_states_cost[step]\n                                                            )\n            value_collector.append(_t2n(value))\n            action_collector.append(_t2n(action))\n            action_log_prob_collector.append(_t2n(action_log_prob))\n            rnn_state_collector.append(_t2n(rnn_state))\n            rnn_state_critic_collector.append(_t2n(rnn_state_critic))\n            cost_preds_collector.append(_t2n(cost_pred))\n            rnn_states_cost_collector.append(_t2n(rnn_state_cost))\n        # [self.envs, agents, dim]\n        values = np.array(value_collector).transpose(1, 0, 2)\n        actions = np.array(action_collector).transpose(1, 0, 2)\n        action_log_probs = np.array(action_log_prob_collector).transpose(1, 0, 2)\n        rnn_states = np.array(rnn_state_collector).transpose(1, 0, 2, 3)\n        rnn_states_critic = np.array(rnn_state_critic_collector).transpose(1, 0, 2, 3)\n        cost_preds = np.array(cost_preds_collector).transpose(1, 0, 2)\n        rnn_states_cost = np.array(rnn_states_cost_collector).transpose(1, 0, 2, 3)\n\n        return values, actions, action_log_probs, rnn_states, rnn_states_critic, cost_preds, rnn_states_cost\n\n    def insert(self, data, aver_episode_costs = 0):\n        aver_episode_costs = aver_episode_costs\n        # print(\"self.insert(data, aver_episode_costs)\", aver_episode_costs)\n        obs, share_obs, rewards, costs, dones, infos, \\\n        values, actions, action_log_probs, rnn_states, rnn_states_critic, cost_preds, rnn_states_cost, done_episodes_costs_aver = data # fixme:!!!\n        # print(\"insert--rewards\", rewards)\n        dones_env = np.all(dones, axis=1)\n\n        rnn_states[dones_env == True] = np.zeros(\n            ((dones_env == True).sum(), self.num_agents, self.recurrent_N, self.hidden_size), dtype=np.float32)\n        rnn_states_critic[dones_env == True] = np.zeros(\n            ((dones_env == True).sum(), self.num_agents, *self.buffer[0].rnn_states_critic.shape[2:]), dtype=np.float32)\n\n        rnn_states_cost[dones_env == True] = np.zeros(\n            ((dones_env == True).sum(), self.num_agents, *self.buffer[0].rnn_states_cost.shape[2:]), dtype=np.float32)\n\n        masks = np.ones((self.n_rollout_threads, self.num_agents, 1), dtype=np.float32)\n        masks[dones_env == True] = np.zeros(((dones_env == True).sum(), self.num_agents, 1), dtype=np.float32)\n\n        active_masks = np.ones((self.n_rollout_threads, self.num_agents, 1), dtype=np.float32)\n        active_masks[dones == True] = np.zeros(((dones == True).sum(), 1), dtype=np.float32)\n        active_masks[dones_env == True] = np.ones(((dones_env == True).sum(), self.num_agents, 1), dtype=np.float32)\n\n        if not self.use_centralized_V:\n            share_obs = obs\n\n        for agent_id in range(self.num_agents):\n            self.buffer[agent_id].insert(share_obs[:, agent_id], obs[:, agent_id], rnn_states[:, agent_id],\n                                         rnn_states_critic[:, agent_id], actions[:, agent_id],\n                                         action_log_probs[:, agent_id],\n                                         values[:, agent_id], rewards[:, agent_id], masks[:, agent_id], None,\n                                         active_masks[:, agent_id],  None, costs=costs[:, agent_id],\n                                         cost_preds=cost_preds[:, agent_id],\n                                         rnn_states_cost=rnn_states_cost[:, agent_id], done_episodes_costs_aver=done_episodes_costs_aver, aver_episode_costs=aver_episode_costs)\n\n    def log_train(self, train_infos, total_num_steps):\n        print(\"average_step_rewards is {}.\".format(np.mean(self.buffer[0].rewards)))\n        train_infos[0][0][\"average_step_rewards\"] = 0\n        for agent_id in range(self.num_agents):\n            train_infos[0][agent_id][\"average_step_rewards\"]= np.mean(self.buffer[agent_id].rewards)\n            for k, v in train_infos[0][agent_id].items():\n                agent_k = \"agent%i/\" % agent_id + k\n                if self.use_wandb:\n                    wandb.log({agent_k: v}, step=total_num_steps)\n                else:\n                    self.writter.add_scalars(agent_k, {agent_k: v}, total_num_steps)\n\n    @torch.no_grad()\n    def eval(self, total_num_steps):\n        eval_episode = 0\n        eval_episode_rewards = []\n        one_episode_rewards = []\n        eval_episode_costs = []\n        one_episode_costs = []\n\n        for eval_i in range(self.n_eval_rollout_threads):\n            one_episode_rewards.append([])\n            eval_episode_rewards.append([])\n\n            one_episode_costs.append([])\n            eval_episode_costs.append([])\n\n        eval_obs, eval_share_obs, _ = self.eval_envs.reset()\n\n        eval_rnn_states = np.zeros((self.n_eval_rollout_threads, self.num_agents, self.recurrent_N, self.hidden_size),\n                                   dtype=np.float32)\n        eval_masks = np.ones((self.n_eval_rollout_threads, self.num_agents, 1), dtype=np.float32)\n\n        while True:\n            eval_actions_collector = []\n            eval_rnn_states_collector = []\n            for agent_id in range(self.num_agents):\n                self.trainer[agent_id].prep_rollout()\n                eval_actions, temp_rnn_state = \\\n                    self.trainer[agent_id].policy.act(eval_obs[:, agent_id],\n                                                      eval_rnn_states[:, agent_id],\n                                                      eval_masks[:, agent_id],\n                                                      deterministic=True)\n                eval_rnn_states[:, agent_id] = _t2n(temp_rnn_state)\n                eval_actions_collector.append(_t2n(eval_actions))\n\n            eval_actions = np.array(eval_actions_collector).transpose(1, 0, 2)\n\n            # Obser reward and next obs\n            eval_obs, eval_share_obs, eval_rewards, eval_dones, eval_infos, _ = self.eval_envs.step(\n                eval_actions)\n            for eval_i in range(self.n_eval_rollout_threads):\n                one_episode_rewards[eval_i].append(eval_rewards[eval_i])\n                one_episode_costs[eval_i].append(eval_costs[eval_i])\n\n            eval_dones_env = np.all(eval_dones, axis=1)\n\n            eval_rnn_states[eval_dones_env == True] = np.zeros(\n                ((eval_dones_env == True).sum(), self.num_agents, self.recurrent_N, self.hidden_size), dtype=np.float32)\n\n            eval_masks = np.ones((self.all_args.n_eval_rollout_threads, self.num_agents, 1), dtype=np.float32)\n            eval_masks[eval_dones_env == True] = np.zeros(((eval_dones_env == True).sum(), self.num_agents, 1),\n                                                          dtype=np.float32)\n\n            for eval_i in range(self.n_eval_rollout_threads):\n                if eval_dones_env[eval_i]:\n                    eval_episode += 1\n                    eval_episode_rewards[eval_i].append(np.sum(one_episode_rewards[eval_i], axis=0))\n                    one_episode_rewards[eval_i] = []\n\n            if eval_episode >= self.all_args.eval_episodes:\n                eval_episode_rewards = np.concatenate(eval_episode_rewards)\n                eval_env_infos = {'eval_average_episode_rewards': eval_episode_rewards,\n                                  'eval_max_episode_rewards': [np.max(eval_episode_rewards)]}\n                self.log_env(eval_env_infos, total_num_steps)\n                print(\"eval_average_episode_rewards is {}.\".format(np.mean(eval_episode_rewards)))\n                break\n"
  },
  {
    "path": "MACPO/macpo/scripts/__init__.py",
    "content": ""
  },
  {
    "path": "MACPO/macpo/scripts/train/__init__.py",
    "content": ""
  },
  {
    "path": "MACPO/macpo/scripts/train/train_mujoco.py",
    "content": "#!/usr/bin/env python\nimport sys\nimport os\ncurPath = os.path.abspath(__file__)\n\nif len(curPath.split('/'))==1:\n    rootPath = '\\\\'.join(curPath.split('\\\\')[:-3])\nelse:\n    rootPath = '/'.join(curPath.split('/')[:-3])\nsys.path.append(os.path.split(rootPath)[0])\n\nimport wandb\nimport socket\nimport setproctitle\nimport numpy as np\nfrom pathlib import Path\nimport torch\nfrom macpo.config import get_config\nfrom macpo.envs.safety_ma_mujoco.safety_multiagent_mujoco import MujocoMulti\nfrom macpo.envs.env_wrappers import ShareSubprocVecEnv, ShareDummyVecEnv\n\n\ndef make_train_env(all_args):\n    def get_env_fn(rank):\n        def init_env():\n            if all_args.env_name == \"mujoco\":\n                env_args = {\"scenario\": all_args.scenario,\n                            \"agent_conf\": all_args.agent_conf,\n                            \"agent_obsk\": all_args.agent_obsk,\n                            \"episode_limit\": 1000}\n                env = MujocoMulti(env_args=env_args)\n            else:\n                print(\"Can not support the \" + all_args.env_name + \"environment.\")\n                raise NotImplementedError\n            env.seed(all_args.seed + rank * 1000)\n            return env\n\n        return init_env\n\n    if all_args.n_rollout_threads == 1:\n        return ShareDummyVecEnv([get_env_fn(0)])\n    else:\n        return ShareSubprocVecEnv([get_env_fn(i) for i in range(all_args.n_rollout_threads)])\n\n\ndef make_eval_env(all_args):\n    def get_env_fn(rank):\n        def init_env():\n            if all_args.env_name == \"mujoco\":\n                env_args = {\"scenario\": all_args.scenario,\n                            \"agent_conf\": all_args.agent_conf,\n                            \"agent_obsk\": all_args.agent_obsk,\n                            \"episode_limit\": 1000}\n                env = MujocoMulti(env_args=env_args)\n            else:\n                print(\"Can not support the \" + all_args.env_name + \"environment.\")\n                raise NotImplementedError\n            env.seed(all_args.seed * 50000 + rank * 10000)\n            return env\n\n        return init_env\n\n    if all_args.n_eval_rollout_threads == 1:\n        return ShareDummyVecEnv([get_env_fn(0)])\n    else:\n        return ShareSubprocVecEnv([get_env_fn(i) for i in range(all_args.n_eval_rollout_threads)])\n\n\ndef parse_args(args, parser):\n    parser.add_argument('--scenario', type=str, default='Hopper-v2', help=\"Which mujoco task to run on\")\n    parser.add_argument('--agent_conf', type=str, default='3x1')\n    parser.add_argument('--agent_obsk', type=int, default=0)\n    parser.add_argument(\"--add_move_state\", action='store_true', default=False)\n    parser.add_argument(\"--add_local_obs\", action='store_true', default=False)\n    parser.add_argument(\"--add_distance_state\", action='store_true', default=False)\n    parser.add_argument(\"--add_enemy_action_state\", action='store_true', default=False)\n    parser.add_argument(\"--add_agent_id\", action='store_true', default=False)\n    parser.add_argument(\"--add_visible_state\", action='store_true', default=False)\n    parser.add_argument(\"--add_xy_state\", action='store_true', default=False)\n\n    # agent-specific state should be designed carefully\n    parser.add_argument(\"--use_state_agent\", action='store_true', default=False)\n    parser.add_argument(\"--use_mustalive\", action='store_false', default=True)\n    parser.add_argument(\"--add_center_xy\", action='store_true', default=False)\n    parser.add_argument(\"--use_single_network\", action='store_true', default=False)\n\n    all_args = parser.parse_known_args(args)[0]\n\n    return all_args\n\n\ndef main(args):\n    parser = get_config()\n    all_args = parse_args(args, parser)\n    print(\"mumu config: \", all_args)\n\n    if all_args.algorithm_name == \"macpo\":\n        all_args.share_policy=False\n    else:\n        raise NotImplementedError\n\n    # cuda\n    # all_args.cuda = True\n    if all_args.cuda and torch.cuda.is_available():\n        print(\"choose to use gpu...\")\n        device = torch.device(\"cuda:0\")\n        torch.set_num_threads(all_args.n_training_threads)\n        if all_args.cuda_deterministic:\n            torch.backends.cudnn.benchmark = False\n            torch.backends.cudnn.deterministic = True\n    else:\n        print(\"cuda flag: \", all_args.cuda, \"Torch: \", torch.cuda.is_available())\n        print(\"choose to use cpu...\")\n        device = torch.device(\"cpu\")\n        torch.set_num_threads(all_args.n_training_threads)\n\n    run_dir = Path(os.path.split(os.path.dirname(os.path.abspath(__file__)))[\n                       0] + \"/results\") / all_args.env_name / all_args.scenario / all_args.algorithm_name / all_args.experiment_name\n    if not run_dir.exists():\n        os.makedirs(str(run_dir))\n\n    if all_args.use_wandb:\n        run = wandb.init(config=all_args,\n                         project=all_args.env_name,\n                         entity=all_args.user_name,\n                         notes=socket.gethostname(),\n                         name=str(all_args.algorithm_name) + \"_\" +\n                              str(all_args.experiment_name) +\n                              \"_seed\" + str(all_args.seed),\n                         group=all_args.map_name,\n                         dir=str(run_dir),\n                         job_type=\"training\",\n                         reinit=True)\n    else:\n        if not run_dir.exists():\n            curr_run = 'run1'\n        else:\n            exst_run_nums = [int(str(folder.name).split('run')[1]) for folder in run_dir.iterdir() if\n                             str(folder.name).startswith('run')]\n            if len(exst_run_nums) == 0:\n                curr_run = 'run1'\n            else:\n                curr_run = 'run%i' % (max(exst_run_nums) + 1)\n        run_dir = run_dir / curr_run\n        if not run_dir.exists():\n            os.makedirs(str(run_dir))\n\n    setproctitle.setproctitle(\n        str(all_args.algorithm_name) + \"-\" + str(all_args.env_name) + \"-\" + str(all_args.experiment_name) + \"@\" + str(\n            all_args.user_name))\n\n    # seed\n    torch.manual_seed(all_args.seed)\n    torch.cuda.manual_seed_all(all_args.seed)\n    np.random.seed(all_args.seed)\n\n    # env\n    envs = make_train_env(all_args)\n    eval_envs = make_eval_env(all_args) if all_args.use_eval else None\n    num_agents = envs.n_agents\n\n    config = {\n        \"all_args\": all_args,\n        \"envs\": envs,\n        \"eval_envs\": eval_envs,\n        \"num_agents\": num_agents,\n        \"device\": device,\n        \"run_dir\": run_dir\n    }\n\n    # run experiments\n    if all_args.share_policy:\n        from macpo.runner.shared.mujoco_runner import MujocoRunner as Runner\n    else:\n        #in origin code not implement this method\n        if all_args.algorithm_name == \"macpo\":\n            from macpo.runner.separated.mujoco_runner_macpo import MujocoRunner as Runner\n        else:\n            from macpo.runner.separated.mujoco_runner import MujocoRunner as Runner\n\n    runner = Runner(config)\n    runner.run()\n\n    # post process\n    envs.close()\n    if all_args.use_eval and eval_envs is not envs:\n        eval_envs.close()\n\n    if all_args.use_wandb:\n        run.finish()\n    else:\n        runner.writter.export_scalars_to_json(str(runner.log_dir + '/summary.json'))\n        runner.writter.close()\n\n\nif __name__ == \"__main__\":\n    main(sys.argv[1:])\n"
  },
  {
    "path": "MACPO/macpo/scripts/train_mujoco.sh",
    "content": "#!/bin/sh\nenv=\"mujoco\"\nscenario=\"Ant-v2\"\nagent_conf=\"2x4\"\nagent_obsk=1\nalgo=\"macpo\"\nexp=\"rnn\"\nseed_max=1\n\necho \"env is ${env}, scenario is ${scenario}, algo is ${algo}, exp is ${exp}, max seed is ${seed_max}\"\nfor seed in `seq ${seed_max}`;\ndo\n    echo \"seed is ${seed}:\"\n    CUDA_VISIBLE_DEVICES=0 python train/train_mujoco.py  --env_name ${env} --algorithm_name ${algo} --experiment_name ${exp} --scenario ${scenario} --agent_conf ${agent_conf} --agent_obsk ${agent_obsk} --lr 9e-5 --critic_lr 5e-3 --std_x_coef 1 --std_y_coef 5e-1 --seed 50 --n_training_threads 4 --n_rollout_threads 16 --num_mini_batch 40 --episode_length 1000 --num_env_steps 10000000 --ppo_epoch 1 --use_value_active_masks  --add_center_xy --use_state_agent --kl_threshold 0.0065 --safety_bound 10 --safety_gamma 0.09 --line_search_fraction 0.5 --fraction_coef 0.27\ndone\n"
  },
  {
    "path": "MACPO/macpo/utils/__init__.py",
    "content": ""
  },
  {
    "path": "MACPO/macpo/utils/multi_discrete.py",
    "content": "import gym\nimport numpy as np\n\n# An old version of OpenAI Gym's multi_discrete.py. (Was getting affected by Gym updates)\n# (https://github.com/openai/gym/blob/1fb81d4e3fb780ccf77fec731287ba07da35eb84/gym/spaces/multi_discrete.py)\nclass MultiDiscrete(gym.Space):\n    \"\"\"\n    - The multi-discrete action space consists of a series of discrete action spaces with different parameters\n    - It can be adapted to both a Discrete action space or a continuous (Box) action space\n    - It is useful to represent game controllers or keyboards where each key can be represented as a discrete action space\n    - It is parametrized by passing an array of arrays containing [min, max] for each discrete action space where the discrete action space can take any integers from `min` to `max` (both inclusive)\n    Note: A value of 0 always need to represent the NOOP action.\n    e.g. Nintendo Game Controller\n    - Can be conceptualized as 3 discrete action spaces:\n        1) Arrow Keys: Discrete 5  - NOOP[0], UP[1], RIGHT[2], DOWN[3], LEFT[4]  - params: min: 0, max: 4\n        2) Button A:   Discrete 2  - NOOP[0], Pressed[1] - params: min: 0, max: 1\n        3) Button B:   Discrete 2  - NOOP[0], Pressed[1] - params: min: 0, max: 1\n    - Can be initialized as\n        MultiDiscrete([ [0,4], [0,1], [0,1] ])\n    \"\"\"\n\n    def __init__(self, array_of_param_array):\n        self.low = np.array([x[0] for x in array_of_param_array])\n        self.high = np.array([x[1] for x in array_of_param_array])\n        self.num_discrete_space = self.low.shape[0]\n        self.n = np.sum(self.high) + 2\n\n    def sample(self):\n        \"\"\" Returns a array with one sample from each discrete action space \"\"\"\n        # For each row: round(random .* (max - min) + min, 0)\n        random_array = np.random.rand(self.num_discrete_space)\n        return [int(x) for x in np.floor(np.multiply((self.high - self.low + 1.), random_array) + self.low)]\n\n    def contains(self, x):\n        return len(x) == self.num_discrete_space and (np.array(x) >= self.low).all() and (np.array(x) <= self.high).all()\n\n    @property\n    def shape(self):\n        return self.num_discrete_space\n\n    def __repr__(self):\n        return \"MultiDiscrete\" + str(self.num_discrete_space)\n\n    def __eq__(self, other):\n        return np.array_equal(self.low, other.low) and np.array_equal(self.high, other.high)\n"
  },
  {
    "path": "MACPO/macpo/utils/popart.py",
    "content": "\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\n\n\nclass PopArt(nn.Module):\n    \"\"\" Normalize a vector of observations - across the first norm_axes dimensions\"\"\"\n\n    def __init__(self, input_shape, norm_axes=1, beta=0.99999, per_element_update=False, epsilon=1e-5, device=torch.device(\"cpu\")):\n        super(PopArt, self).__init__()\n\n        self.input_shape = input_shape\n        self.norm_axes = norm_axes\n        self.epsilon = epsilon\n        self.beta = beta\n        self.per_element_update = per_element_update\n        self.tpdv = dict(dtype=torch.float32, device=device)\n\n        self.running_mean = nn.Parameter(torch.zeros(input_shape), requires_grad=False).to(**self.tpdv)\n        self.running_mean_sq = nn.Parameter(torch.zeros(input_shape), requires_grad=False).to(**self.tpdv)\n        self.debiasing_term = nn.Parameter(torch.tensor(0.0), requires_grad=False).to(**self.tpdv)\n\n    def reset_parameters(self):\n        self.running_mean.zero_()\n        self.running_mean_sq.zero_()\n        self.debiasing_term.zero_()\n\n    def running_mean_var(self):\n        debiased_mean = self.running_mean / self.debiasing_term.clamp(min=self.epsilon)\n        debiased_mean_sq = self.running_mean_sq / self.debiasing_term.clamp(min=self.epsilon)\n        debiased_var = (debiased_mean_sq - debiased_mean ** 2).clamp(min=1e-2)\n        return debiased_mean, debiased_var\n\n    def forward(self, input_vector, train=True):\n        # Make sure input is float32\n        if type(input_vector) == np.ndarray:\n            input_vector = torch.from_numpy(input_vector)\n        input_vector = input_vector.to(**self.tpdv)\n\n        if train:\n            # Detach input before adding it to running means to avoid backpropping through it on\n            # subsequent batches.\n            detached_input = input_vector.detach()\n            batch_mean = detached_input.mean(dim=tuple(range(self.norm_axes)))\n            batch_sq_mean = (detached_input ** 2).mean(dim=tuple(range(self.norm_axes)))\n\n            if self.per_element_update:\n                batch_size = np.prod(detached_input.size()[:self.norm_axes])\n                weight = self.beta ** batch_size\n            else:\n                weight = self.beta\n\n            self.running_mean.mul_(weight).add_(batch_mean * (1.0 - weight))\n            self.running_mean_sq.mul_(weight).add_(batch_sq_mean * (1.0 - weight))\n            self.debiasing_term.mul_(weight).add_(1.0 * (1.0 - weight))\n\n        mean, var = self.running_mean_var()\n        out = (input_vector - mean[(None,) * self.norm_axes]) / torch.sqrt(var)[(None,) * self.norm_axes]\n        \n        return out\n\n    def denormalize(self, input_vector):\n        \"\"\" Transform normalized data back into original distribution \"\"\"\n        if type(input_vector) == np.ndarray:\n            input_vector = torch.from_numpy(input_vector)\n        input_vector = input_vector.to(**self.tpdv)\n\n        mean, var = self.running_mean_var()\n        out = input_vector * torch.sqrt(var)[(None,) * self.norm_axes] + mean[(None,) * self.norm_axes]\n        \n        out = out.cpu().numpy()\n        \n        return out\n"
  },
  {
    "path": "MACPO/macpo/utils/separated_buffer.py",
    "content": "import torch\nimport numpy as np\nfrom collections import defaultdict\n\nfrom macpo.utils.util import check, get_shape_from_obs_space, get_shape_from_act_space\n\n\ndef _flatten(T, N, x):\n    return x.reshape(T * N, *x.shape[2:])\n\n\ndef _cast(x):\n    return x.transpose(1,0,2).reshape(-1, *x.shape[2:])\n\n\nclass SeparatedReplayBuffer(object):\n    def __init__(self, args, obs_space, share_obs_space, act_space):\n        self.episode_length = args.episode_length\n        self.n_rollout_threads = args.n_rollout_threads\n        self.rnn_hidden_size = args.hidden_size\n        self.recurrent_N = args.recurrent_N\n        self.gamma = args.gamma\n        self.gae_lambda = args.gae_lambda\n        self._use_gae = args.use_gae\n        self._use_popart = args.use_popart\n        self._use_valuenorm = args.use_valuenorm\n        self._use_proper_time_limits = args.use_proper_time_limits\n        self.algo = args.algorithm_name\n\n        obs_shape = get_shape_from_obs_space(obs_space)\n        share_obs_shape = get_shape_from_obs_space(share_obs_space)\n\n        if type(obs_shape[-1]) == list:\n            obs_shape = obs_shape[:1]\n\n        if type(share_obs_shape[-1]) == list:\n            share_obs_shape = share_obs_shape[:1]\n\n        self.aver_episode_costs = np.zeros((self.episode_length + 1, self.n_rollout_threads, *obs_shape), dtype=np.float32)\n\n        self.share_obs = np.zeros((self.episode_length + 1, self.n_rollout_threads, *share_obs_shape), dtype=np.float32)\n        self.obs = np.zeros((self.episode_length + 1, self.n_rollout_threads, *obs_shape), dtype=np.float32)\n\n        self.rnn_states = np.zeros((self.episode_length + 1, self.n_rollout_threads, self.recurrent_N, self.rnn_hidden_size), dtype=np.float32)\n        self.rnn_states_critic = np.zeros_like(self.rnn_states)\n        self.rnn_states_cost = np.zeros_like(self.rnn_states)\n\n        self.value_preds = np.zeros((self.episode_length + 1, self.n_rollout_threads, 1), dtype=np.float32)\n        self.returns = np.zeros((self.episode_length + 1, self.n_rollout_threads, 1), dtype=np.float32)\n        \n        if act_space.__class__.__name__ == 'Discrete':\n            self.available_actions = np.ones((self.episode_length + 1, self.n_rollout_threads, act_space.n), dtype=np.float32)\n        else:\n            self.available_actions = None\n\n        act_shape = get_shape_from_act_space(act_space)\n\n        self.actions = np.zeros((self.episode_length, self.n_rollout_threads, act_shape), dtype=np.float32)\n        self.action_log_probs = np.zeros((self.episode_length, self.n_rollout_threads, act_shape), dtype=np.float32)\n        self.rewards = np.zeros((self.episode_length, self.n_rollout_threads, 1), dtype=np.float32)\n\n        self.costs = np.zeros_like(self.rewards)\n        self.cost_preds = np.zeros_like(self.value_preds)\n        self.cost_returns = np.zeros_like(self.returns)\n        \n        self.masks = np.ones((self.episode_length + 1, self.n_rollout_threads, 1), dtype=np.float32)\n        self.bad_masks = np.ones_like(self.masks)\n        self.active_masks = np.ones_like(self.masks)\n\n        self.factor = None\n\n        self.step = 0\n\n    def update_factor(self, factor):\n        self.factor = factor.copy()\n\n    def return_aver_insert(self, aver_episode_costs):\n        self.aver_episode_costs = aver_episode_costs.copy()\n\n    def insert(self, share_obs, obs, rnn_states, rnn_states_critic, actions, action_log_probs,\n               value_preds, rewards, masks, bad_masks=None, active_masks=None, available_actions=None, costs=None,\n               cost_preds=None, rnn_states_cost=None, done_episodes_costs_aver=None, aver_episode_costs = 0):\n        # print(\"separated_buffer--aver_episode_costs：\", aver_episode_costs)\n        self.share_obs[self.step + 1] = share_obs.copy()\n        self.obs[self.step + 1] = obs.copy()\n        self.rnn_states[self.step + 1] = rnn_states.copy()\n        self.rnn_states_critic[self.step + 1] = rnn_states_critic.copy()\n        self.actions[self.step] = actions.copy()\n        self.action_log_probs[self.step] = action_log_probs.copy()\n        self.value_preds[self.step] = value_preds.copy()\n        self.rewards[self.step] = rewards.copy()\n        self.masks[self.step + 1] = masks.copy()\n        if bad_masks is not None:\n            self.bad_masks[self.step + 1] = bad_masks.copy()\n        if active_masks is not None:\n            self.active_masks[self.step + 1] = active_masks.copy()\n        if available_actions is not None:\n            self.available_actions[self.step + 1] = available_actions.copy()\n        if costs is not None:\n            self.costs[self.step] = costs.copy()\n        if cost_preds is not None:\n            self.cost_preds[self.step] = cost_preds.copy()\n        if rnn_states_cost is not None:\n            self.rnn_states_cost[self.step + 1] = rnn_states_cost.copy()\n        # if train_episode_costs_aver is not None:\n        #     self.train_episode_costs_aver[self.step + 1] = train_episode_costs_aver.copy()\n\n        self.step = (self.step + 1) % self.episode_length\n\n    def chooseinsert(self, share_obs, obs, rnn_states, rnn_states_critic, actions, action_log_probs,\n                     value_preds, rewards, masks, bad_masks=None, active_masks=None, available_actions=None):\n        self.share_obs[self.step] = share_obs.copy()\n        self.obs[self.step] = obs.copy()\n        self.rnn_states[self.step + 1] = rnn_states.copy()\n        self.rnn_states_critic[self.step + 1] = rnn_states_critic.copy()\n        self.actions[self.step] = actions.copy()\n        self.action_log_probs[self.step] = action_log_probs.copy()\n        self.value_preds[self.step] = value_preds.copy()\n        self.rewards[self.step] = rewards.copy()\n        self.masks[self.step + 1] = masks.copy()\n        if bad_masks is not None:\n            self.bad_masks[self.step + 1] = bad_masks.copy()\n        if active_masks is not None:\n            self.active_masks[self.step] = active_masks.copy()\n        if available_actions is not None:\n            self.available_actions[self.step] = available_actions.copy()\n\n        self.step = (self.step + 1) % self.episode_length\n    \n    def after_update(self):\n        self.share_obs[0] = self.share_obs[-1].copy()\n        self.obs[0] = self.obs[-1].copy()\n        self.rnn_states[0] = self.rnn_states[-1].copy()\n        self.rnn_states_critic[0] = self.rnn_states_critic[-1].copy()\n        self.rnn_states_cost[0] = self.rnn_states_cost[-1].copy()\n        self.masks[0] = self.masks[-1].copy()\n        self.bad_masks[0] = self.bad_masks[-1].copy()\n        self.active_masks[0] = self.active_masks[-1].copy()\n        if self.available_actions is not None:\n            self.available_actions[0] = self.available_actions[-1].copy()\n\n    def chooseafter_update(self):\n        self.rnn_states[0] = self.rnn_states[-1].copy()\n        self.rnn_states_critic[0] = self.rnn_states_critic[-1].copy()\n        self.masks[0] = self.masks[-1].copy()\n        self.bad_masks[0] = self.bad_masks[-1].copy()\n\n    def compute_returns(self, next_value, value_normalizer=None):\n        \"\"\"\n        use proper time limits, the difference of use or not is whether use bad_mask\n        \"\"\"\n        if self._use_proper_time_limits:\n            if self._use_gae:\n                self.value_preds[-1] = next_value\n                gae = 0\n                for step in reversed(range(self.rewards.shape[0])):\n                    if self._use_popart or self._use_valuenorm:\n                        delta = self.rewards[step] + self.gamma * value_normalizer.denormalize(self.value_preds[\n                            step + 1]) * self.masks[step + 1] - value_normalizer.denormalize(self.value_preds[step])\n                        gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae\n                        gae = gae * self.bad_masks[step + 1]\n                        self.returns[step] = gae + value_normalizer.denormalize(self.value_preds[step])\n                    else:\n                        delta = self.rewards[step] + self.gamma * self.value_preds[step + 1] * self.masks[step + 1] - self.value_preds[step]\n                        gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae\n                        gae = gae * self.bad_masks[step + 1]\n                        self.returns[step] = gae + self.value_preds[step]\n            else:\n                self.returns[-1] = next_value\n                for step in reversed(range(self.rewards.shape[0])):\n                    if self._use_popart:\n                        self.returns[step] = (self.returns[step + 1] * self.gamma * self.masks[step + 1] + self.rewards[step]) * self.bad_masks[step + 1] \\\n                            + (1 - self.bad_masks[step + 1]) * value_normalizer.denormalize(self.value_preds[step])\n                    else:\n                        self.returns[step] = (self.returns[step + 1] * self.gamma * self.masks[step + 1] + self.rewards[step]) * self.bad_masks[step + 1] \\\n                            + (1 - self.bad_masks[step + 1]) * self.value_preds[step]\n        else:\n            if self._use_gae:\n                self.value_preds[-1] = next_value\n                gae = 0\n                for step in reversed(range(self.rewards.shape[0])):\n                    if self._use_popart or self._use_valuenorm:\n                        delta = self.rewards[step] + self.gamma * value_normalizer.denormalize(self.value_preds[step + 1]) * self.masks[step + 1] - value_normalizer.denormalize(self.value_preds[step])\n                        gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae\n                        self.returns[step] = gae + value_normalizer.denormalize(self.value_preds[step])\n                    else:\n                        delta = self.rewards[step] + self.gamma * self.value_preds[step + 1] * self.masks[step + 1] - self.value_preds[step]\n                        gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae\n                        self.returns[step] = gae + self.value_preds[step]\n            else:\n                self.returns[-1] = next_value\n                for step in reversed(range(self.rewards.shape[0])):\n                    self.returns[step] = self.returns[step + 1] * self.gamma * self.masks[step + 1] + self.rewards[step]\n\n    def compute_cost_returns(self, next_cost, value_normalizer=None):\n\n        if self._use_proper_time_limits:\n            if self._use_gae:\n                self.cost_preds[-1] = next_cost\n                gae = 0\n                for step in reversed(range(self.costs.shape[0])):\n                    if self._use_popart or self._use_valuenorm:\n                        delta = self.costs[step] + self.gamma * value_normalizer.denormalize(self.cost_preds[step + 1]) * self.masks[step + 1] - value_normalizer.denormalize(self.cost_preds[step])\n                        gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae\n                        gae = gae * self.bad_masks[step + 1]\n                        self.cost_returns[step] = gae + value_normalizer.denormalize(self.cost_preds[step])\n                    else:\n                        delta = self.costs[step] + self.gamma * self.cost_preds[step + 1] * self.masks[step + 1] - self.cost_preds[step]\n                        gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae\n                        gae = gae * self.bad_masks[step + 1]\n                        self.cost_returns[step] = gae + self.cost_preds[step]\n            else:\n                self.cost_returns[-1] = next_cost\n                for step in reversed(range(self.costs.shape[0])):\n                    if self._use_popart:\n                        self.cost_returns[step] = (self.cost_returns[step + 1] * self.gamma * self.masks[step + 1] + self.costs[step]) * self.bad_masks[step + 1] \\\n                                             + (1 - self.bad_masks[step + 1]) * value_normalizer.denormalize(self.cost_preds[step])\n                    else:\n                        self.cost_returns[step] = (self.cost_returns[step + 1] * self.gamma * self.masks[step + 1] + self.costs[step]) * self.bad_masks[step + 1] \\\n                                             + (1 - self.bad_masks[step + 1]) * self.cost_preds[step]\n        else:\n            if self._use_gae:\n                self.cost_preds[-1] = next_cost\n                gae = 0\n                for step in reversed(range(self.costs.shape[0])):\n                    if self._use_popart or self._use_valuenorm:\n                        delta = self.costs[step] + self.gamma * value_normalizer.denormalize(self.cost_preds[step + 1]) * self.masks[step + 1] - value_normalizer.denormalize(self.cost_preds[step])\n                        gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae\n                        self.cost_returns[step] = gae + value_normalizer.denormalize(self.cost_preds[step])\n                    else:\n                        delta = self.costs[step] + self.gamma * self.cost_preds[step + 1] * self.masks[step + 1] - self.cost_preds[step]\n                        gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae\n                        self.cost_returns[step] = gae + self.cost_preds[step]\n            else:\n                self.cost_returns[-1] = next_cost\n                for step in reversed(range(self.costs.shape[0])):\n                    self.cost_returns[step] = self.cost_returns[step + 1] * self.gamma * self.masks[step + 1] + self.costs[step]\n\n    def feed_forward_generator(self, advantages, num_mini_batch=None, mini_batch_size=None, cost_adv=None):\n        episode_length, n_rollout_threads = self.rewards.shape[0:2]\n        batch_size = n_rollout_threads * episode_length\n\n        if mini_batch_size is None:\n            assert batch_size >= num_mini_batch, (\n                \"PPO requires the number of processes ({}) \"\n                \"* number of steps ({}) = {} \"\n                \"to be greater than or equal to the number of PPO mini batches ({}).\"\n                \"\".format(n_rollout_threads, episode_length, n_rollout_threads * episode_length,\n                          num_mini_batch))\n            mini_batch_size = batch_size // num_mini_batch\n\n        rand = torch.randperm(batch_size).numpy()\n        sampler = [rand[i*mini_batch_size:(i+1)*mini_batch_size] for i in range(num_mini_batch)]\n\n        share_obs = self.share_obs[:-1].reshape(-1, *self.share_obs.shape[2:])\n        obs = self.obs[:-1].reshape(-1, *self.obs.shape[2:])\n        rnn_states = self.rnn_states[:-1].reshape(-1, *self.rnn_states.shape[2:])\n        rnn_states_critic = self.rnn_states_critic[:-1].reshape(-1, *self.rnn_states_critic.shape[2:])\n        rnn_states_cost = self.rnn_states_cost[:-1].reshape(-1, *self.rnn_states_cost.shape[2:])\n        actions = self.actions.reshape(-1, self.actions.shape[-1])\n        if self.available_actions is not None:\n            available_actions = self.available_actions[:-1].reshape(-1, self.available_actions.shape[-1])\n        value_preds = self.value_preds[:-1].reshape(-1, 1)\n        returns = self.returns[:-1].reshape(-1, 1)\n        cost_preds = self.cost_preds[:-1].reshape(-1, 1)\n        cost_returns = self.cost_returns[:-1].reshape(-1, 1)\n        masks = self.masks[:-1].reshape(-1, 1)\n        active_masks = self.active_masks[:-1].reshape(-1, 1)\n        action_log_probs = self.action_log_probs.reshape(-1, self.action_log_probs.shape[-1])\n        # print(\"self.aver_episode_costs--separated--buffer\", self.aver_episode_costs.mean())\n        aver_episode_costs = self.aver_episode_costs # self.aver_episode_costs[:-1].reshape(-1, *self.aver_episode_costs.shape[2:])\n        if self.factor is not None:\n            # factor = self.factor.reshape(-1,1)\n            factor = self.factor.reshape(-1, self.factor.shape[-1])\n        advantages = advantages.reshape(-1, 1)\n        if cost_adv is not None:\n            cost_adv = cost_adv.reshape(-1, 1)\n\n        for indices in sampler:\n            # obs size [T+1 N Dim]-->[T N Dim]-->[T*N,Dim]-->[index,Dim]\n            share_obs_batch = share_obs[indices]\n            obs_batch = obs[indices]\n            rnn_states_batch = rnn_states[indices]\n            rnn_states_critic_batch = rnn_states_critic[indices]\n            rnn_states_cost_batch = rnn_states_cost[indices]\n            actions_batch = actions[indices]\n            if self.available_actions is not None:\n                available_actions_batch = available_actions[indices]\n            else:\n                available_actions_batch = None\n            value_preds_batch = value_preds[indices]\n            return_batch = returns[indices]\n            cost_preds_batch = cost_preds[indices]\n            cost_return_batch = cost_returns[indices]\n            masks_batch = masks[indices]\n            active_masks_batch = active_masks[indices]\n            old_action_log_probs_batch = action_log_probs[indices]\n            if advantages is None:\n                adv_targ = None\n            else:\n                adv_targ = advantages[indices]\n            if cost_adv is None:\n                cost_adv_targ = None\n            else:\n                cost_adv_targ = cost_adv[indices]\n\n            if self.factor is None:\n                yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch\n            else:\n                if self.algo == \"macppo\":\n                    factor_batch = factor[indices]\n                    yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch, factor_batch, cost_preds_batch, cost_return_batch, rnn_states_cost_batch, cost_adv_targ\n                elif self.algo == \"mappo_lagr\":\n                    factor_batch = factor[indices]\n                    yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch, factor_batch, cost_preds_batch, cost_return_batch, rnn_states_cost_batch, cost_adv_targ\n                elif self.algo == \"macpo\":\n                    factor_batch = factor[indices]\n                    yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch, factor_batch, cost_preds_batch, cost_return_batch, rnn_states_cost_batch, cost_adv_targ, aver_episode_costs\n                else:\n                    factor_batch = factor[indices]\n                    yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch, factor_batch\n\n    def naive_recurrent_generator(self, advantages, num_mini_batch, cost_adv=None):\n        n_rollout_threads = self.rewards.shape[1]\n        assert n_rollout_threads >= num_mini_batch, (\n            \"PPO requires the number of processes ({}) \"\n            \"to be greater than or equal to the number of \"\n            \"PPO mini batches ({}).\".format(n_rollout_threads, num_mini_batch))\n        num_envs_per_batch = n_rollout_threads // num_mini_batch\n        perm = torch.randperm(n_rollout_threads).numpy()\n        for start_ind in range(0, n_rollout_threads, num_envs_per_batch):\n            share_obs_batch = []\n            obs_batch = []\n            rnn_states_batch = []\n            rnn_states_critic_batch = []\n            rnn_states_cost_batch = []\n            actions_batch = []\n            available_actions_batch = []\n            value_preds_batch = []\n            cost_preds_batch = []\n            return_batch = []\n            cost_return_batch = []\n            masks_batch = []\n            active_masks_batch = []\n            old_action_log_probs_batch = []\n            adv_targ = []\n            cost_adv_targ = []\n            factor_batch = []\n            for offset in range(num_envs_per_batch):\n                ind = perm[start_ind + offset]\n                share_obs_batch.append(self.share_obs[:-1, ind])\n                obs_batch.append(self.obs[:-1, ind])\n                rnn_states_batch.append(self.rnn_states[0:1, ind])\n                rnn_states_critic_batch.append(self.rnn_states_critic[0:1, ind])\n                rnn_states_cost_batch.append(self.rnn_states_cost[0:1, ind])\n                actions_batch.append(self.actions[:, ind])\n                if self.available_actions is not None:\n                    available_actions_batch.append(self.available_actions[:-1, ind])\n                value_preds_batch.append(self.value_preds[:-1, ind])\n                cost_preds_batch.append(self.cost_preds[:-1, ind])\n                return_batch.append(self.returns[:-1, ind])\n                cost_return_batch.append(self.cost_returns[:-1, ind])\n                masks_batch.append(self.masks[:-1, ind])\n                active_masks_batch.append(self.active_masks[:-1, ind])\n                old_action_log_probs_batch.append(self.action_log_probs[:, ind])\n                adv_targ.append(advantages[:, ind])\n                if cost_adv is not None:\n                    cost_adv_targ.append(cost_adv[:, ind])\n                if self.factor is not None:\n                    factor_batch.append(self.factor[:, ind])\n\n            # [N[T, dim]]\n            T, N = self.episode_length, num_envs_per_batch\n            # These are all from_numpys of size (T, N, -1)\n            share_obs_batch = np.stack(share_obs_batch, 1)\n            obs_batch = np.stack(obs_batch, 1)\n            actions_batch = np.stack(actions_batch, 1)\n            if self.available_actions is not None:\n                available_actions_batch = np.stack(available_actions_batch, 1)\n            if self.factor is not None:\n                factor_batch=np.stack(factor_batch,1)\n            value_preds_batch = np.stack(value_preds_batch, 1)\n            cost_preds_batch = np.stack(cost_preds_batch, 1)\n            return_batch = np.stack(return_batch, 1)\n            cost_return_batch = np.stack(cost_return_batch, 1)\n            masks_batch = np.stack(masks_batch, 1)\n            active_masks_batch = np.stack(active_masks_batch, 1)\n            old_action_log_probs_batch = np.stack(old_action_log_probs_batch, 1)\n            adv_targ = np.stack(adv_targ, 1)\n            if cost_adv is not None:\n                cost_adv_targ = np.stack(cost_adv_targ, 1)\n\n            # States is just a (N, -1) from_numpy [N[1,dim]]\n            rnn_states_batch = np.stack(rnn_states_batch, 1).reshape(N, *self.rnn_states.shape[2:])\n            rnn_states_critic_batch = np.stack(rnn_states_critic_batch, 1).reshape(N, *self.rnn_states_critic.shape[2:])\n            rnn_states_cost_batch = np.stack(rnn_states_cost_batch, 1).reshape(N, *self.rnn_states_cost.shape[2:])\n\n            # Flatten the (T, N, ...) from_numpys to (T * N, ...)\n            share_obs_batch = _flatten(T, N, share_obs_batch)\n            obs_batch = _flatten(T, N, obs_batch)\n            actions_batch = _flatten(T, N, actions_batch)\n            if self.available_actions is not None:\n                available_actions_batch = _flatten(T, N, available_actions_batch)\n            else:\n                available_actions_batch = None\n            if self.factor is not None:\n                factor_batch=_flatten(T,N,factor_batch)\n            value_preds_batch = _flatten(T, N, value_preds_batch)\n            cost_preds_batch = _flatten(T, N, cost_preds_batch)\n            return_batch = _flatten(T, N, return_batch)\n            cost_return_batch = _flatten(T, N, cost_return_batch)\n            masks_batch = _flatten(T, N, masks_batch)\n            active_masks_batch = _flatten(T, N, active_masks_batch)\n            old_action_log_probs_batch = _flatten(T, N, old_action_log_probs_batch)\n            adv_targ = _flatten(T, N, adv_targ)\n            if cost_adv is not None:\n                cost_adv_targ = _flatten(T, N, cost_adv_targ)\n            if self.factor is not None:\n                if self.algo == \"mappo_lagr\":\n                    yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch, factor_batch, cost_preds_batch, cost_return_batch, rnn_states_cost_batch, cost_adv_targ  # 17 value\n                elif self.algo == \"macppo\":\n                    yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch, factor_batch, cost_preds_batch, cost_return_batch, rnn_states_cost_batch, cost_adv_targ  # 17 value\n                elif self.algo == \"macpo\":\n                    yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch, factor_batch, cost_preds_batch, cost_return_batch, rnn_states_cost_batch, cost_adv_targ  # 17 value\n                else:\n                    yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch, factor_batch  # value\n            else:\n                yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch\n\n    # def recurrent_generator(self, advantages, num_mini_batch, data_chunk_length, cost_adv=None):\n    #     episode_length, n_rollout_threads = self.rewards.shape[0:2]\n    #     batch_size = n_rollout_threads * episode_length\n    #     data_chunks = batch_size // data_chunk_length  # [C=r*T/L]\n    #     mini_batch_size = data_chunks // num_mini_batch\n    #\n    #     assert episode_length * n_rollout_threads >= data_chunk_length, (\n    #         \"PPO requires the number of processes ({}) * episode length ({}) \"\n    #         \"to be greater than or equal to the number of \"\n    #         \"data chunk length ({}).\".format(n_rollout_threads, episode_length, data_chunk_length))\n    #     assert data_chunks >= 2, (\"need larger batch size\")\n    #\n    #     rand = torch.randperm(data_chunks).numpy()\n    #     sampler = [rand[i*mini_batch_size:(i+1)*mini_batch_size] for i in range(num_mini_batch)]\n    #\n    #     if len(self.share_obs.shape) > 3:\n    #         share_obs = self.share_obs[:-1].transpose(1, 0, 2, 3, 4).reshape(-1, *self.share_obs.shape[2:])\n    #         obs = self.obs[:-1].transpose(1, 0, 2, 3, 4).reshape(-1, *self.obs.shape[2:])\n    #     else:\n    #         share_obs = _cast(self.share_obs[:-1])\n    #         obs = _cast(self.obs[:-1])\n    #\n    #     actions = _cast(self.actions)\n    #     action_log_probs = _cast(self.action_log_probs)\n    #     advantages = _cast(advantages)\n    #     value_preds = _cast(self.value_preds[:-1])\n    #     returns = _cast(self.returns[:-1])\n    #     masks = _cast(self.masks[:-1])\n    #     active_masks = _cast(self.active_masks[:-1])\n    #     if self.factor is not None:\n    #         factor = _cast(self.factor)\n    #     # rnn_states = _cast(self.rnn_states[:-1])\n    #     # rnn_states_critic = _cast(self.rnn_states_critic[:-1])\n    #     rnn_states = self.rnn_states[:-1].transpose(1, 0, 2, 3).reshape(-1, *self.rnn_states.shape[2:])\n    #     rnn_states_critic = self.rnn_states_critic[:-1].transpose(1, 0, 2, 3).reshape(-1, *self.rnn_states_critic.shape[2:])\n    #\n    #     if self.available_actions is not None:\n    #         available_actions = _cast(self.available_actions[:-1])\n    #\n    #     for indices in sampler:\n    #         share_obs_batch = []\n    #         obs_batch = []\n    #         rnn_states_batch = []\n    #         rnn_states_critic_batch = []\n    #         actions_batch = []\n    #         available_actions_batch = []\n    #         value_preds_batch = []\n    #         return_batch = []\n    #         masks_batch = []\n    #         active_masks_batch = []\n    #         old_action_log_probs_batch = []\n    #         adv_targ = []\n    #         factor_batch = []\n    #         for index in indices:\n    #             ind = index * data_chunk_length\n    #             # size [T+1 N M Dim]-->[T N Dim]-->[N T Dim]-->[T*N,Dim]-->[L,Dim]\n    #             share_obs_batch.append(share_obs[ind:ind+data_chunk_length])\n    #             obs_batch.append(obs[ind:ind+data_chunk_length])\n    #             actions_batch.append(actions[ind:ind+data_chunk_length])\n    #             if self.available_actions is not None:\n    #                 available_actions_batch.append(available_actions[ind:ind+data_chunk_length])\n    #             value_preds_batch.append(value_preds[ind:ind+data_chunk_length])\n    #             return_batch.append(returns[ind:ind+data_chunk_length])\n    #             masks_batch.append(masks[ind:ind+data_chunk_length])\n    #             active_masks_batch.append(active_masks[ind:ind+data_chunk_length])\n    #             old_action_log_probs_batch.append(action_log_probs[ind:ind+data_chunk_length])\n    #             adv_targ.append(advantages[ind:ind+data_chunk_length])\n    #             # size [T+1 N Dim]-->[T N Dim]-->[T*N,Dim]-->[1,Dim]\n    #             rnn_states_batch.append(rnn_states[ind])\n    #             rnn_states_critic_batch.append(rnn_states_critic[ind])\n    #             if self.factor is not None:\n    #                 factor_batch.append(factor[ind:ind+data_chunk_length])\n    #         L, N = data_chunk_length, mini_batch_size\n    #\n    #         # These are all from_numpys of size (N, L, Dim)\n    #         share_obs_batch = np.stack(share_obs_batch)\n    #         obs_batch = np.stack(obs_batch)\n    #\n    #         actions_batch = np.stack(actions_batch)\n    #         if self.available_actions is not None:\n    #             available_actions_batch = np.stack(available_actions_batch)\n    #         if self.factor is not None:\n    #             factor_batch = np.stack(factor_batch)\n    #         value_preds_batch = np.stack(value_preds_batch)\n    #         return_batch = np.stack(return_batch)\n    #         masks_batch = np.stack(masks_batch)\n    #         active_masks_batch = np.stack(active_masks_batch)\n    #         old_action_log_probs_batch = np.stack(old_action_log_probs_batch)\n    #         adv_targ = np.stack(adv_targ)\n    #\n    #         # States is just a (N, -1) from_numpy\n    #         rnn_states_batch = np.stack(rnn_states_batch).reshape(N, *self.rnn_states.shape[2:])\n    #         rnn_states_critic_batch = np.stack(rnn_states_critic_batch).reshape(N, *self.rnn_states_critic.shape[2:])\n    #\n    #         # Flatten the (L, N, ...) from_numpys to (L * N, ...)\n    #         share_obs_batch = _flatten(L, N, share_obs_batch)\n    #         obs_batch = _flatten(L, N, obs_batch)\n    #         actions_batch = _flatten(L, N, actions_batch)\n    #         if self.available_actions is not None:\n    #             available_actions_batch = _flatten(L, N, available_actions_batch)\n    #         else:\n    #             available_actions_batch = None\n    #         if self.factor is not None:\n    #             factor_batch = _flatten(L, N, factor_batch)\n    #         value_preds_batch = _flatten(L, N, value_preds_batch)\n    #         return_batch = _flatten(L, N, return_batch)\n    #         masks_batch = _flatten(L, N, masks_batch)\n    #         active_masks_batch = _flatten(L, N, active_masks_batch)\n    #         old_action_log_probs_batch = _flatten(L, N, old_action_log_probs_batch)\n    #         adv_targ = _flatten(L, N, adv_targ)\n    #         if self.factor is not None:\n    #             yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch, factor_batch\n    #         else:\n    #             yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch\n"
  },
  {
    "path": "MACPO/macpo/utils/util.py",
    "content": "import numpy as np\nimport math\nimport torch\n\ndef check(input):\n    if type(input) == np.ndarray:\n        return torch.from_numpy(input)\n        \ndef get_gard_norm(it):\n    sum_grad = 0\n    for x in it:\n        if x.grad is None:\n            continue\n        sum_grad += x.grad.norm() ** 2\n    return math.sqrt(sum_grad)\n\ndef update_linear_schedule(optimizer, epoch, total_num_epochs, initial_lr):\n    \"\"\"Decreases the learning rate linearly\"\"\"\n    lr = initial_lr - (initial_lr * (epoch / float(total_num_epochs)))\n    for param_group in optimizer.param_groups:\n        param_group['lr'] = lr\n\ndef huber_loss(e, d):\n    a = (abs(e) <= d).float()\n    b = (e > d).float()\n    return a*e**2/2 + b*d*(abs(e)-d/2)\n\ndef mse_loss(e):\n    return e**2/2\n\ndef get_shape_from_obs_space(obs_space):\n    if obs_space.__class__.__name__ == 'Box':\n        obs_shape = obs_space.shape\n    elif obs_space.__class__.__name__ == 'list':\n        obs_shape = obs_space\n    else:\n        raise NotImplementedError\n    return obs_shape\n\ndef get_shape_from_act_space(act_space):\n    if act_space.__class__.__name__ == 'Discrete':\n        act_shape = 1\n    elif act_space.__class__.__name__ == \"MultiDiscrete\":\n        act_shape = act_space.shape\n    elif act_space.__class__.__name__ == \"Box\":\n        act_shape = act_space.shape[0]\n    elif act_space.__class__.__name__ == \"MultiBinary\":\n        act_shape = act_space.shape[0]\n    else:  # agar\n        act_shape = act_space[0].shape[0] + 1  \n    return act_shape\n\n\ndef tile_images(img_nhwc):\n    \"\"\"\n    Tile N images into one big PxQ image\n    (P,Q) are chosen to be as close as possible, and if N\n    is square, then P=Q.\n    input: img_nhwc, list or array of images, ndim=4 once turned into array\n        n = batch index, h = height, w = width, c = channel\n    returns:\n        bigim_HWc, ndarray with ndim=3\n    \"\"\"\n    img_nhwc = np.asarray(img_nhwc)\n    N, h, w, c = img_nhwc.shape\n    H = int(np.ceil(np.sqrt(N)))\n    W = int(np.ceil(float(N)/H))\n    img_nhwc = np.array(list(img_nhwc) + [img_nhwc[0]*0 for _ in range(N, H*W)])\n    img_HWhwc = img_nhwc.reshape(H, W, h, w, c)\n    img_HhWwc = img_HWhwc.transpose(0, 2, 1, 3, 4)\n    img_Hh_Ww_c = img_HhWwc.reshape(H*h, W*w, c)\n    return img_Hh_Ww_c"
  },
  {
    "path": "MACPO/macpo.egg-info/PKG-INFO",
    "content": "Metadata-Version: 2.1\nName: macpo\nVersion: 0.1.0\nSummary: macpo algorithms of marlbenchmark\nHome-page: UNKNOWN\nAuthor: marl\nAuthor-email: marl@gmail.com\nLicense: UNKNOWN\nDescription: # MAPPO\n        \n        Chao Yu*, Akash Velu*, Eugene Vinitsky, Yu Wang, Alexandre Bayen, and Yi Wu. \n        \n        Website: https://sites.google.com/view/mappo\n        \n        This repository implements MAPPO, a multi-agent variant of PPO. The implementation in this repositorory is used in the paper \"The Surprising Effectiveness of MAPPO in Cooperative Multi-Agent Games\" (https://arxiv.org/abs/2103.01955). \n        This repository is heavily based on https://github.com/ikostrikov/pytorch-a2c-ppo-acktr-gail. \n        \n        ## Environments supported:\n        \n        - [StarCraftII (SMAC)](https://github.com/oxwhirl/smac)\n        - [Hanabi](https://github.com/deepmind/hanabi-learning-environment)\n        - [Multiagent Particle-World Environments (MPEs)](https://github.com/openai/multiagent-particle-envs)\n        \n        ## 1. Usage\n        All core code is located within the onpolicy folder. The algorithms/ subfolder contains algorithm-specific code\n        for MAPPO. \n        \n        * The envs/ subfolder contains environment wrapper implementations for the MPEs, SMAC, and Hanabi. \n        \n        * Code to perform training rollouts and policy updates are contained within the runner/ folder - there is a runner for \n        each environment. \n        \n        * Executable scripts for training with default hyperparameters can be found in the scripts/ folder. The files are named\n        in the following manner: train_algo_environment.sh. Within each file, the map name (in the case of SMAC and the MPEs) can be altered. \n        * Python training scripts for each environment can be found in the scripts/train/ folder. \n        \n        * The config.py file contains relevant hyperparameter and env settings. Most hyperparameters are defaulted to the ones\n        used in the paper; however, please refer to the appendix for a full list of hyperparameters used. \n        \n        \n        ## 2. Installation\n        \n         Here we give an example installation on CUDA == 10.1. For non-GPU & other CUDA version installation, please refer to the [PyTorch website](https://pytorch.org/get-started/locally/).\n        \n        ``` Bash\n        # create conda environment\n        conda create -n marl python==3.6.1\n        conda activate marl\n        pip install torch==1.5.1+cu101 torchvision==0.6.1+cu101 -f https://download.pytorch.org/whl/torch_stable.html\n        ```\n        \n        ```\n        # install on-policy package\n        cd on-policy\n        pip install -e .\n        ```\n        \n        Even though we provide requirement.txt, it may have redundancy. We recommend that the user try to install other required packages by running the code and finding which required package hasn't installed yet.\n        \n        ### 2.1 Install StarCraftII [4.10](http://blzdistsc2-a.akamaihd.net/Linux/SC2.4.10.zip)\n        \n           \n        \n        ``` Bash\n        unzip SC2.4.10.zip\n        # password is iagreetotheeula\n        echo \"export SC2PATH=~/StarCraftII/\" > ~/.bashrc\n        ```\n        \n        * download SMAC Maps, and move it to `~/StarCraftII/Maps/`.\n        \n        * To use a stableid, copy `stableid.json` from https://github.com/Blizzard/s2client-proto.git to `~/StarCraftII/`.\n        \n        \n        ### 2.2 Hanabi\n        Environment code for Hanabi is developed from the open-source environment code, but has been slightly modified to fit the algorithms used here.  \n        To install, execute the following:\n        ``` Bash\n        pip install cffi\n        cd envs/hanabi\n        mkdir build & cd build\n        cmake ..\n        make -j\n        ```\n        \n        \n        ### 2.3 Install MPE\n        \n        ``` Bash\n        # install this package first\n        pip install seaborn\n        ```\n        \n        There are 3 Cooperative scenarios in MPE:\n        \n        * simple_spread\n        * simple_speaker_listener, which is 'Comm' scenario in paper\n        * simple_reference\n        \n        ## 3.Train\n        Here we use train_mpe.sh as an example:\n        ```\n        cd onpolicy/scripts\n        chmod +x ./train_mpe.sh\n        ./train_mpe.sh\n        ```\n        Local results are stored in subfold scripts/results. Note that we use Weights & Bias as the default visualization platform; to use Weights & Bias, please register and login to the platform first. More instructions for using Weights&Bias can be found in the official [documentation](https://docs.wandb.ai/). Adding the `--use_wandb` in command line or in the .sh file will use Tensorboard instead of Weights & Biases. \n        \n        We additionally provide `./eval_hanabi_forward.sh` for evaluating the hanabi score over 100k trials. \n        \n        ## 4. Publication\n        \n        If you find this repository useful, please cite our [paper](https://arxiv.org/abs/2103.01955):\n        ```\n        @misc{yu2021surprising,\n              title={The Surprising Effectiveness of MAPPO in Cooperative Multi-Agent Games}, \n              author={Chao Yu and Akash Velu and Eugene Vinitsky and Yu Wang and Alexandre Bayen and Yi Wu},\n              year={2021},\n              eprint={2103.01955},\n              archivePrefix={arXiv},\n              primaryClass={cs.LG}\n        }\n        ```\n        \n        \nKeywords: multi-agent reinforcement learning platform pytorch\nPlatform: UNKNOWN\nClassifier: Development Status :: 3 - Alpha\nClassifier: Intended Audience :: Science/Research\nClassifier: Topic :: Scientific/Engineering :: Artificial Intelligence\nClassifier: Topic :: Software Development :: Libraries :: Python Modules\nClassifier: Programming Language :: Python :: 3\nClassifier: License :: OSI Approved :: MIT License\nClassifier: Operating System :: OS Independent\nRequires-Python: >=3.6\nDescription-Content-Type: text/markdown\n"
  },
  {
    "path": "MACPO/macpo.egg-info/SOURCES.txt",
    "content": "README.md\nsetup.py\nmacpo/__init__.py\nmacpo/config.py\nmacpo.egg-info/PKG-INFO\nmacpo.egg-info/SOURCES.txt\nmacpo.egg-info/dependency_links.txt\nmacpo.egg-info/top_level.txt\nmacpo/algorithms/__init__.py\nmacpo/algorithms/r_mappo/__init__.py\nmacpo/algorithms/r_mappo/r_mactrpo_based_cpo.py\nmacpo/envs/__init__.py\nmacpo/envs/env_wrappers.py\nmacpo/envs/safety_ma_mujoco/__init__.py\nmacpo/envs/safety_ma_mujoco/test.py\nmacpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/__init__.py\nmacpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/ant.py\nmacpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/coupled_half_cheetah.py\nmacpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/half_cheetah.py\nmacpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/hopper.py\nmacpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/humanoid.py\nmacpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/manyagent_ant.py\nmacpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/manyagent_swimmer.py\nmacpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/mujoco_env.py\nmacpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/mujoco_multi.py\nmacpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/multiagentenv.py\nmacpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/obsk.py\nmacpo/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/__init__.py\nmacpo/runner/__init__.py\nmacpo/runner/separated/__init__.py\nmacpo/runner/separated/base_runner.py\nmacpo/runner/separated/base_runner_mactrpo_based_matrpo.py\nmacpo/runner/separated/mujoco_runner.py\nmacpo/runner/separated/mujoco_runner_mactrpo_based_matrpo.py\nmacpo/scripts/__init__.py\nmacpo/scripts/train/__init__.py\nmacpo/scripts/train/train_mujoco.py\nmacpo/utils/__init__.py\nmacpo/utils/multi_discrete.py\nmacpo/utils/popart.py\nmacpo/utils/separated_buffer.py\nmacpo/utils/util.py"
  },
  {
    "path": "MACPO/macpo.egg-info/dependency_links.txt",
    "content": "\n"
  },
  {
    "path": "MACPO/macpo.egg-info/top_level.txt",
    "content": "macpo\n"
  },
  {
    "path": "MACPO/setup.py",
    "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nfrom setuptools import setup, find_packages\nimport setuptools\n\ndef get_version() -> str:\n    # https://packaging.python.org/guides/single-sourcing-package-version/\n    init = open(os.path.join(\"macpo\", \"__init__.py\"), \"r\").read().split()\n    return init[init.index(\"__version__\") + 2][1:-1]\n\nsetup(\n    name=\"macpo\",  # Replace with your own username\n    version=get_version(),\n    description=\"macpo algorithms of marlbenchmark\",\n    # long_description=open(\"README.md\", encoding=\"utf8\").read(),\n    long_description_content_type=\"text/markdown\",\n    author=\"marl\",\n    author_email=\"marl@gmail.com\",\n    packages=setuptools.find_packages(),\n    classifiers=[\n        \"Development Status :: 3 - Alpha\",\n        \"Intended Audience :: Science/Research\",\n        \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n        \"Topic :: Software Development :: Libraries :: Python Modules\",\n        \"Programming Language :: Python :: 3\",\n        \"License :: OSI Approved :: MIT License\",\n        \"Operating System :: OS Independent\",\n    ],\n    keywords=\"multi-agent reinforcement learning platform pytorch\",\n    python_requires='>=3.6',\n)\n"
  },
  {
    "path": "MAPPO-Lagrangian/.gitignore",
    "content": "/.idea/\n*/__pycache__/\n"
  },
  {
    "path": "MAPPO-Lagrangian/environment.yaml",
    "content": "name: marl\nchannels:\n  - defaults\ndependencies:\n  - _libgcc_mutex=0.1=main\n  - _tflow_select=2.1.0=gpu\n  - absl-py=0.9.0=py36_0\n  - astor=0.8.0=py36_0\n  - blas=1.0=mkl\n  - c-ares=1.15.0=h7b6447c_1001\n  - ca-certificates=2020.1.1=0\n  - certifi=2020.4.5.2=py36_0\n  - cudatoolkit=10.0.130=0\n  - cudnn=7.6.5=cuda10.0_0\n  - cupti=10.0.130=0\n  - gast=0.2.2=py36_0\n  - google-pasta=0.2.0=py_0\n  - grpcio=1.14.1=py36h9ba97e2_0\n  - h5py=2.10.0=py36h7918eee_0\n  - hdf5=1.10.4=hb1b8bf9_0\n  - intel-openmp=2020.1=217\n  - keras-applications=1.0.8=py_0\n  - keras-preprocessing=1.1.0=py_1\n  - libedit=3.1=heed3624_0\n  - libffi=3.2.1=hd88cf55_4\n  - libgcc-ng=9.1.0=hdf63c60_0\n  - libgfortran-ng=7.3.0=hdf63c60_0\n  - libprotobuf=3.12.3=hd408876_0\n  - libstdcxx-ng=9.1.0=hdf63c60_0\n  - markdown=3.1.1=py36_0\n  - mkl=2020.1=217\n  - mkl-service=2.3.0=py36he904b0f_0\n  - mkl_fft=1.1.0=py36h23d657b_0\n  - mkl_random=1.1.1=py36h0573a6f_0\n  - ncurses=6.0=h9df7e31_2\n  - numpy=1.18.1=py36h4f9e942_0\n  - numpy-base=1.18.1=py36hde5b4d6_1\n  - openssl=1.0.2u=h7b6447c_0\n  - opt_einsum=3.1.0=py_0\n  - pip=20.1.1=py36_1\n  - protobuf=3.12.3=py36he6710b0_0\n  - python=3.6.2=hca45abc_19\n  - readline=7.0=ha6073c6_4\n  - scipy=1.4.1=py36h0b6359f_0\n  - setuptools=47.3.0=py36_0\n  - six=1.15.0=py_0\n  - sqlite=3.23.1=he433501_0\n  - tensorboard=2.0.0=pyhb38c66f_1\n  - tensorflow=2.0.0=gpu_py36h6b29c10_0\n  - tensorflow-base=2.0.0=gpu_py36h0ec5d1f_0\n  - tensorflow-estimator=2.0.0=pyh2649769_0\n  - tensorflow-gpu=2.0.0=h0d30ee6_0\n  - termcolor=1.1.0=py36_1\n  - tk=8.6.8=hbc83047_0\n  - werkzeug=0.16.1=py_0\n  - wheel=0.34.2=py36_0\n  - wrapt=1.12.1=py36h7b6447c_1\n  - xz=5.2.5=h7b6447c_0\n  - zlib=1.2.11=h7b6447c_3\n  - pip:\n      - aiohttp==3.6.2\n      - aioredis==1.3.1\n      - astunparse==1.6.3\n      - async-timeout==3.0.1\n      - atari-py==0.2.6\n      - atomicwrites==1.2.1\n      - attrs==18.2.0\n      - beautifulsoup4==4.9.1\n      - blessings==1.7\n      - cachetools==4.1.1\n      - cffi==1.14.1\n      - chardet==3.0.4\n      - click==7.1.2\n      - cloudpickle==1.3.0\n      - colorama==0.4.3\n      - colorful==0.5.4\n      - configparser==5.0.1\n      - contextvars==2.4\n      - cycler==0.10.0\n      - cython==0.29.21\n      - deepdiff==4.3.2\n      - dill==0.3.2\n      - docker-pycreds==0.4.0\n      - docopt==0.6.2\n      - fasteners==0.15\n      - filelock==3.0.12\n      - funcsigs==1.0.2\n      - future==0.16.0\n      - gin==0.1.6\n      - gin-config==0.3.0\n      - gitdb==4.0.5\n      - gitpython==3.1.9\n      - glfw==1.12.0\n      - google==3.0.0\n      - google-api-core==1.22.1\n      - google-auth==1.21.0\n      - google-auth-oauthlib==0.4.1\n      - googleapis-common-protos==1.52.0\n      - gpustat==0.6.0\n      - gql==0.2.0\n      - graphql-core==1.1\n      - gym==0.17.2\n      - hiredis==1.1.0\n      - idna==2.7\n      - idna-ssl==1.1.0\n      - imageio==2.4.1\n      - immutables==0.14\n      - importlib-metadata==1.7.0\n      - joblib==0.16.0\n      - jsonnet==0.16.0\n      - jsonpickle==0.9.6\n      - jsonschema==3.2.0\n      - kiwisolver==1.0.1\n      - lockfile==0.12.2\n      - mappo==0.0.1\n      - matplotlib==3.0.0\n      - mock==2.0.0\n      - monotonic==1.5\n      - more-itertools==4.3.0\n      - mpi4py==3.0.3\n      - mpyq==0.2.5\n      - msgpack==1.0.0\n      - mujoco-py==2.0.2.13\n      - mujoco-worldgen==0.0.0\n      - multidict==4.7.6\n      - munch==2.3.2\n      - nvidia-ml-py3==7.352.0\n      - oauthlib==3.1.0\n      - opencensus==0.7.10\n      - opencensus-context==0.1.1\n      - opencv-python==4.2.0.34\n      - ordered-set==4.0.2\n      - packaging==20.4\n      - pandas==1.1.1\n      - pathlib2==2.3.2\n      - pathtools==0.1.2\n      - pbr==4.3.0\n      - pillow==5.3.0\n      - pluggy==0.7.1\n      - portpicker==1.2.0\n      - probscale==0.2.3\n      - progressbar2==3.53.1\n      - prometheus-client==0.8.0\n      - promise==2.3\n      - psutil==5.7.2\n      - py==1.6.0\n      - py-spy==0.3.3\n      - pyasn1==0.4.8\n      - pyasn1-modules==0.2.8\n      - pycparser==2.20\n      - pygame==1.9.4\n      - pyglet==1.5.0\n      - pyopengl==3.1.5\n      - pyopengl-accelerate==3.1.5\n      - pyparsing==2.2.2\n      - pyrsistent==0.16.0\n      - pysc2==3.0.0\n      - pytest==3.8.2\n      - python-dateutil==2.7.3\n      - python-utils==2.4.0\n      - pytz==2020.1\n      - pyyaml==3.13\n      - pyzmq==19.0.2\n      - ray==0.8.0\n      - redis==3.4.1\n      - requests==2.24.0\n      - requests-oauthlib==1.3.0\n      - rsa==4.6\n      - s2clientprotocol==4.10.1.75800.0\n      - s2protocol==4.11.4.78285.0\n      - sacred==0.7.2\n      - seaborn==0.10.1\n      - sentry-sdk==0.18.0\n      - shortuuid==1.0.1\n      - sk-video==1.1.10\n      - smmap==3.0.4\n      - snakeviz==1.0.0\n      - soupsieve==2.0.1\n      - subprocess32==3.5.4\n      - tabulate==0.8.7\n      - tensorboard-logger==0.1.0\n      - tensorboard-plugin-wit==1.7.0\n      - tensorboardx==2.0\n      - torch==1.5.1+cu101\n      - torchvision==0.6.1+cu101\n      - tornado==5.1.1\n      - tqdm==4.48.2\n      - typing-extensions==3.7.4.3\n      - urllib3==1.23\n      - wandb==0.10.5\n      - watchdog==0.10.3\n      - websocket-client==0.53.0\n      - whichcraft==0.5.2\n      - xmltodict==0.12.0\n      - yarl==1.5.1\n      - zipp==3.1.0\n      - zmq==0.0.0\n"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/__init__.py",
    "content": "from mappo_lagrangian import algorithms, envs, runner, scripts, utils, config\n\n\n__version__ = \"0.1.0\"\n\n__all__ = [\n    \"algorithms\",\n    \"envs\",\n    \"runner\",\n    \"scripts\",\n    \"utils\",\n    \"config\",\n]"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/algorithms/__init__.py",
    "content": ""
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/algorithms/r_mappo/__init__.py",
    "content": "def cost_trpo_macppo():\n    return None"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/algorithms/r_mappo/algorithm/MACPPOPolicy.py",
    "content": "import torch\nfrom mappo_lagrangian.algorithms.r_mappo.algorithm.r_actor_critic import R_Actor, R_Critic\nfrom mappo_lagrangian.utils.util import update_linear_schedule\n\n\nclass MACPPOPolicy:\n    \"\"\"\n    Safe MAPPO Policy  class. Wraps actor and critic networks to compute actions and value function predictions.\n\n    :param args: (argparse.Namespace) arguments containing relevant model and policy information.\n    :param obs_space: (gym.Space) observation space.\n    :param cent_obs_space: (gym.Space) value function input space (centralized input for MAPPO, decentralized for IPPO).\n    :param action_space: (gym.Space) action space.\n    :param device: (torch.device) specifies the device to run on (cpu/gpu).\n    \"\"\"\n\n    def __init__(self, args, obs_space, cent_obs_space, act_space, device=torch.device(\"cpu\")):\n        self.device = device\n        self.lr = args.lr\n        self.critic_lr = args.critic_lr\n        self.opti_eps = args.opti_eps\n        self.weight_decay = args.weight_decay\n\n        self.obs_space = obs_space\n        self.share_obs_space = cent_obs_space\n        self.act_space = act_space\n\n        self.actor = R_Actor(args, self.obs_space, self.act_space, self.device)\n        self.critic = R_Critic(args, self.share_obs_space, self.device)\n        self.cost_critic = R_Critic(args, self.share_obs_space, self.device)\n\n        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(),\n                                                lr=self.lr, eps=self.opti_eps,\n                                                weight_decay=self.weight_decay)\n        self.critic_optimizer = torch.optim.Adam(self.critic.parameters(),\n                                                 lr=self.critic_lr,\n                                                 eps=self.opti_eps,\n                                                 weight_decay=self.weight_decay)\n        self.cost_optimizer = torch.optim.Adam(self.cost_critic.parameters(),\n                                               lr=self.critic_lr,\n                                               eps=self.opti_eps,\n                                               weight_decay=self.weight_decay)\n\n    def lr_decay(self, episode, episodes):\n        \"\"\"\n        Decay the actor and critic learning rates.\n        :param episode: (int) current training episode.\n        :param episodes: (int) total number of training episodes.\n        \"\"\"\n        update_linear_schedule(self.actor_optimizer, episode, episodes, self.lr)\n        update_linear_schedule(self.critic_optimizer, episode, episodes, self.critic_lr)\n        update_linear_schedule(self.cost_optimizer, episode, episodes, self.critic_lr)\n\n    def get_actions(self, cent_obs, obs, rnn_states_actor, rnn_states_critic, masks, available_actions=None,\n                    deterministic=False, rnn_states_cost=None):\n        \"\"\"\n        Compute actions and value function predictions for the given inputs.\n        :param cent_obs (np.ndarray): centralized input to the critic.\n        :param obs (np.ndarray): local agent inputs to the actor.\n        :param rnn_states_actor: (np.ndarray) if actor is RNN, RNN states for actor.\n        :param rnn_states_critic: (np.ndarray) if critic is RNN, RNN states for critic.\n        :param masks: (np.ndarray) denotes points at which RNN states should be reset.\n        :param available_actions: (np.ndarray) denotes which actions are available to agent\n                                  (if None, all actions available)\n        :param deterministic: (bool) whether the action should be mode of distribution or should be sampled.\n\n        :return values: (torch.Tensor) value function predictions.\n        :return actions: (torch.Tensor) actions to take.\n        :return action_log_probs: (torch.Tensor) log probabilities of chosen actions.\n        :return rnn_states_actor: (torch.Tensor) updated actor network RNN states.\n        :return rnn_states_critic: (torch.Tensor) updated critic network RNN states.\n        \"\"\"\n        actions, action_log_probs, rnn_states_actor = self.actor(obs,\n                                                                 rnn_states_actor,\n                                                                 masks,\n                                                                 available_actions,\n                                                                 deterministic)\n\n        values, rnn_states_critic = self.critic(cent_obs, rnn_states_critic, masks)\n        if rnn_states_cost is None:\n            return values, actions, action_log_probs, rnn_states_actor, rnn_states_critic\n        else:\n            cost_preds, rnn_states_cost = self.cost_critic(cent_obs, rnn_states_cost, masks)\n            return values, actions, action_log_probs, rnn_states_actor, rnn_states_critic, cost_preds, rnn_states_cost\n\n\n    def get_values(self, cent_obs, rnn_states_critic, masks):\n        \"\"\"\n        Get value function predictions.\n        :param cent_obs (np.ndarray): centralized input to the critic.\n        :param rnn_states_critic: (np.ndarray) if critic is RNN, RNN states for critic.\n        :param masks: (np.ndarray) denotes points at which RNN states should be reset.\n\n        :return values: (torch.Tensor) value function predictions.\n        \"\"\"\n        values, _ = self.critic(cent_obs, rnn_states_critic, masks)\n        return values\n\n    def get_cost_values(self, cent_obs, rnn_states_cost, masks):\n        \"\"\"\n        Get constraint cost predictions.\n        :param cent_obs (np.ndarray): centralized input to the critic.\n        :param rnn_states_critic: (np.ndarray) if critic is RNN, RNN states for critic.\n        :param masks: (np.ndarray) denotes points at which RNN states should be reset.\n\n        :return values: (torch.Tensor) value function predictions.\n        \"\"\"\n        cost_preds, _ = self.cost_critic(cent_obs, rnn_states_cost, masks)\n        return cost_preds\n\n    def evaluate_actions(self, cent_obs, obs, rnn_states_actor, rnn_states_critic, action, masks,\n                         available_actions=None, active_masks=None, rnn_states_cost=None):\n        \"\"\"\n        Get action logprobs / entropy and value function predictions for actor update.\n        :param cent_obs (np.ndarray): centralized input to the critic.\n        :param obs (np.ndarray): local agent inputs to the actor.\n        :param rnn_states_actor: (np.ndarray) if actor is RNN, RNN states for actor.\n        :param rnn_states_critic: (np.ndarray) if critic is RNN, RNN states for critic.\n        :param action: (np.ndarray) actions whose log probabilites and entropy to compute.\n        :param masks: (np.ndarray) denotes points at which RNN states should be reset.\n        :param available_actions: (np.ndarray) denotes which actions are available to agent\n                                  (if None, all actions available)\n        :param active_masks: (torch.Tensor) denotes whether an agent is active or dead.\n\n        :return values: (torch.Tensor) value function predictions.\n        :return action_log_probs: (torch.Tensor) log probabilities of the input actions.\n        :return dist_entropy: (torch.Tensor) action distribution entropy for the given inputs.\n        \"\"\"\n        action_log_probs, dist_entropy = self.actor.evaluate_actions(obs,\n                                                                     rnn_states_actor,\n                                                                     action,\n                                                                     masks,\n                                                                     available_actions,\n                                                                     active_masks)\n\n        values, _ = self.critic(cent_obs, rnn_states_critic, masks)\n        if rnn_states_cost is None:\n            return values, action_log_probs, dist_entropy\n        else:\n            cost_values, _ = self.cost_critic(cent_obs, rnn_states_cost, masks)\n            return values, action_log_probs, dist_entropy, cost_values\n\n    def act(self, obs, rnn_states_actor, masks, available_actions=None, deterministic=False):\n        \"\"\"\n        Compute actions using the given inputs.\n        :param obs (np.ndarray): local agent inputs to the actor.\n        :param rnn_states_actor: (np.ndarray) if actor is RNN, RNN states for actor.\n        :param masks: (np.ndarray) denotes points at which RNN states should be reset.\n        :param available_actions: (np.ndarray) denotes which actions are available to agent\n                                  (if None, all actions available)\n        :param deterministic: (bool) whether the action should be mode of distribution or should be sampled.\n        \"\"\"\n        actions, _, rnn_states_actor = self.actor(obs, rnn_states_actor, masks, available_actions, deterministic)\n        return actions, rnn_states_actor\n"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/algorithms/r_mappo/algorithm/rMAPPOPolicy.py",
    "content": "import torch\nfrom mappo_lagrangian.algorithms.r_mappo.algorithm.r_actor_critic import R_Actor, R_Critic\nfrom mappo_lagrangian.utils.util import update_linear_schedule\n\n\nclass R_MAPPOPolicy:\n    \"\"\"\n    MAPPO Policy  class. Wraps actor and critic networks to compute actions and value function predictions.\n\n    :param args: (argparse.Namespace) arguments containing relevant model and policy information.\n    :param obs_space: (gym.Space) observation space.\n    :param cent_obs_space: (gym.Space) value function input space (centralized input for MAPPO, decentralized for IPPO).\n    :param action_space: (gym.Space) action space.\n    :param device: (torch.device) specifies the device to run on (cpu/gpu).\n    \"\"\"\n\n    def __init__(self, args, obs_space, cent_obs_space, act_space, device=torch.device(\"cpu\")):\n        self.device = device\n        self.lr = args.lr\n        self.critic_lr = args.critic_lr\n        self.opti_eps = args.opti_eps\n        self.weight_decay = args.weight_decay\n\n        self.obs_space = obs_space\n        self.share_obs_space = cent_obs_space\n        self.act_space = act_space\n\n        self.actor = R_Actor(args, self.obs_space, self.act_space, self.device)\n        self.critic = R_Critic(args, self.share_obs_space, self.device)\n\n        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(),\n                                                lr=self.lr, eps=self.opti_eps,\n                                                weight_decay=self.weight_decay)\n        self.critic_optimizer = torch.optim.Adam(self.critic.parameters(),\n                                                 lr=self.critic_lr,\n                                                 eps=self.opti_eps,\n                                                 weight_decay=self.weight_decay)\n\n    def lr_decay(self, episode, episodes):\n        \"\"\"\n        Decay the actor and critic learning rates.\n        :param episode: (int) current training episode.\n        :param episodes: (int) total number of training episodes.\n        \"\"\"\n        update_linear_schedule(self.actor_optimizer, episode, episodes, self.lr)\n        update_linear_schedule(self.critic_optimizer, episode, episodes, self.critic_lr)\n\n    def get_actions(self, cent_obs, obs, rnn_states_actor, rnn_states_critic, masks, available_actions=None,\n                    deterministic=False):\n        \"\"\"\n        Compute actions and value function predictions for the given inputs.\n        :param cent_obs (np.ndarray): centralized input to the critic.\n        :param obs (np.ndarray): local agent inputs to the actor.\n        :param rnn_states_actor: (np.ndarray) if actor is RNN, RNN states for actor.\n        :param rnn_states_critic: (np.ndarray) if critic is RNN, RNN states for critic.\n        :param masks: (np.ndarray) denotes points at which RNN states should be reset.\n        :param available_actions: (np.ndarray) denotes which actions are available to agent\n                                  (if None, all actions available)\n        :param deterministic: (bool) whether the action should be mode of distribution or should be sampled.\n\n        :return values: (torch.Tensor) value function predictions.\n        :return actions: (torch.Tensor) actions to take.\n        :return action_log_probs: (torch.Tensor) log probabilities of chosen actions.\n        :return rnn_states_actor: (torch.Tensor) updated actor network RNN states.\n        :return rnn_states_critic: (torch.Tensor) updated critic network RNN states.\n        \"\"\"\n        actions, action_log_probs, rnn_states_actor = self.actor(obs,\n                                                                 rnn_states_actor,\n                                                                 masks,\n                                                                 available_actions,\n                                                                 deterministic)\n\n        values, rnn_states_critic = self.critic(cent_obs, rnn_states_critic, masks)\n        return values, actions, action_log_probs, rnn_states_actor, rnn_states_critic\n\n    def get_values(self, cent_obs, rnn_states_critic, masks):\n        \"\"\"\n        Get value function predictions.\n        :param cent_obs (np.ndarray): centralized input to the critic.\n        :param rnn_states_critic: (np.ndarray) if critic is RNN, RNN states for critic.\n        :param masks: (np.ndarray) denotes points at which RNN states should be reset.\n\n        :return values: (torch.Tensor) value function predictions.\n        \"\"\"\n        values, _ = self.critic(cent_obs, rnn_states_critic, masks)\n        return values\n\n    def evaluate_actions(self, cent_obs, obs, rnn_states_actor, rnn_states_critic, action, masks,\n                         available_actions=None, active_masks=None):\n        \"\"\"\n        Get action logprobs / entropy and value function predictions for actor update.\n        :param cent_obs (np.ndarray): centralized input to the critic.\n        :param obs (np.ndarray): local agent inputs to the actor.\n        :param rnn_states_actor: (np.ndarray) if actor is RNN, RNN states for actor.\n        :param rnn_states_critic: (np.ndarray) if critic is RNN, RNN states for critic.\n        :param action: (np.ndarray) actions whose log probabilites and entropy to compute.\n        :param masks: (np.ndarray) denotes points at which RNN states should be reset.\n        :param available_actions: (np.ndarray) denotes which actions are available to agent\n                                  (if None, all actions available)\n        :param active_masks: (torch.Tensor) denotes whether an agent is active or dead.\n\n        :return values: (torch.Tensor) value function predictions.\n        :return action_log_probs: (torch.Tensor) log probabilities of the input actions.\n        :return dist_entropy: (torch.Tensor) action distribution entropy for the given inputs.\n        \"\"\"\n        action_log_probs, dist_entropy = self.actor.evaluate_actions(obs,\n                                                                     rnn_states_actor,\n                                                                     action,\n                                                                     masks,\n                                                                     available_actions,\n                                                                     active_masks)\n\n        values, _ = self.critic(cent_obs, rnn_states_critic, masks)\n        return values, action_log_probs, dist_entropy\n\n    def act(self, obs, rnn_states_actor, masks, available_actions=None, deterministic=False):\n        \"\"\"\n        Compute actions using the given inputs.\n        :param obs (np.ndarray): local agent inputs to the actor.\n        :param rnn_states_actor: (np.ndarray) if actor is RNN, RNN states for actor.\n        :param masks: (np.ndarray) denotes points at which RNN states should be reset.\n        :param available_actions: (np.ndarray) denotes which actions are available to agent\n                                  (if None, all actions available)\n        :param deterministic: (bool) whether the action should be mode of distribution or should be sampled.\n        \"\"\"\n        actions, _, rnn_states_actor = self.actor(obs, rnn_states_actor, masks, available_actions, deterministic)\n        return actions, rnn_states_actor\n"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/algorithms/r_mappo/algorithm/r_actor_critic.py",
    "content": "import torch\nimport torch.nn as nn\nfrom mappo_lagrangian.algorithms.utils.util import init, check\nfrom mappo_lagrangian.algorithms.utils.cnn import CNNBase\nfrom mappo_lagrangian.algorithms.utils.mlp import MLPBase\nfrom mappo_lagrangian.algorithms.utils.rnn import RNNLayer\nfrom mappo_lagrangian.algorithms.utils.act import ACTLayer\nfrom mappo_lagrangian.utils.util import get_shape_from_obs_space\n\n\nclass R_Actor(nn.Module):\n    \"\"\"\n    Actor network class for MAPPO. Outputs actions given observations.\n    :param args: (argparse.Namespace) arguments containing relevant model information.\n    :param obs_space: (gym.Space) observation space.\n    :param action_space: (gym.Space) action space.\n    :param device: (torch.device) specifies the device to run on (cpu/gpu).\n    \"\"\"\n    def __init__(self, args, obs_space, action_space, device=torch.device(\"cpu\")):\n        super(R_Actor, self).__init__()\n        self.hidden_size = args.hidden_size\n\n        self._gain = args.gain\n        self._use_orthogonal = args.use_orthogonal\n        self._use_policy_active_masks = args.use_policy_active_masks\n        self._use_naive_recurrent_policy = args.use_naive_recurrent_policy\n        self._use_recurrent_policy = args.use_recurrent_policy\n        self._recurrent_N = args.recurrent_N\n        self.tpdv = dict(dtype=torch.float32, device=device)\n\n        obs_shape = get_shape_from_obs_space(obs_space)\n        base = CNNBase if len(obs_shape) == 3 else MLPBase\n        self.base = base(args, obs_shape)\n\n        if self._use_naive_recurrent_policy or self._use_recurrent_policy:\n            self.rnn = RNNLayer(self.hidden_size, self.hidden_size, self._recurrent_N, self._use_orthogonal)\n\n        self.act = ACTLayer(action_space, self.hidden_size, self._use_orthogonal, self._gain, args)\n\n        self.to(device)\n\n    def forward(self, obs, rnn_states, masks, available_actions=None, deterministic=False):\n        \"\"\"\n        Compute actions from the given inputs.\n        :param obs: (np.ndarray / torch.Tensor) observation inputs into network.\n        :param rnn_states: (np.ndarray / torch.Tensor) if RNN network, hidden states for RNN.\n        :param masks: (np.ndarray / torch.Tensor) mask tensor denoting if hidden states should be reinitialized to zeros.\n        :param available_actions: (np.ndarray / torch.Tensor) denotes which actions are available to agent\n                                                              (if None, all actions available)\n        :param deterministic: (bool) whether to sample from action distribution or return the mode.\n\n        :return actions: (torch.Tensor) actions to take.\n        :return action_log_probs: (torch.Tensor) log probabilities of taken actions.\n        :return rnn_states: (torch.Tensor) updated RNN hidden states.\n        \"\"\"\n        obs = check(obs).to(**self.tpdv)\n        rnn_states = check(rnn_states).to(**self.tpdv)\n        masks = check(masks).to(**self.tpdv)\n        if available_actions is not None:\n            available_actions = check(available_actions).to(**self.tpdv)\n\n        actor_features = self.base(obs)\n\n        if self._use_naive_recurrent_policy or self._use_recurrent_policy:\n            actor_features, rnn_states = self.rnn(actor_features, rnn_states, masks)\n\n        actions, action_log_probs = self.act(actor_features, available_actions, deterministic)\n\n        return actions, action_log_probs, rnn_states\n\n    def evaluate_actions(self, obs, rnn_states, action, masks, available_actions=None, active_masks=None):\n        \"\"\"\n        Compute log probability and entropy of given actions.\n        :param obs: (torch.Tensor) observation inputs into network.\n        :param action: (torch.Tensor) actions whose entropy and log probability to evaluate.\n        :param rnn_states: (torch.Tensor) if RNN network, hidden states for RNN.\n        :param masks: (torch.Tensor) mask tensor denoting if hidden states should be reinitialized to zeros.\n        :param available_actions: (torch.Tensor) denotes which actions are available to agent\n                                                              (if None, all actions available)\n        :param active_masks: (torch.Tensor) denotes whether an agent is active or dead.\n\n        :return action_log_probs: (torch.Tensor) log probabilities of the input actions.\n        :return dist_entropy: (torch.Tensor) action distribution entropy for the given inputs.\n        \"\"\"\n        obs = check(obs).to(**self.tpdv)\n        rnn_states = check(rnn_states).to(**self.tpdv)\n        action = check(action).to(**self.tpdv)\n        masks = check(masks).to(**self.tpdv)\n        if available_actions is not None:\n            available_actions = check(available_actions).to(**self.tpdv)\n\n        if active_masks is not None:\n            active_masks = check(active_masks).to(**self.tpdv)\n\n        actor_features = self.base(obs)\n\n        if self._use_naive_recurrent_policy or self._use_recurrent_policy:\n            actor_features, rnn_states = self.rnn(actor_features, rnn_states, masks)\n\n        action_log_probs, dist_entropy = self.act.evaluate_actions(actor_features,\n                                                                   action, available_actions,\n                                                                   active_masks=\n                                                                   active_masks if self._use_policy_active_masks\n                                                                   else None)\n\n        return action_log_probs, dist_entropy\n\n\nclass R_Critic(nn.Module):\n    \"\"\"\n    Critic network class for MAPPO. Outputs value function predictions given centralized input (MAPPO) or\n                            local observations (IPPO).\n    :param args: (argparse.Namespace) arguments containing relevant model information.\n    :param cent_obs_space: (gym.Space) (centralized) observation space.\n    :param device: (torch.device) specifies the device to run on (cpu/gpu).\n    \"\"\"\n    def __init__(self, args, cent_obs_space, device=torch.device(\"cpu\")):\n        super(R_Critic, self).__init__()\n        self.hidden_size = args.hidden_size\n        self._use_orthogonal = args.use_orthogonal\n        self._use_naive_recurrent_policy = args.use_naive_recurrent_policy\n        self._use_recurrent_policy = args.use_recurrent_policy\n        self._recurrent_N = args.recurrent_N\n        self.tpdv = dict(dtype=torch.float32, device=device)\n        init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][self._use_orthogonal]\n\n        cent_obs_shape = get_shape_from_obs_space(cent_obs_space)\n        base = CNNBase if len(cent_obs_shape) == 3 else MLPBase\n        self.base = base(args, cent_obs_shape)\n\n        if self._use_naive_recurrent_policy or self._use_recurrent_policy:\n            self.rnn = RNNLayer(self.hidden_size, self.hidden_size, self._recurrent_N, self._use_orthogonal)\n\n        def init_(m):\n            return init(m, init_method, lambda x: nn.init.constant_(x, 0))\n\n        self.v_out = init_(nn.Linear(self.hidden_size, 1))\n\n        self.to(device)\n\n    def forward(self, cent_obs, rnn_states, masks):\n        \"\"\"\n        Compute actions from the given inputs.\n        :param cent_obs: (np.ndarray / torch.Tensor) observation inputs into network.\n        :param rnn_states: (np.ndarray / torch.Tensor) if RNN network, hidden states for RNN.\n        :param masks: (np.ndarray / torch.Tensor) mask tensor denoting if RNN states should be reinitialized to zeros.\n\n        :return values: (torch.Tensor) value function predictions.\n        :return rnn_states: (torch.Tensor) updated RNN hidden states.\n        \"\"\"\n        cent_obs = check(cent_obs).to(**self.tpdv)\n        rnn_states = check(rnn_states).to(**self.tpdv)\n        masks = check(masks).to(**self.tpdv)\n\n        critic_features = self.base(cent_obs)\n        if self._use_naive_recurrent_policy or self._use_recurrent_policy:\n            critic_features, rnn_states = self.rnn(critic_features, rnn_states, masks)\n        values = self.v_out(critic_features)\n\n        return values, rnn_states\n"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/algorithms/r_mappo/r_mappo_lagr.py",
    "content": "import numpy as np\nimport torch\nimport torch.nn as nn\nfrom mappo_lagrangian.utils.util import get_gard_norm, huber_loss, mse_loss\nfrom mappo_lagrangian.utils.popart import PopArt\nfrom mappo_lagrangian.algorithms.utils.util import check\n\nclass R_MAPPO_Lagr:\n    \"\"\"\n    Trainer class for MAPPO-L to update policies.\n    :param args: (argparse.Namespace) arguments containing relevant model, policy, and env information.\n    :param policy: (R_MAPPO_Policy) policy to update.\n    :param device: (torch.device) specifies the device to run on (cpu/gpu).\n    :param precompute: Use an 'input' for the linearization constant instead of true_linear_leq_constraint.\n                           If present, overrides surrogate\n                           When using precompute, the last input is the precomputed linearization constant\n\n    :param attempt_(in)feasible_recovery: deals with cases where x=0 is infeasible point but problem still feasible\n                                                               (where optimization problem is entirely infeasible)\n\n    :param revert_to_last_safe_point: Behavior protocol for situation when optimization problem is entirely infeasible.\n                                          Specifies that we should just reset the parameters to the last point\n                                          that satisfied constraint.\n    \"\"\"\n\n    def __init__(self,\n                 args,\n                 policy, hvp_approach=None, attempt_feasible_recovery=False,\n                 attempt_infeasible_recovery=False, revert_to_last_safe_point=False, delta_bound=0.02, safety_bound=10,\n                 _backtrack_ratio=0.8, _max_backtracks=15, _constraint_name_1=\"trust_region\",\n                 _constraint_name_2=\"safety_region\", linesearch_infeasible_recovery=True, accept_violation=False,\n                 device=torch.device(\"cpu\")):\n        self.args = args\n        self.device = device\n        self.tpdv = dict(dtype=torch.float32, device=device)\n        self.policy = policy\n        # todo hyper parameters for compute hessian\n        self._damping = 0.00001\n\n        self.clip_param = args.clip_param\n        self.ppo_epoch = args.ppo_epoch\n        self.num_mini_batch = args.num_mini_batch\n        self.data_chunk_length = args.data_chunk_length\n        self.value_loss_coef = args.value_loss_coef\n        self.entropy_coef = args.entropy_coef\n        self.max_grad_norm = args.max_grad_norm\n        self.huber_delta = args.huber_delta\n        self.gamma = args.gamma\n\n        self._use_recurrent_policy = args.use_recurrent_policy\n        self._use_naive_recurrent = args.use_naive_recurrent_policy\n        self._use_max_grad_norm = args.use_max_grad_norm\n        self._use_clipped_value_loss = args.use_clipped_value_loss\n        self._use_huber_loss = args.use_huber_loss\n        self._use_popart = args.use_popart\n        self._use_value_active_masks = args.use_value_active_masks\n        self._use_policy_active_masks = args.use_policy_active_masks\n\n        self.attempt_feasible_recovery = attempt_feasible_recovery\n        self.attempt_infeasible_recovery = attempt_infeasible_recovery\n        self.revert_to_last_safe_point = revert_to_last_safe_point\n        num_slices = 1\n        self._max_quad_constraint_val = delta_bound\n        self._max_lin_constraint_val = safety_bound\n        self._backtrack_ratio = _backtrack_ratio\n        self._max_backtracks = _max_backtracks\n        self._constraint_name_1 = _constraint_name_1\n        self._constraint_name_2 = _constraint_name_2\n        self._linesearch_infeasible_recovery = linesearch_infeasible_recovery\n        self._accept_violation = accept_violation\n\n        self.lagrangian_coef = args.lagrangian_coef_rate # lagrangian_coef\n        self.lamda_lagr = args.lamda_lagr # 0.78\n        self.safety_bound = args.safety_bound # 0.2 Ant\n\n\n\n\n        self._hvp_approach = hvp_approach\n\n        if self._use_popart:\n            self.value_normalizer = PopArt(1, device=self.device)\n        else:\n            self.value_normalizer = None\n\n    def cal_value_loss(self, values, value_preds_batch, return_batch, active_masks_batch):\n        \"\"\"\n        Calculate value function loss.\n        :param values: (torch.Tensor) value function predictions.\n        :param value_preds_batch: (torch.Tensor) \"old\" value  predictions from data batch (used for value clip loss)\n        :param return_batch: (torch.Tensor) reward to go returns.\n        :param active_masks_batch: (torch.Tensor) denotes if agent is active or dead at a given timesep.\n\n        :return value_loss: (torch.Tensor) value function loss.\n        \"\"\"\n        if self._use_popart:\n            value_pred_clipped = value_preds_batch + (values - value_preds_batch).clamp(-self.clip_param,\n                                                                                        self.clip_param)\n            error_clipped = self.value_normalizer(return_batch) - value_pred_clipped\n            error_original = self.value_normalizer(return_batch) - values\n        else:\n            value_pred_clipped = value_preds_batch + (values - value_preds_batch).clamp(-self.clip_param,\n                                                                                        self.clip_param)\n            error_clipped = return_batch - value_pred_clipped\n            error_original = return_batch - values\n\n        if self._use_huber_loss:\n            value_loss_clipped = huber_loss(error_clipped, self.huber_delta)\n            value_loss_original = huber_loss(error_original, self.huber_delta)\n        else:\n            value_loss_clipped = mse_loss(error_clipped)\n            value_loss_original = mse_loss(error_original)\n\n        if self._use_clipped_value_loss:\n            value_loss = torch.max(value_loss_original, value_loss_clipped)\n        else:\n            value_loss = value_loss_original\n\n        if self._use_value_active_masks:\n            value_loss = (value_loss * active_masks_batch).sum() / active_masks_batch.sum()\n        else:\n            value_loss = value_loss.mean()\n\n        return value_loss\n\n    def _get_flat_grad(self, y: torch.Tensor, model: nn.Module, **kwargs) -> torch.Tensor:\n        # caculate first order gradient of kl with respect to theta\n        grads = torch.autograd.grad(y, model.parameters(), **kwargs, allow_unused=True)  # type: ignore\n        # a = torch.where(grads.dtype = None, zero, grads))\n        _grads = []\n        for val in grads:\n            if val != None:\n                _grads.append(val);\n\n        return torch.cat([grad.reshape(-1) for grad in _grads])\n\n    def _conjugate_gradients(self, b: torch.Tensor, flat_kl_grad: torch.Tensor, nsteps: int = 10,\n                             residual_tol: float = 1e-10) -> torch.Tensor:\n        x = torch.zeros_like(b)\n        r, p = b.clone(), b.clone()\n        # Note: should be 'r, p = b - MVP(x)', but for x=0, MVP(x)=0.\n        # Change if doing warm start.\n        rdotr = r.dot(r)\n        for i in range(nsteps):\n            z = self.cal_second_hessian(p, flat_kl_grad)\n            alpha = rdotr / p.dot(z)\n            x += alpha * p\n            r -= alpha * z\n            new_rdotr = r.dot(r)\n            if new_rdotr < residual_tol:\n                break\n            p = r + new_rdotr / rdotr * p\n            rdotr = new_rdotr\n        return x\n\n    def cal_second_hessian(self, v: torch.Tensor, flat_kl_grad: torch.Tensor) -> torch.Tensor:\n        \"\"\"Matrix vector product.\"\"\"\n        # caculate second order gradient of kl with respect to theta\n        kl_v = (flat_kl_grad * v).sum()\n        flat_kl_grad_grad = self._get_flat_grad(\n            kl_v, self.policy.actor, retain_graph=True).detach()\n        return flat_kl_grad_grad + v * self._damping\n\n    def _set_from_flat_params(self, model: nn.Module, flat_params: torch.Tensor) -> nn.Module:\n        prev_ind = 0\n        for param in model.parameters():\n            flat_size = int(np.prod(list(param.size())))\n            param.data.copy_(\n                flat_params[prev_ind:prev_ind + flat_size].view(param.size()))\n            prev_ind += flat_size\n        return model\n\n    def ppo_update(self, sample, update_actor=True, precomputed_eval=None,\n                   precomputed_threshold=None,\n                   diff_threshold=False):\n        \"\"\"\n        Update actor and critic networks.\n        :param sample: (Tuple) contains data batch with which to update networks.\n        :update_actor: (bool) whether to update actor network.\n\n        :return value_loss: (torch.Tensor) value function loss.\n        :return critic_grad_norm: (torch.Tensor) gradient norm from critic update.\n        ;return policy_loss: (torch.Tensor) actor(policy) loss value.\n        :return dist_entropy: (torch.Tensor) action entropies.\n        :return actor_grad_norm: (torch.Tensor) gradient norm from actor update.\n        :return imp_weights: (torch.Tensor) importance sampling weights.\n        :param precompute: Use an 'input' for the linearization constant instead of true_linear_leq_constraint.\n                           If present, overrides surrogate\n                           When using precompute, the last input is the precomputed linearization constant\n\n        :param attempt_(in)feasible_recovery: deals with cases where x=0 is infeasible point but problem still feasible\n                                                               (where optimization problem is entirely infeasible)\n\n        :param revert_to_last_safe_point: Behavior protocol for situation when optimization problem is entirely infeasible.\n                                          Specifies that we should just reset the parameters to the last point\n                                          that satisfied constraint.\n\n        precomputed_eval         :  The value of the safety constraint at theta = theta_old.\n                                    Provide this when the lin_constraint function is a surrogate, and evaluating it at\n                                    theta_old will not give you the correct value.\n\n        precomputed_threshold &\n        diff_threshold           :  These relate to the linesearch that is used to ensure constraint satisfaction.\n                                    If the lin_constraint function is indeed the safety constraint function, then it\n                                    suffices to check that lin_constraint < max_lin_constraint_val to ensure satisfaction.\n                                    But if the lin_constraint function is a surrogate - ie, it only has the same\n                                    /gradient/ as the safety constraint - then the threshold we check it against has to\n                                    be adjusted. You can provide a fixed adjusted threshold via \"precomputed_threshold.\"\n                                    When \"diff_threshold\" == True, instead of checking\n                                        lin_constraint < threshold,\n                                    it will check\n                                        lin_constraint - old_lin_constraint < threshold.\n        \"\"\"\n\n        share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, \\\n        value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, \\\n        adv_targ, available_actions_batch, factor_batch, cost_preds_batch, cost_returns_barch, rnn_states_cost_batch, \\\n        cost_adv_targ, aver_episode_costs = sample\n\n        old_action_log_probs_batch = check(old_action_log_probs_batch).to(**self.tpdv)\n        adv_targ = check(adv_targ).to(**self.tpdv)\n        cost_adv_targ = check(cost_adv_targ).to(**self.tpdv)\n        value_preds_batch = check(value_preds_batch).to(**self.tpdv)\n        return_batch = check(return_batch).to(**self.tpdv)\n        active_masks_batch = check(active_masks_batch).to(**self.tpdv)\n        factor_batch = check(factor_batch).to(**self.tpdv)\n        cost_returns_barch = check(cost_returns_barch).to(**self.tpdv)\n\n        cost_preds_batch = check(cost_preds_batch).to(**self.tpdv)\n\n        # Reshape to do in a single forward pass for all steps\n        values, action_log_probs, dist_entropy, cost_values = self.policy.evaluate_actions(share_obs_batch,\n                                                                                           obs_batch,\n                                                                                           rnn_states_batch,\n                                                                                           rnn_states_critic_batch,\n                                                                                           actions_batch,\n                                                                                           masks_batch,\n                                                                                           available_actions_batch,\n                                                                                           active_masks_batch,\n                                                                                           rnn_states_cost_batch)\n\n        # todo: lagrangian coef\n        adv_targ_hybrid =  adv_targ - self.lamda_lagr*cost_adv_targ\n\n        # todo: lagrangian actor update step\n        # actor update\n        imp_weights = torch.exp(action_log_probs - old_action_log_probs_batch)\n\n        surr1 = imp_weights * adv_targ_hybrid\n        surr2 = torch.clamp(imp_weights, 1.0 - self.clip_param, 1.0 + self.clip_param) * adv_targ_hybrid\n\n        if self._use_policy_active_masks:\n            policy_action_loss = (-torch.sum(factor_batch * torch.min(surr1, surr2),\n                                             dim=-1,\n                                             keepdim=True) * active_masks_batch).sum() / active_masks_batch.sum()\n        else:\n            policy_action_loss = -torch.sum(factor_batch * torch.min(surr1, surr2), dim=-1, keepdim=True).mean()\n\n        policy_loss = policy_action_loss\n\n        self.policy.actor_optimizer.zero_grad()\n\n        if update_actor:\n            (policy_loss - dist_entropy * self.entropy_coef).backward()\n\n        if self._use_max_grad_norm:\n            actor_grad_norm = nn.utils.clip_grad_norm_(self.policy.actor.parameters(), self.max_grad_norm)\n        else:\n            actor_grad_norm = get_gard_norm(self.policy.actor.parameters())\n\n        self.policy.actor_optimizer.step()\n\n        # todo: update lamda_lagr\n        delta_lamda_lagr = -(( aver_episode_costs.mean() - self.safety_bound) * (1 - self.gamma) + (imp_weights * cost_adv_targ)).mean().detach()\n\n        R_Relu = torch.nn.ReLU()\n        new_lamda_lagr = R_Relu(self.lamda_lagr - (delta_lamda_lagr * self.lagrangian_coef))\n\n        self.lamda_lagr = new_lamda_lagr\n\n        # todo: reward critic update\n        value_loss = self.cal_value_loss(values, value_preds_batch, return_batch, active_masks_batch)\n        self.policy.critic_optimizer.zero_grad()\n        (value_loss * self.value_loss_coef).backward()\n        if self._use_max_grad_norm:\n            critic_grad_norm = nn.utils.clip_grad_norm_(self.policy.critic.parameters(), self.max_grad_norm)\n        else:\n            critic_grad_norm = get_gard_norm(self.policy.critic.parameters())\n        self.policy.critic_optimizer.step()\n\n        # todo: cost critic update\n        cost_loss = self.cal_value_loss(cost_values, cost_preds_batch, cost_returns_barch, active_masks_batch)\n        self.policy.cost_optimizer.zero_grad()\n        (cost_loss * self.value_loss_coef).backward()\n        if self._use_max_grad_norm:\n            cost_grad_norm = nn.utils.clip_grad_norm_(self.policy.cost_critic.parameters(), self.max_grad_norm)\n        else:\n            cost_grad_norm = get_gard_norm(self.policy.cost_critic.parameters())\n        self.policy.cost_optimizer.step()\n\n        return value_loss, critic_grad_norm, policy_loss, dist_entropy, actor_grad_norm, imp_weights, cost_loss, cost_grad_norm\n\n    def train(self, buffer, update_actor=True):\n        \"\"\"\n        Perform a training update using minibatch GD.\n        :param buffer: (SharedReplayBuffer) buffer containing training data.\n        :param update_actor: (bool) whether to update actor network.\n\n        :return train_info: (dict) contains information regarding training update (e.g. loss, grad norms, etc).\n        \"\"\"\n        if self._use_popart:\n            advantages = buffer.returns[:-1] - self.value_normalizer.denormalize(buffer.value_preds[:-1])\n        else:\n            advantages = buffer.returns[:-1] - buffer.value_preds[:-1]\n        advantages_copy = advantages.copy()\n        advantages_copy[buffer.active_masks[:-1] == 0.0] = np.nan\n        mean_advantages = np.nanmean(advantages_copy)\n        std_advantages = np.nanstd(advantages_copy)\n        advantages = (advantages - mean_advantages) / (std_advantages + 1e-5)\n\n        if self._use_popart:\n            cost_adv = buffer.cost_returns[:-1] - self.value_normalizer.denormalize(buffer.cost_preds[:-1])\n        else:\n            cost_adv = buffer.cost_returns[:-1] - buffer.cost_preds[:-1]\n        cost_adv_copy = cost_adv.copy()\n        cost_adv_copy[buffer.active_masks[:-1] == 0.0] = np.nan\n        mean_cost_adv = np.nanmean(cost_adv_copy)\n        std_cost_adv = np.nanstd(cost_adv_copy)\n        cost_adv = (cost_adv - mean_cost_adv) / (std_cost_adv + 1e-5)\n\n        train_info = {}\n\n        train_info['value_loss'] = 0\n        train_info['policy_loss'] = 0\n        train_info['dist_entropy'] = 0\n        train_info['actor_grad_norm'] = 0\n        train_info['critic_grad_norm'] = 0\n        train_info['ratio'] = 0\n        train_info['cost_grad_norm'] = 0\n        train_info['cost_loss'] = 0\n        \n        for _ in range(self.ppo_epoch):\n            if self._use_naive_recurrent:\n                data_generator = buffer.naive_recurrent_generator(advantages, self.num_mini_batch, cost_adv)\n            else:\n                data_generator = buffer.feed_forward_generator(advantages, self.num_mini_batch, cost_adv=cost_adv)\n\n            for sample in data_generator:\n\n                value_loss, critic_grad_norm, policy_loss, dist_entropy, actor_grad_norm, imp_weights, cost_loss, cost_grad_norm \\\n                    = self.ppo_update(sample, update_actor, precomputed_threshold=None,\n                                      diff_threshold=False)\n\n                train_info['value_loss'] += value_loss.item()\n                train_info['policy_loss'] += policy_loss.item()\n                train_info['dist_entropy'] += dist_entropy.item()\n                train_info['actor_grad_norm'] += actor_grad_norm\n                train_info['critic_grad_norm'] += critic_grad_norm\n                train_info['ratio'] += imp_weights.mean()\n                train_info['cost_loss'] += cost_loss.item()\n                train_info['cost_grad_norm'] += cost_grad_norm\n\n        num_updates = self.ppo_epoch * self.num_mini_batch\n\n        for k in train_info.keys():\n            train_info[k] /= num_updates\n\n        return train_info\n\n    def prep_training(self):\n        self.policy.actor.train()\n        self.policy.critic.train()\n        self.policy.cost_critic.train()\n\n    def prep_rollout(self):\n        self.policy.actor.eval()\n        self.policy.critic.eval()\n        self.policy.cost_critic.eval()\n"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/algorithms/utils/act.py",
    "content": "from .distributions import Bernoulli, Categorical, DiagGaussian\nimport torch\nimport torch.nn as nn\n\nclass ACTLayer(nn.Module):\n    \"\"\"\n    MLP Module to compute actions.\n    :param action_space: (gym.Space) action space.\n    :param inputs_dim: (int) dimension of network input.\n    :param use_orthogonal: (bool) whether to use orthogonal initialization.\n    :param gain: (float) gain of the output layer of the network.\n    \"\"\"\n    def __init__(self, action_space, inputs_dim, use_orthogonal, gain, args=None):\n        super(ACTLayer, self).__init__()\n        self.mixed_action = False\n        self.multi_discrete = False\n\n        if action_space.__class__.__name__ == \"Discrete\":\n            action_dim = action_space.n\n            self.action_out = Categorical(inputs_dim, action_dim, use_orthogonal, gain)\n        elif action_space.__class__.__name__ == \"Box\":\n            action_dim = action_space.shape[0]\n            self.action_out = DiagGaussian(inputs_dim, action_dim, use_orthogonal, gain, args)\n        elif action_space.__class__.__name__ == \"MultiBinary\":\n            action_dim = action_space.shape[0]\n            self.action_out = Bernoulli(inputs_dim, action_dim, use_orthogonal, gain)\n        elif action_space.__class__.__name__ == \"MultiDiscrete\":\n            self.multi_discrete = True\n            action_dims = action_space.high - action_space.low + 1\n            self.action_outs = []\n            for action_dim in action_dims:\n                self.action_outs.append(Categorical(inputs_dim, action_dim, use_orthogonal, gain))\n            self.action_outs = nn.ModuleList(self.action_outs)\n        else:  # discrete + continous\n            self.mixed_action = True\n            continous_dim = action_space[0].shape[0]\n            discrete_dim = action_space[1].n\n            self.action_outs = nn.ModuleList([DiagGaussian(inputs_dim, continous_dim, use_orthogonal, gain, args),\n                                              Categorical(inputs_dim, discrete_dim, use_orthogonal, gain)])\n    \n    def forward(self, x, available_actions=None, deterministic=False):\n        \"\"\"\n        Compute actions and action logprobs from given input.\n        :param x: (torch.Tensor) input to network.\n        :param available_actions: (torch.Tensor) denotes which actions are available to agent\n                                  (if None, all actions available)\n        :param deterministic: (bool) whether to sample from action distribution or return the mode.\n\n        :return actions: (torch.Tensor) actions to take.\n        :return action_log_probs: (torch.Tensor) log probabilities of taken actions.\n        \"\"\"\n        if self.mixed_action :\n            actions = []\n            action_log_probs = []\n            for action_out in self.action_outs:\n                action_logit = action_out(x)\n                action = action_logit.mode() if deterministic else action_logit.sample()\n                action_log_prob = action_logit.log_probs(action)\n                actions.append(action.float())\n                action_log_probs.append(action_log_prob)\n\n            actions = torch.cat(actions, -1)\n            action_log_probs = torch.sum(torch.cat(action_log_probs, -1), -1, keepdim=True)\n\n        elif self.multi_discrete:\n            actions = []\n            action_log_probs = []\n            for action_out in self.action_outs:\n                action_logit = action_out(x)\n                action = action_logit.mode() if deterministic else action_logit.sample()\n                action_log_prob = action_logit.log_probs(action)\n                actions.append(action)\n                action_log_probs.append(action_log_prob)\n\n            actions = torch.cat(actions, -1)\n            action_log_probs = torch.cat(action_log_probs, -1)\n        \n        else:\n            action_logits = self.action_out(x, available_actions)\n            actions = action_logits.mode() if deterministic else action_logits.sample() \n            action_log_probs = action_logits.log_probs(actions)\n        \n        return actions, action_log_probs\n\n    def get_probs(self, x, available_actions=None):\n        \"\"\"\n        Compute action probabilities from inputs.\n        :param x: (torch.Tensor) input to network.\n        :param available_actions: (torch.Tensor) denotes which actions are available to agent\n                                  (if None, all actions available)\n\n        :return action_probs: (torch.Tensor)\n        \"\"\"\n        if self.mixed_action or self.multi_discrete:\n            action_probs = []\n            for action_out in self.action_outs:\n                action_logit = action_out(x)\n                action_prob = action_logit.probs\n                action_probs.append(action_prob)\n            action_probs = torch.cat(action_probs, -1)\n        else:\n            action_logits = self.action_out(x, available_actions)\n            action_probs = action_logits.probs\n        \n        return action_probs\n\n    def evaluate_actions(self, x, action, available_actions=None, active_masks=None):\n        \"\"\"\n        Compute log probability and entropy of given actions.\n        :param x: (torch.Tensor) input to network.\n        :param action: (torch.Tensor) actions whose entropy and log probability to evaluate.\n        :param available_actions: (torch.Tensor) denotes which actions are available to agent\n                                                              (if None, all actions available)\n        :param active_masks: (torch.Tensor) denotes whether an agent is active or dead.\n\n        :return action_log_probs: (torch.Tensor) log probabilities of the input actions.\n        :return dist_entropy: (torch.Tensor) action distribution entropy for the given inputs.\n        \"\"\"\n        if self.mixed_action:\n            a, b = action.split((2, 1), -1)\n            b = b.long()\n            action = [a, b] \n            action_log_probs = [] \n            dist_entropy = []\n            for action_out, act in zip(self.action_outs, action):\n                action_logit = action_out(x)\n                action_log_probs.append(action_logit.log_probs(act))\n                if active_masks is not None:\n                    if len(action_logit.entropy().shape) == len(active_masks.shape):\n                        dist_entropy.append((action_logit.entropy() * active_masks).sum()/active_masks.sum()) \n                    else:\n                        dist_entropy.append((action_logit.entropy() * active_masks.squeeze(-1)).sum()/active_masks.sum())\n                else:\n                    dist_entropy.append(action_logit.entropy().mean())\n                \n            action_log_probs = torch.sum(torch.cat(action_log_probs, -1), -1, keepdim=True)\n            dist_entropy = dist_entropy[0] / 2.0 + dist_entropy[1] / 0.98 #! dosen't make sense\n\n        elif self.multi_discrete:\n            action = torch.transpose(action, 0, 1)\n            action_log_probs = []\n            dist_entropy = []\n            for action_out, act in zip(self.action_outs, action):\n                action_logit = action_out(x)\n                action_log_probs.append(action_logit.log_probs(act))\n                if active_masks is not None:\n                    dist_entropy.append((action_logit.entropy()*active_masks.squeeze(-1)).sum()/active_masks.sum())\n                else:\n                    dist_entropy.append(action_logit.entropy().mean())\n\n            action_log_probs = torch.cat(action_log_probs, -1) # ! could be wrong\n            dist_entropy = torch.tensor(dist_entropy).mean()\n        \n        else:\n            action_logits = self.action_out(x, available_actions)\n            action_log_probs = action_logits.log_probs(action)\n            if active_masks is not None:\n                dist_entropy = (action_logits.entropy()*active_masks).sum()/active_masks.sum()\n                # dist_entropy = (action_logits.entropy()*active_masks.squeeze(-1)).sum()/active_masks.sum()\n            else:\n                dist_entropy = action_logits.entropy().mean()\n        \n        return action_log_probs, dist_entropy\n"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/algorithms/utils/cnn.py",
    "content": "import torch.nn as nn\nfrom .util import init\n\n\"\"\"CNN Modules and utils.\"\"\"\n\nclass Flatten(nn.Module):\n    def forward(self, x):\n        return x.view(x.size(0), -1)\n\n\nclass CNNLayer(nn.Module):\n    def __init__(self, obs_shape, hidden_size, use_orthogonal, use_ReLU, kernel_size=3, stride=1):\n        super(CNNLayer, self).__init__()\n\n        active_func = [nn.Tanh(), nn.ReLU()][use_ReLU]\n        init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal]\n        gain = nn.init.calculate_gain(['tanh', 'relu'][use_ReLU])\n\n        def init_(m):\n            return init(m, init_method, lambda x: nn.init.constant_(x, 0), gain=gain)\n\n        input_channel = obs_shape[0]\n        input_width = obs_shape[1]\n        input_height = obs_shape[2]\n\n        self.cnn = nn.Sequential(\n            init_(nn.Conv2d(in_channels=input_channel,\n                            out_channels=hidden_size // 2,\n                            kernel_size=kernel_size,\n                            stride=stride)\n                  ),\n            active_func,\n            Flatten(),\n            init_(nn.Linear(hidden_size // 2 * (input_width - kernel_size + stride) * (input_height - kernel_size + stride),\n                            hidden_size)\n                  ),\n            active_func,\n            init_(nn.Linear(hidden_size, hidden_size)), active_func)\n\n    def forward(self, x):\n        x = x / 255.0\n        x = self.cnn(x)\n        return x\n\n\nclass CNNBase(nn.Module):\n    def __init__(self, args, obs_shape):\n        super(CNNBase, self).__init__()\n\n        self._use_orthogonal = args.use_orthogonal\n        self._use_ReLU = args.use_ReLU\n        self.hidden_size = args.hidden_size\n\n        self.cnn = CNNLayer(obs_shape, self.hidden_size, self._use_orthogonal, self._use_ReLU)\n\n    def forward(self, x):\n        x = self.cnn(x)\n        return x\n"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/algorithms/utils/distributions.py",
    "content": "import torch\nimport torch.nn as nn\nfrom .util import init\n\n\"\"\"\nModify standard PyTorch distributions so they to make compatible with this codebase. \n\"\"\"\n\n#\n# Standardize distribution interfaces\n#\n\n# Categorical\nclass FixedCategorical(torch.distributions.Categorical):\n    def sample(self):\n        return super().sample().unsqueeze(-1)\n\n    def log_probs(self, actions):\n        return (\n            super()\n            .log_prob(actions.squeeze(-1))\n            .view(actions.size(0), -1)\n            .sum(-1)\n            .unsqueeze(-1)\n        )\n\n    def mode(self):\n        return self.probs.argmax(dim=-1, keepdim=True)\n\n\n# Normal\nclass FixedNormal(torch.distributions.Normal):\n    def log_probs(self, actions):\n        return super().log_prob(actions)\n        # return super().log_prob(actions).sum(-1, keepdim=True)\n\n    def entrop(self):\n        return super.entropy().sum(-1)\n\n    def mode(self):\n        return self.mean\n\n\n# Bernoulli\nclass FixedBernoulli(torch.distributions.Bernoulli):\n    def log_probs(self, actions):\n        return super.log_prob(actions).view(actions.size(0), -1).sum(-1).unsqueeze(-1)\n\n    def entropy(self):\n        return super().entropy().sum(-1)\n\n    def mode(self):\n        return torch.gt(self.probs, 0.5).float()\n\n\nclass Categorical(nn.Module):\n    def __init__(self, num_inputs, num_outputs, use_orthogonal=True, gain=0.01):\n        super(Categorical, self).__init__()\n        init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal]\n        def init_(m): \n            return init(m, init_method, lambda x: nn.init.constant_(x, 0), gain)\n\n        self.linear = init_(nn.Linear(num_inputs, num_outputs))\n\n    def forward(self, x, available_actions=None):\n        x = self.linear(x)\n        if available_actions is not None:\n            x[available_actions == 0] = -1e10\n        return FixedCategorical(logits=x)\n\n\n# class DiagGaussian(nn.Module):\n#     def __init__(self, num_inputs, num_outputs, use_orthogonal=True, gain=0.01):\n#         super(DiagGaussian, self).__init__()\n#\n#         init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal]\n#         def init_(m):\n#             return init(m, init_method, lambda x: nn.init.constant_(x, 0), gain)\n#\n#         self.fc_mean = init_(nn.Linear(num_inputs, num_outputs))\n#         self.logstd = AddBias(torch.zeros(num_outputs))\n#\n#     def forward(self, x, available_actions=None):\n#         action_mean = self.fc_mean(x)\n#\n#         #  An ugly hack for my KFAC implementation.\n#         zeros = torch.zeros(action_mean.size())\n#         if x.is_cuda:\n#             zeros = zeros.cuda()\n#\n#         action_logstd = self.logstd(zeros)\n#         return FixedNormal(action_mean, action_logstd.exp())\n\nclass DiagGaussian(nn.Module):\n    def __init__(self, num_inputs, num_outputs, use_orthogonal=True, gain=0.01, args=None):\n        super(DiagGaussian, self).__init__()\n\n        init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal]\n\n        def init_(m):\n            return init(m, init_method, lambda x: nn.init.constant_(x, 0), gain)\n\n        if args is not None:\n            self.std_x_coef = args.std_x_coef\n            self.std_y_coef = args.std_y_coef\n        else:\n            self.std_x_coef = 1.\n            self.std_y_coef = 0.5\n        self.fc_mean = init_(nn.Linear(num_inputs, num_outputs))\n        log_std = torch.ones(num_outputs) * self.std_x_coef\n        self.log_std = torch.nn.Parameter(log_std)\n\n    def forward(self, x, available_actions=None):\n        action_mean = self.fc_mean(x)\n        action_std = torch.sigmoid(self.log_std / self.std_x_coef) * self.std_y_coef\n        return FixedNormal(action_mean, action_std)\n\nclass Bernoulli(nn.Module):\n    def __init__(self, num_inputs, num_outputs, use_orthogonal=True, gain=0.01):\n        super(Bernoulli, self).__init__()\n        init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal]\n        def init_(m): \n            return init(m, init_method, lambda x: nn.init.constant_(x, 0), gain)\n        \n        self.linear = init_(nn.Linear(num_inputs, num_outputs))\n\n    def forward(self, x):\n        x = self.linear(x)\n        return FixedBernoulli(logits=x)\n\nclass AddBias(nn.Module):\n    def __init__(self, bias):\n        super(AddBias, self).__init__()\n        self._bias = nn.Parameter(bias.unsqueeze(1))\n\n    def forward(self, x):\n        if x.dim() == 2:\n            bias = self._bias.t().view(1, -1)\n        else:\n            bias = self._bias.t().view(1, -1, 1, 1)\n\n        return x + bias\n"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/algorithms/utils/mlp.py",
    "content": "import torch.nn as nn\nfrom .util import init, get_clones\n\n\"\"\"MLP modules.\"\"\"\n\nclass MLPLayer(nn.Module):\n    def __init__(self, input_dim, hidden_size, layer_N, use_orthogonal, use_ReLU):\n        super(MLPLayer, self).__init__()\n        self._layer_N = layer_N\n\n        active_func = [nn.Tanh(), nn.ReLU()][use_ReLU]\n        init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal]\n        gain = nn.init.calculate_gain(['tanh', 'relu'][use_ReLU])\n\n        def init_(m):\n            return init(m, init_method, lambda x: nn.init.constant_(x, 0), gain=gain)\n\n        self.fc1 = nn.Sequential(\n            init_(nn.Linear(input_dim, hidden_size)), active_func, nn.LayerNorm(hidden_size))\n        self.fc_h = nn.Sequential(init_(\n            nn.Linear(hidden_size, hidden_size)), active_func, nn.LayerNorm(hidden_size))\n        self.fc2 = get_clones(self.fc_h, self._layer_N)\n\n    def forward(self, x):\n        x = self.fc1(x)\n        for i in range(self._layer_N):\n            x = self.fc2[i](x)\n        return x\n\n\nclass MLPBase(nn.Module):\n    def __init__(self, args, obs_shape, cat_self=True, attn_internal=False):\n        super(MLPBase, self).__init__()\n\n        self._use_feature_normalization = args.use_feature_normalization\n        self._use_orthogonal = args.use_orthogonal\n        self._use_ReLU = args.use_ReLU\n        self._stacked_frames = args.stacked_frames\n        self._layer_N = args.layer_N\n        self.hidden_size = args.hidden_size\n\n        obs_dim = obs_shape[0]\n\n        if self._use_feature_normalization:\n            self.feature_norm = nn.LayerNorm(obs_dim)\n\n        self.mlp = MLPLayer(obs_dim, self.hidden_size,\n                              self._layer_N, self._use_orthogonal, self._use_ReLU)\n\n    def forward(self, x):\n        if self._use_feature_normalization:\n            x = self.feature_norm(x)\n\n        x = self.mlp(x)\n\n        return x"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/algorithms/utils/rnn.py",
    "content": "import torch\nimport torch.nn as nn\n\n\"\"\"RNN modules.\"\"\"\n\n\nclass RNNLayer(nn.Module):\n    def __init__(self, inputs_dim, outputs_dim, recurrent_N, use_orthogonal):\n        super(RNNLayer, self).__init__()\n        self._recurrent_N = recurrent_N\n        self._use_orthogonal = use_orthogonal\n\n        self.rnn = nn.GRU(inputs_dim, outputs_dim, num_layers=self._recurrent_N)\n        for name, param in self.rnn.named_parameters():\n            if 'bias' in name:\n                nn.init.constant_(param, 0)\n            elif 'weight' in name:\n                if self._use_orthogonal:\n                    nn.init.orthogonal_(param)\n                else:\n                    nn.init.xavier_uniform_(param)\n        self.norm = nn.LayerNorm(outputs_dim)\n\n    def forward(self, x, hxs, masks):\n        if x.size(0) == hxs.size(0):\n            x, hxs = self.rnn(x.unsqueeze(0),\n                              (hxs * masks.repeat(1, self._recurrent_N).unsqueeze(-1)).transpose(0, 1).contiguous())\n            x = x.squeeze(0)\n            hxs = hxs.transpose(0, 1)\n        else:\n            # x is a (T, N, -1) tensor that has been flatten to (T * N, -1)\n            N = hxs.size(0)\n            T = int(x.size(0) / N)\n\n            # unflatten\n            x = x.view(T, N, x.size(1))\n\n            # Same deal with masks\n            masks = masks.view(T, N)\n\n            # Let's figure out which steps in the sequence have a zero for any agent\n            # We will always assume t=0 has a zero in it as that makes the logic cleaner\n            has_zeros = ((masks[1:] == 0.0)\n                         .any(dim=-1)\n                         .nonzero()\n                         .squeeze()\n                         .cpu())\n\n            # +1 to correct the masks[1:]\n            if has_zeros.dim() == 0:\n                # Deal with scalar\n                has_zeros = [has_zeros.item() + 1]\n            else:\n                has_zeros = (has_zeros + 1).numpy().tolist()\n\n            # add t=0 and t=T to the list\n            has_zeros = [0] + has_zeros + [T]\n\n            hxs = hxs.transpose(0, 1)\n\n            outputs = []\n            for i in range(len(has_zeros) - 1):\n                # We can now process steps that don't have any zeros in masks together!\n                # This is much faster\n                start_idx = has_zeros[i]\n                end_idx = has_zeros[i + 1]\n                temp = (hxs * masks[start_idx].view(1, -1, 1).repeat(self._recurrent_N, 1, 1)).contiguous()\n                rnn_scores, hxs = self.rnn(x[start_idx:end_idx], temp)\n                outputs.append(rnn_scores)\n\n            # assert len(outputs) == T\n            # x is a (T, N, -1) tensor\n            x = torch.cat(outputs, dim=0)\n\n            # flatten\n            x = x.reshape(T * N, -1)\n            hxs = hxs.transpose(0, 1)\n\n        x = self.norm(x)\n        return x, hxs\n"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/algorithms/utils/util.py",
    "content": "import copy\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\n\ndef init(module, weight_init, bias_init, gain=1):\n    weight_init(module.weight.data, gain=gain)\n    bias_init(module.bias.data)\n    return module\n\ndef get_clones(module, N):\n    return nn.ModuleList([copy.deepcopy(module) for i in range(N)])\n\ndef check(input):\n    output = torch.from_numpy(input) if type(input) == np.ndarray else input\n    return output\n"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/config.py",
    "content": "import argparse\n\n\ndef get_config():\n    \"\"\"\n    The configuration parser for common hyperparameters of all environment. \n    Please reach each `scripts/train/<env>_runner.py` file to find private hyperparameters\n    only used in <env>.\n\n    Prepare parameters:\n        --algorithm_name <algorithm_name>\n            specifiy the algorithm, including `[\"rmappo\", \"mappo\", \"rmappg\", \"mappg\", \"trpo\"]`\n        --experiment_name <str>\n            an identifier to distinguish different experiment.\n        --seed <int>\n            set seed for numpy and torch \n        --cuda\n            by default True, will use GPU to train; or else will use CPU; \n        --cuda_deterministic\n            by default, make sure random seed effective. if set, bypass such function.\n        --n_training_threads <int>\n            number of training threads working in parallel. by default 1\n        --n_rollout_threads <int>\n            number of parallel envs for training rollout. by default 32\n        --n_eval_rollout_threads <int>\n            number of parallel envs for evaluating rollout. by default 1\n        --n_render_rollout_threads <int>\n            number of parallel envs for rendering, could only be set as 1 for some environments.\n        --num_env_steps <int>\n            number of env steps to train (default: 10e6)\n        --user_name <str>\n            [for wandb usage], to specify user's name for simply collecting training data.\n        --use_wandb\n            [for wandb usage], by default True, will log date to wandb server. or else will use tensorboard to log data.\n    \n    Env parameters:\n        --env_name <str>\n            specify the name of environment\n        --use_obs_instead_of_state\n            [only for some env] by default False, will use global state; or else will use concatenated local obs.\n    \n    Replay Buffer parameters:\n        --episode_length <int>\n            the max length of episode in the buffer. \n    \n    Network parameters:\n        --share_policy\n            by default True, all agents will share the same network; set to make training agents use different policies. \n        --use_centralized_V\n            by default True, use centralized training mode; or else will decentralized training mode.\n        --stacked_frames <int>\n            Number of input frames which should be stack together.\n        --hidden_size <int>\n            Dimension of hidden layers for actor/critic networks\n        --layer_N <int>\n            Number of layers for actor/critic networks\n        --use_ReLU\n            by default True, will use ReLU. or else will use Tanh.\n        --use_popart\n            by default True, use running mean and std to normalize rewards. \n        --use_feature_normalization\n            by default True, apply layernorm to normalize inputs. \n        --use_orthogonal\n            by default True, use Orthogonal initialization for weights and 0 initialization for biases. or else, will use xavier uniform inilialization.\n        --gain\n            by default 0.01, use the gain # of last action layer\n        --use_naive_recurrent_policy\n            by default False, use the whole trajectory to calculate hidden states.\n        --use_recurrent_policy\n            by default, use Recurrent Policy. If set, do not use.\n        --recurrent_N <int>\n            The number of recurrent layers ( default 1).\n        --data_chunk_length <int>\n            Time length of chunks used to train a recurrent_policy, default 10.\n    \n    Optimizer parameters:\n        --lr <float>\n            learning rate parameter,  (default: 5e-4, fixed).\n        --critic_lr <float>\n            learning rate of critic  (default: 5e-4, fixed)\n        --opti_eps <float>\n            RMSprop optimizer epsilon (default: 1e-5)\n        --weight_decay <float>\n            coefficience of weight decay (default: 0)\n    \n    PPO parameters:\n        --ppo_epoch <int>\n            number of ppo epochs (default: 15)\n        --use_clipped_value_loss \n            by default, clip loss value. If set, do not clip loss value.\n        --clip_param <float>\n            ppo clip parameter (default: 0.2)\n        --num_mini_batch <int>\n            number of batches for ppo (default: 1)\n        --entropy_coef <float>\n            entropy term coefficient (default: 0.01)\n        --use_max_grad_norm \n            by default, use max norm of gradients. If set, do not use.\n        --max_grad_norm <float>\n            max norm of gradients (default: 0.5)\n        --use_gae\n            by default, use generalized advantage estimation. If set, do not use gae.\n        --gamma <float>\n            discount factor for rewards (default: 0.99)\n        --gae_lambda <float>\n            gae lambda parameter (default: 0.95)\n        --use_proper_time_limits\n            by default, the return value does consider limits of time. If set, compute returns with considering time limits factor.\n        --use_huber_loss\n            by default, use huber loss. If set, do not use huber loss.\n        --use_value_active_masks\n            by default True, whether to mask useless data in value loss.  \n        --huber_delta <float>\n            coefficient of huber loss.  \n    \n    PPG parameters:\n        --aux_epoch <int>\n            number of auxiliary epochs. (default: 4)\n        --clone_coef <float>\n            clone term coefficient (default: 0.01)\n    \n    Run parameters：\n        --use_linear_lr_decay\n            by default, do not apply linear decay to learning rate. If set, use a linear schedule on the learning rate\n    \n    Save & Log parameters:\n        --save_interval <int>\n            time duration between contiunous twice models saving.\n        --log_interval <int>\n            time duration between contiunous twice log printing.\n    \n    Eval parameters:\n        --use_eval\n            by default, do not start evaluation. If set`, start evaluation alongside with training.\n        --eval_interval <int>\n            time duration between contiunous twice evaluation progress.\n        --eval_episodes <int>\n            number of episodes of a single evaluation.\n    \n    Render parameters:\n        --save_gifs\n            by default, do not save render video. If set, save video.\n        --use_render\n            by default, do not render the env during training. If set, start render. Note: something, the environment has internal render process which is not controlled by this hyperparam.\n        --render_episodes <int>\n            the number of episodes to render a given env\n        --ifi <float>\n            the play interval of each rendered image in saved video.\n    \n    Pretrained parameters:\n        --model_dir <str>\n            by default None. set the path to pretrained model.\n    \"\"\"\n    parser = argparse.ArgumentParser(\n        description='mappo_lagrangian', formatter_class=argparse.RawDescriptionHelpFormatter)\n\n    # prepare parameters\n    parser.add_argument(\"--algorithm_name\", type=str,\n                        default=' ', choices=[ \"mappo_lagr\"])\n\n    parser.add_argument(\"--experiment_name\", type=str, default=\"check\", help=\"an identifier to distinguish different experiment.\")\n    parser.add_argument(\"--seed\", type=int, default=1, help=\"Random seed for numpy/torch\")\n    parser.add_argument(\"--cuda\", action='store_false', default=False, help=\"by default True, will use GPU to train; or else will use CPU;\")\n    parser.add_argument(\"--cuda_deterministic\",\n                        action='store_false', default=True, help=\"by default, make sure random seed effective. if set, bypass such function.\")\n    parser.add_argument(\"--n_training_threads\", type=int,\n                        default=1, help=\"Number of torch threads for training\")\n    parser.add_argument(\"--n_rollout_threads\", type=int, default=32,\n                        help=\"Number of parallel envs for training rollouts\")\n    parser.add_argument(\"--n_eval_rollout_threads\", type=int, default=1,\n                        help=\"Number of parallel envs for evaluating rollouts\")\n    parser.add_argument(\"--n_render_rollout_threads\", type=int, default=1,\n                        help=\"Number of parallel envs for rendering rollouts\")\n    parser.add_argument(\"--num_env_steps\", type=int, default=10e6,\n                        help='Number of environment steps to train (default: 10e6)')\n    parser.add_argument(\"--user_name\", type=str, default='marl',help=\"[for wandb usage], to specify user's name for simply collecting training data.\")\n    parser.add_argument(\"--use_wandb\", action='store_false', default=False, help=\"[for wandb usage], by default True, will log date to wandb server. or else will use tensorboard to log data.\")\n\n    # env parameters\n    parser.add_argument(\"--env_name\", type=str, default='StarCraft2', help=\"specify the name of environment\")\n    parser.add_argument(\"--use_obs_instead_of_state\", action='store_true',\n                        default=False, help=\"Whether to use global state or concatenated obs\")\n\n    # replay buffer parameters\n    parser.add_argument(\"--episode_length\", type=int,\n                        default=200, help=\"Max length for any episode\")\n\n    # network parameters\n    parser.add_argument(\"--share_policy\", action='store_false',\n                        default=True, help='Whether agent share the same policy')\n    parser.add_argument(\"--use_centralized_V\", action='store_false',\n                        default=True, help=\"Whether to use centralized V function\")\n    parser.add_argument(\"--stacked_frames\", type=int, default=1,\n                        help=\"Dimension of hidden layers for actor/critic networks\")\n    parser.add_argument(\"--use_stacked_frames\", action='store_true',\n                        default=False, help=\"Whether to use stacked_frames\")\n    parser.add_argument(\"--hidden_size\", type=int, default=64,\n                        help=\"Dimension of hidden layers for actor/critic networks\") \n    parser.add_argument(\"--layer_N\", type=int, default=1,\n                        help=\"Number of layers for actor/critic networks\")\n    parser.add_argument(\"--use_ReLU\", action='store_false',\n                        default=True, help=\"Whether to use ReLU\")\n    parser.add_argument(\"--use_popart\", action='store_false', default=True, help=\"by default True, use running mean and std to normalize rewards.\")\n    parser.add_argument(\"--use_valuenorm\", action='store_false', default=True, help=\"by default True, use running mean and std to normalize rewards.\")\n    parser.add_argument(\"--use_feature_normalization\", action='store_false',\n                        default=True, help=\"Whether to apply layernorm to the inputs\")\n    parser.add_argument(\"--use_orthogonal\", action='store_false', default=True,\n                        help=\"Whether to use Orthogonal initialization for weights and 0 initialization for biases\")\n    parser.add_argument(\"--gain\", type=float, default=0.01,\n                        help=\"The gain # of last action layer\")\n\n    # recurrent parameters\n    parser.add_argument(\"--use_naive_recurrent_policy\", action='store_true',\n                        default=False, help='Whether to use a naive recurrent policy')\n    parser.add_argument(\"--use_recurrent_policy\", action='store_true',\n                        default=False, help='use a recurrent policy')\n    parser.add_argument(\"--recurrent_N\", type=int, default=1, help=\"The number of recurrent layers.\")\n    parser.add_argument(\"--data_chunk_length\", type=int, default=10,\n                        help=\"Time length of chunks used to train a recurrent_policy\")\n    \n    # optimizer parameters\n    parser.add_argument(\"--lr\", type=float, default=5e-4,\n                        help='learning rate (default: 5e-4)')\n    parser.add_argument(\"--critic_lr\", type=float, default=5e-4,\n                        help='critic learning rate (default: 5e-4)')\n    parser.add_argument(\"--opti_eps\", type=float, default=1e-5,\n                        help='RMSprop optimizer epsilon (default: 1e-5)')\n    parser.add_argument(\"--weight_decay\", type=float, default=0)\n    parser.add_argument(\"--std_x_coef\", type=float, default=1)\n    parser.add_argument(\"--std_y_coef\", type=float, default=0.5)\n\n    # ppo parameters\n    parser.add_argument(\"--ppo_epoch\", type=int, default=15,\n                        help='number of ppo epochs (default: 15)')\n    parser.add_argument(\"--use_clipped_value_loss\",\n                        action='store_false', default=True, help=\"by default, clip loss value. If set, do not clip loss value.\")\n    parser.add_argument(\"--clip_param\", type=float, default=0.2,\n                        help='ppo clip parameter (default: 0.2)')\n    parser.add_argument(\"--num_mini_batch\", type=int, default=1,\n                        help='number of batches for ppo (default: 1)')\n    parser.add_argument(\"--entropy_coef\", type=float, default=0.01,\n                        help='entropy term coefficient (default: 0.01)')\n    # todo: lagrangian_coef is the lagrangian coefficient for mappo_lagrangian\n    parser.add_argument(\"--lamda_lagr\", type=float, default=0.78,\n                        help='lagrangrian coef coefficient (default: 0.78)')\n    parser.add_argument(\"--lagrangian_coef_rate\", type=float, default=5e-4,\n                        help='lagrangrian coef learning rate (default: 5e-4)')\n\n    parser.add_argument(\"--lagrangian_coef\", type=float, default=0.01,\n                        help='entropy term coefficient (default: 0.01)')\n    parser.add_argument(\"--value_loss_coef\", type=float,\n                        default=1, help='value loss coefficient (default: 0.5)')\n    parser.add_argument(\"--use_max_grad_norm\",\n                        action='store_false', default=True, help=\"by default, use max norm of gradients. If set, do not use.\")\n    parser.add_argument(\"--max_grad_norm\", type=float, default=10.0,\n                        help='max norm of gradients (default: 0.5)')\n    parser.add_argument(\"--use_gae\", action='store_false',\n                        default=True, help='use generalized advantage estimation')\n    parser.add_argument(\"--gamma\", type=float, default=0.99,\n                        help='discount factor for rewards (default: 0.99)')\n    parser.add_argument(\"--gae_lambda\", type=float, default=0.95,\n                        help='gae lambda parameter (default: 0.95)')\n    parser.add_argument(\"--use_proper_time_limits\", action='store_true',\n                        default=False, help='compute returns taking into account time limits')\n    parser.add_argument(\"--use_huber_loss\", action='store_false', default=True, help=\"by default, use huber loss. If set, do not use huber loss.\")\n    parser.add_argument(\"--use_value_active_masks\",\n                        action='store_false', default=True, help=\"by default True, whether to mask useless data in value loss.\")\n    parser.add_argument(\"--use_policy_active_masks\",\n                        action='store_false', default=True, help=\"by default True, whether to mask useless data in policy loss.\")\n    parser.add_argument(\"--huber_delta\", type=float, default=10.0, help=\" coefficience of huber loss.\")\n\n    # run parameters\n    parser.add_argument(\"--use_linear_lr_decay\", action='store_true',\n                        default=False, help='use a linear schedule on the learning rate')\n    # save parameters\n    parser.add_argument(\"--save_interval\", type=int, default=1, help=\"time duration between contiunous twice models saving.\")\n\n    # log parameters\n    parser.add_argument(\"--log_interval\", type=int, default=5, help=\"time duration between contiunous twice log printing.\")\n\n    # eval parameters\n    parser.add_argument(\"--use_eval\", action='store_true', default=False, help=\"by default, do not start evaluation. If set`, start evaluation alongside with training.\")\n    parser.add_argument(\"--eval_interval\", type=int, default=25, help=\"time duration between contiunous twice evaluation progress.\")\n    parser.add_argument(\"--eval_episodes\", type=int, default=32, help=\"number of episodes of a single evaluation.\")\n\n    # render parameters\n    parser.add_argument(\"--save_gifs\", action='store_true', default=False, help=\"by default, do not save render video. If set, save video.\")\n    parser.add_argument(\"--use_render\", action='store_true', default=False, help=\"by default, do not render the env during training. If set, start render. Note: something, the environment has internal render process which is not controlled by this hyperparam.\")\n    parser.add_argument(\"--render_episodes\", type=int, default=5, help=\"the number of episodes to render a given env\")\n    parser.add_argument(\"--ifi\", type=float, default=0.1, help=\"the play interval of each rendered image in saved video.\")\n\n    # pretrained parameters\n    parser.add_argument(\"--model_dir\", type=str, default=None, help=\"by default None. set the path to pretrained model.\")\n\n    # safe parameters\n    parser.add_argument(\"--safety_bound\", type=float, default=1, help=\"constraint upper bound\")\n\n    return parser\n"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/envs/__init__.py",
    "content": "\r\nimport socket\r\nfrom absl import flags\r\nFLAGS = flags.FLAGS\r\nFLAGS(['train_sc.py'])\r\n\r\n\r\n"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/envs/env_wrappers.py",
    "content": "\"\"\"\nModified from OpenAI Baselines code to work with multi-agent envs\n\"\"\"\nimport numpy as np\nimport torch\nfrom multiprocessing import Process, Pipe\nfrom abc import ABC, abstractmethod\nfrom mappo_lagrangian.utils.util import tile_images\n\nclass CloudpickleWrapper(object):\n    \"\"\"\n    Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)\n    \"\"\"\n\n    def __init__(self, x):\n        self.x = x\n\n    def __getstate__(self):\n        import cloudpickle\n        return cloudpickle.dumps(self.x)\n\n    def __setstate__(self, ob):\n        import pickle\n        self.x = pickle.loads(ob)\n\n\nclass ShareVecEnv(ABC):\n    \"\"\"\n    An abstract asynchronous, vectorized environment.\n    Used to batch data from multiple copies of an environment, so that\n    each observation becomes an batch of observations, and expected action is a batch of actions to\n    be applied per-environment.\n    \"\"\"\n    closed = False\n    viewer = None\n\n    metadata = {\n        'render.modes': ['human', 'rgb_array']\n    }\n\n    def __init__(self, num_envs, observation_space, share_observation_space, action_space):\n        self.num_envs = num_envs\n        self.observation_space = observation_space\n        self.share_observation_space = share_observation_space\n        self.action_space = action_space\n\n    @abstractmethod\n    def reset(self):\n        \"\"\"\n        Reset all the environments and return an array of\n        observations, or a dict of observation arrays.\n\n        If step_async is still doing work, that work will\n        be cancelled and step_wait() should not be called\n        until step_async() is invoked again.\n        \"\"\"\n        pass\n\n    @abstractmethod\n    def step_async(self, actions):\n        \"\"\"\n        Tell all the environments to start taking a step\n        with the given actions.\n        Call step_wait() to get the results of the step.\n\n        You should not call this if a step_async run is\n        already pending.\n        \"\"\"\n        pass\n\n    @abstractmethod\n    def step_wait(self):\n        \"\"\"\n        Wait for the step taken with step_async().\n\n        Returns (obs, rews, cos, dones, infos):\n         - obs: an array of observations, or a dict of\n                arrays of observations.\n         - rews: an array of rewards\n         - cos: an array of costs\n         - dones: an array of \"episode done\" booleans\n         - infos: a sequence of info objects\n        \"\"\"\n        pass\n\n    def close_extras(self):\n        \"\"\"\n        Clean up the  extra resources, beyond what's in this base class.\n        Only runs when not self.closed.\n        \"\"\"\n        pass\n\n    def close(self):\n        if self.closed:\n            return\n        if self.viewer is not None:\n            self.viewer.close()\n        self.close_extras()\n        self.closed = True\n\n    def step(self, actions):\n        \"\"\"\n        Step the environments synchronously.\n\n        This is available for backwards compatibility.\n        \"\"\"\n        self.step_async(actions)\n        return self.step_wait()\n\n    def render(self, mode='human'):\n        imgs = self.get_images()\n        bigimg = tile_images(imgs)\n        if mode == 'human':\n            self.get_viewer().imshow(bigimg)\n            return self.get_viewer().isopen\n        elif mode == 'rgb_array':\n            return bigimg\n        else:\n            raise NotImplementedError\n\n    def get_images(self):\n        \"\"\"\n        Return RGB images from each environment\n        \"\"\"\n        raise NotImplementedError\n\n    @property\n    def unwrapped(self):\n        if isinstance(self, VecEnvWrapper):\n            return self.venv.unwrapped\n        else:\n            return self\n\n    def get_viewer(self):\n        if self.viewer is None:\n            from gym.envs.classic_control import rendering\n            self.viewer = rendering.SimpleImageViewer()\n        return self.viewer\n\n\ndef worker(remote, parent_remote, env_fn_wrapper):\n    parent_remote.close()\n    env = env_fn_wrapper.x()\n    while True:\n        cmd, data = remote.recv()\n        if cmd == 'step':\n            ob, reward, done, info = env.step(data)\n            if 'bool' in done.__class__.__name__:\n                if done:\n                    ob = env.reset()\n            else:\n                if np.all(done):\n                    ob = env.reset()\n\n            remote.send((ob, reward, info[\"cost\"], done, info))\n        elif cmd == 'reset':\n            ob = env.reset()\n            remote.send((ob))\n        elif cmd == 'render':\n            if data == \"rgb_array\":\n                fr = env.render(mode=data)\n                remote.send(fr)\n            elif data == \"human\":\n                env.render(mode=data)\n        elif cmd == 'reset_task':\n            ob = env.reset_task()\n            remote.send(ob)\n        elif cmd == 'close':\n            env.close()\n            remote.close()\n            break\n        elif cmd == 'get_spaces':\n            remote.send((env.observation_space, env.share_observation_space, env.action_space))\n        else:\n            raise NotImplementedError\n\n\nclass GuardSubprocVecEnv(ShareVecEnv):\n    def __init__(self, env_fns, spaces=None):\n        \"\"\"\n        envs: list of gym environments to run in subprocesses\n        \"\"\"\n        self.waiting = False\n        self.closed = False\n        nenvs = len(env_fns)\n        self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])\n        self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))\n                   for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]\n        for p in self.ps:\n            p.daemon = False  # could cause zombie process\n            p.start()\n        for remote in self.work_remotes:\n            remote.close()\n\n        self.remotes[0].send(('get_spaces', None))\n        observation_space, share_observation_space, action_space = self.remotes[0].recv()\n        ShareVecEnv.__init__(self, len(env_fns), observation_space,\n                             share_observation_space, action_space)\n\n    def step_async(self, actions):\n\n        for remote, action in zip(self.remotes, actions):\n            remote.send(('step', action))\n        self.waiting = True\n\n    def step_wait(self):\n        results = [remote.recv() for remote in self.remotes]\n        self.waiting = False\n        obs, rews, cos, dones, infos = zip(*results)\n        return np.stack(obs), np.stack(rews), np.stack(cos), np.stack(dones), infos\n\n    def reset(self):\n        for remote in self.remotes:\n            remote.send(('reset', None))\n        obs = [remote.recv() for remote in self.remotes]\n        return np.stack(obs)\n\n    def reset_task(self):\n        for remote in self.remotes:\n            remote.send(('reset_task', None))\n        return np.stack([remote.recv() for remote in self.remotes])\n\n    def close(self):\n        if self.closed:\n            return\n        if self.waiting:\n            for remote in self.remotes:\n                remote.recv()\n        for remote in self.remotes:\n            remote.send(('close', None))\n        for p in self.ps:\n            p.join()\n        self.closed = True\n\n\nclass SubprocVecEnv(ShareVecEnv):\n    def __init__(self, env_fns, spaces=None):\n        \"\"\"\n        envs: list of gym environments to run in subprocesses\n        \"\"\"\n        self.waiting = False\n        self.closed = False\n        nenvs = len(env_fns)\n        self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])\n        self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))\n                   for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]\n        for p in self.ps:\n            p.daemon = True  # if the main process crashes, we should not cause things to hang\n            p.start()\n        for remote in self.work_remotes:\n            remote.close()\n\n        self.remotes[0].send(('get_spaces', None))\n        observation_space, share_observation_space, action_space = self.remotes[0].recv()\n        ShareVecEnv.__init__(self, len(env_fns), observation_space,\n                             share_observation_space, action_space)\n\n    def step_async(self, actions):\n        for remote, action in zip(self.remotes, actions):\n            remote.send(('step', action))\n        self.waiting = True\n\n    def step_wait(self):\n        results = [remote.recv() for remote in self.remotes]\n        self.waiting = False\n        obs, rews, cos, dones, infos = zip(*results)\n        return np.stack(obs), np.stack(rews), np.stack(cos), np.stack(dones), infos\n\n    def reset(self):\n        for remote in self.remotes:\n            remote.send(('reset', None))\n        obs = [remote.recv() for remote in self.remotes]\n        return np.stack(obs)\n\n\n    def reset_task(self):\n        for remote in self.remotes:\n            remote.send(('reset_task', None))\n        return np.stack([remote.recv() for remote in self.remotes])\n\n    def close(self):\n        if self.closed:\n            return\n        if self.waiting:\n            for remote in self.remotes:\n                remote.recv()\n        for remote in self.remotes:\n            remote.send(('close', None))\n        for p in self.ps:\n            p.join()\n        self.closed = True\n\n    def render(self, mode=\"rgb_array\"):\n        for remote in self.remotes:\n            remote.send(('render', mode))\n        if mode == \"rgb_array\":   \n            frame = [remote.recv() for remote in self.remotes]\n            return np.stack(frame) \n\n\ndef shareworker(remote, parent_remote, env_fn_wrapper):\n    parent_remote.close()\n    env = env_fn_wrapper.x()\n    while True:\n        cmd, data = remote.recv()\n        if cmd == 'step':\n            ob, s_ob, reward, done, info, available_actions = env.step(data)\n            if 'bool' in done.__class__.__name__:\n                if done:\n                    ob, s_ob, available_actions = env.reset()\n            else:\n                if np.all(done):\n                    ob, s_ob, available_actions = env.reset()\n\n            remote.send((ob, s_ob, reward, done, info, available_actions))\n        elif cmd == 'reset':\n            ob, s_ob, available_actions = env.reset()\n            remote.send((ob, s_ob, available_actions))\n        elif cmd == 'reset_task':\n            ob = env.reset_task()\n            remote.send(ob)\n        elif cmd == 'render':\n            if data == \"rgb_array\":\n                fr = env.render(mode=data)\n                remote.send(fr)\n            elif data == \"human\":\n                env.render(mode=data)\n        elif cmd == 'close':\n            env.close()\n            remote.close()\n            break\n        elif cmd == 'get_spaces':\n            remote.send(\n                (env.observation_space, env.share_observation_space, env.action_space))\n        elif cmd == 'render_vulnerability':\n            fr = env.render_vulnerability(data)\n            remote.send((fr))\n        elif cmd == 'get_num_agents':\n            remote.send((env.n_agents))\n        else:\n            raise NotImplementedError\n\n\nclass ShareSubprocVecEnv(ShareVecEnv):\n    def __init__(self, env_fns, spaces=None):\n        \"\"\"\n        envs: list of gym environments to run in subprocesses\n        \"\"\"\n        self.waiting = False\n        self.closed = False\n        nenvs = len(env_fns)\n        self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])\n        self.ps = [Process(target=shareworker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))\n                   for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]\n        for p in self.ps:\n            p.daemon = True  # if the main process crashes, we should not cause things to hang\n            p.start()\n        for remote in self.work_remotes:\n            remote.close()\n        self.remotes[0].send(('get_num_agents', None))\n        self.n_agents = self.remotes[0].recv()\n        self.remotes[0].send(('get_spaces', None))\n        observation_space, share_observation_space, action_space = self.remotes[0].recv(\n        )\n        # print(\"wrapper:\", share_observation_space)\n        ShareVecEnv.__init__(self, len(env_fns), observation_space,\n                             share_observation_space, action_space)\n\n    def step_async(self, actions):\n        for remote, action in zip(self.remotes, actions):\n            remote.send(('step', action))\n        self.waiting = True\n\n    def step_wait(self):\n        results = [remote.recv() for remote in self.remotes]\n        self.waiting = False\n        obs, share_obs, rews, dones, infos, available_actions = zip(*results)\n\n        cost_x= np.array([item[0]['cost'] for item in infos])\n        # print(\"=====cost_x=====: \", cost_x.sum())\n        # print(\"=====np.stack(dones)=====: \", np.stack(dones))\n        return np.stack(obs), np.stack(share_obs), np.stack(rews), np.stack(cost_x), np.stack(dones), infos, np.stack(available_actions)\n\n    def reset(self):\n        for remote in self.remotes:\n            remote.send(('reset', None))\n        results = [remote.recv() for remote in self.remotes]\n        obs, share_obs, available_actions = zip(*results)\n        return np.stack(obs), np.stack(share_obs), np.stack(available_actions)\n\n    def reset_task(self):\n        for remote in self.remotes:\n            remote.send(('reset_task', None))\n        return np.stack([remote.recv() for remote in self.remotes])\n\n    def close(self):\n        if self.closed:\n            return\n        if self.waiting:\n            for remote in self.remotes:\n                remote.recv()\n        for remote in self.remotes:\n            remote.send(('close', None))\n        for p in self.ps:\n            p.join()\n        self.closed = True\n\n\ndef choosesimpleworker(remote, parent_remote, env_fn_wrapper):\n    parent_remote.close()\n    env = env_fn_wrapper.x()\n    while True:\n        cmd, data = remote.recv()\n        if cmd == 'step':\n            ob, reward, done, info = env.step(data)\n            remote.send((ob, reward, info[\"cost\"], done, info))\n        elif cmd == 'reset':\n            ob = env.reset(data)\n            remote.send((ob))\n        elif cmd == 'reset_task':\n            ob = env.reset_task()\n            remote.send(ob)\n        elif cmd == 'close':\n            env.close()\n            remote.close()\n            break\n        elif cmd == 'render':\n            if data == \"rgb_array\":\n                fr = env.render(mode=data)\n                remote.send(fr)\n            elif data == \"human\":\n                env.render(mode=data)\n        elif cmd == 'get_spaces':\n            remote.send(\n                (env.observation_space, env.share_observation_space, env.action_space))\n        else:\n            raise NotImplementedError\n\n\nclass ChooseSimpleSubprocVecEnv(ShareVecEnv):\n    def __init__(self, env_fns, spaces=None):\n        \"\"\"\n        envs: list of gym environments to run in subprocesses\n        \"\"\"\n        self.waiting = False\n        self.closed = False\n        nenvs = len(env_fns)\n        self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])\n        self.ps = [Process(target=choosesimpleworker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))\n                   for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]\n        for p in self.ps:\n            p.daemon = True  # if the main process crashes, we should not cause things to hang\n            p.start()\n        for remote in self.work_remotes:\n            remote.close()\n        self.remotes[0].send(('get_spaces', None))\n        observation_space, share_observation_space, action_space = self.remotes[0].recv()\n        ShareVecEnv.__init__(self, len(env_fns), observation_space,\n                             share_observation_space, action_space)\n\n    def step_async(self, actions):\n        for remote, action in zip(self.remotes, actions):\n            remote.send(('step', action))\n        self.waiting = True\n\n    def step_wait(self):\n        results = [remote.recv() for remote in self.remotes]\n        self.waiting = False\n        obs, rews, cos, dones, infos = zip(*results)\n        return np.stack(obs), np.stack(rews), np.stack(cos), np.stack(dones), infos\n\n    def reset(self, reset_choose):\n        for remote, choose in zip(self.remotes, reset_choose):\n            remote.send(('reset', choose))\n        obs = [remote.recv() for remote in self.remotes]\n        return np.stack(obs)\n\n    def render(self, mode=\"rgb_array\"):\n        for remote in self.remotes:\n            remote.send(('render', mode))\n        if mode == \"rgb_array\":   \n            frame = [remote.recv() for remote in self.remotes]\n            return np.stack(frame)\n\n    def reset_task(self):\n        for remote in self.remotes:\n            remote.send(('reset_task', None))\n        return np.stack([remote.recv() for remote in self.remotes])\n\n    def close(self):\n        if self.closed:\n            return\n        if self.waiting:\n            for remote in self.remotes:\n                remote.recv()\n        for remote in self.remotes:\n            remote.send(('close', None))\n        for p in self.ps:\n            p.join()\n        self.closed = True\n\n\ndef chooseworker(remote, parent_remote, env_fn_wrapper):\n    parent_remote.close()\n    env = env_fn_wrapper.x()\n    while True:\n        cmd, data = remote.recv()\n        if cmd == 'step':\n            ob, s_ob, reward, done, info, available_actions = env.step(data)\n            remote.send((ob, s_ob, reward, info[\"cost\"], done, info, available_actions))\n        elif cmd == 'reset':\n            ob, s_ob, available_actions = env.reset(data)\n            remote.send((ob, s_ob, available_actions))\n        elif cmd == 'reset_task':\n            ob = env.reset_task()\n            remote.send(ob)\n        elif cmd == 'close':\n            env.close()\n            remote.close()\n            break\n        elif cmd == 'render':\n            remote.send(env.render(mode='rgb_array'))\n        elif cmd == 'get_spaces':\n            remote.send(\n                (env.observation_space, env.share_observation_space, env.action_space))\n        else:\n            raise NotImplementedError\n\n\nclass ChooseSubprocVecEnv(ShareVecEnv):\n    def __init__(self, env_fns, spaces=None):\n        \"\"\"\n        envs: list of gym environments to run in subprocesses\n        \"\"\"\n        self.waiting = False\n        self.closed = False\n        nenvs = len(env_fns)\n        self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])\n        self.ps = [Process(target=chooseworker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))\n                   for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]\n        for p in self.ps:\n            p.daemon = True  # if the main process crashes, we should not cause things to hang\n            p.start()\n        for remote in self.work_remotes:\n            remote.close()\n        self.remotes[0].send(('get_spaces', None))\n        observation_space, share_observation_space, action_space = self.remotes[0].recv(\n        )\n        ShareVecEnv.__init__(self, len(env_fns), observation_space,\n                             share_observation_space, action_space)\n\n    def step_async(self, actions):\n        for remote, action in zip(self.remotes, actions):\n            remote.send(('step', action))\n        self.waiting = True\n\n    def step_wait(self):\n        results = [remote.recv() for remote in self.remotes]\n        self.waiting = False\n        obs, share_obs, rews, cos, dones, infos, available_actions = zip(*results)\n        return np.stack(obs), np.stack(share_obs), np.stack(rews), np.stack(cos), np.stack(dones), infos, np.stack(available_actions)\n\n    def reset(self, reset_choose):\n        for remote, choose in zip(self.remotes, reset_choose):\n            remote.send(('reset', choose))\n        results = [remote.recv() for remote in self.remotes]\n        obs, share_obs, available_actions = zip(*results)\n        return np.stack(obs), np.stack(share_obs), np.stack(available_actions)\n\n    def reset_task(self):\n        for remote in self.remotes:\n            remote.send(('reset_task', None))\n        return np.stack([remote.recv() for remote in self.remotes])\n\n    def close(self):\n        if self.closed:\n            return\n        if self.waiting:\n            for remote in self.remotes:\n                remote.recv()\n        for remote in self.remotes:\n            remote.send(('close', None))\n        for p in self.ps:\n            p.join()\n        self.closed = True\n\n\ndef chooseguardworker(remote, parent_remote, env_fn_wrapper):\n    parent_remote.close()\n    env = env_fn_wrapper.x()\n    while True:\n        cmd, data = remote.recv()\n        if cmd == 'step':\n            ob, reward, done, info = env.step(data)\n            remote.send((ob, reward, info[\"cost\"], done, info))\n        elif cmd == 'reset':\n            ob = env.reset(data)\n            remote.send((ob))\n        elif cmd == 'reset_task':\n            ob = env.reset_task()\n            remote.send(ob)\n        elif cmd == 'close':\n            env.close()\n            remote.close()\n            break\n        elif cmd == 'get_spaces':\n            remote.send(\n                (env.observation_space, env.share_observation_space, env.action_space))\n        else:\n            raise NotImplementedError\n\n\nclass ChooseGuardSubprocVecEnv(ShareVecEnv):\n    def __init__(self, env_fns, spaces=None):\n        \"\"\"\n        envs: list of gym environments to run in subprocesses\n        \"\"\"\n        self.waiting = False\n        self.closed = False\n        nenvs = len(env_fns)\n        self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])\n        self.ps = [Process(target=chooseguardworker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))\n                   for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]\n        for p in self.ps:\n            p.daemon = False  # if the main process crashes, we should not cause things to hang\n            p.start()\n        for remote in self.work_remotes:\n            remote.close()\n        self.remotes[0].send(('get_spaces', None))\n        observation_space, share_observation_space, action_space = self.remotes[0].recv(\n        )\n        ShareVecEnv.__init__(self, len(env_fns), observation_space,\n                             share_observation_space, action_space)\n\n    def step_async(self, actions):\n        for remote, action in zip(self.remotes, actions):\n            remote.send(('step', action))\n        self.waiting = True\n\n    def step_wait(self):\n        results = [remote.recv() for remote in self.remotes]\n        self.waiting = False\n        obs, rews, cos, dones, infos = zip(*results)\n        return np.stack(obs), np.stack(rews), np.stack(cos), np.stack(dones), infos\n\n    def reset(self, reset_choose):\n        for remote, choose in zip(self.remotes, reset_choose):\n            remote.send(('reset', choose))\n        obs = [remote.recv() for remote in self.remotes]\n        return np.stack(obs)\n\n    def reset_task(self):\n        for remote in self.remotes:\n            remote.send(('reset_task', None))\n        return np.stack([remote.recv() for remote in self.remotes])\n\n    def close(self):\n        if self.closed:\n            return\n        if self.waiting:\n            for remote in self.remotes:\n                remote.recv()\n        for remote in self.remotes:\n            remote.send(('close', None))\n        for p in self.ps:\n            p.join()\n        self.closed = True\n\n\n# single env\nclass DummyVecEnv(ShareVecEnv):\n    def __init__(self, env_fns):\n        self.envs = [fn() for fn in env_fns]\n        env = self.envs[0]\n        ShareVecEnv.__init__(self, len(\n            env_fns), env.observation_space, env.share_observation_space, env.action_space)\n        self.actions = None\n\n    def step_async(self, actions):\n        self.actions = actions\n\n    def step_wait(self):\n        results = [env.step(a) for (a, env) in zip(self.actions, self.envs)]\n        obs, rews, cos, dones, infos = map(np.array, zip(*results))\n\n        for (i, done) in enumerate(dones):\n            if 'bool' in done.__class__.__name__:\n                if done:\n                    obs[i] = self.envs[i].reset()\n            else:\n                if np.all(done):\n                    obs[i] = self.envs[i].reset()\n\n        self.actions = None\n        return obs, rews, cos, dones, infos\n\n    def reset(self):\n        obs = [env.reset() for env in self.envs]\n        return np.array(obs)\n\n    def close(self):\n        for env in self.envs:\n            env.close()\n\n    def render(self, mode=\"human\"):\n        if mode == \"rgb_array\":\n            return np.array([env.render(mode=mode) for env in self.envs])\n        elif mode == \"human\":\n            for env in self.envs:\n                env.render(mode=mode)\n        else:\n            raise NotImplementedError\n\n\n\nclass ShareDummyVecEnv(ShareVecEnv):\n    def __init__(self, env_fns):\n        self.envs = [fn() for fn in env_fns]\n        env = self.envs[0]\n        ShareVecEnv.__init__(self, len(\n            env_fns), env.observation_space, env.share_observation_space, env.action_space)\n        self.actions = None\n\n    def step_async(self, actions):\n        self.actions = actions\n\n    def step_wait(self):\n        results = [env.step(a) for (a, env) in zip(self.actions, self.envs)]\n        obs, share_obs, rews, cos, dones, infos, available_actions = map(\n            np.array, zip(*results))\n\n        for (i, done) in enumerate(dones):\n            if 'bool' in done.__class__.__name__:\n                if done:\n                    obs[i], share_obs[i], available_actions[i] = self.envs[i].reset()\n            else:\n                if np.all(done):\n                    obs[i], share_obs[i], available_actions[i] = self.envs[i].reset()\n        self.actions = None\n\n        return obs, share_obs, rews, cos, dones, infos, available_actions\n\n    def reset(self):\n        results = [env.reset() for env in self.envs]\n        obs, share_obs, available_actions = map(np.array, zip(*results))\n        return obs, share_obs, available_actions\n\n    def close(self):\n        for env in self.envs:\n            env.close()\n    \n    def render(self, mode=\"human\"):\n        if mode == \"rgb_array\":\n            return np.array([env.render(mode=mode) for env in self.envs])\n        elif mode == \"human\":\n            for env in self.envs:\n                env.render(mode=mode)\n        else:\n            raise NotImplementedError\n\n\nclass ChooseDummyVecEnv(ShareVecEnv):\n    def __init__(self, env_fns):\n        self.envs = [fn() for fn in env_fns]\n        env = self.envs[0]\n        ShareVecEnv.__init__(self, len(\n            env_fns), env.observation_space, env.share_observation_space, env.action_space)\n        self.actions = None\n\n    def step_async(self, actions):\n        self.actions = actions\n\n    def step_wait(self):\n        results = [env.step(a) for (a, env) in zip(self.actions, self.envs)]\n        obs, share_obs, rews, cos, dones, infos, available_actions = map(\n            np.array, zip(*results))\n        self.actions = None\n        return obs, share_obs, rews, cos, dones, infos, available_actions\n\n    def reset(self, reset_choose):\n        results = [env.reset(choose)\n                   for (env, choose) in zip(self.envs, reset_choose)]\n        obs, share_obs, available_actions = map(np.array, zip(*results))\n        return obs, share_obs, available_actions\n\n    def close(self):\n        for env in self.envs:\n            env.close()\n\n    def render(self, mode=\"human\"):\n        if mode == \"rgb_array\":\n            return np.array([env.render(mode=mode) for env in self.envs])\n        elif mode == \"human\":\n            for env in self.envs:\n                env.render(mode=mode)\n        else:\n            raise NotImplementedError\n\nclass ChooseSimpleDummyVecEnv(ShareVecEnv):\n    def __init__(self, env_fns):\n        self.envs = [fn() for fn in env_fns]\n        env = self.envs[0]\n        ShareVecEnv.__init__(self, len(\n            env_fns), env.observation_space, env.share_observation_space, env.action_space)\n        self.actions = None\n\n    def step_async(self, actions):\n        self.actions = actions\n\n    def step_wait(self):\n        results = [env.step(a) for (a, env) in zip(self.actions, self.envs)]\n        obs, rews, cos, dones, infos = map(np.array, zip(*results))\n        self.actions = None\n        return obs, rews, cos, dones, infos\n\n    def reset(self, reset_choose):\n        obs = [env.reset(choose)\n                   for (env, choose) in zip(self.envs, reset_choose)]\n        return np.array(obs)\n\n    def close(self):\n        for env in self.envs:\n            env.close()\n\n    def render(self, mode=\"human\"):\n        if mode == \"rgb_array\":\n            return np.array([env.render(mode=mode) for env in self.envs])\n        elif mode == \"human\":\n            for env in self.envs:\n                env.render(mode=mode)\n        else:\n            raise NotImplementedError\n"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/MUJOCO_LOG.TXT",
    "content": "Sun Aug 29 11:16:41 2021\nERROR: Expired activation key\n\n"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/README.md",
    "content": "#### Safety Multi-agent Mujoco \n\n\n## 1. Sate Many Agent Ant\n\nAccording to Zanger's work, \n\nThe reward function is equal to the rewards in the common Ant-v2 environment and comprises the torso velocity in global x-direction, a negative control reward on exerted torque, a negative contact reward and a constant positive reward for survival, which results in\n\n<img src=\"https://latex.codecogs.com/png.image?\\dpi{110}&space;r=\\frac{x_{\\text&space;{torso&space;},&space;t&plus;1}-x_{\\text&space;{torso&space;},&space;t}}{d&space;t}-\\frac{1}{2}\\left\\|\\boldsymbol{a}_{t}\\right\\|_{2}^{2}-\\frac{1}{2&space;*&space;10^{3}}&space;\\|&space;\\text&space;{&space;contact&space;}_{t}&space;\\|_{2}^{2}&plus;1\" title=\"r=\\frac{x_{\\text {torso }, t+1}-x_{\\text {torso }, t}}{d t}-\\frac{1}{2}\\left\\|\\boldsymbol{a}_{t}\\right\\|_{2}^{2}-\\frac{1}{2 * 10^{3}} \\| \\text { contact }_{t} \\|_{2}^{2}+1\" />\n\n```python\nxposafter = self.get_body_com(\"torso_0\")[0]\nforward_reward = (xposafter - xposbefore)/self.dt\nctrl_cost = .5 * np.square(a).sum()\ncontact_cost = 0.5 * 1e-3 * np.sum(np.square(np.clip(self.sim.data.cfrc_ext, -1, 1)))\nsurvive_reward = 1.0\n        \nreward = forward_reward - ctrl_cost - contact_cost + survive_reward\n```\n\nAnd the cost,\n\n\n<img src=\"https://latex.codecogs.com/png.image?\\dpi{110}&space;c=&space;\\begin{cases}0,&space;&&space;\\text&space;{&space;for&space;}&space;\\quad&space;0.2&space;\\leq&space;z_{\\text&space;{torso&space;},&space;t&plus;1}&space;\\leq&space;1.0&space;\\\\&space;&&space;\\text&space;{&space;and&space;}\\left\\|\\boldsymbol{x}_{\\text&space;{torso&space;},&space;t&plus;1}-\\boldsymbol{x}_{\\text&space;{wall&space;}}\\right\\|_{2}&space;\\geq&space;1.8&space;\\\\&space;1,&space;&&space;\\text&space;{&space;else&space;}\\end{cases}\" title=\"c= \\begin{cases}0, & \\text { for } \\quad 0.2 \\leq z_{\\text {torso }, t+1} \\leq 1.0 \\\\ & \\text { and }\\left\\|\\boldsymbol{x}_{\\text {torso }, t+1}-\\boldsymbol{x}_{\\text {wall }}\\right\\|_{2} \\geq 1.8 \\\\ 1, & \\text { else }\\end{cases}\" />\n\n\n\n```python\nyposafter = self.get_body_com(\"torso_0\")[1]\nywall = np.array([-5, 5])\nif xposafter < 20:\n  y_walldist = yposafter - xposafter * np.tan(30 / 360 * 2 * np.pi) + ywall\nelif xposafter>20 and xposafter<60:\n  y_walldist = yposafter + (xposafter-40)*np.tan(30/360*2*np.pi) - ywall\nelif xposafter>60 and xposafter<100:\n  y_walldist = yposafter - (xposafter-80)*np.tan(30/360*2*np.pi) + ywall\nelse:\n  y_walldist = yposafter - 20*np.tan(30/360*2*np.pi) + ywall\nobj_cost = (abs(y_walldist) < 1.8).any() * 1.0\n\nbody_quat = self.data.get_body_xquat('torso_0')\n z_rot = 1-2*(body_quat[1]**2+body_quat[2]**2)  ### normally xx-rotation, not sure what axes mujoco uses\n\nstate = self.state_vector()\nnotdone = np.isfinite(state).all() \\\n                      and state[2] >= 0.2 and state[2] <= 1.0\\\n                      and z_rot>=-0.7 #ADDED\ndone = not notdone\ndone_cost = done * 1.0\n\ncost = np.clip(obj_cost + done_cost, 0, 1)\n```\n\n\n[1] Zanger, Moritz A., Karam Daaboul, and J. Marius Zöllner. 2021. “Safe Continuous Control with Constrained Model-Based Policy Optimization.” arXiv [cs.LG]. arXiv. http://arxiv.org/abs/2104.06922.\n"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/__init__.py",
    "content": ""
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/__init__.py",
    "content": "from .mujoco_multi import MujocoMulti\nfrom .coupled_half_cheetah import CoupledHalfCheetah\nfrom .manyagent_swimmer import ManyAgentSwimmerEnv\nfrom .manyagent_ant import ManyAgentAntEnv\n"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/ant.py",
    "content": "import numpy as np\n# from mujoco_safety_gym.envs import mujoco_env\nfrom mappo_lagrangian.envs.safety_ma_mujoco.safety_multiagent_mujoco import mujoco_env\nfrom gym import utils\nimport mujoco_py as mjp\n\n\nclass AntEnv(mujoco_env.MujocoEnv, utils.EzPickle):\n    def __init__(self, **kwargs):\n        mujoco_env.MujocoEnv.__init__(self, 'ant.xml', 5)\n        utils.EzPickle.__init__(self)\n\n    def step(self, a):\n        xposbefore = self.get_body_com(\"torso\")[0]\n        self.do_simulation(a, self.frame_skip)\n        mjp.functions.mj_rnePostConstraint(self.sim.model,\n                                           self.sim.data)  #### calc contacts, this is a mujoco py version mismatch issue with mujoco200\n        xposafter = self.get_body_com(\"torso\")[0]\n        forward_reward = (xposafter - xposbefore) / self.dt\n        ctrl_cost = .5 * np.square(a).sum()\n        contact_cost = 0.5 * 1e-3 * np.sum(\n            np.square(np.clip(self.sim.data.cfrc_ext, -1, 1)))\n        survive_reward = 1.0\n\n        ### safety stuff\n        yposafter = self.get_body_com(\"torso\")[1]\n        ywall = np.array([-5, 5])\n        if xposafter < 20:\n            y_walldist = yposafter - xposafter * np.tan(30 / 360 * 2 * np.pi) + ywall\n        elif xposafter > 20 and xposafter < 60:\n            y_walldist = yposafter + (xposafter - 40) * np.tan(30 / 360 * 2 * np.pi) - ywall\n        elif xposafter > 60 and xposafter < 100:\n            y_walldist = yposafter - (xposafter - 80) * np.tan(30 / 360 * 2 * np.pi) + ywall\n        else:\n            y_walldist = yposafter - 20 * np.tan(30 / 360 * 2 * np.pi) + ywall\n\n        obj_cost = (abs(y_walldist) < 1.8).any() * 1.0\n        reward = forward_reward - ctrl_cost - contact_cost + survive_reward\n\n        body_quat = self.data.get_body_xquat('torso')\n        z_rot = 1 - 2 * (\n                    body_quat[1] ** 2 + body_quat[2] ** 2)  ### normally xx-rotation, not sure what axes mujoco uses\n        state = self.state_vector()\n        notdone = np.isfinite(state).all() \\\n                  and state[2] >= 0.2 and state[2] <= 1.0 \\\n                  and z_rot >= -0.7\n        done = not notdone\n        done_cost = done * 1.0\n        cost = np.clip(obj_cost + done_cost, 0, 1)\n        ob = self._get_obs()\n        return ob, reward, done, dict(\n            reward_forward=forward_reward,\n            reward_ctrl=-ctrl_cost,\n            reward_contact=-contact_cost,\n            reward_survive=survive_reward,\n            cost_obj=obj_cost,\n            cost_done=done_cost,\n            cost=cost,\n        )\n\n    def _get_obs(self):\n        x = self.sim.data.qpos.flat[0]\n        y = self.sim.data.qpos.flat[1]\n        if x < 20:\n            y_off = y - x * np.tan(30 / 360 * 2 * np.pi)\n        elif x > 20 and x < 60:\n            y_off = y + (x - 40) * np.tan(30 / 360 * 2 * np.pi)\n        elif x > 60 and x < 100:\n            y_off = y - (x - 80) * np.tan(30 / 360 * 2 * np.pi)\n        else:\n            y_off = y - 20 * np.tan(30 / 360 * 2 * np.pi)\n\n        return np.concatenate([\n            self.sim.data.qpos.flat[2:-42],\n            self.sim.data.qvel.flat[:-36],\n            [x / 5],\n            [y_off],\n            # np.clip(self.sim.data.cfrc_ext, -1, 1).flat,\n        ])\n\n    def reset_model(self):\n        qpos = self.init_qpos + self.np_random.uniform(size=self.model.nq, low=-.1, high=.1)\n        qpos[-42:] = self.init_qpos[-42:]\n        qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1\n        qvel[-36:] = self.init_qvel[-36:]\n        self.set_state(qpos, qvel)\n        return self._get_obs()\n\n    def viewer_setup(self):\n        self.viewer.cam.distance = self.model.stat.extent * 0.5"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/.gitignore",
    "content": "*.auto.xml\n"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/__init__.py",
    "content": ""
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/ant.xml",
    "content": "<mujoco model=\"ant\">\n  <compiler angle=\"degree\" coordinate=\"local\" inertiafromgeom=\"true\"/>\n  <option integrator=\"RK4\" timestep=\"0.01\"/>\n  <custom>\n    <numeric data=\"0.0 0.0 0.55 1.0 0.0 0.0 0.0 0.0 1.0 0.0 -1.0 0.0 -1.0 0.0 1.0\" name=\"init_qpos\"/>\n  </custom>\n  <default>\n    <joint armature=\"1\" damping=\"1\" limited=\"true\"/>\n    <geom conaffinity=\"0\" condim=\"3\" density=\"5.0\" friction=\"1 0.5 0.5\" margin=\"0.01\" rgba=\"0.8 0.6 0.4 1\"/>\n  </default>\n  <asset>\n    <texture builtin=\"gradient\" height=\"100\" rgb1=\"1 1 1\" rgb2=\"0 0 0\" type=\"skybox\" width=\"100\"/>\n    <texture builtin=\"flat\" height=\"1278\" mark=\"cross\" markrgb=\"1 1 1\" name=\"texgeom\" random=\"0.01\" rgb1=\"0.8 0.6 0.4\" rgb2=\"0.8 0.6 0.4\" type=\"cube\" width=\"127\"/>\n    <texture builtin=\"checker\" height=\"100\" name=\"texplane\" rgb1=\"#2c5987\" rgb2=\"#1f4060\" type=\"2d\" width=\"100\"/>\n    <material name=\"MatPlane\" reflectance=\"0.5\" shininess=\"1\" specular=\"1\" texrepeat=\"60 60\" texture=\"texplane\"/>\n    <texture builtin=\"checker\" height=\"100\" name=\"texbox\" rgb1=\"#ff66ff\" rgb2=\"#ff66ff\" type=\"2d\" width=\"100\"/>\n    <material name=\"BoxMat\" reflectance=\"0.5\" shininess=\"1\" specular=\"1\" texrepeat=\"60 60\" texture=\"texbox\"/>\n\n    <material name=\"geom\" texture=\"texgeom\" texuniform=\"true\"/>\n  </asset>\n  <worldbody>\n    <light cutoff=\"100\" diffuse=\"1 1 1\" dir=\"-0 0 -1.3\" directional=\"true\" exponent=\"1\" pos=\"0 0 1.3\" specular=\".1 .1 .1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" material=\"MatPlane\" name=\"floor\" pos=\"30 0 0\" rgba=\"0.2 0.2 0.2 1\" size=\"70 25 40\" type=\"plane\"/>\n    <!-- <geom conaffinity=\"1\" condim=\"3\" name=\"obj11\" type=\"box\" material=\"BoxMat\" size=\"0.5 0.5 0.5\" pos=\"10  0 .5\"    rgba=\"#ff66ff\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"obj12\" type=\"box\" material=\"BoxMat\" size=\"0.5 0.5 0.5\" pos=\"10 -10 .5\"    rgba=\"#ff66ff\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"obj13\" type=\"box\" material=\"BoxMat\" size=\"0.5 0.5 0.5\" pos=\"10  10 .5\"    rgba=\"#ff66ff\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"obj21\" type=\"box\" material=\"BoxMat\" size=\"0.5 0.5 0.5\" pos=\"20 -4 .5\"    rgba=\"#ff66ff\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"obj22\" type=\"box\" material=\"BoxMat\" size=\"0.5 0.5 0.5\" pos=\"20  4 .5\"    rgba=\"#ff66ff\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"obj23\" type=\"box\" material=\"BoxMat\" size=\"0.5 0.5 0.5\" pos=\"20 -14 .5\"    rgba=\"#ff66ff\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"obj24\" type=\"box\" material=\"BoxMat\" size=\"0.5 0.5 0.5\" pos=\"20  14 .5\"    rgba=\"#ff66ff\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"obj31\" type=\"box\" material=\"BoxMat\" size=\"0.5 0.5 0.5\" pos=\"30  0 .5\"    rgba=\"#ff66ff\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"obj32\" type=\"box\" material=\"BoxMat\" size=\"0.5 0.5 0.5\" pos=\"30 -9 .5\"    rgba=\"#ff66ff\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"obj33\" type=\"box\" material=\"BoxMat\" size=\"0.5 0.5 0.5\" pos=\"30  11 .5\"    rgba=\"#ff66ff\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"obj34\" type=\"box\" material=\"BoxMat\" size=\"0.5 0.5 0.5\" pos=\"30 -16 .5\"    rgba=\"#ff66ff\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"obj35\" type=\"box\" material=\"BoxMat\" size=\"0.5 0.5 0.5\" pos=\"30  19 .5\"    rgba=\"#ff66ff\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall1\" type=\"box\" material=\"BoxMat\" size=\"0.1 14 1.0\" pos=\"-14  0 1\"    rgba=\"#ff66ff\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall2\" type=\"box\" material=\"BoxMat\" size=\"14 .1 1.0\" pos=\"0  14 1\"    rgba=\"#ff66ff\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall3\" type=\"box\" material=\"BoxMat\" size=\"14 0.1 1.0\" pos=\"0  -14 1.0\"    rgba=\"#ff66ff\"/> -->\n    <!-- <geom conaffinity=\"1\" condim=\"3\" name=\"wall2\" type=\"box\" density=\".01\" size=\"20 0.1 1.0\" pos=\"0   6 1.0\"   euler='0 0 30'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall3\" type=\"box\" density=\".01\" size=\"20 0.1 1.0\" pos=\"40 -6 1.0\"  euler='0 0 -30'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall4\" type=\"box\" density=\".01\" size=\"20 0.1 1.0\" pos=\"40  6 1.0\"  euler='0 0 -30'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall5\" type=\"box\" density=\".01\" size=\"20 0.1 1.0\" pos=\"80 -6 1.0\"   euler='0 0 30'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall6\" type=\"box\" density=\".01\" size=\"20 0.1 1.0\" pos=\"80  6 1.0\"   euler='0 0 30'  rgba=\"1 0.5 0.5 1\"/> -->\n    <body name=\"torso\" pos=\"0 0 0.75\">\n      <camera name=\"track\" mode=\"trackcom\" pos=\"0 -10 -10\" xyaxes=\".8 .4 0 0 .4 .6\"/>\n      <geom name=\"torso_geom\" pos=\"0 0 0\" size=\"0.25\" type=\"sphere\"/>\n      <joint armature=\"0\" damping=\"0\" limited=\"false\" margin=\"0.01\" name=\"root\" pos=\"0 0 0\" type=\"free\"/>\n      <body name=\"front_left_leg\" pos=\"0 0 0\">\n        <geom fromto=\"0.0 0.0 0.0 0.2 0.2 0.0\" name=\"aux_1_geom\" size=\"0.08\" type=\"capsule\"/>\n        <body name=\"aux_1\" pos=\"0.2 0.2 0\">\n          <joint axis=\"0 0 1\" name=\"hip_1\" pos=\"0.0 0.0 0.0\" range=\"-30 30\" type=\"hinge\"/>\n          <geom fromto=\"0.0 0.0 0.0 0.2 0.2 0.0\" name=\"left_leg_geom\" size=\"0.08\" type=\"capsule\"/>\n          <body pos=\"0.2 0.2 0\">\n            <joint axis=\"-1 1 0\" name=\"ankle_1\" pos=\"0.0 0.0 0.0\" range=\"30 70\" type=\"hinge\"/>\n            <geom fromto=\"0.0 0.0 0.0 0.4 0.4 0.0\" name=\"left_ankle_geom\" size=\"0.08\" type=\"capsule\"/>\n          </body>\n        </body>\n      </body>\n      <body name=\"front_right_leg\" pos=\"0 0 0\">\n        <geom fromto=\"0.0 0.0 0.0 -0.2 0.2 0.0\" name=\"aux_2_geom\" size=\"0.08\" type=\"capsule\"/>\n        <body name=\"aux_2\" pos=\"-0.2 0.2 0\">\n          <joint axis=\"0 0 1\" name=\"hip_2\" pos=\"0.0 0.0 0.0\" range=\"-30 30\" type=\"hinge\"/>\n          <geom fromto=\"0.0 0.0 0.0 -0.2 0.2 0.0\" name=\"right_leg_geom\" size=\"0.08\" type=\"capsule\"/>\n          <body pos=\"-0.2 0.2 0\">\n            <joint axis=\"1 1 0\" name=\"ankle_2\" pos=\"0.0 0.0 0.0\" range=\"-70 -30\" type=\"hinge\"/>\n            <geom fromto=\"0.0 0.0 0.0 -0.4 0.4 0.0\" name=\"right_ankle_geom\" size=\"0.08\" type=\"capsule\"/>\n          </body>\n        </body>\n      </body>\n      <body name=\"back_leg\" pos=\"0 0 0\">\n        <geom fromto=\"0.0 0.0 0.0 -0.2 -0.2 0.0\" name=\"aux_3_geom\" size=\"0.08\" type=\"capsule\"/>\n        <body name=\"aux_3\" pos=\"-0.2 -0.2 0\">\n          <joint axis=\"0 0 1\" name=\"hip_3\" pos=\"0.0 0.0 0.0\" range=\"-30 30\" type=\"hinge\"/>\n          <geom fromto=\"0.0 0.0 0.0 -0.2 -0.2 0.0\" name=\"back_leg_geom\" size=\"0.08\" type=\"capsule\"/>\n          <body pos=\"-0.2 -0.2 0\">\n            <joint axis=\"-1 1 0\" name=\"ankle_3\" pos=\"0.0 0.0 0.0\" range=\"-70 -30\" type=\"hinge\"/>\n            <geom fromto=\"0.0 0.0 0.0 -0.4 -0.4 0.0\" name=\"third_ankle_geom\" size=\"0.08\" type=\"capsule\"/>\n          </body>\n        </body>\n      </body>\n      <body name=\"right_back_leg\" pos=\"0 0 0\">\n        <geom fromto=\"0.0 0.0 0.0 0.2 -0.2 0.0\" name=\"aux_4_geom\" size=\"0.08\" type=\"capsule\"/>\n        <body name=\"aux_4\" pos=\"0.2 -0.2 0\">\n          <joint axis=\"0 0 1\" name=\"hip_4\" pos=\"0.0 0.0 0.0\" range=\"-30 30\" type=\"hinge\"/>\n          <geom fromto=\"0.0 0.0 0.0 0.2 -0.2 0.0\" name=\"rightback_leg_geom\" size=\"0.08\" type=\"capsule\"/>\n          <body pos=\"0.2 -0.2 0\">\n            <joint axis=\"1 1 0\" name=\"ankle_4\" pos=\"0.0 0.0 0.0\" range=\"30 70\" type=\"hinge\"/>\n            <geom fromto=\"0.0 0.0 0.0 0.4 -0.4 0.0\" name=\"fourth_ankle_geom\" size=\"0.08\" type=\"capsule\"/>\n          </body>\n        </body>\n      </body>\n    </body>\n    <body name='b1' pos=\"0 5 1\" euler='0 0 30'>\n      <freejoint name=\"b1_fj\"/>\n      <geom conaffinity=\"1\" condim=\"3\" name=\"wall1\" type=\"box\" density=\".000001\" size=\"20 0.01 .7\"    rgba=\"1 0.5 0.5 1\"/>\n    </body>\n    <body name='b2' pos=\"0 -5 1\" euler='0 0 30'>\n      <freejoint name=\"b2_fj\"/>\n      <geom conaffinity=\"1\" condim=\"3\" name=\"wall2\" type=\"box\" density=\".000001\" size=\"20 0.01 .7\"    rgba=\"1 0.5 0.5 1\"/>\n    </body>\n    <body name='b3' pos=\"40 5 1\" euler='0 0 -30'>\n      <freejoint name=\"b3_fj\"/>\n      <geom conaffinity=\"1\" condim=\"3\" name=\"wall3\" type=\"box\" density=\".000001\" size=\"20 0.01 .7\"    rgba=\"1 0.5 0.5 1\"/>\n    </body>\n    <body name='b4' pos=\"40 -5 1\" euler='0 0 -30'>\n      <freejoint name=\"b4_fj\"/>\n      <geom conaffinity=\"1\" condim=\"3\" name=\"wall4\" type=\"box\" density=\".000001\" size=\"20 0.01 .7\"    rgba=\"1 0.5 0.5 1\"/>\n    </body>\n    <body name='b5' pos=\"80 5 1\" euler='0 0 30'>\n      <freejoint name=\"b5_fj\"/>\n      <geom conaffinity=\"1\" condim=\"3\" name=\"wall5\" type=\"box\" density=\".000001\" size=\"20 0.01 .7\"    rgba=\"1 0.5 0.5 1\"/>\n    </body>\n    <body name='b6' pos=\"80 -5 1\" euler='0 0 30'>\n      <freejoint name=\"b6_fj\"/>\n      <geom conaffinity=\"1\" condim=\"3\" name=\"wall6\" type=\"box\" density=\".000001\" size=\"20 0.01 .7\"    rgba=\"1 0.5 0.5 1\"/>\n    </body>\n  </worldbody>\n  <actuator>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"hip_4\" gear=\"150\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"ankle_4\" gear=\"150\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"hip_1\" gear=\"150\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"ankle_1\" gear=\"150\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"hip_2\" gear=\"150\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"ankle_2\" gear=\"150\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"hip_3\" gear=\"150\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"ankle_3\" gear=\"150\"/>\n  </actuator>\n</mujoco>"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/beifen_hopper.xml",
    "content": "<mujoco model=\"hopper\">\n  <compiler angle=\"degree\" coordinate=\"global\" inertiafromgeom=\"true\"/>\n  <default>\n    <joint armature=\"1\" damping=\"1\" limited=\"true\"/>\n    <geom conaffinity=\"1\" condim=\"1\" contype=\"1\" margin=\"0.001\" material=\"geom\" rgba=\"0.8 0.6 .4 1\" solimp=\".8 .8 .01\" solref=\".02 1\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-.4 .4\"/>\n  </default>\n  <option integrator=\"RK4\" timestep=\"0.002\"/>\n  <visual>\n    <map znear=\"0.02\"/>\n  </visual>\n  <worldbody>\n    <light cutoff=\"100\" diffuse=\"1 1 1\" dir=\"-0 0 -1.3\" directional=\"true\" exponent=\"1\" pos=\"0 0 1.3\" specular=\".1 .1 .1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"floor\" pos=\"40 0 0\" rgba=\"0.2 0.2 0.2 1\" size=\"100 25 .125\" type=\"plane\" material=\"MatPlane\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall1N\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"-50  -4 1.0\"  euler='0 0 0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall2N\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"-50   4 1.0\"  euler='0 0 0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall1\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"0  -4 1.0\"  euler='0 0 0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall2\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"0   4 1.0\"  euler='0 0 0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall3\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"50 -4 1.0\"  euler='0 0 -0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall4\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"50  4 1.0\"  euler='0 0 -0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall5\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"100 -4 1.0\"  euler='0 0 0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall6\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"100  4 1.0\"  euler='0 0 0'  rgba=\"1 0.5 0.5 1\"/>\n\n    <body name=\"mocap1\" pos=\"5 0 .5\" mocap=\"true\">\n        <geom conaffinity=\"0\" condim=\"3\" name=\"mocap_geom\" pos='5 0 .5' density=\"0.0001\" type=\"box\" size=\".3 1.3 1.3\"  rgba=\"1 0.5 0.5 0\"/>\n    </body>\n    <body name=\"obj1\" pos=\"5 0 .5\">\n        <freejoint name=\"obj1_fj\"/>\n        <geom conaffinity=\"1\" condim=\"3\" name=\"obj_geom\" pos='5 0 .5' density=\"0.0001\" type=\"box\" size=\".3 1.3 1.3\"  rgba=\"1 0.5 0.5 1\"/>\n    </body>\n\n<!--    <body name=\"obj2\" pos=\"-39 0 .7\">-->\n<!--        <geom conaffinity=\"1\" condim=\"3\" name=\"obj_geom\" pos='5 0 .7' density=\"0.0001\" type=\"box\" size=\".1 2.3 1.3\"  rgba=\"1 0.5 0.5 .8\"/>-->\n<!--          <joint axis=\"1 0 0\" damping=\".2\" name=\"wall_joint\" pos=\"2 0 .7\" range=\"-30 30\" stiffness=\".0\" type=\"slide\"/>-->\n<!--    </body>-->\n    <body name=\"torso\" pos=\"120 1 1.25\">\n      <camera name=\"track\" mode=\"trackcom\" pos=\"0 -3 1\" xyaxes=\"1 0 0 0 0 1\"/>\n      <joint armature=\"0\" axis=\"1 0 0\" damping=\"0\" limited=\"false\" name=\"rootx\" pos=\"0 0 0\" stiffness=\"0\" type=\"slide\"/>\n      <joint armature=\"0\" axis=\"0 0 1\" damping=\"0\" limited=\"false\" name=\"rootz\" pos=\"0 0 0\" ref=\"1.25\" stiffness=\"0\" type=\"slide\"/>\n      <joint armature=\"0\" axis=\"0 1 0\" damping=\"0\" limited=\"false\" name=\"rooty\" pos=\"0 0 1.25\" stiffness=\"0\" type=\"hinge\"/>\n      <geom friction=\"0.9\" fromto=\"0 0 1.45 0 0 1.05\" name=\"torso_geom\" size=\"0.05\" type=\"capsule\"/>\n      <body name=\"thigh\" pos=\"0 0 1.05\">\n        <joint axis=\"0 -1 0\" name=\"thigh_joint\" pos=\"0 0 1.05\" range=\"-150 0\" type=\"hinge\"/>\n        <geom friction=\"0.9\" fromto=\"0 0 1.05 0 0 0.6\" name=\"thigh_geom\" size=\"0.05\" type=\"capsule\"/>\n        <body name=\"leg\" pos=\"0 0 0.35\">\n          <joint axis=\"0 -1 0\" name=\"leg_joint\" pos=\"0 0 0.6\" range=\"-150 0\" type=\"hinge\"/>\n          <geom friction=\"0.9\" fromto=\"0 0 0.6 0 0 0.1\" name=\"leg_geom\" size=\"0.04\" type=\"capsule\"/>\n          <body name=\"foot\" pos=\"0.13/2 0 0.1\">\n            <joint axis=\"0 -1 0\" name=\"foot_joint\" pos=\"0 0 0.1\" range=\"-45 45\" type=\"hinge\"/>\n            <geom friction=\"2.0\" fromto=\"-0.13 0 0.1 0.26 0 0.1\" name=\"foot_geom\" size=\"0.06\" type=\"capsule\"/>\n          </body>\n        </body>\n      </body>\n    </body>\n  </worldbody>\n  <equality>\n    <weld name=\"weld1\" body1=\"mocap1\" body2=\"obj1\" solref=\".02 .5\"/>\n  </equality>\n  <actuator>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" gear=\"200.0\" joint=\"thigh_joint\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" gear=\"200.0\" joint=\"leg_joint\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" gear=\"200.0\" joint=\"foot_joint\"/>\n<!--    <motor gear=\"120\" joint=\"wall_joint\" name=\"wall_joint_ac\"/>-->\n  </actuator>\n    <asset>\n        <texture type=\"skybox\" builtin=\"gradient\" rgb1=\".4 .5 .6\" rgb2=\"0 0 0\"\n            width=\"100\" height=\"100\"/>\n        <texture builtin=\"flat\" height=\"1278\" mark=\"cross\" markrgb=\"1 1 1\" name=\"texgeom\" random=\"0.01\" rgb1=\"0.8 0.6 0.4\" rgb2=\"0.8 0.6 0.4\" type=\"cube\" width=\"127\"/>\n        <texture builtin=\"checker\" height=\"100\" name=\"texplane\" rgb1=\"#2c5987\" rgb2=\"#1f4060\" type=\"2d\" width=\"100\"/>\n        <material name=\"MatPlane\" reflectance=\"0.5\" shininess=\"1\" specular=\"1\" texrepeat=\"60 60\" texture=\"texplane\"/>\n        <material name=\"geom\" texture=\"texgeom\" texuniform=\"true\"/>\n    </asset>\n</mujoco>"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/coupled_half_cheetah.xml",
    "content": "<!-- Cheetah Model\n    The state space is populated with joints in the order that they are\n    defined in this file. The actuators also operate on joints.\n    State-Space (name/joint/parameter):\n        - rootx     slider      position (m)\n        - rootz     slider      position (m)\n        - rooty     hinge       angle (rad)\n        - bthigh    hinge       angle (rad)\n        - bshin     hinge       angle (rad)\n        - bfoot     hinge       angle (rad)\n        - fthigh    hinge       angle (rad)\n        - fshin     hinge       angle (rad)\n        - ffoot     hinge       angle (rad)\n        - rootx     slider      velocity (m/s)\n        - rootz     slider      velocity (m/s)\n        - rooty     hinge       angular velocity (rad/s)\n        - bthigh    hinge       angular velocity (rad/s)\n        - bshin     hinge       angular velocity (rad/s)\n        - bfoot     hinge       angular velocity (rad/s)\n        - fthigh    hinge       angular velocity (rad/s)\n        - fshin     hinge       angular velocity (rad/s)\n        - ffoot     hinge       angular velocity (rad/s)\n    Actuators (name/actuator/parameter):\n        - bthigh    hinge       torque (N m)\n        - bshin     hinge       torque (N m)\n        - bfoot     hinge       torque (N m)\n        - fthigh    hinge       torque (N m)\n        - fshin     hinge       torque (N m)\n        - ffoot     hinge       torque (N m)\n-->\n<mujoco model=\"cheetah\">\n  <compiler angle=\"radian\" coordinate=\"local\" inertiafromgeom=\"true\" settotalmass=\"14\"/>\n  <default>\n    <joint armature=\".1\" damping=\".01\" limited=\"true\" solimplimit=\"0 .8 .03\" solreflimit=\".02 1\" stiffness=\"8\"/>\n    <geom conaffinity=\"0\" condim=\"3\" contype=\"1\" friction=\".4 .1 .1\" rgba=\"0.8 0.6 .4 1\" solimp=\"0.0 0.8 0.01\" solref=\"0.02 1\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1 1\"/>\n  </default>\n  <size nstack=\"300000\" nuser_geom=\"1\"/>\n  <option gravity=\"0 0 -9.81\" timestep=\"0.01\"/>\n  <asset>\n    <texture builtin=\"gradient\" height=\"100\" rgb1=\"1 1 1\" rgb2=\"0 0 0\" type=\"skybox\" width=\"100\"/>\n    <texture builtin=\"flat\" height=\"1278\" mark=\"cross\" markrgb=\"1 1 1\" name=\"texgeom\" random=\"0.01\" rgb1=\"0.8 0.6 0.4\" rgb2=\"0.8 0.6 0.4\" type=\"cube\" width=\"127\"/>\n    <texture builtin=\"checker\" height=\"100\" name=\"texplane\" rgb1=\"0 0 0\" rgb2=\"0.8 0.8 0.8\" type=\"2d\" width=\"100\"/>\n    <material name=\"MatPlane\" reflectance=\"0.5\" shininess=\"1\" specular=\"1\" texrepeat=\"60 60\" texture=\"texplane\"/>\n    <material name=\"geom\" texture=\"texgeom\" texuniform=\"true\"/>\n  </asset>\n  <worldbody>\n    <light cutoff=\"100\" diffuse=\"1 1 1\" dir=\"-0 0 -1.3\" directional=\"true\" exponent=\"1\" pos=\"0 0 1.3\" specular=\".1 .1 .1\"/>\n    <light cutoff=\"100\" diffuse=\"1 1 1\" dir=\"-0 0 -1.3\" directional=\"true\" exponent=\"1\" pos=\"0 0 1.3\" specular=\".1 .1 .1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" material=\"MatPlane\" name=\"floor\" pos=\"65 0 0\" rgba=\"0.2 0.2 0.2 1\" size=\"150 40 40\" type=\"plane\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall1\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"0  -7.3 1.0\"  euler='0 0 0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall2\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"0   7.3 1.0\"  euler='0 0 0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall3\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"50 -4 1.0\"  euler='0 0 -0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall4\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"50  4 1.0\"  euler='0 0 -0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall5\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"100 -4 1.0\"  euler='0 0 0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall6\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"100  4 1.0\"  euler='0 0 0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall7\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"150 -4 1.0\"  euler='0 0 0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall8\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"150  4 1.0\"  euler='0 0 0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall9\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"-50  4 1.0\"  euler='0 0 0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall10\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"-50  -4 1.0\"  euler='0 0 0'  rgba=\"1 0.5 0.5 1\"/>\n\n    <body name=\"obj1\" pos=\"-39 0 .7\">\n        <geom conaffinity=\"1\" condim=\"3\" name=\"obj_geom\" pos='5 0 .7' density=\"0.0001\" type=\"box\" size=\".1 2.3 1.3\"  rgba=\"1 0.5 0.5 .8\"/>\n          <!--<joint axis=\"1 0 0\" damping=\".2\" name=\"wall_joint\" pos=\"5 0 .7\" range=\"-10000 10000\" stiffness=\".0\" type=\"slide\"/>-->\n          <joint axis=\"1 0 0\" damping=\".2\" name=\"wall_joint\" pos=\"2 0 .7\" range=\"-30 30\" stiffness=\".0\" type=\"slide\"/>\n          <!--<joint axis=\"1 0 0\" damping=\".2\" name=\"wall_joint1\" pos=\"2 0 .7\" range=\"10 20\" stiffness=\".0\" type=\"slide\"/>-->\n    </body>\n\n<!--    <body name=\"obj2\" pos=\"5 0 .7\">-->\n<!--        <geom conaffinity=\"1\" condim=\"3\" name=\"obj_geom1\" pos='5 0 .7' density=\"0.0001\" type=\"box\" size=\".1 2.3 1.3\"  rgba=\"1 0.5 0.5 .8\"/>-->\n<!--          &lt;!&ndash;<joint axis=\"1 0 0\" damping=\".2\" name=\"wall_joint\" pos=\"5 0 .7\" range=\"-10000 10000\" stiffness=\".0\" type=\"slide\"/>&ndash;&gt;-->\n<!--          <joint axis=\"1 0 0\" damping=\".2\" name=\"wall_joint1\" pos=\"2 0 .7\" range=\"-10000 10000\" stiffness=\".0\" type=\"slide\"/>-->\n<!--          &lt;!&ndash;<joint axis=\"1 0 0\" damping=\".2\" name=\"wall_joint1\" pos=\"2 0 .7\" range=\"10 20\" stiffness=\".0\" type=\"slide\"/>&ndash;&gt;-->\n<!--    </body>-->\nwallpos1\n    <!--<geom conaffinity=\"1\" condim=\"3\" material=\"MatPlane\" name=\"floor\" pos=\"0 0 0\" rgba=\"0.8 0.9 0.8 1\" size=\"40 40 40\" type=\"plane\"/>-->\n    <body name=\"torso\" pos=\"0 -1 .7\">\n      <site name=\"t1\" pos=\"0.0 0 0\" size=\"0.1\"/>\n      <camera name=\"track\" mode=\"trackcom\" pos=\"0 -3 0.3\" xyaxes=\"1 0 0 0 0 1\"/>\n      <joint armature=\"0\" axis=\"1 0 0\" damping=\"0\" limited=\"false\" name=\"rootx\" pos=\"0 0 0\" stiffness=\"0\" type=\"slide\"/>\n      <joint armature=\"0\" axis=\"0 0 1\" damping=\"0\" limited=\"false\" name=\"rootz\" pos=\"0 0 0\" stiffness=\"0\" type=\"slide\"/>\n      <joint armature=\"0\" axis=\"0 1 0\" damping=\"0\" limited=\"false\" name=\"rooty\" pos=\"0 0 0\" stiffness=\"0\" type=\"hinge\"/>\n      <geom fromto=\"-.5 0 0 .5 0 0\" name=\"torso\" size=\"0.046\" type=\"capsule\"/>\n      <geom axisangle=\"0 1 0 .87\" name=\"head\" pos=\".6 0 .1\" size=\"0.046 .15\" type=\"capsule\"/>\n      <!-- <site name='tip'  pos='.15 0 .11'/>-->\n      <body name=\"bthigh\" pos=\"-.5 0 0\">\n        <joint axis=\"0 1 0\" damping=\"6\" name=\"bthigh\" pos=\"0 0 0\" range=\"-.52 1.05\" stiffness=\"240\" type=\"hinge\"/>\n        <geom axisangle=\"0 1 0 -3.8\" name=\"bthigh\" pos=\".1 0 -.13\" size=\"0.046 .145\" type=\"capsule\"/>\n        <body name=\"bshin\" pos=\".16 0 -.25\">\n          <joint axis=\"0 1 0\" damping=\"4.5\" name=\"bshin\" pos=\"0 0 0\" range=\"-.785 .785\" stiffness=\"180\" type=\"hinge\"/>\n          <geom axisangle=\"0 1 0 -2.03\" name=\"bshin\" pos=\"-.14 0 -.07\" rgba=\"0.9 0.6 0.6 1\" size=\"0.046 .15\" type=\"capsule\"/>\n          <body name=\"bfoot\" pos=\"-.28 0 -.14\">\n            <joint axis=\"0 1 0\" damping=\"3\" name=\"bfoot\" pos=\"0 0 0\" range=\"-.4 .785\" stiffness=\"120\" type=\"hinge\"/>\n            <geom axisangle=\"0 1 0 -.27\" name=\"bfoot\" pos=\".03 0 -.097\" rgba=\"0.9 0.6 0.6 1\" size=\"0.046 .094\" type=\"capsule\"/>\n          </body>\n        </body>\n      </body>\n      <body name=\"fthigh\" pos=\".5 0 0\">\n        <joint axis=\"0 1 0\" damping=\"4.5\" name=\"fthigh\" pos=\"0 0 0\" range=\"-1 .7\" stiffness=\"180\" type=\"hinge\"/>\n        <geom axisangle=\"0 1 0 .52\" name=\"fthigh\" pos=\"-.07 0 -.12\" size=\"0.046 .133\" type=\"capsule\"/>\n        <body name=\"fshin\" pos=\"-.14 0 -.24\">\n          <joint axis=\"0 1 0\" damping=\"3\" name=\"fshin\" pos=\"0 0 0\" range=\"-1.2 .87\" stiffness=\"120\" type=\"hinge\"/>\n          <geom axisangle=\"0 1 0 -.6\" name=\"fshin\" pos=\".065 0 -.09\" rgba=\"0.9 0.6 0.6 1\" size=\"0.046 .106\" type=\"capsule\"/>\n          <body name=\"ffoot\" pos=\".13 0 -.18\">\n            <joint axis=\"0 1 0\" damping=\"1.5\" name=\"ffoot\" pos=\"0 0 0\" range=\"-.5 .5\" stiffness=\"60\" type=\"hinge\"/>\n            <geom axisangle=\"0 1 0 -.6\" name=\"ffoot\" pos=\".045 0 -.07\" rgba=\"0.9 0.6 0.6 1\" size=\"0.046 .07\" type=\"capsule\"/>\n          </body>\n        </body>\n      </body>\n    </body>\n    <!-- second cheetah definition -->\n    <body name=\"torso2\" pos=\"0 1 .7\">\n      <site name=\"t2\" pos=\"0 0 0\" size=\"0.1\"/>\n      <camera name=\"track2\" mode=\"trackcom\" pos=\"0 -3 0.3\" xyaxes=\"1 0 0 0 0 1\"/>\n      <joint armature=\"0\" axis=\"1 0 0\" damping=\"0\" limited=\"false\" name=\"rootx2\" pos=\"0 0 0\" stiffness=\"0\" type=\"slide\"/>\n      <joint armature=\"0\" axis=\"0 0 1\" damping=\"0\" limited=\"false\" name=\"rootz2\" pos=\"0 0 0\" stiffness=\"0\" type=\"slide\"/>\n      <joint armature=\"0\" axis=\"0 1 0\" damping=\"0\" limited=\"false\" name=\"rooty2\" pos=\"0 0 0\" stiffness=\"0\" type=\"hinge\"/>\n      <geom fromto=\"-.5 0 0 .5 0 0\" name=\"torso2\" size=\"0.046\" type=\"capsule\"/>\n      <geom axisangle=\"0 1 0 .87\" name=\"head2\" pos=\".6 0 .1\" size=\"0.046 .15\" type=\"capsule\"/>\n      <!-- <site name='tip'  pos='.15 0 .11'/>-->\n      <body name=\"bthigh2\" pos=\"-.5 0 0\">\n        <joint axis=\"0 1 0\" damping=\"6\" name=\"bthigh2\" pos=\"0 0 0\" range=\"-.52 1.05\" stiffness=\"240\" type=\"hinge\"/>\n        <geom axisangle=\"0 1 0 -3.8\" name=\"bthigh2\" pos=\".1 0 -.13\" size=\"0.046 .145\" type=\"capsule\"/>\n        <body name=\"bshin2\" pos=\".16 0 -.25\">\n          <joint axis=\"0 1 0\" damping=\"4.5\" name=\"bshin2\" pos=\"0 0 0\" range=\"-.785 .785\" stiffness=\"180\" type=\"hinge\"/>\n          <geom axisangle=\"0 1 0 -2.03\" name=\"bshin2\" pos=\"-.14 0 -.07\" rgba=\"0.9 0.6 0.6 1\" size=\"0.046 .15\" type=\"capsule\"/>\n          <body name=\"bfoot2\" pos=\"-.28 0 -.14\">\n            <joint axis=\"0 1 0\" damping=\"3\" name=\"bfoot2\" pos=\"0 0 0\" range=\"-.4 .785\" stiffness=\"120\" type=\"hinge\"/>\n            <geom axisangle=\"0 1 0 -.27\" name=\"bfoot2\" pos=\".03 0 -.097\" rgba=\"0.9 0.6 0.6 1\" size=\"0.046 .094\" type=\"capsule\"/>\n          </body>\n        </body>\n      </body>\n      <body name=\"fthigh2\" pos=\".5 0 0\">\n        <joint axis=\"0 1 0\" damping=\"4.5\" name=\"fthigh2\" pos=\"0 0 0\" range=\"-1 .7\" stiffness=\"180\" type=\"hinge\"/>\n        <geom axisangle=\"0 1 0 .52\" name=\"fthigh2\" pos=\"-.07 0 -.12\" size=\"0.046 .133\" type=\"capsule\"/>\n        <body name=\"fshin2\" pos=\"-.14 0 -.24\">\n          <joint axis=\"0 1 0\" damping=\"3\" name=\"fshin2\" pos=\"0 0 0\" range=\"-1.2 .87\" stiffness=\"120\" type=\"hinge\"/>\n          <geom axisangle=\"0 1 0 -.6\" name=\"fshin2\" pos=\".065 0 -.09\" rgba=\"0.9 0.6 0.6 1\" size=\"0.046 .106\" type=\"capsule\"/>\n          <body name=\"ffoot2\" pos=\".13 0 -.18\">\n            <joint axis=\"0 1 0\" damping=\"1.5\" name=\"ffoot2\" pos=\"0 0 0\" range=\"-.5 .5\" stiffness=\"60\" type=\"hinge\"/>\n            <geom axisangle=\"0 1 0 -.6\" name=\"ffoot2\" pos=\".045 0 -.07\" rgba=\"0.9 0.6 0.6 1\" size=\"0.046 .07\" type=\"capsule\"/>\n          </body>\n        </body>\n      </body>\n    </body>\n\n  </worldbody>\n  <tendon>\n    <spatial name=\"tendon1\" width=\"0.05\" rgba=\".95 .3 .3 1\" limited=\"true\" range=\"1.5 3.5\" stiffness=\"0.1\">\n        <site site=\"t1\"/>\n        <site site=\"t2\"/>\n    </spatial>\n  </tendon>-\n  <actuator>\n    <motor gear=\"120\" joint=\"bthigh\" name=\"bthigh\"/>\n    <motor gear=\"90\" joint=\"bshin\" name=\"bshin\"/>\n    <motor gear=\"60\" joint=\"bfoot\" name=\"bfoot\"/>\n    <motor gear=\"120\" joint=\"fthigh\" name=\"fthigh\"/>\n    <motor gear=\"60\" joint=\"fshin\" name=\"fshin\"/>\n    <motor gear=\"30\" joint=\"ffoot\" name=\"ffoot\"/>\n    <motor gear=\"120\" joint=\"bthigh2\" name=\"bthigh2\"/>\n    <motor gear=\"90\" joint=\"bshin2\" name=\"bshin2\"/>\n    <motor gear=\"60\" joint=\"bfoot2\" name=\"bfoot2\"/>\n    <motor gear=\"120\" joint=\"fthigh2\" name=\"fthigh2\"/>\n    <motor gear=\"60\" joint=\"fshin2\" name=\"fshin2\"/>\n    <motor gear=\"30\" joint=\"ffoot2\" name=\"ffoot2\"/>\n    <motor gear=\"120\" joint=\"wall_joint\" name=\"wall_joint_ac\"/>\n    <!--<motor gear=\"120\" joint=\"wall_joint1\" name=\"wall_joint_ac1\"/>-->\n  </actuator>\n</mujoco>"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/half_cheetah.xml",
    "content": "<!-- Cheetah Model\n    The state space is populated with joints in the order that they are\n    defined in this file. The actuators also operate on joints.\n    State-Space (name/joint/parameter):\n        - rootx     slider      position (m)\n        - rootz     slider      position (m)\n        - rooty     hinge       angle (rad)\n        - bthigh    hinge       angle (rad)\n        - bshin     hinge       angle (rad)\n        - bfoot     hinge       angle (rad)\n        - fthigh    hinge       angle (rad)\n        - fshin     hinge       angle (rad)\n        - ffoot     hinge       angle (rad)\n        - rootx     slider      velocity (m/s)\n        - rootz     slider      velocity (m/s)\n        - rooty     hinge       angular velocity (rad/s)\n        - bthigh    hinge       angular velocity (rad/s)\n        - bshin     hinge       angular velocity (rad/s)\n        - bfoot     hinge       angular velocity (rad/s)\n        - fthigh    hinge       angular velocity (rad/s)\n        - fshin     hinge       angular velocity (rad/s)\n        - ffoot     hinge       angular velocity (rad/s)\n    Actuators (name/actuator/parameter):\n        - bthigh    hinge       torque (N m)\n        - bshin     hinge       torque (N m)\n        - bfoot     hinge       torque (N m)\n        - fthigh    hinge       torque (N m)\n        - fshin     hinge       torque (N m)\n        - ffoot     hinge       torque (N m)\n-->\n<mujoco model=\"cheetah\">\n  <compiler angle=\"radian\" coordinate=\"local\" inertiafromgeom=\"true\" settotalmass=\"14\"/>\n  <default>\n    <joint armature=\".1\" damping=\".01\" limited=\"true\" solimplimit=\"0 .8 .03\" solreflimit=\".02 1\" stiffness=\"8\"/>\n    <geom conaffinity=\"0\" condim=\"3\" contype=\"1\" friction=\".4 .1 .1\" rgba=\"0.8 0.6 .4 1\" solimp=\"0.0 0.8 0.01\" solref=\"0.02 1\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1 1\"/>\n  </default>\n  <size nstack=\"300000\" nuser_geom=\"1\"/>\n  <option gravity=\"0 0 -9.81\" timestep=\"0.01\"/>\n  <asset>\n    <texture builtin=\"gradient\" height=\"100\" rgb1=\"1 1 1\" rgb2=\"0 0 0\" type=\"skybox\" width=\"100\"/>\n    <texture builtin=\"flat\" height=\"1278\" mark=\"cross\" markrgb=\"1 1 1\" name=\"texgeom\" random=\"0.01\" rgb1=\"0.8 0.6 0.4\" rgb2=\"0.8 0.6 0.4\" type=\"cube\" width=\"127\"/>\n    <texture builtin=\"checker\" height=\"100\" name=\"texplane\" rgb1=\"#2c5987\" rgb2=\"#1f4060\" type=\"2d\" width=\"100\"/>\n    <material name=\"MatPlane\" reflectance=\"0.5\" shininess=\"1\" specular=\"1\" texrepeat=\"60 60\" texture=\"texplane\"/>\n    <material name=\"geom\" texture=\"texgeom\" texuniform=\"true\"/>\n  </asset>\n  <worldbody>\n    <light cutoff=\"100\" diffuse=\"1 1 1\" dir=\"-0 0 -1.3\" directional=\"true\" exponent=\"1\" pos=\"0 0 1.3\" specular=\".1 .1 .1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" material=\"MatPlane\" name=\"floor\" pos=\"65 0 0\" rgba=\"0.2 0.2 0.2 1\" size=\"150 40 40\" type=\"plane\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall1\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"0  -4 1.0\"  euler='0 0 0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall2\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"0   4 1.0\"  euler='0 0 0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall3\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"50 -4 1.0\"  euler='0 0 -0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall4\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"50  4 1.0\"  euler='0 0 -0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall5\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"100 -4 1.0\"  euler='0 0 0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall6\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"100  4 1.0\"  euler='0 0 0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall7\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"150 -4 1.0\"  euler='0 0 0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall8\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"150  4 1.0\"  euler='0 0 0'  rgba=\"1 0.5 0.5 1\"/>\n\n<!--    <body name=\"obj1\" pos=\"5 0 .7\">-->\n<!--        <geom conaffinity=\"1\" condim=\"3\" name=\"obj_geom\" pos='5 0 .7' density=\"0.0001\" type=\"box\" size=\".1 2.3 1.3\"  rgba=\"1 0.5 0.5 .8\"/>-->\n<!--          <joint axis=\"1 0 0\" damping=\".2\" name=\"wall_joint\" pos=\"5 0 .7\" range=\"-10000 10000\" stiffness=\".0\" type=\"slide\"/>-->\n<!--    </body>-->\n<!--    <body name=\"obj1\" pos=\"-39 0 .7\">-->\n<!--        <geom conaffinity=\"1\" condim=\"3\" name=\"obj_geom\" pos='5 0 .7' density=\"0.0001\" type=\"box\" size=\".1 2.3 1.3\"  rgba=\"1 0.5 0.5 .8\"/>-->\n<!--          <joint axis=\"1 0 0\" damping=\".2\" name=\"wall_joint\" pos=\"2 0 .7\" range=\"-30 30\" stiffness=\".0\" type=\"slide\"/>-->\n<!--    </body>-->\n    <body name=\"obj1\" pos=\"5 0 .7\">\n        <geom conaffinity=\"1\" condim=\"3\" name=\"obj_geom\" pos='5 0 .7' density=\"0.0001\" type=\"box\" size=\".1 2.3 1.3\"  rgba=\"1 0.5 0.5 .8\"/>\n          <joint axis=\"1 0 0\" damping=\".2\" name=\"wall_joint\" pos=\"5 0 .7\" range=\"-5000 5000\" stiffness=\".0\" type=\"slide\"/>\n    </body>\n\n    <body name=\"torso\" pos=\"0 0 .7\">\n      <camera name=\"track\" mode=\"trackcom\" pos=\"0 -3 0.3\" xyaxes=\"1 0 0 0 0 1\"/>\n      <joint armature=\"0\" axis=\"1 0 0\" damping=\"0\" limited=\"false\" name=\"rootx\" pos=\"0 0 0\" stiffness=\"0\" type=\"slide\"/>\n      <joint armature=\"0\" axis=\"0 0 1\" damping=\"0\" limited=\"false\" name=\"rootz\" pos=\"0 0 0\" stiffness=\"0\" type=\"slide\"/>\n      <joint armature=\"0\" axis=\"0 1 0\" damping=\"0\" limited=\"false\" name=\"rooty\" pos=\"0 0 0\" stiffness=\"0\" type=\"hinge\"/>\n      <geom fromto=\"-.5 0 0 .5 0 0\" name=\"torso\" size=\"0.046\" type=\"capsule\"/>\n      <geom axisangle=\"0 1 0 .87\" name=\"head\" pos=\".6 0 .1\" size=\"0.046 .15\" type=\"capsule\"/>\n      <!-- <site name='tip'  pos='.15 0 .11'/>-->\n      <body name=\"bthigh\" pos=\"-.5 0 0\">\n        <joint axis=\"0 1 0\" damping=\"6\" name=\"bthigh\" pos=\"0 0 0\" range=\"-.52 1.05\" stiffness=\"240\" type=\"hinge\"/>\n        <geom axisangle=\"0 1 0 -3.8\" name=\"bthigh\" pos=\".1 0 -.13\" size=\"0.046 .145\" type=\"capsule\"/>\n        <body name=\"bshin\" pos=\".16 0 -.25\">\n          <joint axis=\"0 1 0\" damping=\"4.5\" name=\"bshin\" pos=\"0 0 0\" range=\"-.785 .785\" stiffness=\"180\" type=\"hinge\"/>\n          <geom axisangle=\"0 1 0 -2.03\" name=\"bshin\" pos=\"-.14 0 -.07\" rgba=\"0.9 0.6 0.6 1\" size=\"0.046 .15\" type=\"capsule\"/>\n          <body name=\"bfoot\" pos=\"-.28 0 -.14\">\n            <joint axis=\"0 1 0\" damping=\"3\" name=\"bfoot\" pos=\"0 0 0\" range=\"-.4 .785\" stiffness=\"120\" type=\"hinge\"/>\n            <geom axisangle=\"0 1 0 -.27\" name=\"bfoot\" pos=\".03 0 -.097\" rgba=\"0.9 0.6 0.6 1\" size=\"0.046 .094\" type=\"capsule\"/>\n          </body>\n        </body>\n      </body>\n      <body name=\"fthigh\" pos=\".5 0 0\">\n        <joint axis=\"0 1 0\" damping=\"4.5\" name=\"fthigh\" pos=\"0 0 0\" range=\"-1 .7\" stiffness=\"180\" type=\"hinge\"/>\n        <geom axisangle=\"0 1 0 .52\" name=\"fthigh\" pos=\"-.07 0 -.12\" size=\"0.046 .133\" type=\"capsule\"/>\n        <body name=\"fshin\" pos=\"-.14 0 -.24\">\n          <joint axis=\"0 1 0\" damping=\"3\" name=\"fshin\" pos=\"0 0 0\" range=\"-1.2 .87\" stiffness=\"120\" type=\"hinge\"/>\n          <geom axisangle=\"0 1 0 -.6\" name=\"fshin\" pos=\".065 0 -.09\" rgba=\"0.9 0.6 0.6 1\" size=\"0.046 .106\" type=\"capsule\"/>\n          <body name=\"ffoot\" pos=\".13 0 -.18\">\n            <joint axis=\"0 1 0\" damping=\"1.5\" name=\"ffoot\" pos=\"0 0 0\" range=\"-.5 .5\" stiffness=\"60\" type=\"hinge\"/>\n            <geom axisangle=\"0 1 0 -.6\" name=\"ffoot\" pos=\".045 0 -.07\" rgba=\"0.9 0.6 0.6 1\" size=\"0.046 .07\" type=\"capsule\"/>\n          </body>\n        </body>\n      </body>\n    </body>\n  </worldbody>\n  <!-- <equality>\n    <weld name=\"weld1\" body1=\"mocap1\" body2=\"obj1\" solref=\".02 2.5\"/>\n  </equality> -->\n  <actuator>\n    <motor gear=\"120\" joint=\"bthigh\" name=\"bthigh\"/>\n    <motor gear=\"90\" joint=\"bshin\" name=\"bshin\"/>\n    <motor gear=\"60\" joint=\"bfoot\" name=\"bfoot\"/>\n    <motor gear=\"120\" joint=\"fthigh\" name=\"fthigh\"/>\n    <motor gear=\"60\" joint=\"fshin\" name=\"fshin\"/>\n    <motor gear=\"30\" joint=\"ffoot\" name=\"ffoot\"/>\n    <motor gear=\"120\" joint=\"wall_joint\" name=\"wall_joint_ac\"/>\n  </actuator>\n</mujoco>"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/hopper.xml",
    "content": "<mujoco model=\"hopper\">\n  <compiler angle=\"degree\" coordinate=\"global\" inertiafromgeom=\"true\"/>\n  <default>\n    <joint armature=\"1\" damping=\"1\" limited=\"true\"/>\n    <geom conaffinity=\"1\" condim=\"1\" contype=\"1\" margin=\"0.001\" material=\"geom\" rgba=\"0.8 0.6 .4 1\" solimp=\".8 .8 .01\" solref=\".02 1\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-.4 .4\"/>\n  </default>\n  <option integrator=\"RK4\" timestep=\"0.002\"/>\n  <visual>\n    <map znear=\"0.02\"/>\n  </visual>\n  <worldbody>\n    <light cutoff=\"100\" diffuse=\"1 1 1\" dir=\"-0 0 -1.3\" directional=\"true\" exponent=\"1\" pos=\"0 0 1.3\" specular=\".1 .1 .1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"floor\" pos=\"40 0 0\" rgba=\"0.2 0.2 0.2 1\" size=\"100 25 .125\" type=\"plane\" material=\"MatPlane\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall1\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"0  -4 1.0\"  euler='0 0 0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall2\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"0   4 1.0\"  euler='0 0 0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall3\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"50 -4 1.0\"  euler='0 0 -0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall4\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"50  4 1.0\"  euler='0 0 -0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall5\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"100 -4 1.0\"  euler='0 0 0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall6\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"100  4 1.0\"  euler='0 0 0'  rgba=\"1 0.5 0.5 1\"/>\n    <body name=\"mocap1\" pos=\"5 0 .5\" mocap=\"true\">\n        <geom conaffinity=\"0\" condim=\"3\" name=\"mocap_geom\" pos='5 0 .5' type=\"box\" size=\".3 0.3 0.3\"  rgba=\"1 0.5 0.5 0\"/>\n    </body>\n    <body name=\"obj1\" pos=\"5 0 .5\">\n        <freejoint name=\"obj1_fj\"/>\n        <geom conaffinity=\"1\" condim=\"3\" name=\"obj_geom\" pos='5 0 .5' type=\"box\" size=\".3 0.3 0.3\"  rgba=\"1 0.5 0.5 1\"/>\n    </body>\n    <body name=\"torso\" pos=\"0 1 1.25\">\n      <camera name=\"track\" mode=\"trackcom\" pos=\"0 -3 1\" xyaxes=\"1 0 0 0 0 1\"/>\n      <joint armature=\"0\" axis=\"1 0 0\" damping=\"0\" limited=\"false\" name=\"rootx\" pos=\"0 0 0\" stiffness=\"0\" type=\"slide\"/>\n      <joint armature=\"0\" axis=\"0 0 1\" damping=\"0\" limited=\"false\" name=\"rootz\" pos=\"0 0 0\" ref=\"1.25\" stiffness=\"0\" type=\"slide\"/>\n      <joint armature=\"0\" axis=\"0 1 0\" damping=\"0\" limited=\"false\" name=\"rooty\" pos=\"0 0 1.25\" stiffness=\"0\" type=\"hinge\"/>\n      <geom friction=\"0.9\" fromto=\"0 0 1.45 0 0 1.05\" name=\"torso_geom\" size=\"0.05\" type=\"capsule\"/>\n      <body name=\"thigh\" pos=\"0 0 1.05\">\n        <joint axis=\"0 -1 0\" name=\"thigh_joint\" pos=\"0 0 1.05\" range=\"-150 0\" type=\"hinge\"/>\n        <geom friction=\"0.9\" fromto=\"0 0 1.05 0 0 0.6\" name=\"thigh_geom\" size=\"0.05\" type=\"capsule\"/>\n        <body name=\"leg\" pos=\"0 0 0.35\">\n          <joint axis=\"0 -1 0\" name=\"leg_joint\" pos=\"0 0 0.6\" range=\"-150 0\" type=\"hinge\"/>\n          <geom friction=\"0.9\" fromto=\"0 0 0.6 0 0 0.1\" name=\"leg_geom\" size=\"0.04\" type=\"capsule\"/>\n          <body name=\"foot\" pos=\"0.13/2 0 0.1\">\n            <joint axis=\"0 -1 0\" name=\"foot_joint\" pos=\"0 0 0.1\" range=\"-45 45\" type=\"hinge\"/>\n            <geom friction=\"2.0\" fromto=\"-0.13 0 0.1 0.26 0 0.1\" name=\"foot_geom\" size=\"0.06\" type=\"capsule\"/>\n          </body>\n        </body>\n      </body>\n    </body>\n  </worldbody>\n  <equality>\n    <weld name=\"weld1\" body1=\"mocap1\" body2=\"obj1\" solref=\".02 .5\"/>\n  </equality>\n  <actuator>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" gear=\"200.0\" joint=\"thigh_joint\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" gear=\"200.0\" joint=\"leg_joint\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" gear=\"200.0\" joint=\"foot_joint\"/>\n  </actuator>\n    <asset>\n        <texture type=\"skybox\" builtin=\"gradient\" rgb1=\".4 .5 .6\" rgb2=\"0 0 0\"\n            width=\"100\" height=\"100\"/>\n        <texture builtin=\"flat\" height=\"1278\" mark=\"cross\" markrgb=\"1 1 1\" name=\"texgeom\" random=\"0.01\" rgb1=\"0.8 0.6 0.4\" rgb2=\"0.8 0.6 0.4\" type=\"cube\" width=\"127\"/>\n        <texture builtin=\"checker\" height=\"100\" name=\"texplane\" rgb1=\"#2c5987\" rgb2=\"#1f4060\" type=\"2d\" width=\"100\"/>\n        <material name=\"MatPlane\" reflectance=\"0.5\" shininess=\"1\" specular=\"1\" texrepeat=\"60 60\" texture=\"texplane\"/>\n        <material name=\"geom\" texture=\"texgeom\" texuniform=\"true\"/>\n    </asset>\n</mujoco>"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/humanoid.xml",
    "content": "<mujoco model=\"humanoid\">\n    <compiler angle=\"degree\" inertiafromgeom=\"true\"/>\n    <default>\n        <joint armature=\"1\" damping=\"1\" limited=\"true\"/>\n        <geom conaffinity=\"1\" condim=\"1\" contype=\"1\" margin=\"0.001\" material=\"geom\" rgba=\"0.8 0.6 .4 1\"/>\n        <motor ctrllimited=\"true\" ctrlrange=\"-.4 .4\"/>\n    </default>\n    <option integrator=\"RK4\" iterations=\"50\" solver=\"PGS\" timestep=\"0.003\">\n        <!-- <flags solverstat=\"enable\" energy=\"enable\"/>-->\n    </option>\n    <size nkey=\"5\" nuser_geom=\"1\"/>\n    <visual>\n        <map fogend=\"5\" fogstart=\"3\"/>\n    </visual>\n    <asset>\n        <texture builtin=\"gradient\" height=\"100\" rgb1=\".4 .5 .6\" rgb2=\"0 0 0\" type=\"skybox\" width=\"100\"/>\n        <!-- <texture builtin=\"gradient\" height=\"100\" rgb1=\"1 1 1\" rgb2=\"0 0 0\" type=\"skybox\" width=\"100\"/>-->\n        <texture builtin=\"flat\" height=\"1278\" mark=\"cross\" markrgb=\"1 1 1\" name=\"texgeom\" random=\"0.01\" rgb1=\"0.8 0.6 0.4\" rgb2=\"0.8 0.6 0.4\" type=\"cube\" width=\"127\"/>\n        <material name=\"geom\" texture=\"texgeom\" texuniform=\"true\"/>\n\n        <texture builtin=\"checker\" height=\"100\" name=\"texplane\" rgb1=\"#2c5987\" rgb2=\"#1f4060\" type=\"2d\" width=\"100\"/>\n        <material name=\"MatPlane\" reflectance=\"0.5\" shininess=\"1\" specular=\"1\" texrepeat=\"60 60\" texture=\"texplane\"/>\n\n    </asset>\n    <worldbody>\n        <light cutoff=\"100\" diffuse=\"1 1 1\" dir=\"-0 0 -1.3\" directional=\"true\" exponent=\"1\" pos=\"0 0 1.3\" specular=\".1 .1 .1\"/>\n        <geom condim=\"3\" friction=\"1 .1 .1\" material=\"MatPlane\" name=\"floor\" pos=\"30 0 0\" rgba=\"0.2 0.2 0.2 1\" size=\"70 25 0.125\" type=\"plane\"/>\n\n        <!-- <geom condim=\"3\" material=\"MatPlane\" name=\"floor\" pos=\"0 0 0\" size=\"10 10 0.125\" type=\"plane\"/>-->\n        <body name=\"torso\" pos=\"0 0 1.4\">\n            <camera name=\"track\" mode=\"trackcom\" pos=\"0 -4 0\" xyaxes=\"1 0 0 0 0 1\"/>\n            <joint armature=\"0\" damping=\"0\" limited=\"false\" name=\"root\" pos=\"0 0 0\" stiffness=\"0\" type=\"free\"/>\n            <geom fromto=\"0 -.07 0 0 .07 0\" name=\"torso1\" size=\"0.07\" type=\"capsule\"/>\n            <geom name=\"head\" pos=\"0 0 .19\" size=\".09\" type=\"sphere\" user=\"258\"/>\n            <geom fromto=\"-.01 -.06 -.12 -.01 .06 -.12\" name=\"uwaist\" size=\"0.06\" type=\"capsule\"/>\n            <body name=\"lwaist\" pos=\"-.01 0 -0.260\" quat=\"1.000 0 -0.002 0\">\n                <geom fromto=\"0 -.06 0 0 .06 0\" name=\"lwaist\" size=\"0.06\" type=\"capsule\"/>\n                <joint armature=\"0.02\" axis=\"0 0 1\" damping=\"5\" name=\"abdomen_z\" pos=\"0 0 0.065\" range=\"-45 45\" stiffness=\"20\" type=\"hinge\"/>\n                <joint armature=\"0.02\" axis=\"0 1 0\" damping=\"5\" name=\"abdomen_y\" pos=\"0 0 0.065\" range=\"-75 30\" stiffness=\"10\" type=\"hinge\"/>\n                <body name=\"pelvis\" pos=\"0 0 -0.165\" quat=\"1.000 0 -0.002 0\">\n                    <joint armature=\"0.02\" axis=\"1 0 0\" damping=\"5\" name=\"abdomen_x\" pos=\"0 0 0.1\" range=\"-35 35\" stiffness=\"10\" type=\"hinge\"/>\n                    <geom fromto=\"-.02 -.07 0 -.02 .07 0\" name=\"butt\" size=\"0.09\" type=\"capsule\"/>\n                    <body name=\"right_thigh\" pos=\"0 -0.1 -0.04\">\n                        <joint armature=\"0.01\" axis=\"1 0 0\" damping=\"5\" name=\"right_hip_x\" pos=\"0 0 0\" range=\"-25 5\" stiffness=\"10\" type=\"hinge\"/>\n                        <joint armature=\"0.01\" axis=\"0 0 1\" damping=\"5\" name=\"right_hip_z\" pos=\"0 0 0\" range=\"-60 35\" stiffness=\"10\" type=\"hinge\"/>\n                        <joint armature=\"0.0080\" axis=\"0 1 0\" damping=\"5\" name=\"right_hip_y\" pos=\"0 0 0\" range=\"-110 20\" stiffness=\"20\" type=\"hinge\"/>\n                        <geom fromto=\"0 0 0 0 0.01 -.34\" name=\"right_thigh1\" size=\"0.06\" type=\"capsule\"/>\n                        <body name=\"right_shin\" pos=\"0 0.01 -0.403\">\n                            <joint armature=\"0.0060\" axis=\"0 -1 0\" name=\"right_knee\" pos=\"0 0 .02\" range=\"-160 -2\" type=\"hinge\"/>\n                            <geom fromto=\"0 0 0 0 0 -.3\" name=\"right_shin1\" size=\"0.049\" type=\"capsule\"/>\n                            <body name=\"right_foot\" pos=\"0 0 -0.45\">\n                                <geom name=\"right_foot\" pos=\"0 0 0.1\" size=\"0.075\" type=\"sphere\" user=\"0\"/>\n                            </body>\n                        </body>\n                    </body>\n                    <body name=\"left_thigh\" pos=\"0 0.1 -0.04\">\n                        <joint armature=\"0.01\" axis=\"-1 0 0\" damping=\"5\" name=\"left_hip_x\" pos=\"0 0 0\" range=\"-25 5\" stiffness=\"10\" type=\"hinge\"/>\n                        <joint armature=\"0.01\" axis=\"0 0 -1\" damping=\"5\" name=\"left_hip_z\" pos=\"0 0 0\" range=\"-60 35\" stiffness=\"10\" type=\"hinge\"/>\n                        <joint armature=\"0.01\" axis=\"0 1 0\" damping=\"5\" name=\"left_hip_y\" pos=\"0 0 0\" range=\"-110 20\" stiffness=\"20\" type=\"hinge\"/>\n                        <geom fromto=\"0 0 0 0 -0.01 -.34\" name=\"left_thigh1\" size=\"0.06\" type=\"capsule\"/>\n                        <body name=\"left_shin\" pos=\"0 -0.01 -0.403\">\n                            <joint armature=\"0.0060\" axis=\"0 -1 0\" name=\"left_knee\" pos=\"0 0 .02\" range=\"-160 -2\" stiffness=\"1\" type=\"hinge\"/>\n                            <geom fromto=\"0 0 0 0 0 -.3\" name=\"left_shin1\" size=\"0.049\" type=\"capsule\"/>\n                            <body name=\"left_foot\" pos=\"0 0 -0.45\">\n                                <geom name=\"left_foot\" type=\"sphere\" size=\"0.075\" pos=\"0 0 0.1\" user=\"0\" />\n                            </body>\n                        </body>\n                    </body>\n                </body>\n            </body>\n            <body name=\"right_upper_arm\" pos=\"0 -0.17 0.06\">\n                <joint armature=\"0.0068\" axis=\"2 1 1\" name=\"right_shoulder1\" pos=\"0 0 0\" range=\"-85 60\" stiffness=\"1\" type=\"hinge\"/>\n                <joint armature=\"0.0051\" axis=\"0 -1 1\" name=\"right_shoulder2\" pos=\"0 0 0\" range=\"-85 60\" stiffness=\"1\" type=\"hinge\"/>\n                <geom fromto=\"0 0 0 .16 -.16 -.16\" name=\"right_uarm1\" size=\"0.04 0.16\" type=\"capsule\"/>\n                <body name=\"right_lower_arm\" pos=\".18 -.18 -.18\">\n                    <joint armature=\"0.0028\" axis=\"0 -1 1\" name=\"right_elbow\" pos=\"0 0 0\" range=\"-90 50\" stiffness=\"0\" type=\"hinge\"/>\n                    <geom fromto=\"0.01 0.01 0.01 .17 .17 .17\" name=\"right_larm\" size=\"0.031\" type=\"capsule\"/>\n                    <geom name=\"right_hand\" pos=\".18 .18 .18\" size=\"0.04\" type=\"sphere\"/>\n                    <camera pos=\"0 0 0\"/>\n                </body>\n            </body>\n            <body name=\"left_upper_arm\" pos=\"0 0.17 0.06\">\n                <joint armature=\"0.0068\" axis=\"2 -1 1\" name=\"left_shoulder1\" pos=\"0 0 0\" range=\"-60 85\" stiffness=\"1\" type=\"hinge\"/>\n                <joint armature=\"0.0051\" axis=\"0 1 1\" name=\"left_shoulder2\" pos=\"0 0 0\" range=\"-60 85\" stiffness=\"1\" type=\"hinge\"/>\n                <geom fromto=\"0 0 0 .16 .16 -.16\" name=\"left_uarm1\" size=\"0.04 0.16\" type=\"capsule\"/>\n                <body name=\"left_lower_arm\" pos=\".18 .18 -.18\">\n                    <joint armature=\"0.0028\" axis=\"0 -1 -1\" name=\"left_elbow\" pos=\"0 0 0\" range=\"-90 50\" stiffness=\"0\" type=\"hinge\"/>\n                    <geom fromto=\"0.01 -0.01 0.01 .17 -.17 .17\" name=\"left_larm\" size=\"0.031\" type=\"capsule\"/>\n                    <geom name=\"left_hand\" pos=\".18 -.18 .18\" size=\"0.04\" type=\"sphere\"/>\n                </body>\n            </body>\n        </body>\n        <body name='b1' pos=\"0 2.3 1\" euler='0 0 30'>\n            <freejoint name=\"b1_fj\"/>\n            <geom conaffinity=\"1\" condim=\"3\" name=\"wall1\" type=\"box\" density=\".000001\" size=\"20 0.01 .7\"    rgba=\"1 0.5 0.5 1\"/>\n        </body>\n        <body name='b2' pos=\"0 -2.3 1\" euler='0 0 30'>\n            <freejoint name=\"b2_fj\"/>\n            <geom conaffinity=\"1\" condim=\"3\" name=\"wall2\" type=\"box\" density=\".000001\" size=\"20 0.01 .7\"    rgba=\"1 0.5 0.5 1\"/>\n        </body>\n        <body name='b3' pos=\"40 2.3 1\" euler='0 0 -30'>\n            <freejoint name=\"b3_fj\"/>\n            <geom conaffinity=\"1\" condim=\"3\" name=\"wall3\" type=\"box\" density=\".000001\" size=\"20 0.01 .7\"    rgba=\"1 0.5 0.5 1\"/>\n        </body>\n        <body name='b4' pos=\"40 -2.3 1\" euler='0 0 -30'>\n            <freejoint name=\"b4_fj\"/>\n            <geom conaffinity=\"1\" condim=\"3\" name=\"wall4\" type=\"box\" density=\".000001\" size=\"20 0.01 .7\"    rgba=\"1 0.5 0.5 1\"/>\n        </body>\n        <body name='b5' pos=\"80 2.3 1\" euler='0 0 30'>\n            <freejoint name=\"b5_fj\"/>\n            <geom conaffinity=\"1\" condim=\"3\" name=\"wall5\" type=\"box\" density=\".000001\" size=\"20 0.01 .7\"    rgba=\"1 0.5 0.5 1\"/>\n        </body>\n        <body name='b6' pos=\"80 -2.3 1\" euler='0 0 30'>\n            <freejoint name=\"b6_fj\"/>\n            <geom conaffinity=\"1\" condim=\"3\" name=\"wall6\" type=\"box\" density=\".000001\" size=\"20 0.01 .7\"    rgba=\"1 0.5 0.5 1\"/>\n        </body>\n    </worldbody>\n    <tendon>\n        <fixed name=\"left_hipknee\">\n            <joint coef=\"-1\" joint=\"left_hip_y\"/>\n            <joint coef=\"1\" joint=\"left_knee\"/>\n        </fixed>\n        <fixed name=\"right_hipknee\">\n            <joint coef=\"-1\" joint=\"right_hip_y\"/>\n            <joint coef=\"1\" joint=\"right_knee\"/>\n        </fixed>\n    </tendon>\n\n    <actuator>\n        <motor gear=\"100\" joint=\"abdomen_y\" name=\"abdomen_y\"/>\n        <motor gear=\"100\" joint=\"abdomen_z\" name=\"abdomen_z\"/>\n        <motor gear=\"100\" joint=\"abdomen_x\" name=\"abdomen_x\"/>\n        <motor gear=\"100\" joint=\"right_hip_x\" name=\"right_hip_x\"/>\n        <motor gear=\"100\" joint=\"right_hip_z\" name=\"right_hip_z\"/>\n        <motor gear=\"300\" joint=\"right_hip_y\" name=\"right_hip_y\"/>\n        <motor gear=\"200\" joint=\"right_knee\" name=\"right_knee\"/>\n        <motor gear=\"100\" joint=\"left_hip_x\" name=\"left_hip_x\"/>\n        <motor gear=\"100\" joint=\"left_hip_z\" name=\"left_hip_z\"/>\n        <motor gear=\"300\" joint=\"left_hip_y\" name=\"left_hip_y\"/>\n        <motor gear=\"200\" joint=\"left_knee\" name=\"left_knee\"/>\n        <motor gear=\"25\" joint=\"right_shoulder1\" name=\"right_shoulder1\"/>\n        <motor gear=\"25\" joint=\"right_shoulder2\" name=\"right_shoulder2\"/>\n        <motor gear=\"25\" joint=\"right_elbow\" name=\"right_elbow\"/>\n        <motor gear=\"25\" joint=\"left_shoulder1\" name=\"left_shoulder1\"/>\n        <motor gear=\"25\" joint=\"left_shoulder2\" name=\"left_shoulder2\"/>\n        <motor gear=\"25\" joint=\"left_elbow\" name=\"left_elbow\"/>\n    </actuator>\n</mujoco>"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/manyagent_ant.xml",
    "content": "<mujoco model=\"ant\">\n  <size nconmax=\"200\"/>\n  <compiler angle=\"degree\" coordinate=\"local\" inertiafromgeom=\"true\"/>\n  <option integrator=\"RK4\" timestep=\"0.01\"/>\n  <custom>\n    <numeric data=\"0.0 0.0 0.55 1.0 0.0 0.0 0.0 0.0 1.0 0.0 -1.0 0.0 -1.0 0.0 1.0\" name=\"init_qpos\"/>\n  </custom>\n  <default>\n    <joint armature=\"1\" damping=\"1\" limited=\"true\"/>\n    <geom conaffinity=\"0\" condim=\"3\" density=\"5.0\" friction=\"1 0.5 0.5\" margin=\"0.01\" rgba=\"0.8 0.6 0.4 1\"/>\n  </default>\n  <asset>\n    <texture builtin=\"gradient\" height=\"100\" rgb1=\"1 1 1\" rgb2=\"0 0 0\" type=\"skybox\" width=\"100\"/>\n    <texture builtin=\"flat\" height=\"1278\" mark=\"cross\" markrgb=\"1 1 1\" name=\"texgeom\" random=\"0.01\" rgb1=\"0.8 0.6 0.4\" rgb2=\"0.8 0.6 0.4\" type=\"cube\" width=\"127\"/>\n    <texture builtin=\"checker\" height=\"100\" name=\"texplane\" rgb1=\"0 0 0\" rgb2=\"0.8 0.8 0.8\" type=\"2d\" width=\"100\"/>\n    <material name=\"MatPlane\" reflectance=\"0.5\" shininess=\"1\" specular=\"1\" texrepeat=\"60 60\" texture=\"texplane\"/>\n\n    <texture builtin=\"checker\" height=\"100\" name=\"texbox\" rgb1=\"#ff66ff\" rgb2=\"#ff66ff\" type=\"2d\" width=\"100\"/>\n    <material name=\"BoxMat\" reflectance=\"0.5\" shininess=\"1\" specular=\"1\" texrepeat=\"60 60\" texture=\"texbox\"/>\n\n\n    <material name=\"geom\" texture=\"texgeom\" texuniform=\"true\"/>\n  </asset>\n  <worldbody>\n    <light cutoff=\"100\" diffuse=\"1 1 1\" dir=\"-0 0 -1.3\" directional=\"true\" exponent=\"1\" pos=\"0 0 1.3\" specular=\".1 .1 .1\"/>\n<!--    <geom conaffinity=\"1\" condim=\"3\" material=\"MatPlane\" name=\"floor\" pos=\"0 0 0\" rgba=\"0.8 0.9 0.8 1\" size=\"40 40 40\" type=\"plane\"/>-->\n\n    <geom conaffinity=\"1\" condim=\"3\" material=\"MatPlane\" name=\"floor\" pos=\"30 0 0\" rgba=\"0.2 0.2 0.2 1\" size=\"70 25 40\" type=\"plane\"/>\n\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall1\" type=\"box\" material=\"BoxMat\" size=\"0.1 14 1.0\" pos=\"-14  0 1\"    rgba=\"#ff66ff\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall2\" type=\"box\" material=\"BoxMat\" size=\"14 .1 1.0\" pos=\"0  14 1\"    rgba=\"#ff66ff\"/>\n\n    <body name=\"torso\" pos=\"0 0 0.75\">\n<!--      <camera name=\"track\" mode=\"trackcom\" pos=\"0 -3 0.3\" xyaxes=\"1 0 0 0 0 1\"/>-->\n\n      <camera name=\"track\" mode=\"trackcom\" pos=\"0 -10 -10\" xyaxes=\".8 .4 0 0 .4 .6\"/>\n\n      <geom name=\"torso_geom\" pos=\"0 0 0\" size=\"0.25\" type=\"sphere\"/>\n      <joint armature=\"0\" damping=\"0\" limited=\"false\" margin=\"0.01\" name=\"root\" pos=\"0 0 0\" type=\"free\"/>\n      <body name=\"front_left_leg\" pos=\"0 0 0\">\n        <geom fromto=\"0.0 0.0 0.0 0.2 0.2 0.0\" name=\"aux_1_geom\" size=\"0.08\" type=\"capsule\"/>\n        <body name=\"aux_1\" pos=\"0.2 0.2 0\">\n          <joint axis=\"0 0 1\" name=\"hip_1\" pos=\"0.0 0.0 0.0\" range=\"-30 30\" type=\"hinge\"/>\n          <geom fromto=\"0.0 0.0 0.0 0.2 0.2 0.0\" name=\"left_leg_geom\" size=\"0.08\" type=\"capsule\"/>\n          <body pos=\"0.2 0.2 0\">\n            <joint axis=\"-1 1 0\" name=\"ankle_1\" pos=\"0.0 0.0 0.0\" range=\"30 70\" type=\"hinge\"/>\n            <geom fromto=\"0.0 0.0 0.0 0.4 0.4 0.0\" name=\"left_ankle_geom\" size=\"0.08\" type=\"capsule\"/>\n          </body>\n        </body>\n      </body>\n      <body name=\"right_back_leg\" pos=\"0 0 0\">\n        <geom fromto=\"0.0 0.0 0.0 0.2 -0.2 0.0\" name=\"aux_4_geom\" size=\"0.08\" type=\"capsule\"/>\n        <body name=\"aux_4\" pos=\"0.2 -0.2 0\">\n          <joint axis=\"0 0 1\" name=\"hip_4\" pos=\"0.0 0.0 0.0\" range=\"-30 30\" type=\"hinge\"/>\n          <geom fromto=\"0.0 0.0 0.0 0.2 -0.2 0.0\" name=\"rightback_leg_geom\" size=\"0.08\" type=\"capsule\"/>\n          <body pos=\"0.2 -0.2 0\">\n            <joint axis=\"1 1 0\" name=\"ankle_4\" pos=\"0.0 0.0 0.0\" range=\"30 70\" type=\"hinge\"/>\n            <geom fromto=\"0.0 0.0 0.0 0.4 -0.4 0.0\" name=\"fourth_ankle_geom\" size=\"0.08\" type=\"capsule\"/>\n          </body>\n        </body>\n      </body>\n      <body name=\"midx\" pos=\"0.0 0 0\">\n        <geom density=\"1000\" fromto=\"0 0 0 -1 0 0\" size=\"0.1\" type=\"capsule\"/>\n        <!--<joint axis=\"0 0 1\" limited=\"true\" name=\"rot2\" pos=\"0 0 0\" range=\"-100 100\" type=\"hinge\"/>-->\n        <body name=\"front_right_legx\" pos=\"-1 0 0\">\n          <geom fromto=\"0.0 0.0 0.0 0.0 0.2 0.0\" name=\"aux_2_geomx\" size=\"0.08\" type=\"capsule\"/>\n          <body name=\"aux_2x\" pos=\"0.0 0.2 0\">\n            <joint axis=\"0 0 1\" name=\"hip_2x\" pos=\"0.0 0.0 0.0\" range=\"-30 30\" type=\"hinge\"/>\n            <geom fromto=\"0.0 0.0 0.0 -0.2 0.2 0.0\" name=\"right_leg_geomx\" size=\"0.08\" type=\"capsule\"/>\n            <body pos=\"-0.2 0.2 0\">\n              <joint axis=\"1 1 0\" name=\"ankle_2x\" pos=\"0.0 0.0 0.0\" range=\"-70 -30\" type=\"hinge\"/>\n              <geom fromto=\"0.0 0.0 0.0 -0.4 0.4 0.0\" name=\"right_ankle_geomx\" size=\"0.08\" type=\"capsule\"/>\n            </body>\n          </body>\n        </body>\n        <body name=\"back_legx\" pos=\"-1 0 0\">\n          <geom fromto=\"0.0 0.0 0.0 0.0 -0.2 0.0\" name=\"aux_3_geomx\" size=\"0.08\" type=\"capsule\"/>\n          <body name=\"aux_3x\" pos=\"0.0 -0.2 0\">\n            <joint axis=\"0 0 1\" name=\"hip_3x\" pos=\"0.0 0.0 0.0\" range=\"-30 30\" type=\"hinge\"/>\n            <geom fromto=\"0.0 0.0 0.0 -0.2 -0.2 0.0\" name=\"back_leg_geomx\" size=\"0.08\" type=\"capsule\"/>\n            <body pos=\"-0.2 -0.2 0\">\n              <joint axis=\"-1 1 0\" name=\"ankle_3x\" pos=\"0.0 0.0 0.0\" range=\"-70 -30\" type=\"hinge\"/>\n              <geom fromto=\"0.0 0.0 0.0 -0.4 -0.4 0.0\" name=\"third_ankle_geomx\" size=\"0.08\" type=\"capsule\"/>\n            </body>\n          </body>\n        </body>\n        <body name=\"mid\" pos=\"-1 0 0\">\n          <geom density=\"1000\" fromto=\"0 0 0 -1 0 0\" size=\"0.1\" type=\"capsule\"/>\n          <!--<joint axis=\"0 0 1\" limited=\"true\" name=\"rot2\" pos=\"0 0 0\" range=\"-100 100\" type=\"hinge\"/>-->\n          <!--<body name=\"front_right_leg\" pos=\"-1 0 0\">\n            <geom fromto=\"0.0 0.0 0.0 -0.2 0.2 0.0\" name=\"aux_2_geom\" size=\"0.08\" type=\"capsule\"/>\n            <body name=\"aux_2\" pos=\"-0.2 0.2 0\">\n              <joint axis=\"0 0 1\" name=\"hip_2\" pos=\"0.0 0.0 0.0\" range=\"-30 30\" type=\"hinge\"/>\n              <geom fromto=\"0.0 0.0 0.0 -0.2 0.2 0.0\" name=\"right_leg_geom\" size=\"0.08\" type=\"capsule\"/>\n              <body pos=\"-0.2 0.2 0\">\n                <joint axis=\"1 1 0\" name=\"ankle_2\" pos=\"0.0 0.0 0.0\" range=\"-70 -30\" type=\"hinge\"/>\n                <geom fromto=\"0.0 0.0 0.0 -0.4 0.4 0.0\" name=\"right_ankle_geom\" size=\"0.08\" type=\"capsule\"/>\n              </body>\n            </body>\n          </body>\n          <body name=\"back_leg\" pos=\"-1 0 0\">\n            <geom fromto=\"0.0 0.0 0.0 -0.2 -0.2 0.0\" name=\"aux_3_geom\" size=\"0.08\" type=\"capsule\"/>\n            <body name=\"aux_3\" pos=\"-0.2 -0.2 0\">\n              <joint axis=\"0 0 1\" name=\"hip_3\" pos=\"0.0 0.0 0.0\" range=\"-30 30\" type=\"hinge\"/>\n              <geom fromto=\"0.0 0.0 0.0 -0.2 -0.2 0.0\" name=\"back_leg_geom\" size=\"0.08\" type=\"capsule\"/>\n              <body pos=\"-0.2 -0.2 0\">\n                <joint axis=\"-1 1 0\" name=\"ankle_3\" pos=\"0.0 0.0 0.0\" range=\"-70 -30\" type=\"hinge\"/>\n                <geom fromto=\"0.0 0.0 0.0 -0.4 -0.4 0.0\" name=\"third_ankle_geom\" size=\"0.08\" type=\"capsule\"/>\n              </body>\n            </body>\n          </body>-->\n          <body name=\"front_right_leg\" pos=\"-1 0 0\">\n            <geom fromto=\"0.0 0.0 0.0 0.0 0.2 0.0\" name=\"aux_2_geom\" size=\"0.08\" type=\"capsule\"/>\n            <body name=\"aux_2\" pos=\"0.0 0.2 0\">\n              <joint axis=\"0 0 1\" name=\"hip_2\" pos=\"0.0 0.0 0.0\" range=\"-30 30\" type=\"hinge\"/>\n              <geom fromto=\"0.0 0.0 0.0 -0.2 0.2 0.0\" name=\"right_leg_geom\" size=\"0.08\" type=\"capsule\"/>\n              <body pos=\"-0.2 0.2 0\">\n                <joint axis=\"1 1 0\" name=\"ankle_2\" pos=\"0.0 0.0 0.0\" range=\"-70 -30\" type=\"hinge\"/>\n                <geom fromto=\"0.0 0.0 0.0 -0.4 0.4 0.0\" name=\"right_ankle_geom\" size=\"0.08\" type=\"capsule\"/>\n              </body>\n            </body>\n          </body>\n          <body name=\"back_leg\" pos=\"-1 0 0\">\n            <geom fromto=\"0.0 0.0 0.0 0.0 -0.2 0.0\" name=\"aux_3_geom\" size=\"0.08\" type=\"capsule\"/>\n            <body name=\"aux_3\" pos=\"0.0 -0.2 0\">\n              <joint axis=\"0 0 1\" name=\"hip_3\" pos=\"0.0 0.0 0.0\" range=\"-30 30\" type=\"hinge\"/>\n              <geom fromto=\"0.0 0.0 0.0 -0.2 -0.2 0.0\" name=\"back_leg_geom\" size=\"0.08\" type=\"capsule\"/>\n              <body pos=\"-0.2 -0.2 0\">\n                <joint axis=\"-1 1 0\" name=\"ankle_3\" pos=\"0.0 0.0 0.0\" range=\"-70 -30\" type=\"hinge\"/>\n                <geom fromto=\"0.0 0.0 0.0 -0.4 -0.4 0.0\" name=\"third_ankle_geom\" size=\"0.08\" type=\"capsule\"/>\n              </body>\n            </body>\n          </body>\n        </body>\n      </body>\n    </body>\n        <body name='b1' pos=\"0 5 1\" euler='0 0 30'>\n      <freejoint name=\"b1_fj\"/>\n      <geom conaffinity=\"1\" condim=\"3\" name=\"wall1\" type=\"box\" density=\".000001\" size=\"20 0.01 .7\"    rgba=\"1 0.5 0.5 1\"/>\n    </body>\n    <body name='b2' pos=\"0 -5 1\" euler='0 0 30'>\n      <freejoint name=\"b2_fj\"/>\n      <geom conaffinity=\"1\" condim=\"3\" name=\"wall2\" type=\"box\" density=\".000001\" size=\"20 0.01 .7\"    rgba=\"1 0.5 0.5 1\"/>\n    </body>\n    <body name='b3' pos=\"40 5 1\" euler='0 0 -30'>\n      <freejoint name=\"b3_fj\"/>\n      <geom conaffinity=\"1\" condim=\"3\" name=\"wall3\" type=\"box\" density=\".000001\" size=\"20 0.01 .7\"    rgba=\"1 0.5 0.5 1\"/>\n    </body>\n    <body name='b4' pos=\"40 -5 1\" euler='0 0 -30'>\n      <freejoint name=\"b4_fj\"/>\n      <geom conaffinity=\"1\" condim=\"3\" name=\"wall4\" type=\"box\" density=\".000001\" size=\"20 0.01 .7\"    rgba=\"1 0.5 0.5 1\"/>\n    </body>\n    <body name='b5' pos=\"80 5 1\" euler='0 0 30'>\n      <freejoint name=\"b5_fj\"/>\n      <geom conaffinity=\"1\" condim=\"3\" name=\"wall5\" type=\"box\" density=\".000001\" size=\"20 0.01 .7\"    rgba=\"1 0.5 0.5 1\"/>\n    </body>\n    <body name='b6' pos=\"80 -5 1\" euler='0 0 30'>\n      <freejoint name=\"b6_fj\"/>\n      <geom conaffinity=\"1\" condim=\"3\" name=\"wall6\" type=\"box\" density=\".000001\" size=\"20 0.01 .7\"    rgba=\"1 0.5 0.5 1\"/>\n    </body>\n  </worldbody>\n  <actuator>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"hip_4\" gear=\"150\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"ankle_4\" gear=\"150\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"hip_1\" gear=\"150\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"ankle_1\" gear=\"150\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"hip_2\" gear=\"150\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"ankle_2\" gear=\"150\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"hip_3\" gear=\"150\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"ankle_3\" gear=\"150\"/>\n  </actuator>\n</mujoco>"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/manyagent_ant.xml.template",
    "content": "<mujoco model=\"ant\">\n  <size nconmax=\"200\"/>\n  <compiler angle=\"degree\" coordinate=\"local\" inertiafromgeom=\"true\"/>\n  <option integrator=\"RK4\" timestep=\"0.005\"/>\n  <custom>\n    <numeric data=\"0.0 0.0 0.55 1.0 0.0 0.0 0.0 0.0 1.0 0.0 -1.0 0.0 -1.0 0.0 1.0\" name=\"init_qpos\"/>\n  </custom>\n  <default>\n    <joint armature=\"1\" damping=\"1\" limited=\"true\"/>\n    <geom conaffinity=\"0\" condim=\"3\" density=\"5.0\" friction=\"1 0.5 0.5\" margin=\"0.01\" rgba=\"0.8 0.6 0.4 1\"/>\n  </default>\n  <asset>\n    <texture builtin=\"gradient\" height=\"100\" rgb1=\"1 1 1\" rgb2=\"0 0 0\" type=\"skybox\" width=\"100\"/>\n    <texture builtin=\"flat\" height=\"1278\" mark=\"cross\" markrgb=\"1 1 1\" name=\"texgeom\" random=\"0.01\" rgb1=\"1.0 1.0 1.0\" rgb2=\"0.8 0.6 0.4\" type=\"cube\" width=\"127\"/>\n    <texture builtin=\"checker\" height=\"100\" name=\"texplane\" rgb1=\"0 0 0\" rgb2=\"0.8 0.8 0.8\" type=\"2d\" width=\"100\"/>\n    <material name=\"MatPlane\" reflectance=\"0.5\" shininess=\"1\" specular=\"1\" texrepeat=\"60 60\" texture=\"texplane\"/>\n    <texture builtin=\"checker\" height=\"100\" name=\"texbox\" rgb1=\"#ff66ff\" rgb2=\"#ff66ff\" type=\"2d\" width=\"100\"/>\n    <material name=\"BoxMat\" reflectance=\"0.5\" shininess=\"1\" specular=\"1\" texrepeat=\"60 60\" texture=\"texbox\"/>\n    <material name=\"geom\" texture=\"texgeom\" texuniform=\"true\"/>\n  </asset>\n  <worldbody>\n    <light cutoff=\"100\" diffuse=\"1 1 1\" dir=\"-0 0 -1.3\" directional=\"true\" exponent=\"1\" pos=\"0 0 1.3\" specular=\".1 .1 .1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" material=\"MatPlane\" name=\"floor\" pos=\"30 0 0\" rgba=\"0.2 0.2 0.2 1\" size=\"70 25 40\" type=\"plane\"/>\n    <body name=\"torso_0\" pos=\"0 0 0.75\">\n      <camera name=\"track\" mode=\"trackcom\" pos=\"0 -10 -10\" xyaxes=\".8 .4 0 0 .4 .6\"/>\n      <!--<geom density=\"1000\" fromto=\"0 0 0 -1 0 0\" size=\"0.1\" type=\"capsule\"/>-->\n      <joint armature=\"0\" damping=\"0\" limited=\"false\" margin=\"0.01\" name=\"root\" pos=\"0 0 0\" type=\"free\"/>\n      <body name=\"front_left_leg_0\" pos=\"0 0 0\">\n        <geom fromto=\"0.0 0.0 0.0 0.2 0.2 0.0\" name=\"aux1_geom_0\" size=\"0.08\" type=\"capsule\"/>\n        <body name=\"aux1_0\" pos=\"0.2 0.2 0\">\n          <joint axis=\"0 0 1\" name=\"hip1_0\" pos=\"0.0 0.0 0.0\" range=\"-30 30\" type=\"hinge\"/>\n          <geom fromto=\"0.0 0.0 0.0 0.2 0.2 0.0\" name=\"left_leg_geom_0\" size=\"0.08\" type=\"capsule\"/>\n          <body pos=\"0.2 0.2 0\">\n            <joint axis=\"-1 1 0\" name=\"ankle1_0\" pos=\"0.0 0.0 0.0\" range=\"30 70\" type=\"hinge\"/>\n            <geom fromto=\"0.0 0.0 0.0 0.4 0.4 0.0\" name=\"left_ankle_geom_0\" size=\"0.08\" type=\"capsule\"/>\n          </body>\n        </body>\n      </body>\n      <body name=\"right_back_leg_0\" pos=\"0 0 0\">\n        <geom fromto=\"0.0 0.0 0.0 0.2 -0.2 0.0\" name=\"aux2_geom_0\" size=\"0.08\" type=\"capsule\"/>\n        <body name=\"aux2_0\" pos=\"0.2 -0.2 0\">\n          <joint axis=\"0 0 1\" name=\"hip2_0\" pos=\"0.0 0.0 0.0\" range=\"-30 30\" type=\"hinge\"/>\n          <geom fromto=\"0.0 0.0 0.0 0.2 -0.2 0.0\" name=\"rightback_leg_geom_0\" size=\"0.08\" type=\"capsule\"/>\n          <body pos=\"0.2 -0.2 0\">\n            <joint axis=\"1 1 0\" name=\"ankle2_0\" pos=\"0.0 0.0 0.0\" range=\"30 70\" type=\"hinge\"/>\n            <geom fromto=\"0.0 0.0 0.0 0.4 -0.4 0.0\" name=\"second_ankle_geom_0\" size=\"0.08\" type=\"capsule\"/>\n          </body>\n        </body>\n      </body>\n      {{ body }}\n    </body>\n    <body name='b1' pos=\"0 4.5 1\" euler='0 0 30'>\n      <freejoint name=\"b1_fj\"/>\n      <geom conaffinity=\"1\" condim=\"3\" name=\"wall1\" type=\"box\" density=\".000001\" size=\"20 0.01 .7\"    rgba=\"1 0.5 0.5 1\"/>\n    </body>\n    <body name='b2' pos=\"0 -4.5 1\" euler='0 0 30'>\n      <freejoint name=\"b2_fj\"/>\n      <geom conaffinity=\"1\" condim=\"3\" name=\"wall2\" type=\"box\" density=\".000001\" size=\"20 0.01 .7\"    rgba=\"1 0.5 0.5 1\"/>\n    </body>\n  </worldbody>\n  <actuator>\n    {{ actuators }}\n  </actuator>\n</mujoco>\n"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/manyagent_ant__stage1.xml",
    "content": "<mujoco model=\"ant\">\n  <compiler angle=\"degree\" coordinate=\"local\" inertiafromgeom=\"true\"/>\n  <option integrator=\"RK4\" timestep=\"0.01\"/>\n  <custom>\n    <numeric data=\"0.0 0.0 0.55 1.0 0.0 0.0 0.0 0.0 1.0 0.0 -1.0 0.0 -1.0 0.0 1.0\" name=\"init_qpos\"/>\n  </custom>\n  <default>\n    <joint armature=\"1\" damping=\"1\" limited=\"true\"/>\n    <geom conaffinity=\"0\" condim=\"3\" density=\"5.0\" friction=\"1 0.5 0.5\" margin=\"0.01\" rgba=\"0.8 0.6 0.4 1\"/>\n  </default>\n  <asset>\n    <texture builtin=\"gradient\" height=\"100\" rgb1=\"1 1 1\" rgb2=\"0 0 0\" type=\"skybox\" width=\"100\"/>\n    <texture builtin=\"flat\" height=\"1278\" mark=\"cross\" markrgb=\"1 1 1\" name=\"texgeom\" random=\"0.01\" rgb1=\"0.8 0.6 0.4\" rgb2=\"0.8 0.6 0.4\" type=\"cube\" width=\"127\"/>\n    <texture builtin=\"checker\" height=\"100\" name=\"texplane\" rgb1=\"0 0 0\" rgb2=\"0.8 0.8 0.8\" type=\"2d\" width=\"100\"/>\n    <material name=\"MatPlane\" reflectance=\"0.5\" shininess=\"1\" specular=\"1\" texrepeat=\"60 60\" texture=\"texplane\"/>\n    <material name=\"geom\" texture=\"texgeom\" texuniform=\"true\"/>\n  </asset>\n  <worldbody>\n    <light cutoff=\"100\" diffuse=\"1 1 1\" dir=\"-0 0 -1.3\" directional=\"true\" exponent=\"1\" pos=\"0 0 1.3\" specular=\".1 .1 .1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" material=\"MatPlane\" name=\"floor\" pos=\"0 0 0\" rgba=\"0.8 0.9 0.8 1\" size=\"40 40 40\" type=\"plane\"/>\n    <body name=\"torso\" pos=\" 0 0.75\">\n      <camera name=\"track\" mode=\"trackcom\" pos=\"0 -3 0.3\" xyaxes=\"1 0 0 0 0 1\"/>\n      <!--<geom name=\"torso_geom\" pos=\"0 0 0\" size=\"0.25\" type=\"sphere\"/>-->\n      <joint armature=\"0\" damping=\"0\" limited=\"false\" margin=\"0.01\" name=\"root\" pos=\"0 0 0\" type=\"free\"/>\n      <body name=\"front_left_leg\" pos=\"0 0 0\">\n        <geom fromto=\"0.0 0.0 0.0 0.2 0.2 0.0\" name=\"aux_1_geom\" size=\"0.08\" type=\"capsule\"/>\n        <body name=\"aux_1\" pos=\"0.2 0.2 0\">\n          <joint axis=\"0 0 1\" name=\"hip_1\" pos=\"0.0 0.0 0.0\" range=\"-30 30\" type=\"hinge\"/>\n          <geom fromto=\"0.0 0.0 0.0 0.2 0.2 0.0\" name=\"left_leg_geom\" size=\"0.08\" type=\"capsule\"/>\n          <body pos=\"0.2 0.2 0\">\n            <joint axis=\"-1 1 0\" name=\"ankle_1\" pos=\"0.0 0.0 0.0\" range=\"30 70\" type=\"hinge\"/>\n            <geom fromto=\"0.0 0.0 0.0 0.4 0.4 0.0\" name=\"left_ankle_geom\" size=\"0.08\" type=\"capsule\"/>\n          </body>\n        </body>\n      </body>\n      <body name=\"right_back_leg\" pos=\"0 0 0\">\n        <geom fromto=\"0.0 0.0 0.0 0.2 -0.2 0.0\" name=\"aux_4_geom\" size=\"0.08\" type=\"capsule\"/>\n        <body name=\"aux_4\" pos=\"0.2 -0.2 0\">\n          <joint axis=\"0 0 1\" name=\"hip_4\" pos=\"0.0 0.0 0.0\" range=\"-30 30\" type=\"hinge\"/>\n          <geom fromto=\"0.0 0.0 0.0 0.2 -0.2 0.0\" name=\"rightback_leg_geom\" size=\"0.08\" type=\"capsule\"/>\n          <body pos=\"0.2 -0.2 0\">\n            <joint axis=\"1 1 0\" name=\"ankle_4\" pos=\"0.0 0.0 0.0\" range=\"30 70\" type=\"hinge\"/>\n            <geom fromto=\"0.0 0.0 0.0 0.4 -0.4 0.0\" name=\"fourth_ankle_geom\" size=\"0.08\" type=\"capsule\"/>\n          </body>\n        </body>\n      </body>\n      <body name=\"mid\" pos=\"0.0 0 0\">\n        <geom density=\"1000\" fromto=\"0 0 0 -1 0 0\" size=\"0.1\" type=\"capsule\"/>\n        <joint axis=\"0 0 1\" limited=\"true\" name=\"rot2\" pos=\"0 0 0\" range=\"-100 100\" type=\"hinge\"/>\n        <body name=\"front_right_leg\" pos=\"-1 0 0\">\n          <geom fromto=\"0.0 0.0 0.0 -0.2 0.2 0.0\" name=\"aux_2_geom\" size=\"0.08\" type=\"capsule\"/>\n          <body name=\"aux_2\" pos=\"-0.2 0.2 0\">\n            <joint axis=\"0 0 1\" name=\"hip_2\" pos=\"0.0 0.0 0.0\" range=\"-30 30\" type=\"hinge\"/>\n            <geom fromto=\"0.0 0.0 0.0 -0.2 0.2 0.0\" name=\"right_leg_geom\" size=\"0.08\" type=\"capsule\"/>\n            <body pos=\"-0.2 0.2 0\">\n              <joint axis=\"1 1 0\" name=\"ankle_2\" pos=\"0.0 0.0 0.0\" range=\"-70 -30\" type=\"hinge\"/>\n              <geom fromto=\"0.0 0.0 0.0 -0.4 0.4 0.0\" name=\"right_ankle_geom\" size=\"0.08\" type=\"capsule\"/>\n            </body>\n          </body>\n        </body>\n        <body name=\"back_leg\" pos=\"-1 0 0\">\n          <geom fromto=\"0.0 0.0 0.0 -0.2 -0.2 0.0\" name=\"aux_3_geom\" size=\"0.08\" type=\"capsule\"/>\n          <body name=\"aux_3\" pos=\"-0.2 -0.2 0\">\n            <joint axis=\"0 0 1\" name=\"hip_3\" pos=\"0.0 0.0 0.0\" range=\"-30 30\" type=\"hinge\"/>\n            <geom fromto=\"0.0 0.0 0.0 -0.2 -0.2 0.0\" name=\"back_leg_geom\" size=\"0.08\" type=\"capsule\"/>\n            <body pos=\"-0.2 -0.2 0\">\n              <joint axis=\"-1 1 0\" name=\"ankle_3\" pos=\"0.0 0.0 0.0\" range=\"-70 -30\" type=\"hinge\"/>\n              <geom fromto=\"0.0 0.0 0.0 -0.4 -0.4 0.0\" name=\"third_ankle_geom\" size=\"0.08\" type=\"capsule\"/>\n            </body>\n          </body>\n        </body>\n      </body>\n    </body>\n  </worldbody>\n  <actuator>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"hip_4\" gear=\"150\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"ankle_4\" gear=\"150\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"hip_1\" gear=\"150\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"ankle_1\" gear=\"150\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"hip_2\" gear=\"150\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"ankle_2\" gear=\"150\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"hip_3\" gear=\"150\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"ankle_3\" gear=\"150\"/>\n  </actuator>\n</mujoco>"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/manyagent_swimmer.xml.template",
    "content": "<mujoco model=\"swimmer\">\n  <compiler angle=\"degree\" coordinate=\"local\" inertiafromgeom=\"true\"/>\n  <option collision=\"predefined\" density=\"4000\" integrator=\"RK4\" timestep=\"0.005\" viscosity=\"0.1\"/>\n  <default>\n    <geom conaffinity=\"1\" condim=\"1\" contype=\"1\" material=\"geom\" rgba=\"0.8 0.6 .4 1\"/>\n    <joint armature='0.1'  />\n  </default>\n  <asset>\n    <texture builtin=\"gradient\" height=\"100\" rgb1=\"1 1 1\" rgb2=\"0 0 0\" type=\"skybox\" width=\"100\"/>\n    <texture builtin=\"flat\" height=\"1278\" mark=\"cross\" markrgb=\"1 1 1\" name=\"texgeom\" random=\"0.01\" rgb1=\"0.8 0.6 0.4\" rgb2=\"0.8 0.6 0.4\" type=\"cube\" width=\"127\"/>\n    <texture builtin=\"checker\" height=\"100\" name=\"texplane\" rgb1=\"0 0 0\" rgb2=\"0.8 0.8 0.8\" type=\"2d\" width=\"100\"/>\n    <material name=\"MatPlane\" reflectance=\"0.5\" shininess=\"1\" specular=\"1\" texrepeat=\"30 30\" texture=\"texplane\"/>\n    <material name=\"geom\" texture=\"texgeom\" texuniform=\"true\"/>\n  </asset>\n  <worldbody>\n\n    <light cutoff=\"100\" diffuse=\"1 1 1\" dir=\"-0 0 -1.3\" directional=\"true\" exponent=\"1\" pos=\"0 0 1.3\" specular=\".1 .1 .1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" material=\"MatPlane\" name=\"floor\" pos=\"0 0 -0.1\" rgba=\"0.8 0.9 0.8 1\" size=\"40 40 0.1\" type=\"plane\"/>\n    <!--  ================= SWIMMER ================= /-->\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall1\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"0  -2.3 1.0\"  euler='0 0 0'  rgba=\"1 0.5 0.5 1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" name=\"wall2\" type=\"box\" size=\"24.8 0.1 1.0\" pos=\"0   2.3 1.0\"  euler='0 0 0'  rgba=\"1 0.5 0.5 1\"/>\n\n    <body name=\"torso\" pos=\"0 0 0\">\n      <geom density=\"1000\" fromto=\"1.5 0 0 0.5 0 0\" size=\"0.1\" type=\"capsule\"/>\n      <joint axis=\"1 0 0\" name=\"slider1\" pos=\"0 0 0\" type=\"slide\"/>\n      <joint axis=\"0 1 0\" name=\"slider2\" pos=\"0 0 0\" type=\"slide\"/>\n      <joint axis=\"0 0 1\" name=\"rot\" pos=\"0 0 0\" type=\"hinge\"/>\n      <body name=\"mid0\" pos=\"0.5 0 0\">\n        <geom density=\"1000\" fromto=\"0 0 0 -1 0 0\" size=\"0.1\" type=\"capsule\"/>\n        <joint axis=\"0 0 1\" limited=\"true\" name=\"rot0\" pos=\"0 0 0\" range=\"-100 100\" type=\"hinge\"/>\n        {{ body }}\n      </body>\n    </body>\n  </worldbody>\n  <actuator>\n{{ actuators }}\n  </actuator>\n</mujoco>"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/manyagent_swimmer__bckp2.xml",
    "content": "<mujoco model=\"swimmer\">\n  <compiler angle=\"degree\" coordinate=\"local\" inertiafromgeom=\"true\"/>\n  <option collision=\"predefined\" density=\"4000\" integrator=\"RK4\" timestep=\"0.01\" viscosity=\"0.1\"/>\n  <default>\n    <geom conaffinity=\"1\" condim=\"1\" contype=\"1\" material=\"geom\" rgba=\"0.8 0.6 .4 1\"/>\n    <joint armature='0.1'  />\n  </default>\n  <asset>\n    <texture builtin=\"gradient\" height=\"100\" rgb1=\"1 1 1\" rgb2=\"0 0 0\" type=\"skybox\" width=\"100\"/>\n    <texture builtin=\"flat\" height=\"1278\" mark=\"cross\" markrgb=\"1 1 1\" name=\"texgeom\" random=\"0.01\" rgb1=\"0.8 0.6 0.4\" rgb2=\"0.8 0.6 0.4\" type=\"cube\" width=\"127\"/>\n    <texture builtin=\"checker\" height=\"100\" name=\"texplane\" rgb1=\"0 0 0\" rgb2=\"0.8 0.8 0.8\" type=\"2d\" width=\"100\"/>\n    <material name=\"MatPlane\" reflectance=\"0.5\" shininess=\"1\" specular=\"1\" texrepeat=\"30 30\" texture=\"texplane\"/>\n    <material name=\"geom\" texture=\"texgeom\" texuniform=\"true\"/>\n  </asset>\n  <worldbody>\n    <light cutoff=\"100\" diffuse=\"1 1 1\" dir=\"-0 0 -1.3\" directional=\"true\" exponent=\"1\" pos=\"0 0 1.3\" specular=\".1 .1 .1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" material=\"MatPlane\" name=\"floor\" pos=\"0 0 -0.1\" rgba=\"0.8 0.9 0.8 1\" size=\"40 40 0.1\" type=\"plane\"/>\n    <!--  ================= SWIMMER ================= /-->\n    <body name=\"torso\" pos=\"0 0 0\">\n      <geom density=\"1000\" fromto=\"1.5 0 0 0.5 0 0\" size=\"0.1\" type=\"capsule\"/>\n      <joint axis=\"1 0 0\" name=\"slider1\" pos=\"0 0 0\" type=\"slide\"/>\n      <joint axis=\"0 1 0\" name=\"slider2\" pos=\"0 0 0\" type=\"slide\"/>\n      <joint axis=\"0 0 1\" name=\"rot\" pos=\"0 0 0\" type=\"hinge\"/>\n      <body name=\"mid1\" pos=\"0.5 0 0\">\n        <geom density=\"1000\" fromto=\"0 0 0 -1 0 0\" size=\"0.1\" type=\"capsule\"/>\n        <joint axis=\"0 0 1\" limited=\"true\" name=\"rot0\" pos=\"0 0 0\" range=\"-100 100\" type=\"hinge\"/>\n        <body name=\"mid2\" pos=\"-1 0 0\">\n          <geom density=\"1000\" fromto=\"0 0 0 -1 0 0\" size=\"0.1\" type=\"capsule\"/>\n          <joint axis=\"0 0 -1\" limited=\"true\" name=\"rot1\" pos=\"0 0 0\" range=\"-100 100\" type=\"hinge\"/>\n          <body name=\"mid3\" pos=\"-1 0 0\">\n            <geom density=\"1000\" fromto=\"0 0 0 -1 0 0\" size=\"0.1\" type=\"capsule\"/>\n            <joint axis=\"0 0 1\" limited=\"true\" name=\"rot2\" pos=\"0 0 0\" range=\"-100 100\" type=\"hinge\"/>\n            <body name=\"back\" pos=\"-1 0 0\">\n              <geom density=\"1000\" fromto=\"0 0 0 -1 0 0\" size=\"0.1\" type=\"capsule\"/>\n              <joint axis=\"0 0 1\" limited=\"true\" name=\"rot3\" pos=\"0 0 0\" range=\"-100 100\" type=\"hinge\"/>\n            </body>\n          </body>\n        </body>\n      </body>\n    </body>\n  </worldbody>\n  <actuator>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1 1\" gear=\"150.0\" joint=\"rot0\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1 1\" gear=\"150.0\" joint=\"rot1\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1 1\" gear=\"150.0\" joint=\"rot2\"/>\n     <motor ctrllimited=\"true\" ctrlrange=\"-1 1\" gear=\"150.0\" joint=\"rot3\"/>\n  </actuator>\n</mujoco>"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/manyagent_swimmer_bckp.xml",
    "content": "<mujoco model=\"swimmer\">\n  <compiler angle=\"degree\" coordinate=\"local\" inertiafromgeom=\"true\"/>\n  <option collision=\"predefined\" density=\"4000\" integrator=\"RK4\" timestep=\"0.01\" viscosity=\"0.1\"/>\n  <default>\n    <geom conaffinity=\"1\" condim=\"1\" contype=\"1\" material=\"geom\" rgba=\"0.8 0.6 .4 1\"/>\n    <joint armature='0.1'  />\n  </default>\n  <asset>\n    <texture builtin=\"gradient\" height=\"100\" rgb1=\"1 1 1\" rgb2=\"0 0 0\" type=\"skybox\" width=\"100\"/>\n    <texture builtin=\"flat\" height=\"1278\" mark=\"cross\" markrgb=\"1 1 1\" name=\"texgeom\" random=\"0.01\" rgb1=\"0.8 0.6 0.4\" rgb2=\"0.8 0.6 0.4\" type=\"cube\" width=\"127\"/>\n    <texture builtin=\"checker\" height=\"100\" name=\"texplane\" rgb1=\"0 0 0\" rgb2=\"0.8 0.8 0.8\" type=\"2d\" width=\"100\"/>\n    <material name=\"MatPlane\" reflectance=\"0.5\" shininess=\"1\" specular=\"1\" texrepeat=\"30 30\" texture=\"texplane\"/>\n    <material name=\"geom\" texture=\"texgeom\" texuniform=\"true\"/>\n  </asset>\n  <worldbody>\n    <light cutoff=\"100\" diffuse=\"1 1 1\" dir=\"-0 0 -1.3\" directional=\"true\" exponent=\"1\" pos=\"0 0 1.3\" specular=\".1 .1 .1\"/>\n    <geom conaffinity=\"1\" condim=\"3\" material=\"MatPlane\" name=\"floor\" pos=\"0 0 -0.1\" rgba=\"0.8 0.9 0.8 1\" size=\"40 40 0.1\" type=\"plane\"/>\n    <!--  ================= SWIMMER ================= /-->\n    <body name=\"torso\" pos=\"0 0 0\">\n      <geom density=\"1000\" fromto=\"1.5 0 0 0.5 0 0\" size=\"0.1\" type=\"capsule\"/>\n      <joint axis=\"1 0 0\" name=\"slider1\" pos=\"0 0 0\" type=\"slide\"/>\n      <joint axis=\"0 1 0\" name=\"slider2\" pos=\"0 0 0\" type=\"slide\"/>\n      <joint axis=\"0 0 1\" name=\"rot\" pos=\"0 0 0\" type=\"hinge\"/>\n      <body name=\"mid1\" pos=\"0.5 0 0\">\n        <geom density=\"1000\" fromto=\"0 0 0 -1 0 0\" size=\"0.1\" type=\"capsule\"/>\n        <joint axis=\"0 0 1\" limited=\"true\" name=\"rot0\" pos=\"0 0 0\" range=\"-100 100\" type=\"hinge\"/>\n        <body name=\"mid2\" pos=\"-1 0 0\">\n          <geom density=\"1000\" fromto=\"0 0 0 -1 0 0\" size=\"0.1\" type=\"capsule\"/>\n          <joint axis=\"0 0 -1\" limited=\"true\" name=\"rot1\" pos=\"0 0 0\" range=\"-100 100\" type=\"hinge\"/>\n          <body name=\"back\" pos=\"-1 0 0\">\n            <geom density=\"1000\" fromto=\"0 0 0 -1 0 0\" size=\"0.1\" type=\"capsule\"/>\n            <joint axis=\"0 0 1\" limited=\"true\" name=\"rot2\" pos=\"0 0 0\" range=\"-100 100\" type=\"hinge\"/>\n          </body>\n        </body>\n      </body>\n    </body>\n  </worldbody>\n  <actuator>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1 1\" gear=\"150.0\" joint=\"rot0\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1 1\" gear=\"150.0\" joint=\"rot1\"/>\n    <motor ctrllimited=\"true\" ctrlrange=\"-1 1\" gear=\"150.0\" joint=\"rot2\"/>\n  </actuator>\n</mujoco>"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/coupled_half_cheetah.py",
    "content": "import numpy as np\nfrom gym import utils\nfrom gym.envs.mujoco import mujoco_env\nfrom mappo_lagrangian.envs.safety_ma_mujoco.safety_multiagent_mujoco import mujoco_env\nimport os\nimport mujoco_py as mjp\nfrom gym import error, spaces\n\nclass CoupledHalfCheetah(mujoco_env.MujocoEnv, utils.EzPickle):\n    def __init__(self, **kwargs):\n        mujoco_env.MujocoEnv.__init__(self, os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets', 'coupled_half_cheetah.xml'), 5)\n        utils.EzPickle.__init__(self)\n\n    def step(self, action):\n\n        #ADDED\n        # xposbefore = self.sim.data.qpos[1]\n        # t = self.data.time\n        # wall_act = .02 * np.sin(t / 3) ** 2 - .004\n        # mjp.functions.mj_rnePostConstraint(self.sim.model,\n        #                                    self.sim.data)  #### calc contacts, this is a mujoco py version mismatch issue with mujoco200\n        # action_p_wall = np.concatenate((np.squeeze(action), [wall_act]))\n        # self.do_simulation(action_p_wall, self.frame_skip)\n        # xposafter = self.sim.data.qpos[1]\n        # wallpos = self.data.get_geom_xpos(\"obj_geom\")[0]\n        # wallvel = self.data.get_body_xvelp(\"obj1\")[0]\n        # xdist = wallpos - xposafter\n        # obj_cost = int(np.abs(xdist) < 2)\n        # if obj_cost > 0:\n        #     self.model.geom_rgba[9] = [1.0, 0, 0, 1.0]\n        # else:\n        #     self.model.geom_rgba[9] = [1.0, 0.5, 0.5, .8]\n        # ob = self._get_obs()\n        # reward_ctrl = - 0.1 * np.square(action).sum()\n        # reward_run = (xposafter - xposbefore) / self.dt\n        # reward = reward_ctrl + reward_run\n        # done = False\n\n\n\n\n        # xposbefore1 = self.sim.data.qpos[0]\n        # xposbefore2 = self.sim.data.qpos[len(self.sim.data.qpos) // 2]\n        # print(\"self.sim.data.qpos\", self.sim.data.qpos)\n\n        xposbefore1 = self.get_body_com(\"torso\")[0]\n        xposbefore2 = self.get_body_com(\"torso2\")[0]\n\n        yposbefore1 = self.get_body_com(\"torso\")[1]\n        yposbefore2 = self.get_body_com(\"torso2\")[1]\n\n        # ADDED\n        t = self.data.time\n        wall_act = .02 * np.sin(t / 3) ** 2 - .004\n        mjp.functions.mj_rnePostConstraint(self.sim.model,\n                                           self.sim.data)  #### calc contacts, this is a mujoco py version mismatch issue with mujoco200\n        action_p_wall = np.concatenate((np.squeeze(action), [wall_act]))\n        # print(\"action_p_wall\", np.array(action_p_wall).shape)\n        # print(\"action\", np.array(action).shape)\n        # print(\"self.frame_skip\", self.frame_skip)\n        self.do_simulation(action_p_wall, self.frame_skip)\n\n        # self.do_simulation(action, self.frame_skip)\n        # xposafter1 = self.sim.data.qpos[0]\n        # xposafter2 = self.sim.data.qpos[len(self.sim.data.qpos)//2]\n        xposafter1 = self.get_body_com(\"torso\")[0]\n        xposafter2 = self.get_body_com(\"torso2\")[0]\n\n        yposafter1 = self.get_body_com(\"torso\")[1]\n        yposafter2 = self.get_body_com(\"torso2\")[1]\n\n        # ADDED\n        wallpos = self.data.get_geom_xpos(\"obj_geom\")[0]\n        # wallpos1 = self.data.get_geom_xpos(\"obj_geom1\")[0]\n        y_wallpos1 = self.data.get_geom_xpos(\"wall1\")[1]\n        y_wallpos2 = self.data.get_geom_xpos(\"wall2\")[1]\n        # print(\"x_wallpos1 = self.data.get_geom_xpos\", x_wallpos1)\n        # print(\"x_wallpos2 = self.data.get_geom_xpos\", x_wallpos2)\n        wallvel = self.data.get_body_xvelp(\"obj1\")[0]\n        xdist = np.abs(wallpos - xposafter1)  #+ np.abs(wallpos - xposafter2) #+ (wallpos1 - xposafter1)  + (wallpos1 - xposafter2)\n        obj_cost = 0 # or int(np.abs(wallpos1 - xposafter2) < 5) or int(np.abs(wallpos1 - xposafter2) < 5)\\\n        #\n        if int(np.abs(wallpos - xposafter1) < 5) or int(np.abs(wallpos - xposafter2) < 5) \\\n                or int(np.abs(y_wallpos1 - yposafter1) < 5) or int(np.abs(y_wallpos2 - yposafter2) < 5):\n            obj_cost = 1\n\n        # obj_cost = int(np.abs(xdist) < 5)\n        # print(\"xposbefore1\", xposbefore1)\n        # print(\"xposbefore2\", xposbefore2)\n        # print(\"yposafter1\", yposafter1)\n        # print(\"yposafter2\", yposafter2)\n        # print(\"np.abs(x_wallpos1 - yposafter1)\", np.abs(x_wallpos1 - yposafter1))\n        # print(\"xposafter1\", xposafter1)\n        # print(\"xposafter2\", xposafter2)\n        # print(\"wallpos\", wallpos)\n        # print(\"wallpos1\", wallpos1)\n        # print(\"xdist\", xdist)\n        # print(\"(wallpos1 - xposafter2)\", (wallpos1 - xposafter2))\n        # print(\"(wallpos - xposafter1)\", (wallpos - xposafter1))\n        # print(\"(wallpos - xposafter2)\", (wallpos - xposafter2))\n        if obj_cost > 0:\n            self.model.geom_rgba[9] = [1.0, 0, 0, 1.0]\n        else:\n            self.model.geom_rgba[9] = [1.0, 0.5, 0.5, .8]\n        ob = self._get_obs()\n\n        ob = self._get_obs()\n        reward_ctrl1 = - 0.1 * np.square(action[0:len(action)//2]).sum()\n        reward_ctrl2 = - 0.1 * np.square(action[len(action)//2:]).sum()\n        reward_run1 = (xposafter1 - xposbefore1)/self.dt\n        reward_run2 = (xposafter2 - xposbefore2) / self.dt\n        reward = (reward_ctrl1 + reward_ctrl2)/2.0 + (reward_run1 + reward_run2)/2.0\n        done = False\n        return ob, reward, done, dict(cost=obj_cost, reward_run1=reward_run1, reward_ctrl1=reward_ctrl1,\n                                      reward_run2=reward_run2, reward_ctrl2=reward_ctrl2)\n\n    def _get_obs(self):\n\n        #AADED\n        wallvel = self.data.get_body_xvelp(\"obj1\")[0]\n        wall_f = .02 * np.sin(self.data.time / 3) ** 2 - .004\n        xdist = (self.data.get_geom_xpos(\"obj_geom\")[0] - self.sim.data.qpos[1]) / 10\n\n        return np.concatenate([\n            self.sim.data.qpos.flat[2:],\n            self.sim.data.qvel.flat[1:],\n            [wallvel],\n            [wall_f],\n            np.clip([xdist], -5, 5),\n        ])\n\n        # return np.concatenate([\n        #     self.sim.data.qpos.flat[1:],\n        #     self.sim.data.qvel.flat,\n        # ])\n\n    def reset_model(self):\n        qpos = self.init_qpos + self.np_random.uniform(low=-.1, high=.1, size=self.model.nq)\n        qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1\n        self.set_state(qpos, qvel)\n        return self._get_obs()\n\n    def viewer_setup(self):\n        self.viewer.cam.distance = self.model.stat.extent * 0.5\n\n    def get_env_info(self):\n        return {\"episode_limit\": self.episode_limit}\n\n    def _set_action_space(self):\n        bounds = self.model.actuator_ctrlrange.copy().astype(np.float32)\n        low, high = bounds.T\n        low, high = low[:-1], high[:-1]\n        self.action_space = spaces.Box(low=low, high=high, dtype=np.float32)\n        return self.action_space"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/half_cheetah.py",
    "content": "import numpy as np\nfrom gym import utils\n# from mujoco_safety_gym.envs import mujoco_env\n# from gym.envs.mujoco import mujoco_env\nfrom mappo_lagrangian.envs.safety_ma_mujoco.safety_multiagent_mujoco import mujoco_env\nimport mujoco_py as mjp\nfrom gym import error, spaces\n\n\nclass HalfCheetahEnv(mujoco_env.MujocoEnv, utils.EzPickle):\n    def __init__(self, **kwargs):\n        # print(\"half_aaaa\")\n        mujoco_env.MujocoEnv.__init__(self, 'half_cheetah.xml', 5)\n        utils.EzPickle.__init__(self)\n\n    def step(self, action):\n        xposbefore = self.sim.data.qpos[1]\n\n        t = self.data.time\n        wall_act = .02 * np.sin(t / 3) ** 2 - .004\n        mjp.functions.mj_rnePostConstraint(self.sim.model,\n                                           self.sim.data)  #### calc contacts, this is a mujoco py version mismatch issue with mujoco200\n        action_p_wall = np.concatenate((np.squeeze(action), [wall_act]))\n\n        self.do_simulation(action_p_wall, self.frame_skip)\n        xposafter = self.sim.data.qpos[1]\n\n        wallpos = self.data.get_geom_xpos(\"obj_geom\")[0]\n        wallvel = self.data.get_body_xvelp(\"obj1\")[0]\n        xdist = wallpos - xposafter\n        # print(\"wallpos\", wallpos)\n        # print(\"xposafter\", xposafter)\n        # print(\"xdist\", xdist)\n        obj_cost = int(np.abs(xdist) < 9)\n        if obj_cost > 0:\n            self.model.geom_rgba[9] = [1.0, 0, 0, 1.0]\n        else:\n            self.model.geom_rgba[9] = [1.0, 0.5, 0.5, .8]\n        ob = self._get_obs()\n        reward_ctrl = - 0.1 * np.square(action).sum()\n        reward_run = (xposafter - xposbefore) / self.dt\n        reward = reward_ctrl + reward_run\n        cost = obj_cost\n        # print(\"cost1\", cost)\n        done = False\n        return ob, reward, done, dict(cost=cost, reward_run=reward_run, reward_ctrl=reward_ctrl)\n\n    def _get_obs(self):\n        wallvel = self.data.get_body_xvelp(\"obj1\")[0]\n        wall_f = .02 * np.sin(self.data.time / 3) ** 2 - .004\n        xdist = (self.data.get_geom_xpos(\"obj_geom\")[0] - self.sim.data.qpos[1]) / 10\n\n        return np.concatenate([\n            self.sim.data.qpos.flat[2:],\n            self.sim.data.qvel.flat[1:],\n            [wallvel],\n            [wall_f],\n            np.clip([xdist], -5, 5),\n        ])\n\n    def reset_model(self):\n        qpos = self.init_qpos + self.np_random.uniform(low=-.1, high=.1, size=self.model.nq)\n        qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1\n        self.set_state(qpos, qvel)\n        return self._get_obs()\n\n    def viewer_setup(self):\n        self.viewer.cam.distance = self.model.stat.extent * 0.5\n\n    def _set_action_space(self):\n        bounds = self.model.actuator_ctrlrange.copy().astype(np.float32)\n        low, high = bounds.T\n        low, high = low[:-1], high[:-1]\n        self.action_space = spaces.Box(low=low, high=high, dtype=np.float32)\n        return self.action_space\n"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/hopper.py",
    "content": "import numpy as np\nfrom mappo_lagrangian.envs.safety_ma_mujoco.safety_multiagent_mujoco import mujoco_env\nfrom gym import utils\nimport mujoco_py as mjp\n\n\nclass HopperEnv(mujoco_env.MujocoEnv, utils.EzPickle):\n    def __init__(self, **kwargs):\n        mujoco_env.MujocoEnv.__init__(self, 'hopper.xml', 4)\n        utils.EzPickle.__init__(self)\n        self.last_mocx = 5  #### vel readings are super noisy for mocap weld\n\n    def step(self, a):\n        posbefore = self.sim.data.qpos[3]\n        t = self.data.time\n        pos = (t + np.sin(t)) + 3\n        self.data.set_mocap_pos('mocap1', [pos, 0, 0.5])\n\n        mjp.functions.mj_rnePostConstraint(self.sim.model,\n                                           self.sim.data)  #### calc contacts, this is a mujoco py version mismatch issue with mujoco200\n        self.do_simulation(a, self.frame_skip)\n        posafter, height, ang = self.sim.data.qpos[3:6]\n        alive_bonus = 1.0\n\n        mocapx = self.sim.data.qpos[0]\n        xdist = mocapx - posafter\n        cost = int(np.abs(xdist) < 1)\n\n        reward = (posafter - posbefore) / self.dt\n        reward += alive_bonus\n        reward -= 1e-3 * np.square(a).sum()\n        s = self.state_vector()\n        # done = not (np.isfinite(s).all() and (np.abs(s[5:]) < 100).all() and\n        #             (height > .7) and (abs(ang) < .2))\n\n        done = not (\n                np.isfinite(s).all()\n                and (np.abs(s[2:]) < 100).all()\n                and (height > 0.7)\n                and (abs(ang) < 0.2)\n        )\n        print(\"np.isfinite(s).all()\", np.isfinite(s).all())\n        print(\"np.abs(s[5:])\", (np.abs(s[2:]) < 100).all())\n        print(\"height\", (height > 0.7))\n        print(\"abs(ang) \", (abs(ang) < 0.2))\n\n        ob = self._get_obs()\n        return ob, reward, done, dict(cost=cost)\n\n    def _get_obs(self):\n        x = self.sim.data.qpos[3]\n        mocapx = self.sim.data.qpos[0]\n        mocvel = 1 + np.cos(self.data.time)\n        mocacc = -np.sin(self.data.time)\n        return np.concatenate([\n            self.sim.data.qpos.flat[4:],\n            np.clip(self.sim.data.qvel[3:].flat, -10, 10),\n            [mocvel],\n            [mocacc],\n            [mocapx - x],\n        ])\n\n    def reset_model(self):\n        qpos = self.init_qpos + self.np_random.uniform(low=-.005, high=.005, size=self.model.nq)\n        qvel = self.init_qvel + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv)\n        self.set_state(qpos, qvel)\n        return self._get_obs()\n\n    def last_mocap_x(self):\n        return self.last_mocx\n\n    def viewer_setup(self):\n        self.viewer.cam.trackbodyid = 2\n        self.viewer.cam.distance = self.model.stat.extent * 0.75\n        self.viewer.cam.lookat[2] = 1.15\n        self.viewer.cam.elevation = -20"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/humanoid.py",
    "content": "import numpy as np\n# from mujoco_safety_gym.envs import mujoco_env\nfrom mappo_lagrangian.envs.safety_ma_mujoco.safety_multiagent_mujoco import mujoco_env\nfrom gym import utils\nimport mujoco_py as mjp\n\n\ndef mass_center(model, sim):\n    mass = np.expand_dims(model.body_mass, 1)\n    xpos = sim.data.xipos\n    return (np.sum(mass * xpos, 0) / np.sum(mass))[0]\n\n\nclass HumanoidEnv(mujoco_env.MujocoEnv, utils.EzPickle):\n    def __init__(self, **kwargs):\n        mujoco_env.MujocoEnv.__init__(self, 'humanoid.xml', 5)\n        utils.EzPickle.__init__(self)\n\n    def _get_obs(self):\n        data = self.sim.data\n        x = data.qpos.flat[0]\n        y = data.qpos.flat[1]\n        if x < 20:\n            y_off = y - x * np.tan(30 / 360 * 2 * np.pi)\n        elif x > 20 and x < 60:\n            y_off = y + (x - 40) * np.tan(30 / 360 * 2 * np.pi)\n        elif x > 60 and x < 100:\n            y_off = y - (x - 80) * np.tan(30 / 360 * 2 * np.pi)\n        else:\n            y_off = y - 20 * np.tan(30 / 360 * 2 * np.pi)\n\n        return np.concatenate([data.qpos.flat[2:-42],\n                               data.qvel.flat[:-36],\n                               [x / 5],\n                               [y_off]])\n\n        # return np.concatenate([data.qpos.flat[2:],\n        #                        data.qvel.flat,\n        #                        data.cinert.flat,\n        #                        data.cvel.flat,\n        #                        data.qfrc_actuator.flat,\n        #                        data.cfrc_ext.flat])\n\n    def step(self, a):\n        pos_before = mass_center(self.model, self.sim)\n        self.do_simulation(a, self.frame_skip)\n        mjp.functions.mj_rnePostConstraint(self.sim.model,\n                                           self.sim.data)  #### calc contacts, this is a mujoco py version mismatch issue with mujoco200\n        pos_after = mass_center(self.model, self.sim)\n        alive_bonus = 5.0\n        data = self.sim.data\n        lin_vel_cost = 1.25 * (pos_after - pos_before) / self.dt\n        quad_ctrl_cost = 0.1 * np.square(data.ctrl).sum()\n        quad_impact_cost = .5e-6 * np.square(data.cfrc_ext).sum()\n        quad_impact_cost = min(quad_impact_cost, 10)\n        reward = lin_vel_cost - quad_ctrl_cost - quad_impact_cost + alive_bonus\n\n        yposafter = self.get_body_com(\"torso\")[1]\n        ywall = np.array([-2.3, 2.3])\n        if pos_after < 20:\n            y_walldist = yposafter - pos_after * np.tan(30 / 360 * 2 * np.pi) + ywall\n        elif pos_after > 20 and pos_after < 60:\n            y_walldist = yposafter + (pos_after - 40) * np.tan(30 / 360 * 2 * np.pi) - ywall\n        elif pos_after > 60 and pos_after < 100:\n            y_walldist = yposafter - (pos_after - 80) * np.tan(30 / 360 * 2 * np.pi) + ywall\n        else:\n            y_walldist = yposafter - 20 * np.tan(30 / 360 * 2 * np.pi) + ywall\n        obj_cost = (abs(y_walldist) < 1.8).any() * 1.0\n\n        qpos = self.sim.data.qpos\n        done = bool((qpos[2] < 1.0) or (qpos[2] > 2.0))\n\n        done_cost = done * 1.0\n        cost = np.clip(obj_cost + done_cost, 0, 1)\n\n        return self._get_obs(), reward, done, dict(reward_linvel=lin_vel_cost,\n                                                   reward_quadctrl=-quad_ctrl_cost,\n                                                   reward_alive=alive_bonus,\n                                                   reward_impact=-quad_impact_cost,\n                                                   cost_obj=obj_cost,\n                                                   cost_done=done_cost,\n                                                   cost=cost,\n                                                   )\n\n    def reset_model(self):\n        c = 0.01\n        # self.set_state(\n        #     self.init_qpos + self.np_random.uniform(low=-c, high=c, size=self.model.nq),\n        #     self.init_qvel + self.np_random.uniform(low=-c, high=c, size=self.model.nv,)\n        # )\n        # return self._get_obs()\n        qpos = self.init_qpos + self.np_random.uniform(low=-c, high=c, size=self.model.nq)\n        qpos[-42:] = self.init_qpos[-42:]\n        qvel = self.init_qvel + self.np_random.uniform(low=-c, high=c, size=self.model.nv, )\n        qvel[-36:] = self.init_qvel[-36:]\n        self.set_state(qpos, qvel)\n        return self._get_obs()\n\n    def viewer_setup(self):\n        self.viewer.cam.trackbodyid = 1\n        self.viewer.cam.distance = self.model.stat.extent * 1.0\n        self.viewer.cam.lookat[2] = 2.0\n        self.viewer.cam.elevation = -20"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/manyagent_ant.py",
    "content": "import numpy as np\nfrom gym import utils\nfrom gym.envs.mujoco import mujoco_env\nfrom jinja2 import Template\n\nimport mujoco_py as mjp\n\nimport os\n\nclass ManyAgentAntEnv(mujoco_env.MujocoEnv, utils.EzPickle):\n    def __init__(self, **kwargs):\n        # Return Flag: Distinguish the mujoco and Wrapper env.\n        self.rflag = 0\n        agent_conf = kwargs.get(\"agent_conf\")\n        n_agents = int(agent_conf.split(\"x\")[0])\n        n_segs_per_agents = int(agent_conf.split(\"x\")[1])\n        n_segs = n_agents * n_segs_per_agents\n\n        # Check whether asset file exists already, otherwise create it\n        asset_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets',\n                                                  'manyagent_ant_{}_agents_each_{}_segments.auto.xml'.format(n_agents,\n                                                                                                                 n_segs_per_agents))\n        # if not os.path.exists(asset_path):\n        # print(\"Auto-Generating Manyagent Ant asset with {} segments at {}.\".format(n_segs, asset_path))\n        self._generate_asset(n_segs=n_segs, asset_path=asset_path)\n\n        #asset_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets',git p\n        #                          'manyagent_swimmer.xml')\n\n        mujoco_env.MujocoEnv.__init__(self, asset_path, 4)\n        utils.EzPickle.__init__(self)\n\n    def _generate_asset(self, n_segs, asset_path):\n        template_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets',\n                                                  'manyagent_ant.xml.template')\n        with open(template_path, \"r\") as f:\n            t = Template(f.read())\n        body_str_template = \"\"\"\n        <body name=\"torso_{:d}\" pos=\"-1 0 0\">\n           <!--<joint axis=\"0 1 0\" name=\"nnn_{:d}\" pos=\"0.0 0.0 0.0\" range=\"-1 1\" type=\"hinge\"/>-->\n            <geom density=\"100\" fromto=\"1 0 0 0 0 0\" size=\"0.1\" type=\"capsule\"/>\n            <body name=\"front_right_leg_{:d}\" pos=\"0 0 0\">\n              <geom fromto=\"0.0 0.0 0.0 0.0 0.2 0.0\" name=\"aux1_geom_{:d}\" size=\"0.08\" type=\"capsule\"/>\n              <body name=\"aux_2_{:d}\" pos=\"0.0 0.2 0\">\n                <joint axis=\"0 0 1\" name=\"hip1_{:d}\" pos=\"0.0 0.0 0.0\" range=\"-30 30\" type=\"hinge\"/>\n                <geom fromto=\"0.0 0.0 0.0 -0.2 0.2 0.0\" name=\"right_leg_geom_{:d}\" size=\"0.08\" type=\"capsule\"/>\n                <body pos=\"-0.2 0.2 0\">\n                  <joint axis=\"1 1 0\" name=\"ankle1_{:d}\" pos=\"0.0 0.0 0.0\" range=\"-70 -30\" type=\"hinge\"/>\n                  <geom fromto=\"0.0 0.0 0.0 -0.4 0.4 0.0\" name=\"right_ankle_geom_{:d}\" size=\"0.08\" type=\"capsule\"/>\n                </body>\n              </body>\n            </body>\n            <body name=\"back_leg_{:d}\" pos=\"0 0 0\">\n              <geom fromto=\"0.0 0.0 0.0 0.0 -0.2 0.0\" name=\"aux2_geom_{:d}\" size=\"0.08\" type=\"capsule\"/>\n              <body name=\"aux2_{:d}\" pos=\"0.0 -0.2 0\">\n                <joint axis=\"0 0 1\" name=\"hip2_{:d}\" pos=\"0.0 0.0 0.0\" range=\"-30 30\" type=\"hinge\"/>\n                <geom fromto=\"0.0 0.0 0.0 -0.2 -0.2 0.0\" name=\"back_leg_geom_{:d}\" size=\"0.08\" type=\"capsule\"/>\n                <body pos=\"-0.2 -0.2 0\">\n                  <joint axis=\"-1 1 0\" name=\"ankle2_{:d}\" pos=\"0.0 0.0 0.0\" range=\"-70 -30\" type=\"hinge\"/>\n                  <geom fromto=\"0.0 0.0 0.0 -0.4 -0.4 0.0\" name=\"third_ankle_geom_{:d}\" size=\"0.08\" type=\"capsule\"/>\n                </body>\n              </body>\n            </body>\n        \"\"\"\n\n        body_close_str_template =\"</body>\\n\"\n        actuator_str_template = \"\"\"\\t     <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"hip1_{:d}\" gear=\"150\"/>\n                                          <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"ankle1_{:d}\" gear=\"150\"/>\n                                          <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"hip2_{:d}\" gear=\"150\"/>\n                                          <motor ctrllimited=\"true\" ctrlrange=\"-1.0 1.0\" joint=\"ankle2_{:d}\" gear=\"150\"/>\\n\"\"\"\n\n        body_str = \"\"\n        for i in range(1,n_segs):\n            body_str += body_str_template.format(*([i]*16))\n        body_str += body_close_str_template*(n_segs-1)\n\n        actuator_str = \"\"\n        for i in range(n_segs):\n            actuator_str += actuator_str_template.format(*([i]*8))\n\n        rt = t.render(body=body_str, actuators=actuator_str)\n        with open(asset_path, \"w\") as f:\n            f.write(rt)\n        pass\n\n    def step(self, a):\n        xposbefore = self.get_body_com(\"torso_0\")[0]\n        self.do_simulation(a, self.frame_skip)\n\n        #ADDED\n        mjp.functions.mj_rnePostConstraint(self.sim.model,\n                                           self.sim.data)  #### calc contacts, this is a mujoco py version mismatch issue with mujoco200\n\n        xposafter = self.get_body_com(\"torso_0\")[0]\n        forward_reward = (xposafter - xposbefore)/self.dt\n        ctrl_cost = .5 * np.square(a).sum()\n        contact_cost = 0.5 * 1e-3 * np.sum(\n            np.square(np.clip(self.sim.data.cfrc_ext, -1, 1)))\n        survive_reward = 1.0\n\n        ### ADDED safety stuff\n        yposafter = self.get_body_com(\"torso_0\")[1]\n        ywall = np.array([-4.5, 4.5])\n        if xposafter < 20:\n            y_walldist = yposafter - xposafter * np.tan(30 / 360 * 2 * np.pi) + ywall\n        elif xposafter>20 and xposafter<60:\n            y_walldist = yposafter + (xposafter-40)*np.tan(30/360*2*np.pi) - ywall\n        elif xposafter>60 and xposafter<100:\n            y_walldist = yposafter - (xposafter-80)*np.tan(30/360*2*np.pi) + ywall\n        else:\n            y_walldist = yposafter - 20*np.tan(30/360*2*np.pi) + ywall\n        obj_cost = (abs(y_walldist) < 1.8).any() * 1.0\n\n        reward = forward_reward - ctrl_cost - contact_cost + survive_reward\n\n        #### ADDED\n        body_quat = self.data.get_body_xquat('torso_0')\n        z_rot = 1-2*(body_quat[1]**2+body_quat[2]**2)  ### normally xx-rotation, not sure what axes mujoco uses\n\n        state = self.state_vector()\n        notdone = np.isfinite(state).all() \\\n            and state[2] >= 0.2 and state[2] <= 1.0\\\n            and z_rot>=-0.7 #ADDED\n\n        done = not notdone       \n     \n        #ADDED\n        done_cost = done * 1.0\n        cost = np.clip(obj_cost + done_cost, 0, 1)\n        \n        ob = self._get_obs()\n        if self.rflag == 0:\n            self.rflag += 1\n            return ob, reward, done, dict(\n                cost=cost,\n                reward_forward=forward_reward, #\n                reward_ctrl=-ctrl_cost,\n                reward_contact=-contact_cost,\n                reward_survive=survive_reward,\n                cost_obj=obj_cost,  # ADDED\n                cost_done=done_cost,  # ADDED\n            )\n        else:\n            return ob, reward, done, dict(\n                cost=cost,\n                reward_forward=forward_reward, # cost = cost,\n                reward_ctrl=-ctrl_cost,\n                reward_contact=-contact_cost,\n                reward_survive=survive_reward,\n                cost_obj=obj_cost, #ADDED\n                cost_done=done_cost, #ADDED\n            )\n\n    def _get_obs(self):\n        x = self.sim.data.qpos.flat[0] #ADDED\n        y = self.sim.data.qpos.flat[1] #ADDED\n\n        #ADDED\n        if x<20:\n            y_off = y - x*np.tan(30/360*2*np.pi)\n        elif x>20 and x<60:\n            y_off = y + (x-40)*np.tan(30/360*2*np.pi)\n        elif x>60 and x<100:\n            y_off = y - (x-80)*np.tan(30/360*2*np.pi)\n        else:\n            y_off = y - 20*np.tan(30/360*2*np.pi)\n        # return np.concatenate([\n        #     self.sim.data.qpos.flat[2:],\n        #     self.sim.data.qvel.flat,\n        #     # np.clip(self.sim.data.cfrc_ext, -1, 1).flat,\n        # ])\n        return np.concatenate([\n            self.sim.data.qpos.flat[2:-42], # size = 3\n            self.sim.data.qvel.flat[:-36], # size = 6\n            [x/5],\n            [y_off],\n            # np.clip(self.sim.data.cfrc_ext, -1, 1).flat,\n        ])\n\n    # def reset_model(self):\n    #     qpos = self.init_qpos + self.np_random.uniform(size=self.model.nq, low=-.1, high=.1)\n    #     qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1\n    #     self.set_state(qpos, qvel)\n    #     return self._get_obs()\n\n    def reset_model(self):\n        qpos = self.init_qpos + self.np_random.uniform(size=self.model.nq, low=-.1, high=.1)\n        qpos[-42:] = self.init_qpos[-42:]\n        qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1\n        qvel[-36:] = self.init_qvel[-36:]\n        self.set_state(qpos, qvel)\n        return self._get_obs()\n\n    def viewer_setup(self):\n        self.viewer.cam.distance = self.model.stat.extent * 0.5\n"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/manyagent_swimmer.py",
    "content": "import numpy as np\nfrom gym import utils\nfrom gym.envs.mujoco import mujoco_env\n\nimport os\nfrom jinja2 import Template\nimport mujoco_py as mjp\n\nclass ManyAgentSwimmerEnv(mujoco_env.MujocoEnv, utils.EzPickle):\n    def __init__(self, **kwargs):\n        agent_conf = kwargs.get(\"agent_conf\")\n        n_agents = int(agent_conf.split(\"x\")[0])\n        n_segs_per_agents = int(agent_conf.split(\"x\")[1])\n        n_segs = n_agents * n_segs_per_agents\n\n        # Check whether asset file exists already, otherwise create it\n        asset_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets',\n                                                  'manyagent_swimmer_{}_agents_each_{}_segments.auto.xml'.format(n_agents,\n                                                                                                                 n_segs_per_agents))\n        # if not os.path.exists(asset_path):\n        print(\"Auto-Generating Manyagent Swimmer asset with {} segments at {}.\".format(n_segs, asset_path))\n        self._generate_asset(n_segs=n_segs, asset_path=asset_path)\n\n        #asset_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets',git p\n        #                          'manyagent_swimmer.xml')\n\n        mujoco_env.MujocoEnv.__init__(self, asset_path, 4)\n        utils.EzPickle.__init__(self)\n\n    def _generate_asset(self, n_segs, asset_path):\n        template_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets',\n                                                  'manyagent_swimmer.xml.template')\n        with open(template_path, \"r\") as f:\n            t = Template(f.read())\n        body_str_template = \"\"\"\n        <body name=\"mid{:d}\" pos=\"-1 0 0\">\n          <geom density=\"1000\" fromto=\"0 0 0 -1 0 0\" size=\"0.1\" type=\"capsule\"/>\n          <joint axis=\"0 0 {:d}\" limited=\"true\" name=\"rot{:d}\" pos=\"0 0 0\" range=\"-100 100\" type=\"hinge\"/>\n        \"\"\"\n\n        body_end_str_template = \"\"\"\n        <body name=\"back\" pos=\"-1 0 0\">\n            <geom density=\"1000\" fromto=\"0 0 0 -1 0 0\" size=\"0.1\" type=\"capsule\"/>\n            <joint axis=\"0 0 1\" limited=\"true\" name=\"rot{:d}\" pos=\"0 0 0\" range=\"-100 100\" type=\"hinge\"/>\n          </body>\n        \"\"\"\n\n        body_close_str_template =\"</body>\\n\"\n        actuator_str_template = \"\"\"\\t <motor ctrllimited=\"true\" ctrlrange=\"-1 1\" gear=\"150.0\" joint=\"rot{:d}\"/>\\n\"\"\"\n\n        body_str = \"\"\n        for i in range(1,n_segs-1):\n            body_str += body_str_template.format(i, (-1)**(i+1), i)\n        body_str += body_end_str_template.format(n_segs-1)\n        body_str += body_close_str_template*(n_segs-2)\n\n        actuator_str = \"\"\n        for i in range(n_segs):\n            actuator_str += actuator_str_template.format(i)\n\n        rt = t.render(body=body_str, actuators=actuator_str)\n        with open(asset_path, \"w\") as f:\n            f.write(rt)\n        pass\n\n    def step(self, a):\n\n        # ctrl_cost_coeff = 0.0001\n        # xposbefore = self.sim.data.qpos[0]\n        # self.do_simulation(a, self.frame_skip)\n        # xposafter = self.sim.data.qpos[0]\n        # reward_fwd = (xposafter - xposbefore) / self.dt\n        # reward_ctrl = -ctrl_cost_coeff * np.square(a).sum()\n        # reward = reward_fwd + reward_ctrl\n\n        ctrl_cost_coeff = 0.0001\n        xposbefore = self.sim.data.qpos[0]\n        # yposbefore = self.sim.data.qpos[1]\n        self.do_simulation(a, self.frame_skip)\n        # ADDED\n        mjp.functions.mj_rnePostConstraint(self.sim.model,\n                                           self.sim.data)\n        xposafter = self.sim.data.qpos[0]\n        # yposbefore = self.sim.data.qpos[1]\n        y_wallpos1 = self.data.get_geom_xpos(\"wall1\")[1]\n        y_wallpos2 = self.data.get_geom_xpos(\"wall2\")[1]\n        reward_fwd = (xposafter - xposbefore) / self.dt\n        reward_ctrl = - ctrl_cost_coeff * np.square(a).sum()\n        reward = reward_fwd + reward_ctrl\n        ### ADDED safety stuff\n        yposafter = self.get_body_com(\"torso\")[1]\n        ywall = np.array([-2.3, 2.3])\n        if xposafter < 20:\n            y_walldist = yposafter - xposafter * np.tan(30 / 360 * 2 * np.pi) + ywall\n        elif xposafter > 20 and xposafter < 60:\n            y_walldist = yposafter + (xposafter - 40) * np.tan(30 / 360 * 2 * np.pi) - ywall\n        elif xposafter > 60 and xposafter < 100:\n            y_walldist = yposafter - (xposafter - 80) * np.tan(30 / 360 * 2 * np.pi) + ywall\n        else:\n            y_walldist = yposafter - 20 * np.tan(30 / 360 * 2 * np.pi) + ywall\n        obj_cost = (abs(y_walldist) < 1.8).any() * 1.0\n\n        # print(\"y_wallpos1-yposafter\", y_wallpos1-yposafter)\n        # print(\"y_wallpos2-yposafter\", y_wallpos2-yposafter)\n        #### ADDED\n        # body_quat = self.data.get_body_xquat('torso')\n        # z_rot = 1 - 2 * (\n        #             body_quat[1] ** 2 + body_quat[2] ** 2)  ### normally xx-rotation, not sure what axes mujoco uses\n        #\n        # state = self.state_vector()\n\n        done  = False\n\n        # ADDED\n        # print(\"y_walldist\", y_walldist)\n        # print(\"obj_cost\", obj_cost)\n        # print(\"done_cost\", done_cost)\n        cost = np.clip(obj_cost, 0, 1)\n        #cost = obj_cost\n        ob = self._get_obs()\n        return ob, reward, done, dict(cost=cost, reward_fwd=reward_fwd, reward_ctrl=reward_ctrl)\n\n    def _get_obs(self):\n        qpos = self.sim.data.qpos\n        qvel = self.sim.data.qvel\n\n        #ADDED\n        x = self.sim.data.qpos.flat[0]  # ADDED\n        y = self.sim.data.qpos.flat[1]  # ADDED\n\n        # ADDED\n        if x < 20:\n            y_off = y - x * np.tan(30 / 360 * 2 * np.pi)\n        elif x > 20 and x < 60:\n            y_off = y + (x - 40) * np.tan(30 / 360 * 2 * np.pi)\n        elif x > 60 and x < 100:\n            y_off = y - (x - 80) * np.tan(30 / 360 * 2 * np.pi)\n        else:\n            y_off = y - 20 * np.tan(30 / 360 * 2 * np.pi)\n\n        return np.concatenate([qpos.flat[2:], qvel.flat, [x/5],\n            [y_off]])\n\n    def reset_model(self):\n        self.set_state(\n            self.init_qpos + self.np_random.uniform(low=-.1, high=.1, size=self.model.nq),\n            self.init_qvel + self.np_random.uniform(low=-.1, high=.1, size=self.model.nv)\n        )\n        return self._get_obs()"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/mujoco_env.py",
    "content": "from collections import OrderedDict\nimport os\n\n\nfrom gym import error, spaces\nfrom gym.utils import seeding\nimport numpy as np\nfrom os import path\nimport gym\n\ntry:\n    import mujoco_py\nexcept ImportError as e:\n    raise error.DependencyNotInstalled(\"{}. (HINT: you need to install mujoco_py, and also perform the setup instructions here: https://github.com/openai/mujoco-py/.)\".format(e))\n\nDEFAULT_SIZE = 500\n\n\ndef convert_observation_to_space(observation):\n    if isinstance(observation, dict):\n        space = spaces.Dict(OrderedDict([\n            (key, convert_observation_to_space(value))\n            for key, value in observation.items()\n        ]))\n    elif isinstance(observation, np.ndarray):\n        low = np.full(observation.shape, -float('inf'), dtype=np.float32)\n        high = np.full(observation.shape, float('inf'), dtype=np.float32)\n        space = spaces.Box(low, high, dtype=observation.dtype)\n    else:\n        raise NotImplementedError(type(observation), observation)\n\n    return space\n\n\nclass MujocoEnv(gym.Env):\n    \"\"\"Superclass for all MuJoCo environments.\n    \"\"\"\n\n    def __init__(self, model_path, frame_skip):\n        if model_path.startswith(\"/\"):\n            fullpath = model_path\n        else:\n            fullpath = os.path.join(os.path.dirname(__file__), \"./assets\", model_path)\n        if not path.exists(fullpath):\n            raise IOError(\"File %s does not exist\" % fullpath)\n        self.frame_skip = frame_skip\n        self.model = mujoco_py.load_model_from_path(fullpath)\n        self.sim = mujoco_py.MjSim(self.model)\n        self.data = self.sim.data\n        self.viewer = None\n        self._viewers = {}\n\n        self.metadata = {\n            'render.modes': ['human', 'rgb_array', 'depth_array'],\n            'video.frames_per_second': int(np.round(1.0 / self.dt))\n        }\n\n        self.init_qpos = self.sim.data.qpos.ravel().copy()\n        self.init_qvel = self.sim.data.qvel.ravel().copy()\n\n        self._set_action_space()\n\n        action = self.action_space.sample()\n        observation, _reward, done, _info = self.step(action)\n        # assert not done\n\n        self._set_observation_space(observation)\n\n        self.seed()\n\n    def _set_action_space(self):\n        bounds = self.model.actuator_ctrlrange.copy().astype(np.float32)\n        low, high = bounds.T\n        self.action_space = spaces.Box(low=low, high=high, dtype=np.float32)\n        return self.action_space\n\n    def _set_observation_space(self, observation):\n        self.observation_space = convert_observation_to_space(observation)\n        return self.observation_space\n\n    def seed(self, seed=None):\n        self.np_random, seed = seeding.np_random(seed)\n        return [seed]\n\n    # methods to override:\n    # ----------------------------\n\n    def reset_model(self):\n        \"\"\"\n        Reset the robot degrees of freedom (qpos and qvel).\n        Implement this in each subclass.\n        \"\"\"\n        raise NotImplementedError\n\n    def viewer_setup(self):\n        \"\"\"\n        This method is called when the viewer is initialized.\n        Optionally implement this method, if you need to tinker with camera position\n        and so forth.\n        \"\"\"\n        pass\n\n    # -----------------------------\n\n    def reset(self):\n        self.sim.reset()\n        ob = self.reset_model()\n        return ob\n\n    def set_state(self, qpos, qvel):\n        assert qpos.shape == (self.model.nq,) and qvel.shape == (self.model.nv,)\n        old_state = self.sim.get_state()\n        new_state = mujoco_py.MjSimState(old_state.time, qpos, qvel,\n                                         old_state.act, old_state.udd_state)\n        self.sim.set_state(new_state)\n        self.sim.forward()\n\n    @property\n    def dt(self):\n        return self.model.opt.timestep * self.frame_skip\n\n    def do_simulation(self, ctrl, n_frames):\n        self.sim.data.ctrl[:] = ctrl\n        for _ in range(n_frames):\n            self.sim.step()\n\n    def render(self,\n               mode='human',\n               width=DEFAULT_SIZE,\n               height=DEFAULT_SIZE,\n               camera_id=None,\n               camera_name=None):\n        if mode == 'rgb_array':\n            if camera_id is not None and camera_name is not None:\n                raise ValueError(\"Both `camera_id` and `camera_name` cannot be\"\n                                 \" specified at the same time.\")\n\n            no_camera_specified = camera_name is None and camera_id is None\n            if no_camera_specified:\n                camera_name = 'track'\n\n            if camera_id is None and camera_name in self.model._camera_name2id:\n                camera_id = self.model.camera_name2id(camera_name)\n\n            self._get_viewer(mode).render(width, height, camera_id=camera_id)\n            # window size used for old mujoco-py:\n            data = self._get_viewer(mode).read_pixels(width, height, depth=False)\n            # original image is upside-down, so flip it\n            return data[::-1, :, :]\n        elif mode == 'depth_array':\n            self._get_viewer(mode).render(width, height)\n            # window size used for old mujoco-py:\n            # Extract depth part of the read_pixels() tuple\n            data = self._get_viewer(mode).read_pixels(width, height, depth=True)[1]\n            # original image is upside-down, so flip it\n            return data[::-1, :]\n        elif mode == 'human':\n            self._get_viewer(mode).render()\n\n    def close(self):\n        if self.viewer is not None:\n            # self.viewer.finish()\n            self.viewer = None\n            self._viewers = {}\n\n    def _get_viewer(self, mode):\n        self.viewer = self._viewers.get(mode)\n        if self.viewer is None:\n            if mode == 'human':\n                self.viewer = mujoco_py.MjViewer(self.sim)\n            elif mode == 'rgb_array' or mode == 'depth_array':\n                self.viewer = mujoco_py.MjRenderContextOffscreen(self.sim, -1)\n\n            self.viewer_setup()\n            self._viewers[mode] = self.viewer\n        return self.viewer\n\n    def get_body_com(self, body_name):\n        return self.data.get_body_xpos(body_name)\n\n    def state_vector(self):\n        return np.concatenate([\n            self.sim.data.qpos.flat,\n            self.sim.data.qvel.flat\n        ])\n\n    def place_random_objects(self):\n        for i in range(9):\n            random_color_array = np.append(np.random.uniform(0, 1, size=3), 1)\n            random_pos_array = np.append(np.random.uniform(-10., 10., size=2), 0.5)\n            site_id = self.sim.model.geom_name2id('obj' + str(i))\n            self.sim.model.geom_rgba[site_id] = random_color_array\n            self.sim.model.geom_pos[site_id] = random_pos_array\n"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/mujoco_multi.py",
    "content": "from functools import partial\nimport gym\nfrom gym.spaces import Box\nfrom gym.wrappers import TimeLimit\nimport numpy as np\n\nfrom .multiagentenv import MultiAgentEnv\nfrom .manyagent_ant import ManyAgentAntEnv\nfrom .manyagent_swimmer import ManyAgentSwimmerEnv\nfrom .obsk import get_joints_at_kdist, get_parts_and_edges, build_obs\n\n\ndef env_fn(env, **kwargs) -> MultiAgentEnv:  # TODO: this may be a more complex function\n    # env_args = kwargs.get(\"env_args\", {})\n    return env(**kwargs)\n\n\n# env_REGISTRY = {}\n# env_REGISTRY[\"manyagent_ant\"] = partial(env_fn, env=ManyAgentAntEnv)\n#\n# env_REGISTRY = {}\n# env_REGISTRY[\"manyagent_swimmer\"] = partial(env_fn, env=ManyAgentSwimmerEnv)\n\n\n# using code from https://github.com/ikostrikov/pytorch-ddpg-naf\nclass NormalizedActions(gym.ActionWrapper):\n\n    def _action(self, action):\n        action = (action + 1) / 2\n        action *= (self.action_space.high - self.action_space.low)\n        action += self.action_space.low\n        return action\n\n    def action(self, action_):\n        return self._action(action_)\n\n    def _reverse_action(self, action):\n        action -= self.action_space.low\n        action /= (self.action_space.high - self.action_space.low)\n        action = action * 2 - 1\n        return action\n\n\nclass MujocoMulti(MultiAgentEnv):\n\n    def __init__(self, batch_size=None, **kwargs):\n        super().__init__(batch_size, **kwargs)\n        self.scenario = kwargs[\"env_args\"][\"scenario\"]  # e.g. Ant-v2\n        self.agent_conf = kwargs[\"env_args\"][\"agent_conf\"]  # e.g. '2x3'\n\n        self.agent_partitions, self.mujoco_edges, self.mujoco_globals = get_parts_and_edges(self.scenario,\n                                                                                            self.agent_conf)\n\n        self.n_agents = len(self.agent_partitions)\n        self.n_actions = max([len(l) for l in self.agent_partitions])\n        self.obs_add_global_pos = kwargs[\"env_args\"].get(\"obs_add_global_pos\", False)\n\n        self.agent_obsk = kwargs[\"env_args\"].get(\"agent_obsk\",\n                                                 None)  # if None, fully observable else k>=0 implies observe nearest k agents or joints\n        self.agent_obsk_agents = kwargs[\"env_args\"].get(\"agent_obsk_agents\",\n                                                        False)  # observe full k nearest agents (True) or just single joints (False)\n\n        if self.agent_obsk is not None:\n            # print(\"this is agent_obsk\")\n            self.k_categories_label = kwargs[\"env_args\"].get(\"k_categories\")\n            if self.k_categories_label is None:\n                if self.scenario in [\"Ant-v2\", \"manyagent_ant\"]:\n                    self.k_categories_label = \"qpos,qvel,cfrc_ext|qpos\"\n                    # print(\"this is agent_obsk --- ant\")\n                elif self.scenario in [\"Swimmer-v2\", \"manyagent_swimmer\"]:\n                    self.k_categories_label = \"qpos,qvel|qpos\"\n                    # print(\"this is agent_obsk --- swimmer\")\n                elif self.scenario in [\"Humanoid-v2\", \"HumanoidStandup-v2\"]:\n                    self.k_categories_label = \"qpos,qvel,cfrc_ext,cvel,cinert,qfrc_actuator|qpos\"\n                elif self.scenario in [\"Reacher-v2\"]:\n                    self.k_categories_label = \"qpos,qvel,fingertip_dist|qpos\"\n                elif self.scenario in [\"coupled_half_cheetah\"]:\n                    self.k_categories_label = \"qpos,qvel,ten_J,ten_length,ten_velocity|\"\n                else:\n                    self.k_categories_label = \"qpos,qvel|qpos\"\n\n            k_split = self.k_categories_label.split(\"|\")\n            self.k_categories = [k_split[k if k < len(k_split) else -1].split(\",\") for k in range(self.agent_obsk + 1)]\n\n            self.global_categories_label = kwargs[\"env_args\"].get(\"global_categories\")\n            self.global_categories = self.global_categories_label.split(\n                \",\") if self.global_categories_label is not None else []\n\n        if self.agent_obsk is not None:\n            self.k_dicts = [get_joints_at_kdist(agent_id,\n                                                self.agent_partitions,\n                                                self.mujoco_edges,\n                                                k=self.agent_obsk,\n                                                kagents=False, ) for agent_id in range(self.n_agents)]\n\n        # load scenario from script\n        self.episode_limit = self.args.episode_limit\n\n        self.env_version = kwargs[\"env_args\"].get(\"env_version\", 2)\n        if self.env_version == 2:\n            if self.scenario in [\"manyagent_ant\"]:\n                from .manyagent_ant import ManyAgentAntEnv as this_env\n            elif self.scenario in [\"manyagent_swimmer\"]:\n                from .manyagent_swimmer import ManyAgentSwimmerEnv as this_env\n            elif self.scenario in [\"coupled_half_cheetah\"]:\n                from .coupled_half_cheetah import CoupledHalfCheetah as this_env\n            elif self.scenario in [\"HalfCheetah-v2\"]:\n                from .half_cheetah import HalfCheetahEnv as this_env\n                # print(\"HalfCheetahEnv1111\") Hopper-v2 #\n            elif self.scenario in [\"Hopper-v2\"]:\n                from .hopper import HopperEnv as this_env\n                # print(\"Hopper-v2\")\n            elif self.scenario in [\"Humanoid-v2\"]:\n                from .humanoid import HumanoidEnv as this_env\n                # print(\"Hopper-v2\")\n            elif self.scenario in [\"Ant-v2\"]:\n                from .ant import AntEnv as this_env\n            else:\n                raise NotImplementedError('Custom env not implemented!')\n            # print(\"self.scenario\", self.scenario)\n            # aaa= this_env(**kwargs[\"env_args\"])\n            # print(\"aaa\", aaa)\n            self.wrapped_env = NormalizedActions(\n                TimeLimit(this_env(**kwargs[\"env_args\"]), max_episode_steps=self.episode_limit))\n            # try:\n            #     self.wrapped_env = NormalizedActions(gym.make(self.scenario))\n            #     print(\"this managent1\")\n            # except gym.error.Error:\n            #     if self.scenario in [\"manyagent_ant\"]:\n            #         from .manyagent_ant import ManyAgentAntEnv as this_env\n            #     elif self.scenario in [\"manyagent_swimmer\"]:\n            #         from .manyagent_swimmer import ManyAgentSwimmerEnv as this_env\n            #     elif self.scenario in [\"coupled_half_cheetah\"]:\n            #         from .coupled_half_cheetah import CoupledHalfCheetah as this_env\n            #     elif self.scenario in [\"HalfCheetah-v2\"]:\n            #         from .half_cheetah import HalfCheetahEnv as this_env\n            #         print(\"HalfCheetahEnv1111\")\n            #     else:\n            #         raise NotImplementedError('Custom env not implemented!')\n            #     self.wrapped_env = NormalizedActions(\n            #         TimeLimit(this_env(**kwargs[\"env_args\"]), max_episode_steps=self.episode_limit))\n                # if self.scenario == \"manyagent_swimmer\":\n                #     env_REGISTRY = {}\n                #     env_REGISTRY[\"manyagent_swimmer\"] = partial(env_fn, env=ManyAgentSwimmerEnv)\n                #     print(\"this is swimmer 2\")\n                # elif self.scenario == \"manyagent_ant\":\n                #     env_REGISTRY = {}\n                #     env_REGISTRY[\"manyagent_ant\"] = partial(env_fn, env=ManyAgentAntEnv)\n                #     print(\"this managent2\")\n                # self.wrapped_env = NormalizedActions(\n                #     TimeLimit(partial(env_REGISTRY[self.scenario], **kwargs[\"env_args\"])(),\n                #               max_episode_steps=self.episode_limit))\n        else:\n            assert False, \"not implemented!\"\n        self.timelimit_env = self.wrapped_env.env\n        self.timelimit_env._max_episode_steps = self.episode_limit\n        self.env = self.timelimit_env.env\n        self.timelimit_env.reset()\n        self.obs_size = self.get_obs_size()\n        self.share_obs_size = self.get_state_size()\n\n        # COMPATIBILITY\n        self.n = self.n_agents\n        # self.observation_space = [Box(low=np.array([-10]*self.n_agents), high=np.array([10]*self.n_agents)) for _ in range(self.n_agents)]\n        self.observation_space = [Box(low=-10, high=10, shape=(self.obs_size,)) for _ in range(self.n_agents)]\n        self.share_observation_space = [Box(low=-10, high=10, shape=(self.share_obs_size,)) for _ in\n                                        range(self.n_agents)]\n\n        acdims = [len(ap) for ap in self.agent_partitions]\n        self.action_space = tuple([Box(self.env.action_space.low[sum(acdims[:a]):sum(acdims[:a + 1])],\n                                       self.env.action_space.high[sum(acdims[:a]):sum(acdims[:a + 1])]) for a in\n                                   range(self.n_agents)])\n\n        pass\n\n    def step(self, actions):\n\n        # need to remove dummy actions that arise due to unequal action vector sizes across agents\n        flat_actions = np.concatenate([actions[i][:self.action_space[i].low.shape[0]] for i in range(self.n_agents)])\n        obs_n, reward_n, done_n, info_n = self.wrapped_env.step(flat_actions)\n        self.steps += 1\n\n        info = {}\n        info.update(info_n)\n\n        # if done_n:\n        #     if self.steps < self.episode_limit:\n        #         info[\"episode_limit\"] = False   # the next state will be masked out\n        #     else:\n        #         info[\"episode_limit\"] = True    # the next state will not be masked out\n        if done_n:\n            if self.steps < self.episode_limit:\n                info[\"bad_transition\"] = False  # the next state will be masked out\n            else:\n                info[\"bad_transition\"] = True  # the next state will not be masked out\n\n        # return reward_n, done_n, info\n        rewards = [[reward_n]] * self.n_agents\n        # print(\"self.n_agents\", self.n_agents)\n        info[\"cost\"] = [[info[\"cost\"]]] * self.n_agents\n        dones = [done_n] * self.n_agents\n        infos = [info for _ in range(self.n_agents)]\n        return self.get_obs(), self.get_state(), rewards, dones, infos, self.get_avail_actions()\n\n    def get_obs(self):\n        \"\"\" Returns all agent observat3ions in a list \"\"\"\n        state = self.env._get_obs()\n        obs_n = []\n        for a in range(self.n_agents):\n            agent_id_feats = np.zeros(self.n_agents, dtype=np.float32)\n            agent_id_feats[a] = 1.0\n            # obs_n.append(self.get_obs_agent(a))\n            # obs_n.append(np.concatenate([state, self.get_obs_agent(a), agent_id_feats]))\n            # obs_n.append(np.concatenate([self.get_obs_agent(a), agent_id_feats]))\n            obs_i = np.concatenate([state, agent_id_feats])\n            obs_i = (obs_i - np.mean(obs_i)) / np.std(obs_i)\n            obs_n.append(obs_i)\n        return obs_n\n\n    def get_obs_agent(self, agent_id):\n        if self.agent_obsk is None:\n            return self.env._get_obs()\n        else:\n            # return build_obs(self.env,\n            #                       self.k_dicts[agent_id],\n            #                       self.k_categories,\n            #                       self.mujoco_globals,\n            #                       self.global_categories,\n            #                       vec_len=getattr(self, \"obs_size\", None))\n            return build_obs(self.env,\n                             self.k_dicts[agent_id],\n                             self.k_categories,\n                             self.mujoco_globals,\n                             self.global_categories)\n\n    def get_obs_size(self):\n        \"\"\" Returns the shape of the observation \"\"\"\n        if self.agent_obsk is None:\n            return self.get_obs_agent(0).size\n        else:\n            return len(self.get_obs()[0])\n            # return max([len(self.get_obs_agent(agent_id)) for agent_id in range(self.n_agents)])\n\n    def get_state(self, team=None):\n        # TODO: May want global states for different teams (so cannot see what the other team is communicating e.g.)\n        state = self.env._get_obs()\n        share_obs = []\n        for a in range(self.n_agents):\n            agent_id_feats = np.zeros(self.n_agents, dtype=np.float32)\n            agent_id_feats[a] = 1.0\n            # share_obs.append(np.concatenate([state, self.get_obs_agent(a), agent_id_feats]))\n            state_i = np.concatenate([state, agent_id_feats])\n            state_i = (state_i - np.mean(state_i)) / np.std(state_i)\n            share_obs.append(state_i)\n        return share_obs\n\n    def get_state_size(self):\n        \"\"\" Returns the shape of the state\"\"\"\n        return len(self.get_state()[0])\n\n    def get_avail_actions(self):  # all actions are always available\n        return np.ones(shape=(self.n_agents, self.n_actions,))\n\n    def get_avail_agent_actions(self, agent_id):\n        \"\"\" Returns the available actions for agent_id \"\"\"\n        return np.ones(shape=(self.n_actions,))\n\n    def get_total_actions(self):\n        \"\"\" Returns the total number of actions an agent could ever take \"\"\"\n        return self.n_actions  # CAREFUL! - for continuous dims, this is action space dim rather\n        # return self.env.action_space.shape[0]\n\n    def get_stats(self):\n        return {}\n\n    # TODO: Temp hack\n    def get_agg_stats(self, stats):\n        return {}\n\n    def reset(self, **kwargs):\n        \"\"\" Returns initial observations and states\"\"\"\n        self.steps = 0\n        self.timelimit_env.reset()\n        return self.get_obs(), self.get_state(), self.get_avail_actions()\n\n    def render(self, **kwargs):\n        self.env.render(**kwargs)\n\n    def close(self):\n        pass\n\n    def seed(self, args):\n        pass\n\n    def get_env_info(self):\n\n        env_info = {\"state_shape\": self.get_state_size(),\n                    \"obs_shape\": self.get_obs_size(),\n                    \"n_actions\": self.get_total_actions(),\n                    \"n_agents\": self.n_agents,\n                    \"episode_limit\": self.episode_limit,\n                    \"action_spaces\": self.action_space,\n                    \"actions_dtype\": np.float32,\n                    \"normalise_actions\": False\n                    }\n        return env_info\n"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/multiagentenv.py",
    "content": "from collections import namedtuple\nimport numpy as np\n\n\ndef convert(dictionary):\n    return namedtuple('GenericDict', dictionary.keys())(**dictionary)\n\nclass MultiAgentEnv(object):\n\n    def __init__(self, batch_size=None, **kwargs):\n        # Unpack arguments from sacred\n        args = kwargs[\"env_args\"]\n        if isinstance(args, dict):\n            args = convert(args)\n        self.args = args\n\n        if getattr(args, \"seed\", None) is not None:\n            self.seed = args.seed\n            self.rs = np.random.RandomState(self.seed) # initialise numpy random state\n\n    def step(self, actions):\n        \"\"\" Returns reward, terminated, info \"\"\"\n        raise NotImplementedError\n\n    def get_obs(self):\n        \"\"\" Returns all agent observations in a list \"\"\"\n        raise NotImplementedError\n\n    def get_obs_agent(self, agent_id):\n        \"\"\" Returns observation for agent_id \"\"\"\n        raise NotImplementedError\n\n    def get_obs_size(self):\n        \"\"\" Returns the shape of the observation \"\"\"\n        raise NotImplementedError\n\n    def get_state(self):\n        raise NotImplementedError\n\n    def get_state_size(self):\n        \"\"\" Returns the shape of the state\"\"\"\n        raise NotImplementedError\n\n    def get_avail_actions(self):\n        raise NotImplementedError\n\n    def get_avail_agent_actions(self, agent_id):\n        \"\"\" Returns the available actions for agent_id \"\"\"\n        raise NotImplementedError\n\n    def get_total_actions(self):\n        \"\"\" Returns the total number of actions an agent could ever take \"\"\"\n        # TODO: This is only suitable for a discrete 1 dimensional action space for each agent\n        raise NotImplementedError\n\n    def get_stats(self):\n        raise NotImplementedError\n\n    # TODO: Temp hack\n    def get_agg_stats(self, stats):\n        return {}\n\n    def reset(self):\n        \"\"\" Returns initial observations and states\"\"\"\n        raise NotImplementedError\n\n    def render(self):\n        raise NotImplementedError\n\n    def close(self):\n        raise NotImplementedError\n\n    def seed(self, seed):\n        raise NotImplementedError\n\n    def get_env_info(self):\n        env_info = {\"state_shape\": self.get_state_size(),\n                    \"obs_shape\": self.get_obs_size(),\n                    \"n_actions\": self.get_total_actions(),\n                    \"n_agents\": self.n_agents,\n                    \"episode_limit\": self.episode_limit}\n        return env_info"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/obsk.py",
    "content": "import itertools\nimport numpy as np\nfrom copy import deepcopy\n\nclass Node():\n    def __init__(self, label, qpos_ids, qvel_ids, act_ids, body_fn=None, bodies=None, extra_obs=None, tendons=None):\n        self.label = label\n        self.qpos_ids = qpos_ids\n        self.qvel_ids = qvel_ids\n        self.act_ids = act_ids\n        self.bodies = bodies\n        self.extra_obs = {} if extra_obs is None else extra_obs\n        self.body_fn = body_fn\n        self.tendons = tendons\n        pass\n\n    def __str__(self):\n        return self.label\n\n    def __repr__(self):\n        return self.label\n\n\nclass HyperEdge():\n    def __init__(self, *edges):\n        self.edges = set(edges)\n\n    def __contains__(self, item):\n        return item in self.edges\n\n    def __str__(self):\n        return \"HyperEdge({})\".format(self.edges)\n\n    def __repr__(self):\n        return \"HyperEdge({})\".format(self.edges)\n\n\ndef get_joints_at_kdist(agent_id, agent_partitions, hyperedges, k=0, kagents=False,):\n    \"\"\" Identify all joints at distance <= k from agent agent_id\n\n    :param agent_id: id of agent to be considered\n    :param agent_partitions: list of joint tuples in order of agentids\n    :param edges: list of tuples (joint1, joint2)\n    :param k: kth degree\n    :param kagents: True (observe all joints of an agent if a single one is) or False (individual joint granularity)\n    :return:\n        dict with k as key, and list of joints at that distance\n    \"\"\"\n    assert not kagents, \"kagents not implemented!\"\n\n    agent_joints = agent_partitions[agent_id]\n\n    def _adjacent(lst, kagents=False):\n        # return all sets adjacent to any element in lst\n        ret = set([])\n        for l in lst:\n            ret = ret.union(set(itertools.chain(*[e.edges.difference({l}) for e in hyperedges if l in e])))\n        return ret\n\n    seen = set([])\n    new = set([])\n    k_dict = {}\n    for _k in range(k+1):\n        if not _k:\n            new = set(agent_joints)\n        else:\n            print(hyperedges)\n            new = _adjacent(new) - seen\n        seen = seen.union(new)\n        k_dict[_k] = sorted(list(new), key=lambda x:x.label)\n    return k_dict\n\n\ndef build_obs(env, k_dict, k_categories, global_dict, global_categories, vec_len=None):\n    \"\"\"Given a k_dict from get_joints_at_kdist, extract observation vector.\n\n    :param k_dict: k_dict\n    :param qpos: qpos numpy array\n    :param qvel: qvel numpy array\n    :param vec_len: if None no padding, else zero-pad to vec_len\n    :return:\n    observation vector\n    \"\"\"\n\n    # TODO: This needs to be fixed, it was designed for half-cheetah only!\n    #if add_global_pos:\n    #    obs_qpos_lst.append(global_qpos)\n    #    obs_qvel_lst.append(global_qvel)\n\n\n    body_set_dict = {}\n    obs_lst = []\n    # Add parts attributes\n    for k in sorted(list(k_dict.keys())):\n        cats = k_categories[k]\n        for _t in k_dict[k]:\n            for c in cats:\n                if c in _t.extra_obs:\n                    items = _t.extra_obs[c](env).tolist()\n                    obs_lst.extend(items if isinstance(items, list) else [items])\n                else:\n                    if c in [\"qvel\",\"qpos\"]: # this is a \"joint position/velocity\" item\n                        items = getattr(env.sim.data, c)[getattr(_t, \"{}_ids\".format(c))]\n                        obs_lst.extend(items if isinstance(items, list) else [items])\n                    elif c in [\"qfrc_actuator\"]: # this is a \"vel position\" item\n                        items = getattr(env.sim.data, c)[getattr(_t, \"{}_ids\".format(\"qvel\"))]\n                        obs_lst.extend(items if isinstance(items, list) else [items])\n                    elif c in [\"cvel\", \"cinert\", \"cfrc_ext\"]:  # this is a \"body position\" item\n                        if _t.bodies is not None:\n                            for b in _t.bodies:\n                                if c not in body_set_dict:\n                                    body_set_dict[c] = set()\n                                if b not in body_set_dict[c]:\n                                    items = getattr(env.sim.data, c)[b].tolist()\n                                    items = getattr(_t, \"body_fn\", lambda _id,x:x)(b, items)\n                                    obs_lst.extend(items if isinstance(items, list) else [items])\n                                    body_set_dict[c].add(b)\n\n    # Add global attributes\n    body_set_dict = {}\n    for c in global_categories:\n        if c in [\"qvel\", \"qpos\"]:  # this is a \"joint position\" item\n            for j in global_dict.get(\"joints\", []):\n                items = getattr(env.sim.data, c)[getattr(j, \"{}_ids\".format(c))]\n                obs_lst.extend(items if isinstance(items, list) else [items])\n        else:\n            for b in global_dict.get(\"bodies\", []):\n                if c not in body_set_dict:\n                    body_set_dict[c] = set()\n                if b not in body_set_dict[c]:\n                    obs_lst.extend(getattr(env.sim.data, c)[b].tolist())\n                    body_set_dict[c].add(b)\n\n    if vec_len is not None:\n        pad = np.array((vec_len - len(obs_lst))*[0])\n        if len(pad):\n            return np.concatenate([np.array(obs_lst), pad])\n    return np.array(obs_lst)\n\n\ndef build_actions(agent_partitions, k_dict):\n    # Composes agent actions output from networks\n    # into coherent joint action vector to be sent to the env.\n    pass\n\ndef get_parts_and_edges(label, partitioning):\n    if label in [\"half_cheetah\", \"HalfCheetah-v2\"]:\n\n        # define Mujoco graph\n        bthigh = Node(\"bthigh\", -6, -6, 0)\n        bshin = Node(\"bshin\", -5, -5, 1)\n        bfoot = Node(\"bfoot\", -4, -4, 2)\n        fthigh = Node(\"fthigh\", -3, -3, 3)\n        fshin = Node(\"fshin\", -2, -2, 4)\n        ffoot = Node(\"ffoot\", -1, -1, 5)\n\n        edges = [HyperEdge(bfoot, bshin),\n                 HyperEdge(bshin, bthigh),\n                 HyperEdge(bthigh, fthigh),\n                 HyperEdge(fthigh, fshin),\n                 HyperEdge(fshin, ffoot)]\n\n        root_x = Node(\"root_x\", 0, 0, -1,\n                      extra_obs={\"qpos\": lambda env: np.array([])})\n        root_z = Node(\"root_z\", 1, 1, -1)\n        root_y = Node(\"root_y\", 2, 2, -1)\n        globals = {\"joints\":[root_x, root_y, root_z]}\n\n        if partitioning == \"2x3\":\n            parts = [(bfoot, bshin, bthigh),\n                     (ffoot, fshin, fthigh)]\n        elif partitioning == \"6x1\":\n            parts = [(bfoot,), (bshin,), (bthigh,), (ffoot,), (fshin,), (fthigh,)]\n        elif partitioning == \"3x2\":\n            parts = [(bfoot, bshin,), (bthigh, ffoot,), (fshin, fthigh,)]\n        else:\n            raise Exception(\"UNKNOWN partitioning config: {}\".format(partitioning))\n\n        return parts, edges, globals\n\n    elif label in [\"Ant-v2\"]:\n\n        # define Mujoco graph\n        torso = 1\n        front_left_leg = 2\n        aux_1 = 3\n        ankle_1 = 4\n        front_right_leg = 5\n        aux_2 = 6\n        ankle_2 = 7\n        back_leg = 8\n        aux_3 = 9\n        ankle_3 = 10\n        right_back_leg = 11\n        aux_4 = 12\n        ankle_4 = 13\n\n        hip1 = Node(\"hip1\", -8, -8, 2, bodies=[torso, front_left_leg], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist()) #\n        ankle1 = Node(\"ankle1\", -7, -7, 3, bodies=[front_left_leg, aux_1, ankle_1], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())#,\n        hip2 = Node(\"hip2\", -6, -6, 4, bodies=[torso, front_right_leg], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())#,\n        ankle2 = Node(\"ankle2\", -5, -5, 5, bodies=[front_right_leg, aux_2, ankle_2], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())#,\n        hip3 = Node(\"hip3\", -4, -4, 6, bodies=[torso, back_leg], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())#,\n        ankle3 = Node(\"ankle3\", -3, -3, 7, bodies=[back_leg, aux_3, ankle_3], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())#,\n        hip4 = Node(\"hip4\", -2, -2, 0, bodies=[torso, right_back_leg], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())#,\n        ankle4 = Node(\"ankle4\", -1, -1, 1, bodies=[right_back_leg, aux_4, ankle_4], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())#,\n\n        edges = [HyperEdge(ankle4, hip4),\n                 HyperEdge(ankle1, hip1),\n                 HyperEdge(ankle2, hip2),\n                 HyperEdge(ankle3, hip3),\n                 HyperEdge(hip4, hip1, hip2, hip3),\n                 ]\n\n        free_joint = Node(\"free\", 0, 0, -1, extra_obs={\"qpos\": lambda env: env.sim.data.qpos[:7],\n                                                       \"qvel\": lambda env: env.sim.data.qvel[:6],\n                                                       \"cfrc_ext\": lambda env: np.clip(env.sim.data.cfrc_ext[0:1], -1, 1)})\n        globals = {\"joints\": [free_joint]}\n\n        if partitioning == \"2x4\": # neighbouring legs together\n            parts = [(hip1, ankle1, hip2, ankle2),\n                     (hip3, ankle3, hip4, ankle4)]\n        elif partitioning == \"2x4d\": # diagonal legs together\n            parts = [(hip1, ankle1, hip3, ankle3),\n                     (hip2, ankle2, hip4, ankle4)]\n        elif partitioning == \"4x2\":\n            parts = [(hip1, ankle1),\n                     (hip2, ankle2),\n                     (hip3, ankle3),\n                     (hip4, ankle4)]\n        elif partitioning == \"8x1\":\n            parts = [(hip1,), (ankle1,),\n                     (hip2,), (ankle2,),\n                     (hip3,), (ankle3,),\n                     (hip4,), (ankle4,)]\n        else:\n            raise Exception(\"UNKNOWN partitioning config: {}\".format(partitioning))\n\n        return parts, edges, globals\n\n    elif label in [\"Hopper-v2\"]:\n\n        # define Mujoco-Graph\n        thigh_joint = Node(\"thigh_joint\", -3, -3, 0,\n                           extra_obs={\"qvel\": lambda env: np.clip(np.array([env.sim.data.qvel[-3]]), -10, 10)})\n        leg_joint = Node(\"leg_joint\", -2, -2, 1,\n                         extra_obs={\"qvel\": lambda env: np.clip(np.array([env.sim.data.qvel[-2]]), -10, 10)})\n        foot_joint = Node(\"foot_joint\", -1, -1, 2,\n                          extra_obs={\"qvel\": lambda env: np.clip(np.array([env.sim.data.qvel[-1]]), -10, 10)})\n\n        edges = [HyperEdge(foot_joint, leg_joint),\n                 HyperEdge(leg_joint, thigh_joint)]\n\n        root_x = Node(\"root_x\", 0, 0, -1, extra_obs={\"qpos\": lambda env: np.array([]),\n                                                     \"qvel\": lambda env: np.clip(np.array([env.sim.data.qvel[1]]), -10, 10)})\n        root_z = Node(\"root_z\", 1, 1, -1, extra_obs={\"qvel\": lambda env: np.clip(np.array([env.sim.data.qvel[1]]), -10, 10)})\n        root_y = Node(\"root_y\", 2, 2, -1, extra_obs={\"qvel\": lambda env: np.clip(np.array([env.sim.data.qvel[2]]), -10, 10)})\n        globals = {\"joints\":[root_x, root_y, root_z]}\n\n        if partitioning == \"3x1\":\n            parts = [(thigh_joint,),\n                     (leg_joint,),\n                     (foot_joint,)]\n\n        else:\n            raise Exception(\"UNKNOWN partitioning config: {}\".format(partitioning))\n\n        return parts, edges, globals\n\n    elif label in [\"Humanoid-v2\", \"HumanoidStandup-v2\"]:\n\n        # define Mujoco-Graph\n        abdomen_y = Node(\"abdomen_y\", -16, -16, 0) # act ordering bug in env -- double check!\n        abdomen_z = Node(\"abdomen_z\", -17, -17, 1)\n        abdomen_x = Node(\"abdomen_x\", -15, -15, 2)\n        right_hip_x = Node(\"right_hip_x\", -14, -14, 3)\n        right_hip_z = Node(\"right_hip_z\", -13, -13, 4)\n        right_hip_y = Node(\"right_hip_y\", -12, -12, 5)\n        right_knee = Node(\"right_knee\", -11, -11, 6)\n        left_hip_x = Node(\"left_hip_x\", -10, -10, 7)\n        left_hip_z = Node(\"left_hip_z\", -9, -9, 8)\n        left_hip_y = Node(\"left_hip_y\", -8, -8, 9)\n        left_knee = Node(\"left_knee\", -7, -7, 10)\n        right_shoulder1 = Node(\"right_shoulder1\", -6, -6, 11)\n        right_shoulder2 = Node(\"right_shoulder2\", -5, -5, 12)\n        right_elbow = Node(\"right_elbow\", -4, -4, 13)\n        left_shoulder1 = Node(\"left_shoulder1\", -3, -3, 14)\n        left_shoulder2 = Node(\"left_shoulder2\", -2, -2, 15)\n        left_elbow = Node(\"left_elbow\", -1, -1, 16)\n\n        edges = [HyperEdge(abdomen_x, abdomen_y, abdomen_z),\n                 HyperEdge(right_hip_x, right_hip_y, right_hip_z),\n                 HyperEdge(left_hip_x, left_hip_y, left_hip_z),\n                 HyperEdge(left_elbow, left_shoulder1, left_shoulder2),\n                 HyperEdge(right_elbow, right_shoulder1, right_shoulder2),\n                 HyperEdge(left_knee, left_hip_x, left_hip_y, left_hip_z),\n                 HyperEdge(right_knee, right_hip_x, right_hip_y, right_hip_z),\n                 HyperEdge(left_shoulder1, left_shoulder2, abdomen_x, abdomen_y, abdomen_z),\n                 HyperEdge(right_shoulder1, right_shoulder2, abdomen_x, abdomen_y, abdomen_z),\n                 HyperEdge(abdomen_x, abdomen_y, abdomen_z, left_hip_x, left_hip_y, left_hip_z),\n                 HyperEdge(abdomen_x, abdomen_y, abdomen_z, right_hip_x, right_hip_y, right_hip_z),\n                 ]\n\n        globals = {}\n\n        if partitioning == \"9|8\": # 17 in total, so one action is a dummy (to be handled by pymarl)\n            # isolate upper and lower body\n            parts = [(left_shoulder1, left_shoulder2, abdomen_x, abdomen_y, abdomen_z,\n                      right_shoulder1, right_shoulder2,\n                      right_elbow, left_elbow),\n                     (left_hip_x, left_hip_y, left_hip_z,\n                      right_hip_x, right_hip_y, right_hip_z,\n                      right_knee, left_knee)]\n            # TODO: There could be tons of decompositions here\n        elif partitioning == \"17x1\": # 17 in total, so one action is a dummy (to be handled by pymarl)\n            # isolate upper and lower body\n            parts = [(left_shoulder1,), (left_shoulder2,), (abdomen_x,), (abdomen_y,), (abdomen_z,),\n                     (right_shoulder1,), (right_shoulder2,), (right_elbow,), (left_elbow,),\n                     (left_hip_x,), (left_hip_y,), (left_hip_z,), (right_hip_x,), (right_hip_y,), (right_hip_z,),\n                     (right_knee,), (left_knee,)]\n\n        else:\n            raise Exception(\"UNKNOWN partitioning config: {}\".format(partitioning))\n\n        return parts, edges, globals\n\n    elif label in [\"Reacher-v2\"]:\n\n        # define Mujoco-Graph\n        body0 = 1\n        body1 = 2\n        fingertip = 3\n        joint0 = Node(\"joint0\", -4, -4, 0,\n                      bodies=[body0, body1],\n                      extra_obs={\"qpos\":(lambda env:np.array([np.sin(env.sim.data.qpos[-4]),\n                                                              np.cos(env.sim.data.qpos[-4])]))})\n        joint1 = Node(\"joint1\", -3, -3, 1,\n                      bodies=[body1, fingertip],\n                      extra_obs={\"fingertip_dist\":(lambda env:env.get_body_com(\"fingertip\") - env.get_body_com(\"target\")),\n                                 \"qpos\":(lambda env:np.array([np.sin(env.sim.data.qpos[-3]),\n                                                              np.cos(env.sim.data.qpos[-3])]))})\n        edges = [HyperEdge(joint0, joint1)]\n\n        worldbody = 0\n        target = 4\n        target_x = Node(\"target_x\", -2, -2, -1, extra_obs={\"qvel\":(lambda env:np.array([]))})\n        target_y = Node(\"target_y\", -1, -1, -1, extra_obs={\"qvel\":(lambda env:np.array([]))})\n        globals = {\"bodies\":[worldbody, target],\n                   \"joints\":[target_x, target_y]}\n\n        if partitioning == \"2x1\":\n            # isolate upper and lower arms\n            parts = [(joint0,), (joint1,)]\n            # TODO: There could be tons of decompositions here\n\n        else:\n            raise Exception(\"UNKNOWN partitioning config: {}\".format(partitioning))\n\n        return parts, edges, globals\n\n    elif label in [\"Swimmer-v2\"]:\n\n        # define Mujoco-Graph\n        joint0 = Node(\"rot2\", -2, -2, 0) # TODO: double-check ids\n        joint1 = Node(\"rot3\", -1, -1, 1)\n\n        edges = [HyperEdge(joint0, joint1)]\n        globals = {}\n\n        if partitioning == \"2x1\":\n            # isolate upper and lower body\n            parts = [(joint0,), (joint1,)]\n            # TODO: There could be tons of decompositions here\n\n        else:\n            raise Exception(\"UNKNOWN partitioning config: {}\".format(partitioning))\n\n        return parts, edges, globals\n\n    elif label in [\"Walker2d-v2\"]:\n\n        # define Mujoco-Graph\n        thigh_joint = Node(\"thigh_joint\", -6, -6, 0)\n        leg_joint = Node(\"leg_joint\", -5, -5, 1)\n        foot_joint = Node(\"foot_joint\", -4, -4, 2)\n        thigh_left_joint = Node(\"thigh_left_joint\", -3, -3, 3)\n        leg_left_joint = Node(\"leg_left_joint\", -2, -2, 4)\n        foot_left_joint = Node(\"foot_left_joint\", -1, -1, 5)\n\n        edges = [HyperEdge(foot_joint, leg_joint),\n                 HyperEdge(leg_joint, thigh_joint),\n                 HyperEdge(foot_left_joint, leg_left_joint),\n                 HyperEdge(leg_left_joint, thigh_left_joint),\n                 HyperEdge(thigh_joint, thigh_left_joint)\n                 ]\n        globals = {}\n\n        if partitioning == \"2x3\":\n            # isolate upper and lower body\n            parts = [(foot_joint, leg_joint, thigh_joint),\n                     (foot_left_joint, leg_left_joint, thigh_left_joint,)]\n            # TODO: There could be tons of decompositions here\n\n        else:\n            raise Exception(\"UNKNOWN partitioning config: {}\".format(partitioning))\n\n        return parts, edges, globals\n\n    elif label in [\"coupled_half_cheetah\"]:\n\n        # define Mujoco graph\n        tendon = 0\n\n        bthigh = Node(\"bthigh\", -6, -6, 0,\n                     tendons=[tendon],\n                     extra_obs = {\"ten_J\": lambda env: env.sim.data.ten_J[tendon],\n                                  \"ten_length\": lambda env: env.sim.data.ten_length,\n                                  \"ten_velocity\": lambda env: env.sim.data.ten_velocity})\n        bshin = Node(\"bshin\", -5, -5, 1)\n        bfoot = Node(\"bfoot\", -4, -4, 2)\n        fthigh = Node(\"fthigh\", -3, -3, 3)\n        fshin = Node(\"fshin\", -2, -2, 4)\n        ffoot = Node(\"ffoot\", -1, -1, 5)\n\n        bthigh2 = Node(\"bthigh2\", -6, -6, 0,\n                      tendons=[tendon],\n                      extra_obs={\"ten_J\": lambda env: env.sim.data.ten_J[tendon],\n                                 \"ten_length\": lambda env: env.sim.data.ten_length,\n                                 \"ten_velocity\": lambda env: env.sim.data.ten_velocity})\n        bshin2 = Node(\"bshin2\", -5, -5, 1)\n        bfoot2 = Node(\"bfoot2\", -4, -4, 2)\n        fthigh2 = Node(\"fthigh2\", -3, -3, 3)\n        fshin2 = Node(\"fshin2\", -2, -2, 4)\n        ffoot2 = Node(\"ffoot2\", -1, -1, 5)\n\n\n        edges = [HyperEdge(bfoot, bshin),\n                 HyperEdge(bshin, bthigh),\n                 HyperEdge(bthigh, fthigh),\n                 HyperEdge(fthigh, fshin),\n                 HyperEdge(fshin, ffoot),\n                 HyperEdge(bfoot2, bshin2),\n                 HyperEdge(bshin2, bthigh2),\n                 HyperEdge(bthigh2, fthigh2),\n                 HyperEdge(fthigh2, fshin2),\n                 HyperEdge(fshin2, ffoot2)\n                 ]\n        globals = {}\n\n        root_x = Node(\"root_x\", 0, 0, -1,\n                      extra_obs={\"qpos\": lambda env: np.array([])})\n        root_z = Node(\"root_z\", 1, 1, -1)\n        root_y = Node(\"root_y\", 2, 2, -1)\n        globals = {\"joints\":[root_x, root_y, root_z]}\n\n        if partitioning == \"1p1\":\n            parts = [(bfoot, bshin, bthigh, ffoot, fshin, fthigh),\n                     (bfoot2, bshin2, bthigh2, ffoot2, fshin2, fthigh2)\n                     ]\n        else:\n            raise Exception(\"UNKNOWN partitioning config: {}\".format(partitioning))\n\n        return parts, edges, globals\n\n    elif label in [\"manyagent_swimmer\"]:\n\n        # Generate asset file\n        try:\n            n_agents = int(partitioning.split(\"x\")[0])\n            n_segs_per_agents = int(partitioning.split(\"x\")[1])\n            n_segs = n_agents * n_segs_per_agents\n        except Exception as e:\n            raise Exception(\"UNKNOWN partitioning config: {}\".format(partitioning))\n\n        # Note: Default Swimmer corresponds to n_segs = 3\n\n        # define Mujoco-Graph\n        joints = [Node(\"rot{:d}\".format(i), -n_segs + i, -n_segs + i, i) for i in range(0, n_segs)]\n        edges = [HyperEdge(joints[i], joints[i+1]) for i in range(n_segs-1)]\n        globals = {}\n\n        parts = [tuple(joints[i * n_segs_per_agents:(i + 1) * n_segs_per_agents]) for i in range(n_agents)]\n        return parts, edges, globals\n\n    elif label in [\"manyagent_ant\"]: # TODO: FIX!\n\n        # Generate asset file\n        try:\n            n_agents = int(partitioning.split(\"x\")[0])\n            n_segs_per_agents = int(partitioning.split(\"x\")[1])\n            n_segs = n_agents * n_segs_per_agents\n        except Exception as e:\n            raise Exception(\"UNKNOWN partitioning config: {}\".format(partitioning))\n\n\n        # # define Mujoco graph\n        # torso = 1\n        # front_left_leg = 2\n        # aux_1 = 3\n        # ankle_1 = 4\n        # right_back_leg = 11\n        # aux_4 = 12\n        # ankle_4 = 13\n        #\n        # off = -4*(n_segs-1)\n        # hip1 = Node(\"hip1\", -4-off, -4-off, 2, bodies=[torso, front_left_leg], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist()) #\n        # ankle1 = Node(\"ankle1\", -3-off, -3-off, 3, bodies=[front_left_leg, aux_1, ankle_1], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())#,\n        # hip4 = Node(\"hip4\", -2-off, -2-off, 0, bodies=[torso, right_back_leg], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())#,\n        # ankle4 = Node(\"ankle4\", -1-off, -1-off, 1, bodies=[right_back_leg, aux_4, ankle_4], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())#,\n        #\n        # edges = [HyperEdge(ankle4, hip4),\n        #          HyperEdge(ankle1, hip1),\n        #          HyperEdge(hip4, hip1),\n        #          ]\n\n        edges = []\n        joints = []\n        for si in range(n_segs):\n\n            torso = 1 + si*7\n            front_right_leg = 2 + si*7\n            aux1 = 3 + si*7\n            ankle1 = 4 + si*7\n            back_leg = 5 + si*7\n            aux2 = 6 + si*7\n            ankle2 = 7 + si*7\n\n            off = -4 * (n_segs - 1 - si)\n            hip1n = Node(\"hip1_{:d}\".format(si), -4-off, -4-off, 2+4*si, bodies=[torso, front_right_leg], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())\n            ankle1n = Node(\"ankle1_{:d}\".format(si), -3-off, -3-off, 3+4*si, bodies=[front_right_leg, aux1, ankle1], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())\n            hip2n = Node(\"hip2_{:d}\".format(si), -2-off, -2-off, 0+4*si, bodies=[torso, back_leg], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())\n            ankle2n = Node(\"ankle2_{:d}\".format(si), -1-off, -1-off, 1+4*si, bodies=[back_leg, aux2, ankle2], body_fn=lambda _id, x:np.clip(x, -1, 1).tolist())\n\n            edges += [HyperEdge(ankle1n, hip1n),\n                      HyperEdge(ankle2n, hip2n),\n                      HyperEdge(hip1n, hip2n)]\n            if si:\n                edges += [HyperEdge(hip1m, hip2m, hip1n, hip2n)]\n\n            hip1m = deepcopy(hip1n)\n            hip2m = deepcopy(hip2n)\n            joints.append([hip1n,\n                           ankle1n,\n                           hip2n,\n                           ankle2n])\n\n        free_joint = Node(\"free\", 0, 0, -1, extra_obs={\"qpos\": lambda env: env.sim.data.qpos[:7],\n                                                       \"qvel\": lambda env: env.sim.data.qvel[:6],\n                                                       \"cfrc_ext\": lambda env: np.clip(env.sim.data.cfrc_ext[0:1], -1, 1)})\n        globals = {\"joints\": [free_joint]}\n\n        parts =  [[x for sublist in joints[i * n_segs_per_agents:(i + 1) * n_segs_per_agents] for x in sublist] for i in range(n_agents)]\n\n        return parts, edges, globals\n"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/envs/safety_ma_mujoco/test.py",
    "content": "from safety_multiagent_mujoco.mujoco_multi import MujocoMulti\nimport numpy as np\nimport time\n\n\ndef main():\n\n    # Swimmer\n    # env_args = {\"scenario\": \"manyagent_swimmer\",\n    #             \"agent_conf\": \"10x2\",\n    #             \"agent_obsk\": 1,\n    #             \"episode_limit\": 1000}\n\n    # coupled_half_cheetah\n    # env_args = {\"scenario\": \"coupled_half_cheetah\",\n    #             \"agent_conf\": \"1p1\",\n    #             \"agent_obsk\": 1,\n    #             \"episode_limit\": 1000}\n\n    # ANT 4\n    # env_args = {\"scenario\": \"manyagent_ant\",\n    #               \"agent_conf\": \"3x2\",\n    #               \"agent_obsk\": 1,\n    #               \"episode_limit\": 1000}\n\n    # env_args = {\"scenario\": \"manyagent_ant\",\n    #               \"agent_conf\": \"1x1\",\n    #               \"agent_obsk\": 1,\n    #               \"episode_limit\": 1000}\n\n    # env_args = {\"scenario\": \"manyagent_swimmer\",\n    #             \"agent_conf\": \"10x2\",\n    #             \"agent_obsk\": 1,\n    #             \"episode_limit\": 1000}\n\n    # env_args = {\"scenario\": \"manyagent_swimmer\",\n    #             \"agent_conf\": \"4x2\",\n    #             \"agent_obsk\": 1,\n    #             \"episode_limit\": 1000}\n\n    # env_args = {\"scenario\": \"HalfCheetah-v2\",\n    #             \"agent_conf\": \"2x3\",\n    #             \"agent_obsk\": 1,\n    #             \"episode_limit\": 1000}\n\n    # env_args = {\"scenario\": \"Hopper-v2\",\n    #             \"agent_conf\": \"3x1\",\n    #             \"agent_obsk\": 1,\n    #             \"episode_limit\": 1000}\n\n    env_args = {\"scenario\": \"Humanoid-v2\",\n                \"agent_conf\": \"9|8\",\n                \"agent_obsk\": 1,\n                \"episode_limit\": 1000}\n\n    # env_args = {\"scenario\": \"Humanoid-v2\",\n    #             \"agent_conf\": \"17x1\",\n    #             \"agent_obsk\": 1,\n    #             \"episode_limit\": 1000}\n\n    # env_args = {\"scenario\": \"Ant-v2\",\n    #             \"agent_conf\": \"2x4\",\n    #             \"agent_obsk\": 1,\n    #             \"episode_limit\": 1000}\n\n    # env_args = {\"scenario\": \"Ant-v2\",\n    #             \"agent_conf\": \"2x4d\",\n    #             \"agent_obsk\": 1,\n    #             \"episode_limit\": 1000}\n\n    # env_args = {\"scenario\": \"Ant-v2\",\n    #             \"agent_conf\": \"4x2\",\n    #             \"agent_obsk\": 1,\n    #             \"episode_limit\": 1000}\n\n    env = MujocoMulti(env_args=env_args)\n    env_info = env.get_env_info()\n    n_actions = env_info[\"n_actions\"]\n    n_agents = env_info[\"n_agents\"]\n    n_episodes = 10\n\n    for e in range(n_episodes):\n        ob=env.reset()\n        terminated = False\n        episode_reward = 0\n\n        while not terminated:\n            obs = env.get_obs()\n            state = env.get_state()\n\n            actions = []\n            for agent_id in range(n_agents):\n                avail_actions = env.get_avail_agent_actions(agent_id)\n                avail_actions_ind = np.nonzero(avail_actions)[0]\n                action = np.random.uniform(-10, 10.0, n_actions)\n                actions.append(action)\n\n            # reward, terminated, _ = env.step(actions)\n            # print(\"env.step(actions): \", env.step(actions))\n            get_obs, get_state, reward, dones, infos, get_avail_actions= env.step(actions)\n            # episode_reward += reward\n            # print(\"reward: \", reward)\n            cost_x= [[item['cost']] for item in infos]\n            print(\"cost_x:\", cost_x)\n            print(\"reward:\", reward)\n\n            # time.sleep(0.1)\n            env.render()\n\n\n        # print(\"Total reward in episode {} = {}\".format(e, episode_reward))\n\n    env.close()\n\nif __name__ == \"__main__\":\n    main()\n    \"\"\"\n    infos[cost]: [{'cost': 0.0, 'reward_forward': -0.6434413402233052, 'reward_ctrl': -4.010836585120964,\n                   'reward_contact': -1.2071856383999997e-13, 'reward_survive': 1.0, 'cost_obj': 0.0, 'cost_done': 0.0},\n                  {'cost': 0.0, 'reward_forward': -0.6434413402233052, 'reward_ctrl': -4.010836585120964,\n                   'reward_contact': -1.2071856383999997e-13, 'reward_survive': 1.0, 'cost_obj': 0.0, 'cost_done': 0.0},\n                  {'cost': 0.0, 'reward_forward': -0.6434413402233052, 'reward_ctrl': -4.010836585120964,\n                   'reward_contact': -1.2071856383999997e-13, 'reward_survive': 1.0, 'cost_obj': 0.0, 'cost_done': 0.0}]\n    \"\"\""
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/runner/__init__.py",
    "content": "from mappo_lagrangian.runner import separated\n\n__all__=[\n    \"separated\"\n]"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/runner/separated/__init__.py",
    "content": "from mappo_lagrangian.runner.separated import base_runner\n\n__all__=[\n    \"base_runner\"\n]"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/runner/separated/base_runner.py",
    "content": "    \nimport time\nimport wandb\nimport os\nimport numpy as np\nfrom itertools import chain\nimport torch\nfrom tensorboardX import SummaryWriter\n\nfrom mappo_lagrangian.utils.separated_buffer import SeparatedReplayBuffer\nfrom mappo_lagrangian.utils.util import update_linear_schedule\n\ndef _t2n(x):\n    return x.detach().cpu().numpy()\n\nclass Runner(object):\n    def __init__(self, config):\n\n        self.all_args = config['all_args']\n        self.envs = config['envs']\n        self.eval_envs = config['eval_envs']\n        self.device = config['device']\n        self.num_agents = config['num_agents']\n\n        # parameters\n        self.env_name = self.all_args.env_name\n        self.algorithm_name = self.all_args.algorithm_name\n        self.experiment_name = self.all_args.experiment_name\n        self.use_centralized_V = self.all_args.use_centralized_V\n        self.use_obs_instead_of_state = self.all_args.use_obs_instead_of_state\n        self.num_env_steps = self.all_args.num_env_steps\n        self.episode_length = self.all_args.episode_length\n        self.n_rollout_threads = self.all_args.n_rollout_threads\n        self.n_eval_rollout_threads = self.all_args.n_eval_rollout_threads\n        self.use_linear_lr_decay = self.all_args.use_linear_lr_decay\n        self.hidden_size = self.all_args.hidden_size\n        self.use_wandb = self.all_args.use_wandb\n        self.use_render = self.all_args.use_render\n        self.recurrent_N = self.all_args.recurrent_N\n        self.use_single_network = self.all_args.use_single_network\n        # interval\n        self.save_interval = self.all_args.save_interval\n        self.use_eval = self.all_args.use_eval\n        self.eval_interval = self.all_args.eval_interval\n        self.log_interval = self.all_args.log_interval\n\n        # dir\n        self.model_dir = self.all_args.model_dir\n\n        if self.use_render:\n            import imageio\n            self.run_dir = config[\"run_dir\"]\n            self.gif_dir = str(self.run_dir / 'gifs')\n            if not os.path.exists(self.gif_dir):\n                os.makedirs(self.gif_dir)\n        else:\n            if self.use_wandb:\n                self.save_dir = str(wandb.run.dir)\n            else:\n                self.run_dir = config[\"run_dir\"]\n                self.log_dir = str(self.run_dir / 'logs')\n                if not os.path.exists(self.log_dir):\n                    os.makedirs(self.log_dir)\n                self.writter = SummaryWriter(self.log_dir)\n                self.save_dir = str(self.run_dir / 'models')\n                if not os.path.exists(self.save_dir):\n                    os.makedirs(self.save_dir)\n\n\n        from mappo_lagrangian.algorithms.r_mappo.r_mappo import R_MAPPO as TrainAlgo\n        from mappo_lagrangian.algorithms.r_mappo.algorithm.rMAPPOPolicy import R_MAPPOPolicy as Policy\n\n        print(\"share_observation_space: \", self.envs.share_observation_space)\n        print(\"observation_space: \", self.envs.observation_space)\n        print(\"action_space: \", self.envs.action_space)\n\n        self.policy = []\n        for agent_id in range(self.num_agents):\n            share_observation_space = self.envs.share_observation_space[agent_id] if self.use_centralized_V else self.envs.observation_space[agent_id]\n            # policy network\n            po = Policy(self.all_args,\n                        self.envs.observation_space[agent_id],\n                        share_observation_space,\n                        self.envs.action_space[agent_id],\n                        device = self.device)\n            self.policy.append(po)\n\n        if self.model_dir is not None:\n            self.restore()\n\n        self.trainer = []\n        self.buffer = []\n        for agent_id in range(self.num_agents):\n            # algorithm\n            tr = TrainAlgo(self.all_args, self.policy[agent_id], device = self.device)\n            # buffer\n            share_observation_space = self.envs.share_observation_space[agent_id] if self.use_centralized_V else self.envs.observation_space[agent_id]\n            bu = SeparatedReplayBuffer(self.all_args,\n                                       self.envs.observation_space[agent_id],\n                                       share_observation_space,\n                                       self.envs.action_space[agent_id])\n            self.buffer.append(bu)\n            self.trainer.append(tr)\n            \n    def run(self):\n        raise NotImplementedError\n\n    def warmup(self):\n        raise NotImplementedError\n\n    def collect(self, step):\n        raise NotImplementedError\n\n    def insert(self, data):\n        raise NotImplementedError\n    \n    @torch.no_grad()\n    def compute(self):\n        for agent_id in range(self.num_agents):\n            self.trainer[agent_id].prep_rollout()\n            next_value = self.trainer[agent_id].policy.get_values(self.buffer[agent_id].share_obs[-1], \n                                                                self.buffer[agent_id].rnn_states_critic[-1],\n                                                                self.buffer[agent_id].masks[-1])\n            next_value = _t2n(next_value)\n            self.buffer[agent_id].compute_returns(next_value, self.trainer[agent_id].value_normalizer)\n\n    def train(self):\n        # have modified for SAD_PPO\n        train_infos = []\n        \n        for agent_id in torch.randperm(self.num_agents):\n            self.trainer[agent_id].prep_training()\n            train_info = self.trainer[agent_id].train(self.buffer[agent_id])\n            train_infos.append(train_info)       \n            self.buffer[agent_id].after_update()\n\n        return train_infos\n\n    def save(self):\n        for agent_id in range(self.num_agents):\n            if self.use_single_network:\n                policy_model = self.trainer[agent_id].policy.model\n                torch.save(policy_model.state_dict(), str(self.save_dir) + \"/model_agent\" + str(agent_id) + \".pt\")\n            else:\n                policy_actor = self.trainer[agent_id].policy.actor\n                torch.save(policy_actor.state_dict(), str(self.save_dir) + \"/actor_agent\" + str(agent_id) + \".pt\")\n                policy_critic = self.trainer[agent_id].policy.critic\n                torch.save(policy_critic.state_dict(), str(self.save_dir) + \"/critic_agent\" + str(agent_id) + \".pt\")\n\n    def restore(self):\n        for agent_id in range(self.num_agents):\n            if self.use_single_network:\n                policy_model_state_dict = torch.load(str(self.model_dir) + '/model_agent' + str(agent_id) + '.pt')\n                self.policy[agent_id].model.load_state_dict(policy_model_state_dict)\n            else:\n                policy_actor_state_dict = torch.load(str(self.model_dir) + '/actor_agent' + str(agent_id) + '.pt')\n                self.policy[agent_id].actor.load_state_dict(policy_actor_state_dict)\n                policy_critic_state_dict = torch.load(str(self.model_dir) + '/critic_agent' + str(agent_id) + '.pt')\n                self.policy[agent_id].critic.load_state_dict(policy_critic_state_dict)\n\n    def log_train(self, train_infos, total_num_steps): \n        for agent_id in range(self.num_agents):\n            for k, v in train_infos[agent_id].items():\n                agent_k = \"agent%i/\" % agent_id + k\n                if self.use_wandb:\n                    wandb.log({agent_k: v}, step=total_num_steps)\n                else:\n                    self.writter.add_scalars(agent_k, {agent_k: v}, total_num_steps)\n\n    def log_env(self, env_infos, total_num_steps):\n        for k, v in env_infos.items():\n            if len(v) > 0:\n                if self.use_wandb:\n                    wandb.log({k: np.mean(v)}, step=total_num_steps)\n                else:\n                    self.writter.add_scalars(k, {k: np.mean(v)}, total_num_steps)\n"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/runner/separated/base_runner_mappo_lagr.py",
    "content": "import copy\nimport time\nimport wandb\nimport os\nimport numpy as np\nfrom itertools import chain\nimport torch\nfrom tensorboardX import SummaryWriter\n\nfrom mappo_lagrangian.utils.separated_buffer import SeparatedReplayBuffer\nfrom mappo_lagrangian.utils.util import update_linear_schedule\n\n\ndef _t2n(x):\n    return x.detach().cpu().numpy()\n\n\nclass Runner(object):\n    def __init__(self, config):\n\n        self.all_args = config['all_args']\n        self.envs = config['envs']\n        self.eval_envs = config['eval_envs']\n        self.device = config['device']\n        self.num_agents = config['num_agents']\n\n        # parameters\n        self.env_name = self.all_args.env_name\n        self.algorithm_name = self.all_args.algorithm_name\n        self.experiment_name = self.all_args.experiment_name\n        self.use_centralized_V = self.all_args.use_centralized_V\n        self.use_obs_instead_of_state = self.all_args.use_obs_instead_of_state\n        self.num_env_steps = self.all_args.num_env_steps\n        self.episode_length = self.all_args.episode_length\n        self.n_rollout_threads = self.all_args.n_rollout_threads\n        self.n_eval_rollout_threads = self.all_args.n_eval_rollout_threads\n        self.use_linear_lr_decay = self.all_args.use_linear_lr_decay\n        self.hidden_size = self.all_args.hidden_size\n        self.use_wandb = self.all_args.use_wandb\n        self.use_render = self.all_args.use_render\n        self.recurrent_N = self.all_args.recurrent_N\n        self.use_single_network = self.all_args.use_single_network\n        # interval\n        self.save_interval = self.all_args.save_interval\n        self.use_eval = self.all_args.use_eval\n        self.eval_interval = self.all_args.eval_interval\n        self.log_interval = self.all_args.log_interval\n        self.gamma = self.all_args.gamma\n        self.use_popart = self.all_args.use_popart\n\n        self.safety_bound = self.all_args.safety_bound\n\n        # dir\n        self.model_dir = self.all_args.model_dir\n\n        if self.use_render:\n            import imageio\n            self.run_dir = config[\"run_dir\"]\n            self.gif_dir = str(self.run_dir / 'gifs')\n            if not os.path.exists(self.gif_dir):\n                os.makedirs(self.gif_dir)\n        else:\n            if self.use_wandb:\n                self.save_dir = str(wandb.run.dir)\n            else:\n                self.run_dir = config[\"run_dir\"]\n                self.log_dir = str(self.run_dir / 'logs')\n                if not os.path.exists(self.log_dir):\n                    os.makedirs(self.log_dir)\n                self.writter = SummaryWriter(self.log_dir)\n                self.save_dir = str(self.run_dir / 'models')\n                if not os.path.exists(self.save_dir):\n                    os.makedirs(self.save_dir)\n\n        from mappo_lagrangian.algorithms.r_mappo.r_mappo_lagr import R_MAPPO_Lagr as TrainAlgo\n        from mappo_lagrangian.algorithms.r_mappo.algorithm.MACPPOPolicy import MACPPOPolicy as Policy\n\n        print(\"share_observation_space: \", self.envs.share_observation_space)\n        print(\"observation_space: \", self.envs.observation_space)\n        print(\"action_space: \", self.envs.action_space)\n\n        self.policy = []\n        for agent_id in range(self.num_agents):\n            share_observation_space = self.envs.share_observation_space[agent_id] if self.use_centralized_V else \\\n            self.envs.observation_space[agent_id]\n            # policy network\n            po = Policy(self.all_args,\n                        self.envs.observation_space[agent_id],\n                        share_observation_space,\n                        self.envs.action_space[agent_id],\n                        device=self.device)\n            self.policy.append(po)\n\n        if self.model_dir is not None:\n            self.restore()\n\n        self.trainer = []\n        self.buffer = []\n        # todo: revise this for trpo\n        for agent_id in range(self.num_agents):\n            # algorithm\n            tr = TrainAlgo(self.all_args, self.policy[agent_id], device=self.device)\n            # buffer\n            share_observation_space = self.envs.share_observation_space[agent_id] if self.use_centralized_V else \\\n            self.envs.observation_space[agent_id]\n            bu = SeparatedReplayBuffer(self.all_args,\n                                       self.envs.observation_space[agent_id],\n                                       share_observation_space,\n                                       self.envs.action_space[agent_id])\n            self.buffer.append(bu)\n            self.trainer.append(tr)\n\n    def run(self):\n        raise NotImplementedError\n\n    def warmup(self):\n        raise NotImplementedError\n\n    def collect(self, step):\n        raise NotImplementedError\n\n    def insert(self, data):\n        raise NotImplementedError\n\n    @torch.no_grad()\n    def compute(self):\n        for agent_id in range(self.num_agents):\n            self.trainer[agent_id].prep_rollout()\n            next_value = self.trainer[agent_id].policy.get_values(self.buffer[agent_id].share_obs[-1],\n                                                                  self.buffer[agent_id].rnn_states_critic[-1],\n                                                                  self.buffer[agent_id].masks[-1])\n            next_value = _t2n(next_value)\n            self.buffer[agent_id].compute_returns(next_value, self.trainer[agent_id].value_normalizer)\n\n            next_costs = self.trainer[agent_id].policy.get_cost_values(self.buffer[agent_id].share_obs[-1],\n                                                                       self.buffer[agent_id].rnn_states_cost[-1],\n                                                                       self.buffer[agent_id].masks[-1])\n            next_costs = _t2n(next_costs)\n            self.buffer[agent_id].compute_cost_returns(next_costs, self.trainer[agent_id].value_normalizer)\n\n    def train(self):\n        # have modified for SAD_PPO\n        train_infos = []\n        cost_train_infos = []\n        # random update order\n        action_dim = self.buffer[0].actions.shape[-1]\n        factor = np.ones((self.episode_length, self.n_rollout_threads, action_dim), dtype=np.float32)\n        for agent_id in torch.randperm(self.num_agents):\n            self.trainer[agent_id].prep_training()\n            self.buffer[agent_id].update_factor(factor)\n            available_actions = None if self.buffer[agent_id].available_actions is None \\\n                else self.buffer[agent_id].available_actions[:-1].reshape(-1, *self.buffer[\n                                                                                   agent_id].available_actions.shape[\n                                                                               2:])\n            old_actions_logprob, _ = self.trainer[agent_id].policy.actor.evaluate_actions(\n                self.buffer[agent_id].obs[:-1].reshape(-1, *self.buffer[agent_id].obs.shape[2:]),\n                self.buffer[agent_id].rnn_states[0:1].reshape(-1, *self.buffer[agent_id].rnn_states.shape[2:]),\n                self.buffer[agent_id].actions.reshape(-1, *self.buffer[agent_id].actions.shape[2:]),\n                self.buffer[agent_id].masks[:-1].reshape(-1, *self.buffer[agent_id].masks.shape[2:]),\n                available_actions,\n                self.buffer[agent_id].active_masks[:-1].reshape(-1, *self.buffer[agent_id].active_masks.shape[2:]))\n\n            # safe_buffer, cost_adv = self.buffer_filter(agent_id)\n            # train_info = self.trainer[agent_id].train(safe_buffer, cost_adv)\n\n            train_info = self.trainer[agent_id].train(self.buffer[agent_id])\n\n            new_actions_logprob, _ = self.trainer[agent_id].policy.actor.evaluate_actions(\n                self.buffer[agent_id].obs[:-1].reshape(-1, *self.buffer[agent_id].obs.shape[2:]),\n                self.buffer[agent_id].rnn_states[0:1].reshape(-1, *self.buffer[agent_id].rnn_states.shape[2:]),\n                self.buffer[agent_id].actions.reshape(-1, *self.buffer[agent_id].actions.shape[2:]),\n                self.buffer[agent_id].masks[:-1].reshape(-1, *self.buffer[agent_id].masks.shape[2:]),\n                available_actions,\n                self.buffer[agent_id].active_masks[:-1].reshape(-1, *self.buffer[agent_id].active_masks.shape[2:]))\n            factor = factor * _t2n(torch.exp(new_actions_logprob - old_actions_logprob).reshape(self.episode_length,\n                                                                                                self.n_rollout_threads,\n                                                                                                action_dim))\n            train_infos.append(train_info)\n\n            self.buffer[agent_id].after_update()\n\n        return train_infos, cost_train_infos\n\n    # episode length of envs is exactly equal to buffer size, that is, num_thread = num_episode\n    def buffer_filter(self, agent_id):\n        episode_length = len(self.buffer[0].rewards)\n        # J constraints for all agents, just a toy example\n        J = np.zeros((self.n_rollout_threads, 1), dtype=np.float32)\n        for t in reversed(range(episode_length)):\n            J = self.buffer[agent_id].costs[t] + self.gamma * J\n\n        factor = self.buffer[agent_id].factor\n\n        if self.use_popart:\n            cost_adv = self.buffer[agent_id].cost_returns[:-1] - \\\n                       self.trainer[agent_id].value_normalizer.denormalize(self.buffer[agent_id].cost_preds[:-1])\n        else:\n            cost_adv = self.buffer[agent_id].cost_returns[:-1] - self.buffer[agent_id].cost_preds[:-1]\n\n        expectation = np.mean(factor * cost_adv, axis=(0, 2))\n\n        constraints_value = J + np.expand_dims(expectation, -1)\n\n        del_id = []\n        print(\"===================================================\")\n        print(\"safety_bound: \", self.safety_bound)\n        for i in range(self.n_rollout_threads):\n            if constraints_value[i][0] > self.safety_bound:\n                del_id.append(i)\n\n        buffer_filterd = self.remove_episodes(agent_id, del_id)\n        return buffer_filterd, cost_adv\n\n    def remove_episodes(self, agent_id, del_ids):\n        buffer = copy.deepcopy(self.buffer[agent_id])\n        buffer.share_obs = (buffer.share_obs, del_ids, 1)\n        buffer.obs = (buffer.obs, del_ids, 1)\n        buffer.rnn_states = (buffer.rnn_states, del_ids, 1)\n        buffer.rnn_states_critic = (buffer.rnn_states_critic, del_ids, 1)\n        buffer.rnn_states_cost = (buffer.rnn_states_cost, del_ids, 1)\n        buffer.value_preds = (buffer.value_preds, del_ids, 1)\n        buffer.returns = (buffer.returns, del_ids, 1)\n        if buffer.available_actions is not None:\n            buffer.available_actions = (buffer.available_actions, del_ids, 1)\n        buffer.actions = (buffer.actions, del_ids, 1)\n        buffer.action_log_probs = (buffer.action_log_probs, del_ids, 1)\n        buffer.rewards = (buffer.rewards, del_ids, 1)\n        # todo: cost should be calculated entirely\n        buffer.costs = (buffer.costs, del_ids, 1)\n        buffer.cost_preds = (buffer.cost_preds, del_ids, 1)\n        buffer.cost_returns = (buffer.cost_returns, del_ids, 1)\n        buffer.masks = (buffer.masks, del_ids, 1)\n        buffer.bad_masks = (buffer.bad_masks, del_ids, 1)\n        buffer.active_masks = (buffer.active_masks, del_ids, 1)\n        if buffer.factor is not None:\n            buffer.factor = (buffer.factor, del_ids, 1)\n        return buffer\n\n   \n\n    def save(self):\n        for agent_id in range(self.num_agents):\n            if self.use_single_network:\n                policy_model = self.trainer[agent_id].policy.model\n                torch.save(policy_model.state_dict(), str(self.save_dir) + \"/model_agent\" + str(agent_id) + \".pt\")\n            else:\n                policy_actor = self.trainer[agent_id].policy.actor\n                torch.save(policy_actor.state_dict(), str(self.save_dir) + \"/actor_agent\" + str(agent_id) + \".pt\")\n                policy_critic = self.trainer[agent_id].policy.critic\n                torch.save(policy_critic.state_dict(), str(self.save_dir) + \"/critic_agent\" + str(agent_id) + \".pt\")\n\n    def restore(self):\n        for agent_id in range(self.num_agents):\n            if self.use_single_network:\n                policy_model_state_dict = torch.load(str(self.model_dir) + '/model_agent' + str(agent_id) + '.pt')\n                self.policy[agent_id].model.load_state_dict(policy_model_state_dict)\n            else:\n                policy_actor_state_dict = torch.load(str(self.model_dir) + '/actor_agent' + str(agent_id) + '.pt')\n                self.policy[agent_id].actor.load_state_dict(policy_actor_state_dict)\n                policy_critic_state_dict = torch.load(str(self.model_dir) + '/critic_agent' + str(agent_id) + '.pt')\n                self.policy[agent_id].critic.load_state_dict(policy_critic_state_dict)\n\n    def log_train(self, train_infos, total_num_steps):\n        for agent_id in range(self.num_agents):\n            for k, v in train_infos[agent_id].items():\n                agent_k = \"agent%i/\" % agent_id + k\n                if self.use_wandb:\n                    wandb.log({agent_k: v}, step=total_num_steps)\n                else:\n                    self.writter.add_scalars(agent_k, {agent_k: v}, total_num_steps)\n\n    def log_env(self, env_infos, total_num_steps):\n        for k, v in env_infos.items():\n            if len(v) > 0:\n                if self.use_wandb:\n                    wandb.log({k: np.mean(v)}, step=total_num_steps)\n                else:\n                    self.writter.add_scalars(k, {k: np.mean(v)}, total_num_steps)\n"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/runner/separated/mujoco_runner.py",
    "content": "import time\nimport wandb\nimport numpy as np\nfrom functools import reduce\nimport torch\nfrom mappo_lagrangian.runner.separated.base_runner import Runner\n\n\ndef _t2n(x):\n    return x.detach().cpu().numpy()\n\n\nclass MujocoRunner(Runner):\n    \"\"\"Runner class to perform training, evaluation. and data collection for SMAC. See parent class for details.\"\"\"\n\n    def __init__(self, config):\n        super(MujocoRunner, self).__init__(config)\n\n    def run(self):\n        self.warmup()\n\n        start = time.time()\n        episodes = int(self.num_env_steps) // self.episode_length // self.n_rollout_threads\n\n        train_episode_rewards = [0 for _ in range(self.n_rollout_threads)]\n\n        for episode in range(episodes):\n            if self.use_linear_lr_decay:\n                self.trainer.policy.lr_decay(episode, episodes)\n\n            done_episodes_rewards = []\n\n            for step in range(self.episode_length):\n                # Sample actions\n                values, actions, action_log_probs, rnn_states, rnn_states_critic = self.collect(step)\n\n                # Obser reward and next obs\n                obs, share_obs, rewards, dones, infos, _ = self.envs.step(actions)\n\n                dones_env = np.all(dones, axis=1)\n                reward_env = np.mean(rewards, axis=1).flatten()\n                train_episode_rewards += reward_env\n                for t in range(self.n_rollout_threads):\n                    if dones_env[t]:\n                        done_episodes_rewards.append(train_episode_rewards[t])\n                        train_episode_rewards[t] = 0\n\n                data = obs, share_obs, rewards, dones, infos, \\\n                       values, actions, action_log_probs, \\\n                       rnn_states, rnn_states_critic\n\n                # insert data into buffer\n                self.insert(data)\n\n            # compute return and update network\n            self.compute()\n            train_infos = self.train()\n\n            # post process\n            total_num_steps = (episode + 1) * self.episode_length * self.n_rollout_threads\n            # save model\n            if (episode % self.save_interval == 0 or episode == episodes - 1):\n                self.save()\n\n            # log information\n            if episode % self.log_interval == 0:\n                end = time.time()\n                print(\"\\n Scenario {} Algo {} Exp {} updates {}/{} episodes, total num timesteps {}/{}, FPS {}.\\n\"\n                      .format(self.all_args.scenario,\n                              self.algorithm_name,\n                              self.experiment_name,\n                              episode,\n                              episodes,\n                              total_num_steps,\n                              self.num_env_steps,\n                              int(total_num_steps / (end - start))))\n\n                self.log_train(train_infos, total_num_steps)\n\n                if len(done_episodes_rewards) > 0:\n                    aver_episode_rewards = np.mean(done_episodes_rewards)\n                    print(\"some episodes done, average rewards: \", aver_episode_rewards)\n                    self.writter.add_scalars(\"train_episode_rewards\", {\"aver_rewards\": aver_episode_rewards},\n                                             total_num_steps)\n\n            # eval\n            if episode % self.eval_interval == 0 and self.use_eval:\n                self.eval(total_num_steps)\n\n    def warmup(self):\n        # reset env\n        obs, share_obs, _ = self.envs.reset()\n        # replay buffer\n        if not self.use_centralized_V:\n            share_obs = obs\n\n        for agent_id in range(self.num_agents):\n            self.buffer[agent_id].share_obs[0] = share_obs[:, agent_id].copy()\n            self.buffer[agent_id].obs[0] = obs[:, agent_id].copy()\n\n    @torch.no_grad()\n    def collect(self, step):\n        value_collector = []\n        action_collector = []\n        action_log_prob_collector = []\n        rnn_state_collector = []\n        rnn_state_critic_collector = []\n        for agent_id in range(self.num_agents):\n            self.trainer[agent_id].prep_rollout()\n            value, action, action_log_prob, rnn_state, rnn_state_critic \\\n                = self.trainer[agent_id].policy.get_actions(self.buffer[agent_id].share_obs[step],\n                                                            self.buffer[agent_id].obs[step],\n                                                            self.buffer[agent_id].rnn_states[step],\n                                                            self.buffer[agent_id].rnn_states_critic[step],\n                                                            self.buffer[agent_id].masks[step])\n            value_collector.append(_t2n(value))\n            action_collector.append(_t2n(action))\n            action_log_prob_collector.append(_t2n(action_log_prob))\n            rnn_state_collector.append(_t2n(rnn_state))\n            rnn_state_critic_collector.append(_t2n(rnn_state_critic))\n        # [self.envs, agents, dim]\n        values = np.array(value_collector).transpose(1, 0, 2)\n        actions = np.array(action_collector).transpose(1, 0, 2)\n        action_log_probs = np.array(action_log_prob_collector).transpose(1, 0, 2)\n        rnn_states = np.array(rnn_state_collector).transpose(1, 0, 2, 3)\n        rnn_states_critic = np.array(rnn_state_critic_collector).transpose(1, 0, 2, 3)\n\n        return values, actions, action_log_probs, rnn_states, rnn_states_critic\n\n    def insert(self, data):\n        obs, share_obs, rewards, dones, infos, \\\n        values, actions, action_log_probs, rnn_states, rnn_states_critic = data\n\n        dones_env = np.all(dones, axis=1)\n\n        rnn_states[dones_env == True] = np.zeros(\n            ((dones_env == True).sum(), self.num_agents, self.recurrent_N, self.hidden_size), dtype=np.float32)\n        rnn_states_critic[dones_env == True] = np.zeros(\n            ((dones_env == True).sum(), self.num_agents, *self.buffer[0].rnn_states_critic.shape[2:]), dtype=np.float32)\n\n        masks = np.ones((self.n_rollout_threads, self.num_agents, 1), dtype=np.float32)\n        masks[dones_env == True] = np.zeros(((dones_env == True).sum(), self.num_agents, 1), dtype=np.float32)\n\n        active_masks = np.ones((self.n_rollout_threads, self.num_agents, 1), dtype=np.float32)\n        active_masks[dones == True] = np.zeros(((dones == True).sum(), 1), dtype=np.float32)\n        active_masks[dones_env == True] = np.ones(((dones_env == True).sum(), self.num_agents, 1), dtype=np.float32)\n\n        if not self.use_centralized_V:\n            share_obs = obs\n\n        for agent_id in range(self.num_agents):\n            self.buffer[agent_id].insert(share_obs[:, agent_id], obs[:, agent_id], rnn_states[:, agent_id],\n                                         rnn_states_critic[:, agent_id], actions[:, agent_id],\n                                         action_log_probs[:, agent_id],\n                                         values[:, agent_id], rewards[:, agent_id], masks[:, agent_id], None,\n                                         active_masks[:, agent_id], None)\n\n    def log_train(self, train_infos, total_num_steps):\n        print(\"average_step_rewards is {}.\".format(np.mean(self.buffer[0].rewards)))\n        for agent_id in range(self.num_agents):\n            train_infos[agent_id][\"average_step_rewards\"] = np.mean(self.buffer[agent_id].rewards)\n            for k, v in train_infos[agent_id].items():\n                agent_k = \"agent%i/\" % agent_id + k\n                if self.use_wandb:\n                    wandb.log({agent_k: v}, step=total_num_steps)\n                else:\n                    self.writter.add_scalars(agent_k, {agent_k: v}, total_num_steps)\n\n    @torch.no_grad()\n    def eval(self, total_num_steps):\n        eval_episode = 0\n        eval_episode_rewards = []\n        one_episode_rewards = []\n        for eval_i in range(self.n_eval_rollout_threads):\n            one_episode_rewards.append([])\n            eval_episode_rewards.append([])\n\n        eval_obs, eval_share_obs, _ = self.eval_envs.reset()\n\n        eval_rnn_states = np.zeros((self.n_eval_rollout_threads, self.num_agents, self.recurrent_N, self.hidden_size),\n                                   dtype=np.float32)\n        eval_masks = np.ones((self.n_eval_rollout_threads, self.num_agents, 1), dtype=np.float32)\n\n        while True:\n            eval_actions_collector = []\n            eval_rnn_states_collector = []\n            for agent_id in range(self.num_agents):\n                self.trainer[agent_id].prep_rollout()\n                eval_actions, temp_rnn_state = \\\n                    self.trainer[agent_id].policy.act(eval_obs[:, agent_id],\n                                                      eval_rnn_states[:, agent_id],\n                                                      eval_masks[:, agent_id],\n                                                      deterministic=True)\n                eval_rnn_states[:, agent_id] = _t2n(temp_rnn_state)\n                eval_actions_collector.append(_t2n(eval_actions))\n\n            eval_actions = np.array(eval_actions_collector).transpose(1, 0, 2)\n\n            # Obser reward and next obs\n            eval_obs, eval_share_obs, eval_rewards, eval_dones, eval_infos, _ = self.eval_envs.step(\n                eval_actions)\n            for eval_i in range(self.n_eval_rollout_threads):\n                one_episode_rewards[eval_i].append(eval_rewards[eval_i])\n\n            eval_dones_env = np.all(eval_dones, axis=1)\n\n            eval_rnn_states[eval_dones_env == True] = np.zeros(\n                ((eval_dones_env == True).sum(), self.num_agents, self.recurrent_N, self.hidden_size), dtype=np.float32)\n\n            eval_masks = np.ones((self.all_args.n_eval_rollout_threads, self.num_agents, 1), dtype=np.float32)\n            eval_masks[eval_dones_env == True] = np.zeros(((eval_dones_env == True).sum(), self.num_agents, 1),\n                                                          dtype=np.float32)\n\n            for eval_i in range(self.n_eval_rollout_threads):\n                if eval_dones_env[eval_i]:\n                    eval_episode += 1\n                    eval_episode_rewards[eval_i].append(np.sum(one_episode_rewards[eval_i], axis=0))\n                    one_episode_rewards[eval_i] = []\n\n            if eval_episode >= self.all_args.eval_episodes:\n                eval_episode_rewards = np.concatenate(eval_episode_rewards)\n                eval_env_infos = {'eval_average_episode_rewards': eval_episode_rewards,\n                                  'eval_max_episode_rewards': [np.max(eval_episode_rewards)]}\n                self.log_env(eval_env_infos, total_num_steps)\n                print(\"eval_average_episode_rewards is {}.\".format(np.mean(eval_episode_rewards)))\n                break\n"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/runner/separated/mujoco_runner_mappo_lagr.py",
    "content": "import time\nfrom itertools import chain\n\nimport wandb\nimport numpy as np\nfrom functools import reduce\nimport torch\nfrom mappo_lagrangian.runner.separated.base_runner_mappo_lagr import Runner\n\n\ndef _t2n(x):\n    return x.detach().cpu().numpy()\n\n\nclass MujocoRunner(Runner):\n    \"\"\"Runner class to perform training, evaluation. and data collection for SMAC. See parent class for details.\"\"\"\n\n    def __init__(self, config):\n        super(MujocoRunner, self).__init__(config)\n\n    def run(self):\n        self.warmup()\n\n        start = time.time()\n        episodes = int(self.num_env_steps) // self.episode_length // self.n_rollout_threads\n\n        train_episode_rewards = [0 for _ in range(self.n_rollout_threads)]\n        train_episode_costs = [0 for _ in range(self.n_rollout_threads)]\n\n        for episode in range(episodes):\n            if self.use_linear_lr_decay:\n                self.trainer.policy.lr_decay(episode, episodes)\n\n            done_episodes_rewards = []\n            done_episodes_costs = []\n\n            for step in range(self.episode_length):\n                # Sample actions\n                values, actions, action_log_probs, rnn_states, rnn_states_critic, cost_preds, \\\n                rnn_states_cost = self.collect(step)\n\n                # Obser reward cost and next obs\n                obs, share_obs, rewards, costs, dones, infos, _ = self.envs.step(actions)\n\n                dones_env = np.all(dones, axis=1)\n                reward_env = np.mean(rewards, axis=1).flatten()\n                cost_env = np.mean(costs, axis=1).flatten()\n                train_episode_rewards += reward_env\n                train_episode_costs += cost_env\n                for t in range(self.n_rollout_threads):\n\n                    if dones_env[t]:\n                        done_episodes_rewards.append(train_episode_rewards[t])\n                        train_episode_rewards[t] = 0\n                        done_episodes_costs.append(train_episode_costs[t])\n                        train_episode_costs[t] = 0\n\n                data = obs, share_obs, rewards, costs, dones, infos, \\\n                       values, actions, action_log_probs, \\\n                       rnn_states, rnn_states_critic, cost_preds, rnn_states_cost  # fixme: it's important!!!\n\n                # insert data into buffer\n                self.insert(data)\n\n            # compute return and update network\n            self.compute()\n            train_infos = self.train()\n\n            # post process\n            total_num_steps = (episode + 1) * self.episode_length * self.n_rollout_threads\n            # save model\n            if (episode % self.save_interval == 0 or episode == episodes - 1):\n                self.save()\n\n            # log information\n            if episode % self.log_interval == 0:\n                end = time.time()\n                print(\"\\n Scenario {} Algo {} Exp {} updates {}/{} episodes, total num timesteps {}/{}, FPS {}.\\n\"\n                      .format(self.all_args.scenario,\n                              self.algorithm_name,\n                              self.experiment_name,\n                              episode,\n                              episodes,\n                              total_num_steps,\n                              self.num_env_steps,\n                              int(total_num_steps / (end - start))))\n\n                self.log_train(train_infos, total_num_steps)\n\n                if len(done_episodes_rewards) > 0:\n                    aver_episode_rewards = np.mean(done_episodes_rewards)\n                    aver_episode_costs = np.mean(done_episodes_costs)\n                    self.return_aver_cost(aver_episode_costs)\n                    print(\"some episodes done, average rewards: {}, average costs: {}\".format(aver_episode_rewards,\n                                                                                              aver_episode_costs))\n                    self.writter.add_scalars(\"train_episode_rewards\", {\"aver_rewards\": aver_episode_rewards},\n                                             total_num_steps)\n                    self.writter.add_scalars(\"train_episode_costs\", {\"aver_costs\": aver_episode_costs},\n                                             total_num_steps)\n\n            # eval\n            if episode % self.eval_interval == 0 and self.use_eval:\n                self.eval(total_num_steps)\n\n    def return_aver_cost(self, aver_episode_costs):\n        for agent_id in range(self.num_agents):\n            self.buffer[agent_id].return_aver_insert(aver_episode_costs)\n\n    def warmup(self):\n        # reset env\n        obs, share_obs, _ = self.envs.reset()\n        # replay buffer\n        if not self.use_centralized_V:\n            share_obs = obs\n\n        for agent_id in range(self.num_agents):\n            # print(share_obs[:, agent_id])\n            self.buffer[agent_id].share_obs[0] = share_obs[:, agent_id].copy()\n            self.buffer[agent_id].obs[0] = obs[:, agent_id].copy()\n\n    @torch.no_grad()\n    def collect(self, step):\n        # values, actions, action_log_probs, rnn_states, rnn_states_critic, cost_preds, \\\n        # rnn_states_cost = self.collect(step)\n\n        value_collector = []\n        action_collector = []\n        action_log_prob_collector = []\n        rnn_state_collector = []\n        rnn_state_critic_collector = []\n        cost_preds_collector = []\n        rnn_states_cost_collector = []\n\n        for agent_id in range(self.num_agents):\n            self.trainer[agent_id].prep_rollout()\n            value, action, action_log_prob, rnn_state, rnn_state_critic, cost_pred, rnn_state_cost \\\n                = self.trainer[agent_id].policy.get_actions(self.buffer[agent_id].share_obs[step],\n                                                            self.buffer[agent_id].obs[step],\n                                                            self.buffer[agent_id].rnn_states[step],\n                                                            self.buffer[agent_id].rnn_states_critic[step],\n                                                            self.buffer[agent_id].masks[step],\n                                                            rnn_states_cost=self.buffer[agent_id].rnn_states_cost[step]\n                                                            )\n            value_collector.append(_t2n(value))\n            action_collector.append(_t2n(action))\n            action_log_prob_collector.append(_t2n(action_log_prob))\n            rnn_state_collector.append(_t2n(rnn_state))\n            rnn_state_critic_collector.append(_t2n(rnn_state_critic))\n            cost_preds_collector.append(_t2n(cost_pred))\n            rnn_states_cost_collector.append(_t2n(rnn_state_cost))\n        # [self.envs, agents, dim]\n        values = np.array(value_collector).transpose(1, 0, 2)\n        actions = np.array(action_collector).transpose(1, 0, 2)\n        action_log_probs = np.array(action_log_prob_collector).transpose(1, 0, 2)\n        rnn_states = np.array(rnn_state_collector).transpose(1, 0, 2, 3)\n        rnn_states_critic = np.array(rnn_state_critic_collector).transpose(1, 0, 2, 3)\n        cost_preds = np.array(cost_preds_collector).transpose(1, 0, 2)\n        rnn_states_cost = np.array(rnn_states_cost_collector).transpose(1, 0, 2, 3)\n\n        return values, actions, action_log_probs, rnn_states, rnn_states_critic, cost_preds, rnn_states_cost\n\n    def insert(self, data):\n        obs, share_obs, rewards, costs, dones, infos, \\\n        values, actions, action_log_probs, rnn_states, rnn_states_critic, cost_preds, rnn_states_cost = data  # fixme:!!!\n        # print(\"insert--rewards\", rewards)\n        dones_env = np.all(dones, axis=1)\n\n        rnn_states[dones_env == True] = np.zeros(\n            ((dones_env == True).sum(), self.num_agents, self.recurrent_N, self.hidden_size), dtype=np.float32)\n        rnn_states_critic[dones_env == True] = np.zeros(\n            ((dones_env == True).sum(), self.num_agents, *self.buffer[0].rnn_states_critic.shape[2:]), dtype=np.float32)\n\n        rnn_states_cost[dones_env == True] = np.zeros(\n            ((dones_env == True).sum(), self.num_agents, *self.buffer[0].rnn_states_cost.shape[2:]), dtype=np.float32)\n\n        masks = np.ones((self.n_rollout_threads, self.num_agents, 1), dtype=np.float32)\n        masks[dones_env == True] = np.zeros(((dones_env == True).sum(), self.num_agents, 1), dtype=np.float32)\n\n        active_masks = np.ones((self.n_rollout_threads, self.num_agents, 1), dtype=np.float32)\n        active_masks[dones == True] = np.zeros(((dones == True).sum(), 1), dtype=np.float32)\n        active_masks[dones_env == True] = np.ones(((dones_env == True).sum(), self.num_agents, 1), dtype=np.float32)\n\n        if not self.use_centralized_V:\n            share_obs = obs\n\n        for agent_id in range(self.num_agents):\n            self.buffer[agent_id].insert(share_obs[:, agent_id], obs[:, agent_id], rnn_states[:, agent_id],\n                                         rnn_states_critic[:, agent_id], actions[:, agent_id],\n                                         action_log_probs[:, agent_id],\n                                         values[:, agent_id], rewards[:, agent_id], masks[:, agent_id], None,\n                                         active_masks[:, agent_id], None, costs=costs[:, agent_id],\n                                         cost_preds=cost_preds[:, agent_id],\n                                         rnn_states_cost=rnn_states_cost[:, agent_id])\n\n    def log_train(self, train_infos, total_num_steps):\n        print(\"average_step_rewards is {}.\".format(np.mean(self.buffer[0].rewards)))\n        train_infos[0][0][\"average_step_rewards\"] = 0\n        for agent_id in range(self.num_agents):\n            train_infos[0][agent_id][\"average_step_rewards\"] = np.mean(self.buffer[agent_id].rewards)\n            for k, v in train_infos[0][agent_id].items():\n                agent_k = \"agent%i/\" % agent_id + k\n                if self.use_wandb:\n                    wandb.log({agent_k: v}, step=total_num_steps)\n                else:\n                    self.writter.add_scalars(agent_k, {agent_k: v}, total_num_steps)\n\n    @torch.no_grad()\n    def eval(self, total_num_steps):\n        eval_episode = 0\n        eval_episode_rewards = []\n        one_episode_rewards = []\n        eval_episode_costs = []\n        one_episode_costs = []\n\n        for eval_i in range(self.n_eval_rollout_threads):\n            one_episode_rewards.append([])\n            eval_episode_rewards.append([])\n\n            one_episode_costs.append([])\n            eval_episode_costs.append([])\n\n        eval_obs, eval_share_obs, _ = self.eval_envs.reset()\n\n        eval_rnn_states = np.zeros((self.n_eval_rollout_threads, self.num_agents, self.recurrent_N, self.hidden_size),\n                                   dtype=np.float32)\n        eval_masks = np.ones((self.n_eval_rollout_threads, self.num_agents, 1), dtype=np.float32)\n\n        while True:\n            eval_actions_collector = []\n            eval_rnn_states_collector = []\n            for agent_id in range(self.num_agents):\n                self.trainer[agent_id].prep_rollout()\n                eval_actions, temp_rnn_state = \\\n                    self.trainer[agent_id].policy.act(eval_obs[:, agent_id],\n                                                      eval_rnn_states[:, agent_id],\n                                                      eval_masks[:, agent_id],\n                                                      deterministic=True)\n                eval_rnn_states[:, agent_id] = _t2n(temp_rnn_state)\n                eval_actions_collector.append(_t2n(eval_actions))\n\n            eval_actions = np.array(eval_actions_collector).transpose(1, 0, 2)\n\n            # Obser reward and next obs\n            eval_obs, eval_share_obs, eval_rewards, eval_costs, eval_dones, eval_infos, _ = self.eval_envs.step(\n                eval_actions)\n            for eval_i in range(self.n_eval_rollout_threads):\n                one_episode_rewards[eval_i].append(eval_rewards[eval_i])\n                one_episode_costs[eval_i].append(eval_costs[eval_i])\n\n            eval_dones_env = np.all(eval_dones, axis=1)\n\n            eval_rnn_states[eval_dones_env == True] = np.zeros(\n                ((eval_dones_env == True).sum(), self.num_agents, self.recurrent_N, self.hidden_size), dtype=np.float32)\n\n            eval_masks = np.ones((self.all_args.n_eval_rollout_threads, self.num_agents, 1), dtype=np.float32)\n            eval_masks[eval_dones_env == True] = np.zeros(((eval_dones_env == True).sum(), self.num_agents, 1),\n                                                          dtype=np.float32)\n\n            for eval_i in range(self.n_eval_rollout_threads):\n                if eval_dones_env[eval_i]:\n                    eval_episode += 1\n                    eval_episode_rewards[eval_i].append(np.sum(one_episode_rewards[eval_i], axis=0))\n                    one_episode_rewards[eval_i] = []\n\n            if eval_episode >= self.all_args.eval_episodes:\n                eval_episode_rewards = np.concatenate(eval_episode_rewards)\n                eval_env_infos = {'eval_average_episode_rewards': eval_episode_rewards,\n                                  'eval_max_episode_rewards': [np.max(eval_episode_rewards)]}\n                self.log_env(eval_env_infos, total_num_steps)\n                print(\"eval_average_episode_rewards is {}.\".format(np.mean(eval_episode_rewards)))\n                break\n"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/scripts/__init__.py",
    "content": ""
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/scripts/eval/eval_hanabi.py",
    "content": "#!/usr/bin/env python\nimport sys\nimport os\nimport wandb\nimport socket\nimport setproctitle\nimport numpy as np\nfrom pathlib import Path\n\nimport torch\n\nfrom onpolicy.config import get_config\n\nfrom onpolicy.envs.hanabi.Hanabi_Env import HanabiEnv\nfrom onpolicy.envs.env_wrappers import ChooseSubprocVecEnv, ChooseDummyVecEnv\n\n\ndef make_train_env(all_args):\n    def get_env_fn(rank):\n        def init_env():\n            if all_args.env_name == \"Hanabi\":\n                assert all_args.num_agents > 1 and all_args.num_agents < 6, (\n                    \"num_agents can be only between 2-5.\")\n                env = HanabiEnv(all_args, (all_args.seed + rank * 1000))\n            else:\n                print(\"Can not support the \" +\n                      all_args.env_name + \"environment.\")\n                raise NotImplementedError\n            env.seed(all_args.seed + rank * 1000)\n            return env\n        return init_env\n    if all_args.n_rollout_threads == 1:\n        return ChooseDummyVecEnv([get_env_fn(0)])\n    else:\n        return ChooseSubprocVecEnv([get_env_fn(i) for i in range(all_args.n_rollout_threads)])\n\n\ndef make_eval_env(all_args):\n    def get_env_fn(rank):\n        def init_env():\n            if all_args.env_name == \"Hanabi\":\n                assert all_args.num_agents > 1 and all_args.num_agents < 6, (\n                    \"num_agents can be only between 2-5.\")\n                env = HanabiEnv(\n                    all_args, (all_args.seed * 50000 + rank * 10000))\n            else:\n                print(\"Can not support the \" +\n                      all_args.env_name + \"environment.\")\n                raise NotImplementedError\n            env.seed(all_args.seed * 50000 + rank * 10000)\n            return env\n        return init_env\n    if all_args.n_eval_rollout_threads == 1:\n        return ChooseDummyVecEnv([get_env_fn(0)])\n    else:\n        return ChooseSubprocVecEnv([get_env_fn(i) for i in range(all_args.n_eval_rollout_threads)])\n\n\ndef parse_args(args, parser):\n    parser.add_argument('--hanabi_name', type=str,\n                        default='Hanabi-Very-Small', help=\"Which env to run on\")\n    parser.add_argument('--num_agents', type=int,\n                        default=2, help=\"number of players\")\n\n    all_args = parser.parse_known_args(args)[0]\n\n    return all_args\n\n\ndef main(args):\n    parser = get_config()\n    all_args = parse_args(args, parser)\n\n    if all_args.algorithm_name == \"rmappo\":\n        assert (all_args.use_recurrent_policy or all_args.use_naive_recurrent_policy), (\"check recurrent policy!\")\n    elif all_args.algorithm_name == \"mappo\":\n        assert (all_args.use_recurrent_policy == False and all_args.use_naive_recurrent_policy == False), (\"check recurrent policy!\")\n    else:\n        raise NotImplementedError\n\n    assert all_args.use_eval, (\"u need to set use_eval be True\")\n    assert not (all_args.model_dir == None or all_args.model_dir == \"\"), (\"set model_dir first\")\n    \n\n    # cuda\n    if all_args.cuda and torch.cuda.is_available():\n        print(\"choose to use gpu...\")\n        device = torch.device(\"cuda:0\")\n        torch.set_num_threads(all_args.n_training_threads)\n        if all_args.cuda_deterministic:\n            torch.backends.cudnn.benchmark = False\n            torch.backends.cudnn.deterministic = True\n    else:\n        print(\"choose to use cpu...\")\n        device = torch.device(\"cpu\")\n        torch.set_num_threads(all_args.n_training_threads)\n\n    # run dir\n    run_dir = Path(os.path.split(os.path.dirname(os.path.abspath(__file__)))[0] + \"/results\") / all_args.env_name / all_args.hanabi_name / all_args.algorithm_name / all_args.experiment_name\n    if not run_dir.exists():\n        os.makedirs(str(run_dir))\n\n    # wandb\n    if all_args.use_wandb:\n        run = wandb.init(config=all_args,\n                         project=all_args.env_name,\n                         entity=all_args.user_name,\n                         notes=socket.gethostname(),\n                         name=str(all_args.algorithm_name) + \"_\" +\n                         str(all_args.experiment_name) +\n                         \"_seed\" + str(all_args.seed),\n                         group=all_args.hanabi_name,\n                         dir=str(run_dir),\n                         job_type=\"training\",\n                         reinit=True)\n    else:\n        if not run_dir.exists():\n            curr_run = 'run1'\n        else:\n            exst_run_nums = [int(str(folder.name).split('run')[1]) for folder in run_dir.iterdir() if str(folder.name).startswith('run')]\n            if len(exst_run_nums) == 0:\n                curr_run = 'run1'\n            else:\n                curr_run = 'run%i' % (max(exst_run_nums) + 1)\n        run_dir = run_dir / curr_run\n        if not run_dir.exists():\n            os.makedirs(str(run_dir))\n\n    setproctitle.setproctitle(str(all_args.algorithm_name) + \"-\" + str(\n        all_args.env_name) + \"-\" + str(all_args.experiment_name) + \"@\" + str(all_args.user_name))\n\n    # seed\n    torch.manual_seed(all_args.seed)\n    torch.cuda.manual_seed_all(all_args.seed)\n    np.random.seed(all_args.seed)\n\n    # env init\n    envs = make_train_env(all_args)\n    eval_envs = make_eval_env(all_args) if all_args.use_eval else None\n    num_agents = all_args.num_agents\n\n    config = {\n        \"all_args\": all_args,\n        \"envs\": envs,\n        \"eval_envs\": eval_envs,\n        \"num_agents\": num_agents,\n        \"device\": device,\n        \"run_dir\": run_dir\n    }\n\n    # run experiments\n    if all_args.share_policy:\n        from onpolicy.runner.shared.hanabi_runner_forward import HanabiRunner as Runner\n    else:\n        from onpolicy.runner.separated.hanabi_runner_forward import HanabiRunner as Runner\n\n    runner = Runner(config)\n    runner.eval_100k()\n    \n    # post process\n    envs.close()\n    if all_args.use_eval and eval_envs is not envs:\n        eval_envs.close()\n\n    if all_args.use_wandb:\n        run.finish()\n    else:\n        runner.writter.export_scalars_to_json(str(runner.log_dir + '/summary.json'))\n        runner.writter.close()\n\n\nif __name__ == \"__main__\":\n    main(sys.argv[1:])\n\n"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/scripts/train/__init__.py",
    "content": ""
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/scripts/train/train_mujoco.py",
    "content": "#!/usr/bin/env python\nimport sys\nimport os\ncurPath = os.path.abspath(__file__)\n\nif len(curPath.split('/'))==1:\n    rootPath = '\\\\'.join(curPath.split('\\\\')[:-3])\nelse:\n    rootPath = '/'.join(curPath.split('/')[:-3])\nsys.path.append(os.path.split(rootPath)[0])\n\nimport wandb\nimport socket\nimport setproctitle\nimport numpy as np\nfrom pathlib import Path\nimport torch\nfrom mappo_lagrangian.config import get_config\nfrom mappo_lagrangian.envs.safety_ma_mujoco.safety_multiagent_mujoco import MujocoMulti\nfrom mappo_lagrangian.envs.env_wrappers import ShareSubprocVecEnv, ShareDummyVecEnv\n\n\ndef make_train_env(all_args):\n    def get_env_fn(rank):\n        def init_env():\n            if all_args.env_name == \"mujoco\":\n                env_args = {\"scenario\": all_args.scenario,\n                            \"agent_conf\": all_args.agent_conf,\n                            \"agent_obsk\": all_args.agent_obsk,\n                            \"episode_limit\": 1000}\n                env = MujocoMulti(env_args=env_args)\n            else:\n                print(\"Can not support the \" + all_args.env_name + \"environment.\")\n                raise NotImplementedError\n            env.seed(all_args.seed + rank * 1000)\n            return env\n\n        return init_env\n\n    if all_args.n_rollout_threads == 1:\n        return ShareDummyVecEnv([get_env_fn(0)])\n    else:\n        return ShareSubprocVecEnv([get_env_fn(i) for i in range(all_args.n_rollout_threads)])\n\n\ndef make_eval_env(all_args):\n    def get_env_fn(rank):\n        def init_env():\n            if all_args.env_name == \"mujoco\":\n                env_args = {\"scenario\": all_args.scenario,\n                            \"agent_conf\": all_args.agent_conf,\n                            \"agent_obsk\": all_args.agent_obsk,\n                            \"episode_limit\": 1000}\n                env = MujocoMulti(env_args=env_args)\n            else:\n                print(\"Can not support the \" + all_args.env_name + \"environment.\")\n                raise NotImplementedError\n            env.seed(all_args.seed * 50000 + rank * 10000)\n            return env\n\n        return init_env\n\n    if all_args.n_eval_rollout_threads == 1:\n        return ShareDummyVecEnv([get_env_fn(0)])\n    else:\n        return ShareSubprocVecEnv([get_env_fn(i) for i in range(all_args.n_eval_rollout_threads)])\n\n\ndef parse_args(args, parser):\n    parser.add_argument('--scenario', type=str, default='Hopper-v2', help=\"Which mujoco task to run on\")\n    parser.add_argument('--agent_conf', type=str, default='3x1')\n    parser.add_argument('--agent_obsk', type=int, default=0)\n    parser.add_argument(\"--add_move_state\", action='store_true', default=False)\n    parser.add_argument(\"--add_local_obs\", action='store_true', default=False)\n    parser.add_argument(\"--add_distance_state\", action='store_true', default=False)\n    parser.add_argument(\"--add_enemy_action_state\", action='store_true', default=False)\n    parser.add_argument(\"--add_agent_id\", action='store_true', default=False)\n    parser.add_argument(\"--add_visible_state\", action='store_true', default=False)\n    parser.add_argument(\"--add_xy_state\", action='store_true', default=False)\n\n    # agent-specific state should be designed carefully\n    parser.add_argument(\"--use_state_agent\", action='store_true', default=False)\n    parser.add_argument(\"--use_mustalive\", action='store_false', default=True)\n    parser.add_argument(\"--add_center_xy\", action='store_true', default=False)\n    parser.add_argument(\"--use_single_network\", action='store_true', default=False)\n\n    all_args = parser.parse_known_args(args)[0]\n\n    return all_args\n\n\ndef main(args):\n    parser = get_config()\n    all_args = parse_args(args, parser)\n    print(\"mumu config: \", all_args)\n\n    if all_args.algorithm_name == \"mappo_lagr\":\n        all_args.share_policy=False\n    else:\n        raise NotImplementedError\n\n    # cuda\n    # all_args.cuda = True\n    if all_args.cuda and torch.cuda.is_available():\n        print(\"choose to use gpu...\")\n        device = torch.device(\"cuda:0\")\n        torch.set_num_threads(all_args.n_training_threads)\n        if all_args.cuda_deterministic:\n            torch.backends.cudnn.benchmark = False\n            torch.backends.cudnn.deterministic = True\n    else:\n        print(\"cuda flag: \", all_args.cuda, \"Torch: \", torch.cuda.is_available())\n        print(\"choose to use cpu...\")\n        device = torch.device(\"cpu\")\n        torch.set_num_threads(all_args.n_training_threads)\n\n    run_dir = Path(os.path.split(os.path.dirname(os.path.abspath(__file__)))[\n                       0] + \"/results\") / all_args.env_name / all_args.scenario / all_args.algorithm_name / all_args.experiment_name\n    if not run_dir.exists():\n        os.makedirs(str(run_dir))\n\n    if all_args.use_wandb:\n        run = wandb.init(config=all_args,\n                         project=all_args.env_name,\n                         entity=all_args.user_name,\n                         notes=socket.gethostname(),\n                         name=str(all_args.algorithm_name) + \"_\" +\n                              str(all_args.experiment_name) +\n                              \"_seed\" + str(all_args.seed),\n                         group=all_args.map_name,\n                         dir=str(run_dir),\n                         job_type=\"training\",\n                         reinit=True)\n    else:\n        if not run_dir.exists():\n            curr_run = 'run1'\n        else:\n            exst_run_nums = [int(str(folder.name).split('run')[1]) for folder in run_dir.iterdir() if\n                             str(folder.name).startswith('run')]\n            if len(exst_run_nums) == 0:\n                curr_run = 'run1'\n            else:\n                curr_run = 'run%i' % (max(exst_run_nums) + 1)\n        run_dir = run_dir / curr_run\n        if not run_dir.exists():\n            os.makedirs(str(run_dir))\n\n    setproctitle.setproctitle(\n        str(all_args.algorithm_name) + \"-\" + str(all_args.env_name) + \"-\" + str(all_args.experiment_name) + \"@\" + str(\n            all_args.user_name))\n\n    # seed\n    torch.manual_seed(all_args.seed)\n    torch.cuda.manual_seed_all(all_args.seed)\n    np.random.seed(all_args.seed)\n\n    # env\n    envs = make_train_env(all_args)\n    eval_envs = make_eval_env(all_args) if all_args.use_eval else None\n    num_agents = envs.n_agents\n\n    config = {\n        \"all_args\": all_args,\n        \"envs\": envs,\n        \"eval_envs\": eval_envs,\n        \"num_agents\": num_agents,\n        \"device\": device,\n        \"run_dir\": run_dir\n    }\n\n    # run experiments\n    if all_args.share_policy:\n        from mappo_lagrangian.runner.shared.mujoco_runner import MujocoRunner as Runner\n    else:\n        #in origin code not implement this method\n        if all_args.algorithm_name == \"mappo_lagr\":\n            from mappo_lagrangian.runner.separated.mujoco_runner_mappo_lagr import MujocoRunner as Runner\n        else:\n            from mappo_lagrangian.runner.separated.mujoco_runner import MujocoRunner as Runner\n\n    runner = Runner(config)\n    runner.run()\n\n    # post process\n    envs.close()\n    if all_args.use_eval and eval_envs is not envs:\n        eval_envs.close()\n\n    if all_args.use_wandb:\n        run.finish()\n    else:\n        runner.writter.export_scalars_to_json(str(runner.log_dir + '/summary.json'))\n        runner.writter.close()\n\n\nif __name__ == \"__main__\":\n    main(sys.argv[1:])\n"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/scripts/train_mujoco.sh",
    "content": "#!/bin/sh\nenv=\"mujoco\"\nscenario=\"Ant-v2\"\nagent_conf=\"2x4\"\nagent_obsk=1\nalgo=\"mappo_lagr\"\nexp=\"rnn\"\nseed_max=1\nseed_=50\n\necho \"env is ${env}, scenario is ${scenario}, algo is ${algo}, exp is ${exp}, max seed is ${seed_max}\"\nfor seed in `seq ${seed_max}`;\ndo\n    echo \"seed is ${seed}:\"\n    CUDA_VISIBLE_DEVICES=0 python train/train_mujoco.py  --env_name ${env} --algorithm_name ${algo} --experiment_name ${exp} --scenario ${scenario} --agent_conf ${agent_conf} --agent_obsk ${agent_obsk} --lr 9e-5 --critic_lr 5e-3 --std_x_coef 1 --std_y_coef 5e-1 --seed ${seed_} --n_training_threads 4 --n_rollout_threads 16 --num_mini_batch 40 --episode_length 1000 --num_env_steps 10000000 --ppo_epoch 5 --use_value_active_masks  --add_center_xy --use_state_agent  --safety_bound 0.2 --lamda_lagr 0.78 --lagrangian_coef_rate 1e-7\ndone\n"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/utils/__init__.py",
    "content": ""
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/utils/multi_discrete.py",
    "content": "import gym\nimport numpy as np\n\n# An old version of OpenAI Gym's multi_discrete.py. (Was getting affected by Gym updates)\n# (https://github.com/openai/gym/blob/1fb81d4e3fb780ccf77fec731287ba07da35eb84/gym/spaces/multi_discrete.py)\nclass MultiDiscrete(gym.Space):\n    \"\"\"\n    - The multi-discrete action space consists of a series of discrete action spaces with different parameters\n    - It can be adapted to both a Discrete action space or a continuous (Box) action space\n    - It is useful to represent game controllers or keyboards where each key can be represented as a discrete action space\n    - It is parametrized by passing an array of arrays containing [min, max] for each discrete action space where the discrete action space can take any integers from `min` to `max` (both inclusive)\n    Note: A value of 0 always need to represent the NOOP action.\n    e.g. Nintendo Game Controller\n    - Can be conceptualized as 3 discrete action spaces:\n        1) Arrow Keys: Discrete 5  - NOOP[0], UP[1], RIGHT[2], DOWN[3], LEFT[4]  - params: min: 0, max: 4\n        2) Button A:   Discrete 2  - NOOP[0], Pressed[1] - params: min: 0, max: 1\n        3) Button B:   Discrete 2  - NOOP[0], Pressed[1] - params: min: 0, max: 1\n    - Can be initialized as\n        MultiDiscrete([ [0,4], [0,1], [0,1] ])\n    \"\"\"\n\n    def __init__(self, array_of_param_array):\n        self.low = np.array([x[0] for x in array_of_param_array])\n        self.high = np.array([x[1] for x in array_of_param_array])\n        self.num_discrete_space = self.low.shape[0]\n        self.n = np.sum(self.high) + 2\n\n    def sample(self):\n        \"\"\" Returns a array with one sample from each discrete action space \"\"\"\n        # For each row: round(random .* (max - min) + min, 0)\n        random_array = np.random.rand(self.num_discrete_space)\n        return [int(x) for x in np.floor(np.multiply((self.high - self.low + 1.), random_array) + self.low)]\n\n    def contains(self, x):\n        return len(x) == self.num_discrete_space and (np.array(x) >= self.low).all() and (np.array(x) <= self.high).all()\n\n    @property\n    def shape(self):\n        return self.num_discrete_space\n\n    def __repr__(self):\n        return \"MultiDiscrete\" + str(self.num_discrete_space)\n\n    def __eq__(self, other):\n        return np.array_equal(self.low, other.low) and np.array_equal(self.high, other.high)\n"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/utils/popart.py",
    "content": "\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\n\n\nclass PopArt(nn.Module):\n    \"\"\" Normalize a vector of observations - across the first norm_axes dimensions\"\"\"\n\n    def __init__(self, input_shape, norm_axes=1, beta=0.99999, per_element_update=False, epsilon=1e-5, device=torch.device(\"cpu\")):\n        super(PopArt, self).__init__()\n\n        self.input_shape = input_shape\n        self.norm_axes = norm_axes\n        self.epsilon = epsilon\n        self.beta = beta\n        self.per_element_update = per_element_update\n        self.tpdv = dict(dtype=torch.float32, device=device)\n\n        self.running_mean = nn.Parameter(torch.zeros(input_shape), requires_grad=False).to(**self.tpdv)\n        self.running_mean_sq = nn.Parameter(torch.zeros(input_shape), requires_grad=False).to(**self.tpdv)\n        self.debiasing_term = nn.Parameter(torch.tensor(0.0), requires_grad=False).to(**self.tpdv)\n\n    def reset_parameters(self):\n        self.running_mean.zero_()\n        self.running_mean_sq.zero_()\n        self.debiasing_term.zero_()\n\n    def running_mean_var(self):\n        debiased_mean = self.running_mean / self.debiasing_term.clamp(min=self.epsilon)\n        debiased_mean_sq = self.running_mean_sq / self.debiasing_term.clamp(min=self.epsilon)\n        debiased_var = (debiased_mean_sq - debiased_mean ** 2).clamp(min=1e-2)\n        return debiased_mean, debiased_var\n\n    def forward(self, input_vector, train=True):\n        # Make sure input is float32\n        if type(input_vector) == np.ndarray:\n            input_vector = torch.from_numpy(input_vector)\n        input_vector = input_vector.to(**self.tpdv)\n\n        if train:\n            # Detach input before adding it to running means to avoid backpropping through it on\n            # subsequent batches.\n            detached_input = input_vector.detach()\n            batch_mean = detached_input.mean(dim=tuple(range(self.norm_axes)))\n            batch_sq_mean = (detached_input ** 2).mean(dim=tuple(range(self.norm_axes)))\n\n            if self.per_element_update:\n                batch_size = np.prod(detached_input.size()[:self.norm_axes])\n                weight = self.beta ** batch_size\n            else:\n                weight = self.beta\n\n            self.running_mean.mul_(weight).add_(batch_mean * (1.0 - weight))\n            self.running_mean_sq.mul_(weight).add_(batch_sq_mean * (1.0 - weight))\n            self.debiasing_term.mul_(weight).add_(1.0 * (1.0 - weight))\n\n        mean, var = self.running_mean_var()\n        out = (input_vector - mean[(None,) * self.norm_axes]) / torch.sqrt(var)[(None,) * self.norm_axes]\n        \n        return out\n\n    def denormalize(self, input_vector):\n        \"\"\" Transform normalized data back into original distribution \"\"\"\n        if type(input_vector) == np.ndarray:\n            input_vector = torch.from_numpy(input_vector)\n        input_vector = input_vector.to(**self.tpdv)\n\n        mean, var = self.running_mean_var()\n        out = input_vector * torch.sqrt(var)[(None,) * self.norm_axes] + mean[(None,) * self.norm_axes]\n        \n        out = out.cpu().numpy()\n        \n        return out\n"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/utils/separated_buffer.py",
    "content": "import torch\nimport numpy as np\nfrom collections import defaultdict\n\nfrom mappo_lagrangian.utils.util import check, get_shape_from_obs_space, get_shape_from_act_space\n\n\ndef _flatten(T, N, x):\n    return x.reshape(T * N, *x.shape[2:])\n\n\ndef _cast(x):\n    return x.transpose(1,0,2).reshape(-1, *x.shape[2:])\n\n\nclass SeparatedReplayBuffer(object):\n    def __init__(self, args, obs_space, share_obs_space, act_space):\n        self.episode_length = args.episode_length\n        self.n_rollout_threads = args.n_rollout_threads\n        self.rnn_hidden_size = args.hidden_size\n        self.recurrent_N = args.recurrent_N\n        self.gamma = args.gamma\n        self.gae_lambda = args.gae_lambda\n        self._use_gae = args.use_gae\n        self._use_popart = args.use_popart\n        self._use_valuenorm = args.use_valuenorm\n        self._use_proper_time_limits = args.use_proper_time_limits\n        self.algo = args.algorithm_name\n\n        obs_shape = get_shape_from_obs_space(obs_space)\n        share_obs_shape = get_shape_from_obs_space(share_obs_space)\n\n        if type(obs_shape[-1]) == list:\n            obs_shape = obs_shape[:1]\n\n        if type(share_obs_shape[-1]) == list:\n            share_obs_shape = share_obs_shape[:1]\n\n        self.aver_episode_costs = np.zeros((self.episode_length + 1, self.n_rollout_threads, *obs_shape),\n                                           dtype=np.float32)\n        self.share_obs = np.zeros((self.episode_length + 1, self.n_rollout_threads, *share_obs_shape), dtype=np.float32)\n        self.obs = np.zeros((self.episode_length + 1, self.n_rollout_threads, *obs_shape), dtype=np.float32)\n\n        self.rnn_states = np.zeros((self.episode_length + 1, self.n_rollout_threads, self.recurrent_N, self.rnn_hidden_size), dtype=np.float32)\n        self.rnn_states_critic = np.zeros_like(self.rnn_states)\n        self.rnn_states_cost = np.zeros_like(self.rnn_states)\n\n        self.value_preds = np.zeros((self.episode_length + 1, self.n_rollout_threads, 1), dtype=np.float32)\n        self.returns = np.zeros((self.episode_length + 1, self.n_rollout_threads, 1), dtype=np.float32)\n        \n        if act_space.__class__.__name__ == 'Discrete':\n            self.available_actions = np.ones((self.episode_length + 1, self.n_rollout_threads, act_space.n), dtype=np.float32)\n        else:\n            self.available_actions = None\n\n        act_shape = get_shape_from_act_space(act_space)\n\n        self.actions = np.zeros((self.episode_length, self.n_rollout_threads, act_shape), dtype=np.float32)\n        self.action_log_probs = np.zeros((self.episode_length, self.n_rollout_threads, act_shape), dtype=np.float32)\n        self.rewards = np.zeros((self.episode_length, self.n_rollout_threads, 1), dtype=np.float32)\n\n        self.costs = np.zeros_like(self.rewards)\n        self.cost_preds = np.zeros_like(self.value_preds)\n        self.cost_returns = np.zeros_like(self.returns)\n        \n        self.masks = np.ones((self.episode_length + 1, self.n_rollout_threads, 1), dtype=np.float32)\n        self.bad_masks = np.ones_like(self.masks)\n        self.active_masks = np.ones_like(self.masks)\n\n        self.factor = None\n\n        self.step = 0\n\n    def update_factor(self, factor):\n        self.factor = factor.copy()\n\n    def return_aver_insert(self, aver_episode_costs):\n        self.aver_episode_costs = aver_episode_costs.copy()\n\n    def insert(self, share_obs, obs, rnn_states, rnn_states_critic, actions, action_log_probs,\n               value_preds, rewards, masks, bad_masks=None, active_masks=None, available_actions=None, costs=None,\n               cost_preds=None, rnn_states_cost=None, aver_episode_costs = 0):\n        self.share_obs[self.step + 1] = share_obs.copy()\n        self.obs[self.step + 1] = obs.copy()\n        self.rnn_states[self.step + 1] = rnn_states.copy()\n        self.rnn_states_critic[self.step + 1] = rnn_states_critic.copy()\n        self.actions[self.step] = actions.copy()\n        self.action_log_probs[self.step] = action_log_probs.copy()\n        self.value_preds[self.step] = value_preds.copy()\n        self.rewards[self.step] = rewards.copy()\n        self.masks[self.step + 1] = masks.copy()\n        if bad_masks is not None:\n            self.bad_masks[self.step + 1] = bad_masks.copy()\n        if active_masks is not None:\n            self.active_masks[self.step + 1] = active_masks.copy()\n        if available_actions is not None:\n            self.available_actions[self.step + 1] = available_actions.copy()\n        if costs is not None:\n            self.costs[self.step] = costs.copy()\n        if cost_preds is not None:\n            self.cost_preds[self.step] = cost_preds.copy()\n        if rnn_states_cost is not None:\n            self.rnn_states_cost[self.step + 1] = rnn_states_cost.copy()\n\n        self.step = (self.step + 1) % self.episode_length\n\n    def chooseinsert(self, share_obs, obs, rnn_states, rnn_states_critic, actions, action_log_probs,\n                     value_preds, rewards, masks, bad_masks=None, active_masks=None, available_actions=None):\n        self.share_obs[self.step] = share_obs.copy()\n        self.obs[self.step] = obs.copy()\n        self.rnn_states[self.step + 1] = rnn_states.copy()\n        self.rnn_states_critic[self.step + 1] = rnn_states_critic.copy()\n        self.actions[self.step] = actions.copy()\n        self.action_log_probs[self.step] = action_log_probs.copy()\n        self.value_preds[self.step] = value_preds.copy()\n        self.rewards[self.step] = rewards.copy()\n        self.masks[self.step + 1] = masks.copy()\n        if bad_masks is not None:\n            self.bad_masks[self.step + 1] = bad_masks.copy()\n        if active_masks is not None:\n            self.active_masks[self.step] = active_masks.copy()\n        if available_actions is not None:\n            self.available_actions[self.step] = available_actions.copy()\n\n        self.step = (self.step + 1) % self.episode_length\n    \n    def after_update(self):\n        self.share_obs[0] = self.share_obs[-1].copy()\n        self.obs[0] = self.obs[-1].copy()\n        self.rnn_states[0] = self.rnn_states[-1].copy()\n        self.rnn_states_critic[0] = self.rnn_states_critic[-1].copy()\n        self.rnn_states_cost[0] = self.rnn_states_cost[-1].copy()\n        self.masks[0] = self.masks[-1].copy()\n        self.bad_masks[0] = self.bad_masks[-1].copy()\n        self.active_masks[0] = self.active_masks[-1].copy()\n        if self.available_actions is not None:\n            self.available_actions[0] = self.available_actions[-1].copy()\n\n    def chooseafter_update(self):\n        self.rnn_states[0] = self.rnn_states[-1].copy()\n        self.rnn_states_critic[0] = self.rnn_states_critic[-1].copy()\n        self.masks[0] = self.masks[-1].copy()\n        self.bad_masks[0] = self.bad_masks[-1].copy()\n\n    def compute_returns(self, next_value, value_normalizer=None):\n        \"\"\"\n        use proper time limits, the difference of use or not is whether use bad_mask\n        \"\"\"\n        if self._use_proper_time_limits:\n            if self._use_gae:\n                self.value_preds[-1] = next_value\n                gae = 0\n                for step in reversed(range(self.rewards.shape[0])):\n                    if self._use_popart or self._use_valuenorm:\n                        delta = self.rewards[step] + self.gamma * value_normalizer.denormalize(self.value_preds[\n                            step + 1]) * self.masks[step + 1] - value_normalizer.denormalize(self.value_preds[step])\n                        gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae\n                        gae = gae * self.bad_masks[step + 1]\n                        self.returns[step] = gae + value_normalizer.denormalize(self.value_preds[step])\n                    else:\n                        delta = self.rewards[step] + self.gamma * self.value_preds[step + 1] * self.masks[step + 1] - self.value_preds[step]\n                        gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae\n                        gae = gae * self.bad_masks[step + 1]\n                        self.returns[step] = gae + self.value_preds[step]\n            else:\n                self.returns[-1] = next_value\n                for step in reversed(range(self.rewards.shape[0])):\n                    if self._use_popart:\n                        self.returns[step] = (self.returns[step + 1] * self.gamma * self.masks[step + 1] + self.rewards[step]) * self.bad_masks[step + 1] \\\n                            + (1 - self.bad_masks[step + 1]) * value_normalizer.denormalize(self.value_preds[step])\n                    else:\n                        self.returns[step] = (self.returns[step + 1] * self.gamma * self.masks[step + 1] + self.rewards[step]) * self.bad_masks[step + 1] \\\n                            + (1 - self.bad_masks[step + 1]) * self.value_preds[step]\n        else:\n            if self._use_gae:\n                self.value_preds[-1] = next_value\n                gae = 0\n                for step in reversed(range(self.rewards.shape[0])):\n                    if self._use_popart or self._use_valuenorm:\n                        delta = self.rewards[step] + self.gamma * value_normalizer.denormalize(self.value_preds[step + 1]) * self.masks[step + 1] - value_normalizer.denormalize(self.value_preds[step])\n                        gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae\n                        self.returns[step] = gae + value_normalizer.denormalize(self.value_preds[step])\n                    else:\n                        delta = self.rewards[step] + self.gamma * self.value_preds[step + 1] * self.masks[step + 1] - self.value_preds[step]\n                        gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae\n                        self.returns[step] = gae + self.value_preds[step]\n            else:\n                self.returns[-1] = next_value\n                for step in reversed(range(self.rewards.shape[0])):\n                    self.returns[step] = self.returns[step + 1] * self.gamma * self.masks[step + 1] + self.rewards[step]\n\n    def compute_cost_returns(self, next_cost, value_normalizer=None):\n\n        if self._use_proper_time_limits:\n            if self._use_gae:\n                self.cost_preds[-1] = next_cost\n                gae = 0\n                for step in reversed(range(self.costs.shape[0])):\n                    if self._use_popart or self._use_valuenorm:\n                        delta = self.costs[step] + self.gamma * value_normalizer.denormalize(self.cost_preds[step + 1]) * self.masks[step + 1] - value_normalizer.denormalize(self.cost_preds[step])\n                        gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae\n                        gae = gae * self.bad_masks[step + 1]\n                        self.cost_returns[step] = gae + value_normalizer.denormalize(self.cost_preds[step])\n                    else:\n                        delta = self.costs[step] + self.gamma * self.cost_preds[step + 1] * self.masks[step + 1] - self.cost_preds[step]\n                        gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae\n                        gae = gae * self.bad_masks[step + 1]\n                        self.cost_returns[step] = gae + self.cost_preds[step]\n            else:\n                self.cost_returns[-1] = next_cost\n                for step in reversed(range(self.costs.shape[0])):\n                    if self._use_popart:\n                        self.cost_returns[step] = (self.cost_returns[step + 1] * self.gamma * self.masks[step + 1] + self.costs[step]) * self.bad_masks[step + 1] \\\n                                             + (1 - self.bad_masks[step + 1]) * value_normalizer.denormalize(self.cost_preds[step])\n                    else:\n                        self.cost_returns[step] = (self.cost_returns[step + 1] * self.gamma * self.masks[step + 1] + self.costs[step]) * self.bad_masks[step + 1] \\\n                                             + (1 - self.bad_masks[step + 1]) * self.cost_preds[step]\n        else:\n            if self._use_gae:\n                self.cost_preds[-1] = next_cost\n                gae = 0\n                for step in reversed(range(self.costs.shape[0])):\n                    if self._use_popart or self._use_valuenorm:\n                        delta = self.costs[step] + self.gamma * value_normalizer.denormalize(self.cost_preds[step + 1]) * self.masks[step + 1] - value_normalizer.denormalize(self.cost_preds[step])\n                        gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae\n                        self.cost_returns[step] = gae + value_normalizer.denormalize(self.cost_preds[step])\n                    else:\n                        delta = self.costs[step] + self.gamma * self.cost_preds[step + 1] * self.masks[step + 1] - self.cost_preds[step]\n                        gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae\n                        self.cost_returns[step] = gae + self.cost_preds[step]\n            else:\n                self.cost_returns[-1] = next_cost\n                for step in reversed(range(self.costs.shape[0])):\n                    self.cost_returns[step] = self.cost_returns[step + 1] * self.gamma * self.masks[step + 1] + self.costs[step]\n\n    def feed_forward_generator(self, advantages, num_mini_batch=None, mini_batch_size=None, cost_adv=None):\n        episode_length, n_rollout_threads = self.rewards.shape[0:2]\n        batch_size = n_rollout_threads * episode_length\n\n        if mini_batch_size is None:\n            assert batch_size >= num_mini_batch, (\n                \"PPO requires the number of processes ({}) \"\n                \"* number of steps ({}) = {} \"\n                \"to be greater than or equal to the number of PPO mini batches ({}).\"\n                \"\".format(n_rollout_threads, episode_length, n_rollout_threads * episode_length,\n                          num_mini_batch))\n            mini_batch_size = batch_size // num_mini_batch\n\n        rand = torch.randperm(batch_size).numpy()\n        sampler = [rand[i*mini_batch_size:(i+1)*mini_batch_size] for i in range(num_mini_batch)]\n\n        share_obs = self.share_obs[:-1].reshape(-1, *self.share_obs.shape[2:])\n        obs = self.obs[:-1].reshape(-1, *self.obs.shape[2:])\n        rnn_states = self.rnn_states[:-1].reshape(-1, *self.rnn_states.shape[2:])\n        rnn_states_critic = self.rnn_states_critic[:-1].reshape(-1, *self.rnn_states_critic.shape[2:])\n        rnn_states_cost = self.rnn_states_cost[:-1].reshape(-1, *self.rnn_states_cost.shape[2:])\n        actions = self.actions.reshape(-1, self.actions.shape[-1])\n        if self.available_actions is not None:\n            available_actions = self.available_actions[:-1].reshape(-1, self.available_actions.shape[-1])\n        value_preds = self.value_preds[:-1].reshape(-1, 1)\n        returns = self.returns[:-1].reshape(-1, 1)\n        cost_preds = self.cost_preds[:-1].reshape(-1, 1)\n        cost_returns = self.cost_returns[:-1].reshape(-1, 1)\n        masks = self.masks[:-1].reshape(-1, 1)\n        active_masks = self.active_masks[:-1].reshape(-1, 1)\n        action_log_probs = self.action_log_probs.reshape(-1, self.action_log_probs.shape[-1])\n        aver_episode_costs = self.aver_episode_costs\n        if self.factor is not None:\n            # factor = self.factor.reshape(-1,1)\n            factor = self.factor.reshape(-1, self.factor.shape[-1])\n        advantages = advantages.reshape(-1, 1)\n        if cost_adv is not None:\n            cost_adv = cost_adv.reshape(-1, 1)\n\n        for indices in sampler:\n            # obs size [T+1 N Dim]-->[T N Dim]-->[T*N,Dim]-->[index,Dim]\n            share_obs_batch = share_obs[indices]\n            obs_batch = obs[indices]\n            rnn_states_batch = rnn_states[indices]\n            rnn_states_critic_batch = rnn_states_critic[indices]\n            rnn_states_cost_batch = rnn_states_cost[indices]\n            actions_batch = actions[indices]\n            if self.available_actions is not None:\n                available_actions_batch = available_actions[indices]\n            else:\n                available_actions_batch = None\n            value_preds_batch = value_preds[indices]\n            return_batch = returns[indices]\n            cost_preds_batch = cost_preds[indices]\n            cost_return_batch = cost_returns[indices]\n            masks_batch = masks[indices]\n            active_masks_batch = active_masks[indices]\n            old_action_log_probs_batch = action_log_probs[indices]\n            if advantages is None:\n                adv_targ = None\n            else:\n                adv_targ = advantages[indices]\n            if cost_adv is None:\n                cost_adv_targ = None\n            else:\n                cost_adv_targ = cost_adv[indices]\n\n            if self.factor is None:\n                yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch\n            else:\n                if self.algo == \"mappo_lagr\":\n                    factor_batch = factor[indices]\n                    yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch, factor_batch, cost_preds_batch, cost_return_batch, rnn_states_cost_batch, cost_adv_targ, aver_episode_costs\n                else:\n                    factor_batch = factor[indices]\n                    yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch, factor_batch\n\n    def naive_recurrent_generator(self, advantages, num_mini_batch, cost_adv=None):\n        n_rollout_threads = self.rewards.shape[1]\n        assert n_rollout_threads >= num_mini_batch, (\n            \"PPO requires the number of processes ({}) \"\n            \"to be greater than or equal to the number of \"\n            \"PPO mini batches ({}).\".format(n_rollout_threads, num_mini_batch))\n        num_envs_per_batch = n_rollout_threads // num_mini_batch\n        perm = torch.randperm(n_rollout_threads).numpy()\n        for start_ind in range(0, n_rollout_threads, num_envs_per_batch):\n            share_obs_batch = []\n            obs_batch = []\n            rnn_states_batch = []\n            rnn_states_critic_batch = []\n            rnn_states_cost_batch = []\n            actions_batch = []\n            available_actions_batch = []\n            value_preds_batch = []\n            cost_preds_batch = []\n            return_batch = []\n            cost_return_batch = []\n            masks_batch = []\n            active_masks_batch = []\n            old_action_log_probs_batch = []\n            adv_targ = []\n            cost_adv_targ = []\n            factor_batch = []\n            for offset in range(num_envs_per_batch):\n                ind = perm[start_ind + offset]\n                share_obs_batch.append(self.share_obs[:-1, ind])\n                obs_batch.append(self.obs[:-1, ind])\n                rnn_states_batch.append(self.rnn_states[0:1, ind])\n                rnn_states_critic_batch.append(self.rnn_states_critic[0:1, ind])\n                rnn_states_cost_batch.append(self.rnn_states_cost[0:1, ind])\n                actions_batch.append(self.actions[:, ind])\n                if self.available_actions is not None:\n                    available_actions_batch.append(self.available_actions[:-1, ind])\n                value_preds_batch.append(self.value_preds[:-1, ind])\n                cost_preds_batch.append(self.cost_preds[:-1, ind])\n                return_batch.append(self.returns[:-1, ind])\n                cost_return_batch.append(self.cost_returns[:-1, ind])\n                masks_batch.append(self.masks[:-1, ind])\n                active_masks_batch.append(self.active_masks[:-1, ind])\n                old_action_log_probs_batch.append(self.action_log_probs[:, ind])\n                adv_targ.append(advantages[:, ind])\n                if cost_adv is not None:\n                    cost_adv_targ.append(cost_adv[:, ind])\n                if self.factor is not None:\n                    factor_batch.append(self.factor[:, ind])\n\n            # [N[T, dim]]\n            T, N = self.episode_length, num_envs_per_batch\n            # These are all from_numpys of size (T, N, -1)\n            share_obs_batch = np.stack(share_obs_batch, 1)\n            obs_batch = np.stack(obs_batch, 1)\n            actions_batch = np.stack(actions_batch, 1)\n            if self.available_actions is not None:\n                available_actions_batch = np.stack(available_actions_batch, 1)\n            if self.factor is not None:\n                factor_batch=np.stack(factor_batch,1)\n            value_preds_batch = np.stack(value_preds_batch, 1)\n            cost_preds_batch = np.stack(cost_preds_batch, 1)\n            return_batch = np.stack(return_batch, 1)\n            cost_return_batch = np.stack(cost_return_batch, 1)\n            masks_batch = np.stack(masks_batch, 1)\n            active_masks_batch = np.stack(active_masks_batch, 1)\n            old_action_log_probs_batch = np.stack(old_action_log_probs_batch, 1)\n            adv_targ = np.stack(adv_targ, 1)\n            if cost_adv is not None:\n                cost_adv_targ = np.stack(cost_adv_targ, 1)\n\n            # States is just a (N, -1) from_numpy [N[1,dim]]\n            rnn_states_batch = np.stack(rnn_states_batch, 1).reshape(N, *self.rnn_states.shape[2:])\n            rnn_states_critic_batch = np.stack(rnn_states_critic_batch, 1).reshape(N, *self.rnn_states_critic.shape[2:])\n            rnn_states_cost_batch = np.stack(rnn_states_cost_batch, 1).reshape(N, *self.rnn_states_cost.shape[2:])\n\n            # Flatten the (T, N, ...) from_numpys to (T * N, ...)\n            share_obs_batch = _flatten(T, N, share_obs_batch)\n            obs_batch = _flatten(T, N, obs_batch)\n            actions_batch = _flatten(T, N, actions_batch)\n            if self.available_actions is not None:\n                available_actions_batch = _flatten(T, N, available_actions_batch)\n            else:\n                available_actions_batch = None\n            if self.factor is not None:\n                factor_batch=_flatten(T,N,factor_batch)\n            value_preds_batch = _flatten(T, N, value_preds_batch)\n            cost_preds_batch = _flatten(T, N, cost_preds_batch)\n            return_batch = _flatten(T, N, return_batch)\n            cost_return_batch = _flatten(T, N, cost_return_batch)\n            masks_batch = _flatten(T, N, masks_batch)\n            active_masks_batch = _flatten(T, N, active_masks_batch)\n            old_action_log_probs_batch = _flatten(T, N, old_action_log_probs_batch)\n            adv_targ = _flatten(T, N, adv_targ)\n            if cost_adv is not None:\n                cost_adv_targ = _flatten(T, N, cost_adv_targ)\n            if self.factor is not None:\n                if self.algo == \"mappo_lagr\":\n                    yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch, factor_batch, cost_preds_batch, cost_return_batch, rnn_states_cost_batch, cost_adv_targ  # 17 value\n                else:\n                    yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch, factor_batch  # value\n            else:\n                yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch\n\n     \n"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/utils/shared_buffer.py",
    "content": "import torch\nimport numpy as np\nfrom mappo_lagrangian.utils.util import get_shape_from_obs_space, get_shape_from_act_space\n\n\ndef _flatten(T, N, x):\n    return x.reshape(T * N, *x.shape[2:])\n\n\ndef _cast(x):\n    return x.transpose(1, 2, 0, 3).reshape(-1, *x.shape[3:])\n\n\nclass SharedReplayBuffer(object):\n    \"\"\"\n    Buffer to store training data.\n    :param args: (argparse.Namespace) arguments containing relevant model, policy, and env information.\n    :param num_agents: (int) number of agents in the env.\n    :param obs_space: (gym.Space) observation space of agents.\n    :param cent_obs_space: (gym.Space) centralized observation space of agents.\n    :param act_space: (gym.Space) action space for agents.\n    \"\"\"\n\n    def __init__(self, args, num_agents, obs_space, cent_obs_space, act_space):\n        self.episode_length = args.episode_length\n        self.n_rollout_threads = args.n_rollout_threads\n        self.hidden_size = args.hidden_size\n        self.recurrent_N = args.recurrent_N\n        self.gamma = args.gamma\n        self.gae_lambda = args.gae_lambda\n        self._use_gae = args.use_gae\n        self._use_popart = args.use_popart\n        self._use_proper_time_limits = args.use_proper_time_limits\n\n        obs_shape = get_shape_from_obs_space(obs_space)\n        share_obs_shape = get_shape_from_obs_space(cent_obs_space)\n        if not args.use_centralized_V:\n            share_obs_shape = obs_shape\n\n        if type(obs_shape[-1]) == list:\n            obs_shape = obs_shape[:1]\n\n        if type(share_obs_shape[-1]) == list:\n            share_obs_shape = share_obs_shape[:1]\n\n        self.share_obs = np.zeros((self.episode_length + 1, self.n_rollout_threads, num_agents, *share_obs_shape),\n                                  dtype=np.float32)\n        self.obs = np.zeros((self.episode_length + 1, self.n_rollout_threads, num_agents, *obs_shape), dtype=np.float32)\n\n        self.rnn_states = np.zeros(\n            (self.episode_length + 1, self.n_rollout_threads, num_agents, self.recurrent_N, self.hidden_size),\n            dtype=np.float32)\n        self.rnn_states_critic = np.zeros_like(self.rnn_states)\n\n        self.value_preds = np.zeros(\n            (self.episode_length + 1, self.n_rollout_threads, num_agents, 1), dtype=np.float32)\n        self.returns = np.zeros_like(self.value_preds)\n\n        if act_space.__class__.__name__ == 'Discrete':\n            self.available_actions = np.ones((self.episode_length + 1, self.n_rollout_threads, num_agents, act_space.n),\n                                             dtype=np.float32)\n        else:\n            self.available_actions = None\n\n        act_shape = get_shape_from_act_space(act_space)\n\n        self.actions = np.zeros(\n            (self.episode_length, self.n_rollout_threads, num_agents, act_shape), dtype=np.float32)\n        self.action_log_probs = np.zeros(\n            (self.episode_length, self.n_rollout_threads, num_agents, act_shape), dtype=np.float32)\n        self.rewards = np.zeros(\n            (self.episode_length, self.n_rollout_threads, num_agents, 1), dtype=np.float32)\n\n        self.masks = np.ones((self.episode_length + 1, self.n_rollout_threads, num_agents, 1), dtype=np.float32)\n        self.bad_masks = np.ones_like(self.masks)\n        self.active_masks = np.ones_like(self.masks)\n\n        self.step = 0\n\n    def insert(self, share_obs, obs, rnn_states_actor, rnn_states_critic, actions, action_log_probs,\n               value_preds, rewards, masks, bad_masks=None, active_masks=None, available_actions=None):\n        \"\"\"\n        Insert data into the buffer.\n        :param share_obs: (argparse.Namespace) arguments containing relevant model, policy, and env information.\n        :param obs: (np.ndarray) local agent observations.\n        :param rnn_states_actor: (np.ndarray) RNN states for actor network.\n        :param rnn_states_critic: (np.ndarray) RNN states for critic network.\n        :param actions:(np.ndarray) actions taken by agents.\n        :param action_log_probs:(np.ndarray) log probs of actions taken by agents\n        :param value_preds: (np.ndarray) value function prediction at each step.\n        :param rewards: (np.ndarray) reward collected at each step.\n        :param masks: (np.ndarray) denotes whether the environment has terminated or not.\n        :param bad_masks: (np.ndarray) action space for agents.\n        :param active_masks: (np.ndarray) denotes whether an agent is active or dead in the env.\n        :param available_actions: (np.ndarray) actions available to each agent. If None, all actions are available.\n        \"\"\"\n        self.share_obs[self.step + 1] = share_obs.copy()\n        self.obs[self.step + 1] = obs.copy()\n        self.rnn_states[self.step + 1] = rnn_states_actor.copy()\n        self.rnn_states_critic[self.step + 1] = rnn_states_critic.copy()\n        self.actions[self.step] = actions.copy()\n        self.action_log_probs[self.step] = action_log_probs.copy()\n        self.value_preds[self.step] = value_preds.copy()\n        self.rewards[self.step] = rewards.copy()\n        self.masks[self.step + 1] = masks.copy()\n        if bad_masks is not None:\n            self.bad_masks[self.step + 1] = bad_masks.copy()\n        if active_masks is not None:\n            self.active_masks[self.step + 1] = active_masks.copy()\n        if available_actions is not None:\n            self.available_actions[self.step + 1] = available_actions.copy()\n\n        self.step = (self.step + 1) % self.episode_length\n\n    def chooseinsert(self, share_obs, obs, rnn_states, rnn_states_critic, actions, action_log_probs,\n                     value_preds, rewards, masks, bad_masks=None, active_masks=None, available_actions=None):\n        \"\"\"\n        Insert data into the buffer. This insert function is used specifically for Hanabi, which is turn based.\n        :param share_obs: (argparse.Namespace) arguments containing relevant model, policy, and env information.\n        :param obs: (np.ndarray) local agent observations.\n        :param rnn_states_actor: (np.ndarray) RNN states for actor network.\n        :param rnn_states_critic: (np.ndarray) RNN states for critic network.\n        :param actions:(np.ndarray) actions taken by agents.\n        :param action_log_probs:(np.ndarray) log probs of actions taken by agents\n        :param value_preds: (np.ndarray) value function prediction at each step.\n        :param rewards: (np.ndarray) reward collected at each step.\n        :param masks: (np.ndarray) denotes whether the environment has terminated or not.\n        :param bad_masks: (np.ndarray) denotes indicate whether whether true terminal state or due to episode limit\n        :param active_masks: (np.ndarray) denotes whether an agent is active or dead in the env.\n        :param available_actions: (np.ndarray) actions available to each agent. If None, all actions are available.\n        \"\"\"\n        self.share_obs[self.step] = share_obs.copy()\n        self.obs[self.step] = obs.copy()\n        self.rnn_states[self.step + 1] = rnn_states.copy()\n        self.rnn_states_critic[self.step + 1] = rnn_states_critic.copy()\n        self.actions[self.step] = actions.copy()\n        self.action_log_probs[self.step] = action_log_probs.copy()\n        self.value_preds[self.step] = value_preds.copy()\n        self.rewards[self.step] = rewards.copy()\n        self.masks[self.step + 1] = masks.copy()\n        if bad_masks is not None:\n            self.bad_masks[self.step + 1] = bad_masks.copy()\n        if active_masks is not None:\n            self.active_masks[self.step] = active_masks.copy()\n        if available_actions is not None:\n            self.available_actions[self.step] = available_actions.copy()\n\n        self.step = (self.step + 1) % self.episode_length\n\n    def after_update(self):\n        \"\"\"Copy last timestep data to first index. Called after update to model.\"\"\"\n        self.share_obs[0] = self.share_obs[-1].copy()\n        self.obs[0] = self.obs[-1].copy()\n        self.rnn_states[0] = self.rnn_states[-1].copy()\n        self.rnn_states_critic[0] = self.rnn_states_critic[-1].copy()\n        self.masks[0] = self.masks[-1].copy()\n        self.bad_masks[0] = self.bad_masks[-1].copy()\n        self.active_masks[0] = self.active_masks[-1].copy()\n        if self.available_actions is not None:\n            self.available_actions[0] = self.available_actions[-1].copy()\n\n    def chooseafter_update(self):\n        \"\"\"Copy last timestep data to first index. This method is used for Hanabi.\"\"\"\n        self.rnn_states[0] = self.rnn_states[-1].copy()\n        self.rnn_states_critic[0] = self.rnn_states_critic[-1].copy()\n        self.masks[0] = self.masks[-1].copy()\n        self.bad_masks[0] = self.bad_masks[-1].copy()\n\n    def compute_returns(self, next_value, value_normalizer=None):\n        \"\"\"\n        Compute returns either as discounted sum of rewards, or using GAE.\n        :param next_value: (np.ndarray) value predictions for the step after the last episode step.\n        :param value_normalizer: (PopArt) If not None, PopArt value normalizer instance.\n        \"\"\"\n        if self._use_gae:\n            self.value_preds[-1] = next_value\n            gae = 0\n            for step in reversed(range(self.rewards.shape[0])):\n                if self._use_popart:\n                    delta = self.rewards[step] + self.gamma * value_normalizer.denormalize(\n                        self.value_preds[step + 1]) * self.masks[step + 1] \\\n                            - value_normalizer.denormalize(self.value_preds[step])\n                    gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae\n                    self.returns[step] = gae + value_normalizer.denormalize(self.value_preds[step])\n                else:\n                    delta = self.rewards[step] + self.gamma * self.value_preds[step + 1] * self.masks[step + 1] - \\\n                            self.value_preds[step]\n                    gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae\n                    self.returns[step] = gae + self.value_preds[step]\n        else:\n            self.returns[-1] = next_value\n            for step in reversed(range(self.rewards.shape[0])):\n                self.returns[step] = self.returns[step + 1] * self.gamma * self.masks[step + 1] + self.rewards[step]\n\n    def feed_forward_generator(self, advantages, num_mini_batch=None, mini_batch_size=None):\n        \"\"\"\n        Yield training data for MLP policies.\n        :param advantages: (np.ndarray) advantage estimates.\n        :param num_mini_batch: (int) number of minibatches to split the batch into.\n        :param mini_batch_size: (int) number of samples in each minibatch.\n        \"\"\"\n        episode_length, n_rollout_threads, num_agents = self.rewards.shape[0:3]\n        batch_size = n_rollout_threads * episode_length * num_agents\n\n        if mini_batch_size is None:\n            assert batch_size >= num_mini_batch, (\n                \"PPO requires the number of processes ({}) \"\n                \"* number of steps ({}) * number of agents ({}) = {} \"\n                \"to be greater than or equal to the number of PPO mini batches ({}).\"\n                \"\".format(n_rollout_threads, episode_length, num_agents,\n                          n_rollout_threads * episode_length * num_agents,\n                          num_mini_batch))\n            mini_batch_size = batch_size // num_mini_batch\n\n        rand = torch.randperm(batch_size).numpy()\n        sampler = [rand[i * mini_batch_size:(i + 1) * mini_batch_size] for i in range(num_mini_batch)]\n\n        share_obs = self.share_obs[:-1].reshape(-1, *self.share_obs.shape[3:])\n        obs = self.obs[:-1].reshape(-1, *self.obs.shape[3:])\n        rnn_states = self.rnn_states[:-1].reshape(-1, *self.rnn_states.shape[3:])\n        rnn_states_critic = self.rnn_states_critic[:-1].reshape(-1, *self.rnn_states_critic.shape[3:])\n        actions = self.actions.reshape(-1, self.actions.shape[-1])\n        if self.available_actions is not None:\n            available_actions = self.available_actions[:-1].reshape(-1, self.available_actions.shape[-1])\n        value_preds = self.value_preds[:-1].reshape(-1, 1)\n        returns = self.returns[:-1].reshape(-1, 1)\n        masks = self.masks[:-1].reshape(-1, 1)\n        active_masks = self.active_masks[:-1].reshape(-1, 1)\n        action_log_probs = self.action_log_probs.reshape(-1, self.action_log_probs.shape[-1])\n        advantages = advantages.reshape(-1, 1)\n\n        for indices in sampler:\n            # obs size [T+1 N M Dim]-->[T N M Dim]-->[T*N*M,Dim]-->[index,Dim]\n            share_obs_batch = share_obs[indices]\n            obs_batch = obs[indices]\n            rnn_states_batch = rnn_states[indices]\n            rnn_states_critic_batch = rnn_states_critic[indices]\n            actions_batch = actions[indices]\n            if self.available_actions is not None:\n                available_actions_batch = available_actions[indices]\n            else:\n                available_actions_batch = None\n            value_preds_batch = value_preds[indices]\n            return_batch = returns[indices]\n            masks_batch = masks[indices]\n            active_masks_batch = active_masks[indices]\n            old_action_log_probs_batch = action_log_probs[indices]\n            if advantages is None:\n                adv_targ = None\n            else:\n                adv_targ = advantages[indices]\n\n            yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch,\\\n                  value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch,\\\n                  adv_targ, available_actions_batch\n\n    def naive_recurrent_generator(self, advantages, num_mini_batch):\n        \"\"\"\n        Yield training data for non-chunked RNN training.\n        :param advantages: (np.ndarray) advantage estimates.\n        :param num_mini_batch: (int) number of minibatches to split the batch into.\n        \"\"\"\n        episode_length, n_rollout_threads, num_agents = self.rewards.shape[0:3]\n        batch_size = n_rollout_threads * num_agents\n        assert n_rollout_threads * num_agents >= num_mini_batch, (\n            \"PPO requires the number of processes ({})* number of agents ({}) \"\n            \"to be greater than or equal to the number of \"\n            \"PPO mini batches ({}).\".format(n_rollout_threads, num_agents, num_mini_batch))\n        num_envs_per_batch = batch_size // num_mini_batch\n        perm = torch.randperm(batch_size).numpy()\n\n        share_obs = self.share_obs.reshape(-1, batch_size, *self.share_obs.shape[3:])\n        obs = self.obs.reshape(-1, batch_size, *self.obs.shape[3:])\n        rnn_states = self.rnn_states.reshape(-1, batch_size, *self.rnn_states.shape[3:])\n        rnn_states_critic = self.rnn_states_critic.reshape(-1, batch_size, *self.rnn_states_critic.shape[3:])\n        actions = self.actions.reshape(-1, batch_size, self.actions.shape[-1])\n        if self.available_actions is not None:\n            available_actions = self.available_actions.reshape(-1, batch_size, self.available_actions.shape[-1])\n        value_preds = self.value_preds.reshape(-1, batch_size, 1)\n        returns = self.returns.reshape(-1, batch_size, 1)\n        masks = self.masks.reshape(-1, batch_size, 1)\n        active_masks = self.active_masks.reshape(-1, batch_size, 1)\n        action_log_probs = self.action_log_probs.reshape(-1, batch_size, self.action_log_probs.shape[-1])\n        advantages = advantages.reshape(-1, batch_size, 1)\n\n        for start_ind in range(0, batch_size, num_envs_per_batch):\n            share_obs_batch = []\n            obs_batch = []\n            rnn_states_batch = []\n            rnn_states_critic_batch = []\n            actions_batch = []\n            available_actions_batch = []\n            value_preds_batch = []\n            return_batch = []\n            masks_batch = []\n            active_masks_batch = []\n            old_action_log_probs_batch = []\n            adv_targ = []\n\n            for offset in range(num_envs_per_batch):\n                ind = perm[start_ind + offset]\n                share_obs_batch.append(share_obs[:-1, ind])\n                obs_batch.append(obs[:-1, ind])\n                rnn_states_batch.append(rnn_states[0:1, ind])\n                rnn_states_critic_batch.append(rnn_states_critic[0:1, ind])\n                actions_batch.append(actions[:, ind])\n                if self.available_actions is not None:\n                    available_actions_batch.append(available_actions[:-1, ind])\n                value_preds_batch.append(value_preds[:-1, ind])\n                return_batch.append(returns[:-1, ind])\n                masks_batch.append(masks[:-1, ind])\n                active_masks_batch.append(active_masks[:-1, ind])\n                old_action_log_probs_batch.append(action_log_probs[:, ind])\n                adv_targ.append(advantages[:, ind])\n\n            # [N[T, dim]]\n            T, N = self.episode_length, num_envs_per_batch\n            # These are all from_numpys of size (T, N, -1)\n            share_obs_batch = np.stack(share_obs_batch, 1)\n            obs_batch = np.stack(obs_batch, 1)\n            actions_batch = np.stack(actions_batch, 1)\n            if self.available_actions is not None:\n                available_actions_batch = np.stack(available_actions_batch, 1)\n            value_preds_batch = np.stack(value_preds_batch, 1)\n            return_batch = np.stack(return_batch, 1)\n            masks_batch = np.stack(masks_batch, 1)\n            active_masks_batch = np.stack(active_masks_batch, 1)\n            old_action_log_probs_batch = np.stack(old_action_log_probs_batch, 1)\n            adv_targ = np.stack(adv_targ, 1)\n\n            # States is just a (N, dim) from_numpy [N[1,dim]]\n            rnn_states_batch = np.stack(rnn_states_batch).reshape(N, *self.rnn_states.shape[3:])\n            rnn_states_critic_batch = np.stack(rnn_states_critic_batch).reshape(N, *self.rnn_states_critic.shape[3:])\n\n            # Flatten the (T, N, ...) from_numpys to (T * N, ...)\n            share_obs_batch = _flatten(T, N, share_obs_batch)\n            obs_batch = _flatten(T, N, obs_batch)\n            actions_batch = _flatten(T, N, actions_batch)\n            if self.available_actions is not None:\n                available_actions_batch = _flatten(T, N, available_actions_batch)\n            else:\n                available_actions_batch = None\n            value_preds_batch = _flatten(T, N, value_preds_batch)\n            return_batch = _flatten(T, N, return_batch)\n            masks_batch = _flatten(T, N, masks_batch)\n            active_masks_batch = _flatten(T, N, active_masks_batch)\n            old_action_log_probs_batch = _flatten(T, N, old_action_log_probs_batch)\n            adv_targ = _flatten(T, N, adv_targ)\n\n            yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch,\\\n                  value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch,\\\n                  adv_targ, available_actions_batch\n\n    def recurrent_generator(self, advantages, num_mini_batch, data_chunk_length):\n        \"\"\"\n        Yield training data for chunked RNN training.\n        :param advantages: (np.ndarray) advantage estimates.\n        :param num_mini_batch: (int) number of minibatches to split the batch into.\n        :param data_chunk_length: (int) length of sequence chunks with which to train RNN.\n        \"\"\"\n        episode_length, n_rollout_threads, num_agents = self.rewards.shape[0:3]\n        batch_size = n_rollout_threads * episode_length * num_agents\n        data_chunks = batch_size // data_chunk_length  # [C=r*T*M/L]\n        mini_batch_size = data_chunks // num_mini_batch\n\n        rand = torch.randperm(data_chunks).numpy()\n        sampler = [rand[i * mini_batch_size:(i + 1) * mini_batch_size] for i in range(num_mini_batch)]\n\n        if len(self.share_obs.shape) > 4:\n            share_obs = self.share_obs[:-1].transpose(1, 2, 0, 3, 4, 5).reshape(-1, *self.share_obs.shape[3:])\n            obs = self.obs[:-1].transpose(1, 2, 0, 3, 4, 5).reshape(-1, *self.obs.shape[3:])\n        else:\n            share_obs = _cast(self.share_obs[:-1])\n            obs = _cast(self.obs[:-1])\n\n        actions = _cast(self.actions)\n        action_log_probs = _cast(self.action_log_probs)\n        advantages = _cast(advantages)\n        value_preds = _cast(self.value_preds[:-1])\n        returns = _cast(self.returns[:-1])\n        masks = _cast(self.masks[:-1])\n        active_masks = _cast(self.active_masks[:-1])\n        # rnn_states = _cast(self.rnn_states[:-1])\n        # rnn_states_critic = _cast(self.rnn_states_critic[:-1])\n        rnn_states = self.rnn_states[:-1].transpose(1, 2, 0, 3, 4).reshape(-1, *self.rnn_states.shape[3:])\n        rnn_states_critic = self.rnn_states_critic[:-1].transpose(1, 2, 0, 3, 4).reshape(-1,\n                                                                                         *self.rnn_states_critic.shape[\n                                                                                          3:])\n\n        if self.available_actions is not None:\n            available_actions = _cast(self.available_actions[:-1])\n\n        for indices in sampler:\n            share_obs_batch = []\n            obs_batch = []\n            rnn_states_batch = []\n            rnn_states_critic_batch = []\n            actions_batch = []\n            available_actions_batch = []\n            value_preds_batch = []\n            return_batch = []\n            masks_batch = []\n            active_masks_batch = []\n            old_action_log_probs_batch = []\n            adv_targ = []\n\n            for index in indices:\n\n                ind = index * data_chunk_length\n                # size [T+1 N M Dim]-->[T N M Dim]-->[N,M,T,Dim]-->[N*M*T,Dim]-->[L,Dim]\n                share_obs_batch.append(share_obs[ind:ind + data_chunk_length])\n                obs_batch.append(obs[ind:ind + data_chunk_length])\n                actions_batch.append(actions[ind:ind + data_chunk_length])\n                if self.available_actions is not None:\n                    available_actions_batch.append(available_actions[ind:ind + data_chunk_length])\n                value_preds_batch.append(value_preds[ind:ind + data_chunk_length])\n                return_batch.append(returns[ind:ind + data_chunk_length])\n                masks_batch.append(masks[ind:ind + data_chunk_length])\n                active_masks_batch.append(active_masks[ind:ind + data_chunk_length])\n                old_action_log_probs_batch.append(action_log_probs[ind:ind + data_chunk_length])\n                adv_targ.append(advantages[ind:ind + data_chunk_length])\n                # size [T+1 N M Dim]-->[T N M Dim]-->[N M T Dim]-->[N*M*T,Dim]-->[1,Dim]\n                rnn_states_batch.append(rnn_states[ind])\n                rnn_states_critic_batch.append(rnn_states_critic[ind])\n\n            L, N = data_chunk_length, mini_batch_size\n\n            # These are all from_numpys of size (L, N, Dim)           \n            share_obs_batch = np.stack(share_obs_batch, axis=1)\n            obs_batch = np.stack(obs_batch, axis=1)\n\n            actions_batch = np.stack(actions_batch, axis=1)\n            if self.available_actions is not None:\n                available_actions_batch = np.stack(available_actions_batch, axis=1)\n            value_preds_batch = np.stack(value_preds_batch, axis=1)\n            return_batch = np.stack(return_batch, axis=1)\n            masks_batch = np.stack(masks_batch, axis=1)\n            active_masks_batch = np.stack(active_masks_batch, axis=1)\n            old_action_log_probs_batch = np.stack(old_action_log_probs_batch, axis=1)\n            adv_targ = np.stack(adv_targ, axis=1)\n\n            # States is just a (N, -1) from_numpy\n            rnn_states_batch = np.stack(rnn_states_batch).reshape(N, *self.rnn_states.shape[3:])\n            rnn_states_critic_batch = np.stack(rnn_states_critic_batch).reshape(N, *self.rnn_states_critic.shape[3:])\n\n            # Flatten the (L, N, ...) from_numpys to (L * N, ...)\n            share_obs_batch = _flatten(L, N, share_obs_batch)\n            obs_batch = _flatten(L, N, obs_batch)\n            actions_batch = _flatten(L, N, actions_batch)\n            if self.available_actions is not None:\n                available_actions_batch = _flatten(L, N, available_actions_batch)\n            else:\n                available_actions_batch = None\n            value_preds_batch = _flatten(L, N, value_preds_batch)\n            return_batch = _flatten(L, N, return_batch)\n            masks_batch = _flatten(L, N, masks_batch)\n            active_masks_batch = _flatten(L, N, active_masks_batch)\n            old_action_log_probs_batch = _flatten(L, N, old_action_log_probs_batch)\n            adv_targ = _flatten(L, N, adv_targ)\n\n            yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch,\\\n                  value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch,\\\n                  adv_targ, available_actions_batch\n"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian/utils/util.py",
    "content": "import numpy as np\nimport math\nimport torch\n\ndef check(input):\n    if type(input) == np.ndarray:\n        return torch.from_numpy(input)\n        \ndef get_gard_norm(it):\n    sum_grad = 0\n    for x in it:\n        if x.grad is None:\n            continue\n        sum_grad += x.grad.norm() ** 2\n    return math.sqrt(sum_grad)\n\ndef update_linear_schedule(optimizer, epoch, total_num_epochs, initial_lr):\n    \"\"\"Decreases the learning rate linearly\"\"\"\n    lr = initial_lr - (initial_lr * (epoch / float(total_num_epochs)))\n    for param_group in optimizer.param_groups:\n        param_group['lr'] = lr\n\ndef huber_loss(e, d):\n    a = (abs(e) <= d).float()\n    b = (e > d).float()\n    return a*e**2/2 + b*d*(abs(e)-d/2)\n\ndef mse_loss(e):\n    return e**2/2\n\ndef get_shape_from_obs_space(obs_space):\n    if obs_space.__class__.__name__ == 'Box':\n        obs_shape = obs_space.shape\n    elif obs_space.__class__.__name__ == 'list':\n        obs_shape = obs_space\n    else:\n        raise NotImplementedError\n    return obs_shape\n\ndef get_shape_from_act_space(act_space):\n    if act_space.__class__.__name__ == 'Discrete':\n        act_shape = 1\n    elif act_space.__class__.__name__ == \"MultiDiscrete\":\n        act_shape = act_space.shape\n    elif act_space.__class__.__name__ == \"Box\":\n        act_shape = act_space.shape[0]\n    elif act_space.__class__.__name__ == \"MultiBinary\":\n        act_shape = act_space.shape[0]\n    else:  # agar\n        act_shape = act_space[0].shape[0] + 1  \n    return act_shape\n\n\ndef tile_images(img_nhwc):\n    \"\"\"\n    Tile N images into one big PxQ image\n    (P,Q) are chosen to be as close as possible, and if N\n    is square, then P=Q.\n    input: img_nhwc, list or array of images, ndim=4 once turned into array\n        n = batch index, h = height, w = width, c = channel\n    returns:\n        bigim_HWc, ndarray with ndim=3\n    \"\"\"\n    img_nhwc = np.asarray(img_nhwc)\n    N, h, w, c = img_nhwc.shape\n    H = int(np.ceil(np.sqrt(N)))\n    W = int(np.ceil(float(N)/H))\n    img_nhwc = np.array(list(img_nhwc) + [img_nhwc[0]*0 for _ in range(N, H*W)])\n    img_HWhwc = img_nhwc.reshape(H, W, h, w, c)\n    img_HhWwc = img_HWhwc.transpose(0, 2, 1, 3, 4)\n    img_Hh_Ww_c = img_HhWwc.reshape(H*h, W*w, c)\n    return img_Hh_Ww_c"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian.egg-info/PKG-INFO",
    "content": "Metadata-Version: 2.1\nName: mappo-lagrangian\nVersion: 0.1.0\nSummary: mappo_lagrangian algorithms of marlbenchmark\nHome-page: UNKNOWN\nAuthor: marl\nAuthor-email: marl@gmail.com\nLicense: UNKNOWN\nDescription: # MAPPO\n        \n        Chao Yu*, Akash Velu*, Eugene Vinitsky, Yu Wang, Alexandre Bayen, and Yi Wu. \n        \n        Website: https://sites.google.com/view/mappo\n        \n        This repository implements MAPPO, a multi-agent variant of PPO. The implementation in this repositorory is used in the paper \"The Surprising Effectiveness of MAPPO in Cooperative Multi-Agent Games\" (https://arxiv.org/abs/2103.01955). \n        This repository is heavily based on https://github.com/ikostrikov/pytorch-a2c-ppo-acktr-gail. \n        \n        ## Environments supported:\n        \n        - [StarCraftII (SMAC)](https://github.com/oxwhirl/smac)\n        - [Hanabi](https://github.com/deepmind/hanabi-learning-environment)\n        - [Multiagent Particle-World Environments (MPEs)](https://github.com/openai/multiagent-particle-envs)\n        \n        ## 1. Usage\n        All core code is located within the onpolicy folder. The algorithms/ subfolder contains algorithm-specific code\n        for MAPPO. \n        \n        * The envs/ subfolder contains environment wrapper implementations for the MPEs, SMAC, and Hanabi. \n        \n        * Code to perform training rollouts and policy updates are contained within the runner/ folder - there is a runner for \n        each environment. \n        \n        * Executable scripts for training with default hyperparameters can be found in the scripts/ folder. The files are named\n        in the following manner: train_algo_environment.sh. Within each file, the map name (in the case of SMAC and the MPEs) can be altered. \n        * Python training scripts for each environment can be found in the scripts/train/ folder. \n        \n        * The config.py file contains relevant hyperparameter and env settings. Most hyperparameters are defaulted to the ones\n        used in the paper; however, please refer to the appendix for a full list of hyperparameters used. \n        \n        \n        ## 2. Installation\n        \n         Here we give an example installation on CUDA == 10.1. For non-GPU & other CUDA version installation, please refer to the [PyTorch website](https://pytorch.org/get-started/locally/).\n        \n        ``` Bash\n        # create conda environment\n        conda create -n marl python==3.6.1\n        conda activate marl\n        pip install torch==1.5.1+cu101 torchvision==0.6.1+cu101 -f https://download.pytorch.org/whl/torch_stable.html\n        ```\n        \n        ```\n        # install on-policy package\n        cd on-policy\n        pip install -e .\n        ```\n        \n        Even though we provide requirement.txt, it may have redundancy. We recommend that the user try to install other required packages by running the code and finding which required package hasn't installed yet.\n        \n        ### 2.1 Install StarCraftII [4.10](http://blzdistsc2-a.akamaihd.net/Linux/SC2.4.10.zip)\n        \n           \n        \n        ``` Bash\n        unzip SC2.4.10.zip\n        # password is iagreetotheeula\n        echo \"export SC2PATH=~/StarCraftII/\" > ~/.bashrc\n        ```\n        \n        * download SMAC Maps, and move it to `~/StarCraftII/Maps/`.\n        \n        * To use a stableid, copy `stableid.json` from https://github.com/Blizzard/s2client-proto.git to `~/StarCraftII/`.\n        \n        \n        ### 2.2 Hanabi\n        Environment code for Hanabi is developed from the open-source environment code, but has been slightly modified to fit the algorithms used here.  \n        To install, execute the following:\n        ``` Bash\n        pip install cffi\n        cd envs/hanabi\n        mkdir build & cd build\n        cmake ..\n        make -j\n        ```\n        \n        \n        ### 2.3 Install MPE\n        \n        ``` Bash\n        # install this package first\n        pip install seaborn\n        ```\n        \n        There are 3 Cooperative scenarios in MPE:\n        \n        * simple_spread\n        * simple_speaker_listener, which is 'Comm' scenario in paper\n        * simple_reference\n        \n        ## 3.Train\n        Here we use train_mpe.sh as an example:\n        ```\n        cd onpolicy/scripts\n        chmod +x ./train_mpe.sh\n        ./train_mpe.sh\n        ```\n        Local results are stored in subfold scripts/results. Note that we use Weights & Bias as the default visualization platform; to use Weights & Bias, please register and login to the platform first. More instructions for using Weights&Bias can be found in the official [documentation](https://docs.wandb.ai/). Adding the `--use_wandb` in command line or in the .sh file will use Tensorboard instead of Weights & Biases. \n        \n        We additionally provide `./eval_hanabi_forward.sh` for evaluating the hanabi score over 100k trials. \n        \n        ## 4. Publication\n        \n        If you find this repository useful, please cite our [paper](https://arxiv.org/abs/2103.01955):\n        ```\n        @misc{yu2021surprising,\n              title={The Surprising Effectiveness of MAPPO in Cooperative Multi-Agent Games}, \n              author={Chao Yu and Akash Velu and Eugene Vinitsky and Yu Wang and Alexandre Bayen and Yi Wu},\n              year={2021},\n              eprint={2103.01955},\n              archivePrefix={arXiv},\n              primaryClass={cs.LG}\n        }\n        ```\n        \n        \nKeywords: multi-agent reinforcement learning platform pytorch\nPlatform: UNKNOWN\nClassifier: Development Status :: 3 - Alpha\nClassifier: Intended Audience :: Science/Research\nClassifier: Topic :: Scientific/Engineering :: Artificial Intelligence\nClassifier: Topic :: Software Development :: Libraries :: Python Modules\nClassifier: Programming Language :: Python :: 3\nClassifier: License :: OSI Approved :: MIT License\nClassifier: Operating System :: OS Independent\nRequires-Python: >=3.6\nDescription-Content-Type: text/markdown\n"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian.egg-info/SOURCES.txt",
    "content": "README.md\nsetup.py\nmappo_lagrangian/__init__.py\nmappo_lagrangian/config.py\nmappo_lagrangian.egg-info/PKG-INFO\nmappo_lagrangian.egg-info/SOURCES.txt\nmappo_lagrangian.egg-info/dependency_links.txt\nmappo_lagrangian.egg-info/top_level.txt\nmappo_lagrangian/algorithms/__init__.py\nmappo_lagrangian/algorithms/r_mappo/__init__.py\nmappo_lagrangian/algorithms/r_mappo/r_mappo_lagr.py\nmappo_lagrangian/envs/__init__.py\nmappo_lagrangian/envs/env_wrappers.py\nmappo_lagrangian/envs/safety_ma_mujoco/__init__.py\nmappo_lagrangian/envs/safety_ma_mujoco/test.py\nmappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/__init__.py\nmappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/ant.py\nmappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/coupled_half_cheetah.py\nmappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/half_cheetah.py\nmappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/hopper.py\nmappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/humanoid.py\nmappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/manyagent_ant.py\nmappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/manyagent_swimmer.py\nmappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/mujoco_env.py\nmappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/mujoco_multi.py\nmappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/multiagentenv.py\nmappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/obsk.py\nmappo_lagrangian/envs/safety_ma_mujoco/safety_multiagent_mujoco/assets/__init__.py\nmappo_lagrangian/runner/__init__.py\nmappo_lagrangian/runner/separated/__init__.py\nmappo_lagrangian/runner/separated/base_runner.py\nmappo_lagrangian/runner/separated/base_runner_mappo_lagr.py\nmappo_lagrangian/runner/separated/mujoco_runner.py\nmappo_lagrangian/runner/separated/mujoco_runner_mappo_lagr.py\nmappo_lagrangian/scripts/__init__.py\nmappo_lagrangian/scripts/train/__init__.py\nmappo_lagrangian/scripts/train/train_mujoco.py\nmappo_lagrangian/utils/__init__.py\nmappo_lagrangian/utils/multi_discrete.py\nmappo_lagrangian/utils/popart.py\nmappo_lagrangian/utils/separated_buffer.py\nmappo_lagrangian/utils/shared_buffer.py\nmappo_lagrangian/utils/util.py"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian.egg-info/dependency_links.txt",
    "content": "\n"
  },
  {
    "path": "MAPPO-Lagrangian/mappo_lagrangian.egg-info/top_level.txt",
    "content": "mappo_lagrangian\n"
  },
  {
    "path": "MAPPO-Lagrangian/setup.py",
    "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nfrom setuptools import setup, find_packages\nimport setuptools\n\ndef get_version() -> str:\n    # https://packaging.python.org/guides/single-sourcing-package-version/\n    init = open(os.path.join(\"mappo_lagrangian\", \"__init__.py\"), \"r\").read().split()\n    return init[init.index(\"__version__\") + 2][1:-1]\n\nsetup(\n    name=\"mappo_lagrangian\",  # Replace with your own username\n    version=get_version(),\n    description=\"mappo_lagrangian algorithms of marlbenchmark\",\n    # long_description=open(\"README.md\", encoding=\"utf8\").read(),\n    long_description_content_type=\"text/markdown\",\n    author=\"marl\",\n    author_email=\"marl@gmail.com\",\n    packages=setuptools.find_packages(),\n    classifiers=[\n        \"Development Status :: 3 - Alpha\",\n        \"Intended Audience :: Science/Research\",\n        \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n        \"Topic :: Software Development :: Libraries :: Python Modules\",\n        \"Programming Language :: Python :: 3\",\n        \"License :: OSI Approved :: MIT License\",\n        \"Operating System :: OS Independent\",\n    ],\n    keywords=\"multi-agent reinforcement learning platform pytorch\",\n    python_requires='>=3.6',\n)\n"
  },
  {
    "path": "README.md",
    "content": "# Multi-Agent Constrained Policy Optimisation (MACPO)\r\n\r\nThe repository is for the paper: **[Multi-Agent Constrained Policy Optimisation](http://arxiv.org/abs/2110.02793)**, in which we investigate the problem of safe MARL. The problem of safe multi-agent learning with safety constraints has not been rigorously studied; very few solutions have been proposed, nor a sharable testing environment or benchmarks.   To fill these gaps, in this work, we formulate the safe multi-agent reinforcement learning problem as a constrained Markov game and solve it with trust region methods. Our solutions---*Multi-Agent Constrained Policy Optimisation (MACPO)* and *MAPPO-Lagrangian*---leverage on the theory of  *Constrained Policy Optimisation (CPO)* and multi-agent trust region learning, and critically, they enjoy theoretical guarantees of  both  monotonic improvement in reward and satisfaction of safety constraints  at every iteration. Experimental results reveal that  *MACPO/MAPPO-Lagrangian* significantly outperform baselines in terms of balancing the performance and constraint satisfaction, e.g. [MAPPO](https://arxiv.org/abs/2103.01955), [IPPO](https://arxiv.org/abs/2011.09533), [HAPPO](https://arxiv.org/abs/2109.11251).\r\n\r\n\r\n\r\n## Environments Supported:\r\n\r\n- [Safety Multi-Agent Mujoco](https://github.com/chauncygu/Safe-Multi-Agent-Mujoco)\r\n\r\n\r\n\r\n\r\n## 1. Installation\r\n\r\n####  1.1 Create Environment\r\n\r\n``` Bash\r\n# create conda environment\r\nconda create -n macpo python==3.7\r\nconda activate macpo\r\npip install -r requirements.txt\r\nconda install pytorch torchvision torchaudio cudatoolkit=11.1 -c pytorch -c nvidia\r\n```\r\n\r\n```\r\ncd MACPO/macpo (for the macpo algorithm) or cd MAPPO-Lagrangian/mappo_lagrangian (for the mappo_lagrangian algorithm)\r\npip install -e .\r\n```\r\n\r\n\r\n\r\n#### 1.2 Install Safety Multi-Agent Mujoco\r\n\r\n\r\n- Install mujoco accoring to [mujoco-py](https://github.com/openai/mujoco-py) and [MuJoCo website](https://www.roboti.us/license.html).\r\n- clone [Safety Multi-Agent Mujoco](https://github.com/chauncygu/Safe-Multi-Agent-Mujoco) to the env path (in this repository, have set the path).\r\n\r\n``` Bash\r\nLD_LIBRARY_PATH=${HOME}/.mujoco/mujoco200/bin;\r\nLD_PRELOAD=/usr/lib/x86_64-linux-gnu/libGLEW.so\r\n```\r\n\r\n\r\n\r\n## 2. Train\r\n\r\n```\r\ncd MACPO/macpo/scripts or cd MAPPO-Lagrangian/mappo_lagrangian/scripts\r\nchmod +x ./train_mujoco.sh\r\n./train_mujoco.sh\r\n```\r\n\r\n\r\n## 3. Results\r\n\r\n<div align=center>\r\n<img src=\"https://github.com/chauncygu/Multi-Agent-Constrained-Policy-Optimisation/blob/main/figures/wall1_manyagent_ant.png\" width=\"850\"/>    \r\n<img src=\"https://github.com/chauncygu/Multi-Agent-Constrained-Policy-Optimisation/blob/main/figures/New_Ant_results.png\" width=\"850\"/> \r\n<img src=\"https://github.com/chauncygu/Multi-Agent-Constrained-Policy-Optimisation/blob/main/figures/New_HalfCheetah_results.png\" width=\"850\"/>\r\n</div>\r\n    \r\n<div align=center>\r\n<center style=\"color:#000000;text-decoration:underline\">\r\n    Performance comparisons on tasks of Safe ManyAgent Ant, Safe Ant, and Safe HalfCheetah in terms of cost (the first row) and reward (the second row). The  safety constraint  values are: 1 for ManyAgent Ant, 0.2 for Ant, and 5 for HalfCheetah. Our methods consistently achieve almost zero costs, thus satisfying safe constraints,  on all tasks. In terms of reward, our methods outperform <a href=\"https://arxiv.org/abs/2011.09533)\">IPPO</a> and <a href=\"https://arxiv.org/abs/2103.01955\">MAPPO</a> on some tasks  but  underperform  <a href=\"https://arxiv.org/abs/2109.11251\">HAPPO</a>, which  is also an unsafe algorithm.</center>\r\n</div>\r\n\r\n\r\n## 4. Demos\r\n\r\n<!--<div align=center>\r\n<img src=\"https://github.com/chauncygu/Multi-Agent-Constrained-Policy-Optimisation/blob/main/figures/202111071600-unsafe-end%2000_00_00-00_00_30.gif\" width=\"700\"/>    \r\n</div>\r\n    \r\n<div align=center>\r\n<center style=\"color:#000000;text-decoration:underline\">\r\n    A demo denotes <b>unsafe</b> performance using <a href=\"https://arxiv.org/abs/2109.11251\">HAPPO</a> on Ant2x4 task.</center>\r\n</div>\r\n\r\n&nbsp;\r\n<div align=center>\r\n<img src=\"https://github.com/chauncygu/Multi-Agent-Constrained-Policy-Optimisation/blob/main/figures/202111140948-safe-end1%2000_00_00-00_00_30.gif\" width=\"700\"/>    \r\n</div>\r\n    \r\n<div align=center>\r\n<center style=\"color:#000000;text-decoration:underline\">\r\n    A demo denotes <b>safe</b> performance using <a href=\"http://arxiv.org/abs/2110.02793\">MAPPO-Lagrangian</a> on Ant2x4 task.</center>\r\n</div>-->\r\n\r\n**Ant Task**: the width of the corridor set by two walls is 10 m. The environment emits the cost of 1 for an agent, if the distance between the robot and the wall is less than 1.8 m, or when the robot topples over.\r\n\r\n|![](https://github.com/chauncygu/Multi-Agent-Constrained-Policy-Optimisation/blob/main/figures/202111071600-unsafe-end%2000_00_00-00_00_30.gif)|![](https://github.com/chauncygu/Multi-Agent-Constrained-Policy-Optimisation/blob/main/figures/202111140948-safe-end1%2000_00_00-00_00_30.gif)|\r\n| :---: | :---: | \r\n|  A demo denotes <b>unsafe</b> performance using <a href=\"https://arxiv.org/abs/2109.11251\">HAPPO</a> on Ant-2x4 task.</center> | A demo denotes <b>safe</b> performance using <a href=\"http://arxiv.org/abs/2110.02793\">MAPPO-Lagrangian</a> on Ant-2x4 task.</center> | \r\n\r\n\r\n**HalfCheetah Task**: In the task, the agents move inside a corridor (which constraints their movement, but does not induce costs). Together with them, there are bombs moving inside the corridor. If an agent finds itself too close to the bomb, the distance between an agent and the bomb is less than 9m, a cost of 1 will be emitted, at the same time, the bomb will turn blood red.\r\n\r\n|![](https://github.com/chauncygu/Multi-Agent-Constrained-Policy-Optimisation/blob/main/figures/202111140948-halfcheetah-unsafe-end%2000_00_00-00_00_30.gif)|![](https://github.com/chauncygu/Multi-Agent-Constrained-Policy-Optimisation/blob/main/figures/202111140948-halfcheetah-safe-end%2000_00_00-00_00_30.gif)|\r\n| :---: | :---: | \r\n|  A demo denotes <b>unsafe</b> performance using <a href=\"https://arxiv.org/abs/2109.11251\">HAPPO</a> on HalfCheetah-2x3 task.</center> | A demo denotes <b>safe</b> performance using <a href=\"http://arxiv.org/abs/2110.02793\">MAPPO-Lagrangian</a> on HalfCheetah-2x3 task.</center> | \r\n\r\n\r\n**ManyAgent Ant Task One**: In the ManyAgent Ant task, the width of the corridor set by two walls is 9m. The environment emits the cost of 1 for an agent, if the distance between the robot and the wall is less than 1.8 m, or when the robot topples over. \r\n\r\n|![](https://github.com/chauncygu/Multi-Agent-Constrained-Policy-Optimisation/blob/main/figures/sadppo-manyagent-ant--unsafe-end-have-word%2000_00_00-00_00_30.gif)|![](https://github.com/chauncygu/Multi-Agent-Constrained-Policy-Optimisation/blob/main/figures/safe-mappo-manyagent-ant--safe-end-have-word-01%2000_00_00-00_00_30.gif)|\r\n| :---: | :---: | \r\n|  A demo denotes <b>unsafe</b> performance using <a href=\"https://arxiv.org/abs/2109.11251\">HAPPO</a> on ManyAgent Ant-2x3 task.</center> | A demo denotes <b>safe</b> performance using <a href=\"http://arxiv.org/abs/2110.02793\">MAPPO-Lagrangian</a> on ManyAgent Ant-2x3 task.</center> | \r\n\r\n\r\n**ManyAgent Ant Task Two**: In the ManyAgent Ant task, the width of the corridor is 12 m; its walls fold at the angle of 30 degrees. The environment emits the cost of 1 for an agent, if the distance between the robot and the wall is less than 1.8 m, or when the robot topples over.\r\n\r\n|![](https://github.com/chauncygu/Multi-Agent-Constrained-Policy-Optimisation/blob/main/figures/two-wall-sadppo-unsafe-end-have-word-manyagent-ant%2000_00_00-00_00_30.gif)|![](https://github.com/chauncygu/Multi-Agent-Constrained-Policy-Optimisation/blob/main/figures/two-wall-safe-mappo--manyagent-ant-safe-end-have-word-manyagent-ant%2000_00_00-00_00_30.gif)|![](https://github.com/chauncygu/Multi-Agent-Constrained-Policy-Optimisation/blob/main/figures/two-wall-macpo-manyagent-ant--safe-end-have-word%2000_00_00-00_00_30.gif)|\r\n| :---: | :---: | :---: | \r\n|  A demo denotes <b>unsafe</b> performance using <a href=\"https://arxiv.org/abs/2109.11251\">HAPPO</a> on ManyAgent Ant-2x3 task.</center> | A demo denotes <b>unsafe</b> performance using <a href=\"http://arxiv.org/abs/2110.02793\">MAPPO-Lagrangian</a> on ManyAgent Ant-2x3 task.</center> | A demo denotes <b>safe</b> performance using <a href=\"http://arxiv.org/abs/2110.02793\">MACPO</a> on ManyAgent Ant-2x3 task.</center> | \r\n\r\n\r\n## 5. Publication\r\nIf you find the repository useful, please cite the [paper](https://arxiv.org/abs/2110.02793):\r\n```\r\n@article{gu2023safe,\r\n  title={Safe Multi-Agent Reinforcement Learning for Multi-Robot Control},\r\n  author={Gu, Shangding and Kuba, Jakub Grudzien and Chen, Yuanpei and Du, Yali and Yang, Long and Knoll, Alois and Yang, Yaodong},\r\n  journal={Artificial Intelligence},\r\n  pages={103905},\r\n  year={2023},\r\n  publisher={Elsevier}\r\n}\r\n\r\n\r\n```\r\n\r\n\r\n## Acknowledgments\r\n\r\nWe thank the list of contributors from the following open source repositories: [MAPPO](https://github.com/marlbenchmark/on-policy), [HAPPO](https://github.com/cyanrain7/Trust-Region-Policy-Optimisation-in-Multi-Agent-Reinforcement-Learning), [safety-starter-agents](https://github.com/openai/safety-starter-agents), [CMBPO](https://github.com/anyboby/Constrained-Model-Based-Policy-Optimization).\r\n\r\n\r\n\r\n\r\n\r\n"
  },
  {
    "path": "environment.yaml",
    "content": "name: marl\nchannels:\n  - defaults\ndependencies:\n  - _libgcc_mutex=0.1=main\n  - _tflow_select=2.1.0=gpu\n  - absl-py=0.9.0=py36_0\n  - astor=0.8.0=py36_0\n  - blas=1.0=mkl\n  - c-ares=1.15.0=h7b6447c_1001\n  - ca-certificates=2020.1.1=0\n  - certifi=2020.4.5.2=py36_0\n  - cudatoolkit=10.0.130=0\n  - cudnn=7.6.5=cuda10.0_0\n  - cupti=10.0.130=0\n  - gast=0.2.2=py36_0\n  - google-pasta=0.2.0=py_0\n  - grpcio=1.14.1=py36h9ba97e2_0\n  - h5py=2.10.0=py36h7918eee_0\n  - hdf5=1.10.4=hb1b8bf9_0\n  - intel-openmp=2020.1=217\n  - keras-applications=1.0.8=py_0\n  - keras-preprocessing=1.1.0=py_1\n  - libedit=3.1=heed3624_0\n  - libffi=3.2.1=hd88cf55_4\n  - libgcc-ng=9.1.0=hdf63c60_0\n  - libgfortran-ng=7.3.0=hdf63c60_0\n  - libprotobuf=3.12.3=hd408876_0\n  - libstdcxx-ng=9.1.0=hdf63c60_0\n  - markdown=3.1.1=py36_0\n  - mkl=2020.1=217\n  - mkl-service=2.3.0=py36he904b0f_0\n  - mkl_fft=1.1.0=py36h23d657b_0\n  - mkl_random=1.1.1=py36h0573a6f_0\n  - ncurses=6.0=h9df7e31_2\n  - numpy=1.18.1=py36h4f9e942_0\n  - numpy-base=1.18.1=py36hde5b4d6_1\n  - openssl=1.0.2u=h7b6447c_0\n  - opt_einsum=3.1.0=py_0\n  - pip=20.1.1=py36_1\n  - protobuf=3.12.3=py36he6710b0_0\n  - python=3.6.2=hca45abc_19\n  - readline=7.0=ha6073c6_4\n  - scipy=1.4.1=py36h0b6359f_0\n  - setuptools=47.3.0=py36_0\n  - six=1.15.0=py_0\n  - sqlite=3.23.1=he433501_0\n  - tensorboard=2.0.0=pyhb38c66f_1\n  - tensorflow=2.0.0=gpu_py36h6b29c10_0\n  - tensorflow-base=2.0.0=gpu_py36h0ec5d1f_0\n  - tensorflow-estimator=2.0.0=pyh2649769_0\n  - tensorflow-gpu=2.0.0=h0d30ee6_0\n  - termcolor=1.1.0=py36_1\n  - tk=8.6.8=hbc83047_0\n  - werkzeug=0.16.1=py_0\n  - wheel=0.34.2=py36_0\n  - wrapt=1.12.1=py36h7b6447c_1\n  - xz=5.2.5=h7b6447c_0\n  - zlib=1.2.11=h7b6447c_3\n  - pip:\n      - aiohttp==3.6.2\n      - aioredis==1.3.1\n      - astunparse==1.6.3\n      - async-timeout==3.0.1\n      - atari-py==0.2.6\n      - atomicwrites==1.2.1\n      - attrs==18.2.0\n      - beautifulsoup4==4.9.1\n      - blessings==1.7\n      - cachetools==4.1.1\n      - cffi==1.14.1\n      - chardet==3.0.4\n      - click==7.1.2\n      - cloudpickle==1.3.0\n      - colorama==0.4.3\n      - colorful==0.5.4\n      - configparser==5.0.1\n      - contextvars==2.4\n      - cycler==0.10.0\n      - cython==0.29.21\n      - deepdiff==4.3.2\n      - dill==0.3.2\n      - docker-pycreds==0.4.0\n      - docopt==0.6.2\n      - fasteners==0.15\n      - filelock==3.0.12\n      - funcsigs==1.0.2\n      - future==0.16.0\n      - gin==0.1.6\n      - gin-config==0.3.0\n      - gitdb==4.0.5\n      - gitpython==3.1.9\n      - glfw==1.12.0\n      - google==3.0.0\n      - google-api-core==1.22.1\n      - google-auth==1.21.0\n      - google-auth-oauthlib==0.4.1\n      - googleapis-common-protos==1.52.0\n      - gpustat==0.6.0\n      - gql==0.2.0\n      - graphql-core==1.1\n      - gym==0.17.2\n      - hiredis==1.1.0\n      - idna==2.7\n      - idna-ssl==1.1.0\n      - imageio==2.4.1\n      - immutables==0.14\n      - importlib-metadata==1.7.0\n      - joblib==0.16.0\n      - jsonnet==0.16.0\n      - jsonpickle==0.9.6\n      - jsonschema==3.2.0\n      - kiwisolver==1.0.1\n      - lockfile==0.12.2\n      - mappo==0.0.1\n      - matplotlib==3.0.0\n      - mock==2.0.0\n      - monotonic==1.5\n      - more-itertools==4.3.0\n      - mpi4py==3.0.3\n      - mpyq==0.2.5\n      - msgpack==1.0.0\n      - mujoco-py==2.0.2.13\n      - mujoco-worldgen==0.0.0\n      - multidict==4.7.6\n      - munch==2.3.2\n      - nvidia-ml-py3==7.352.0\n      - oauthlib==3.1.0\n      - opencensus==0.7.10\n      - opencensus-context==0.1.1\n      - opencv-python==4.2.0.34\n      - ordered-set==4.0.2\n      - packaging==20.4\n      - pandas==1.1.1\n      - pathlib2==2.3.2\n      - pathtools==0.1.2\n      - pbr==4.3.0\n      - pillow==5.3.0\n      - pluggy==0.7.1\n      - portpicker==1.2.0\n      - probscale==0.2.3\n      - progressbar2==3.53.1\n      - prometheus-client==0.8.0\n      - promise==2.3\n      - psutil==5.7.2\n      - py==1.6.0\n      - py-spy==0.3.3\n      - pyasn1==0.4.8\n      - pyasn1-modules==0.2.8\n      - pycparser==2.20\n      - pygame==1.9.4\n      - pyglet==1.5.0\n      - pyopengl==3.1.5\n      - pyopengl-accelerate==3.1.5\n      - pyparsing==2.2.2\n      - pyrsistent==0.16.0\n      - pysc2==3.0.0\n      - pytest==3.8.2\n      - python-dateutil==2.7.3\n      - python-utils==2.4.0\n      - pytz==2020.1\n      - pyyaml==3.13\n      - pyzmq==19.0.2\n      - ray==0.8.0\n      - redis==3.4.1\n      - requests==2.24.0\n      - requests-oauthlib==1.3.0\n      - rsa==4.6\n      - s2clientprotocol==4.10.1.75800.0\n      - s2protocol==4.11.4.78285.0\n      - sacred==0.7.2\n      - seaborn==0.10.1\n      - sentry-sdk==0.18.0\n      - shortuuid==1.0.1\n      - sk-video==1.1.10\n      - smmap==3.0.4\n      - snakeviz==1.0.0\n      - soupsieve==2.0.1\n      - subprocess32==3.5.4\n      - tabulate==0.8.7\n      - tensorboard-logger==0.1.0\n      - tensorboard-plugin-wit==1.7.0\n      - tensorboardx==2.0\n      - torch==1.5.1+cu101\n      - torchvision==0.6.1+cu101\n      - tornado==5.1.1\n      - tqdm==4.48.2\n      - typing-extensions==3.7.4.3\n      - urllib3==1.23\n      - wandb==0.10.5\n      - watchdog==0.10.3\n      - websocket-client==0.53.0\n      - whichcraft==0.5.2\n      - xmltodict==0.12.0\n      - yarl==1.5.1\n      - zipp==3.1.0\n      - zmq==0.0.0\n"
  },
  {
    "path": "requirements.txt",
    "content": "absl-py==0.9.0\naiohttp==3.6.2\naioredis==1.3.1\nastor==0.8.0\nastunparse==1.6.3\nasync-timeout==3.0.1\natari-py==0.2.6\natomicwrites==1.2.1\nattrs==18.2.0\nbeautifulsoup4==4.9.1\nblessings==1.7\ncachetools==4.1.1\ncertifi==2020.4.5.2\ncffi==1.14.1\nchardet==3.0.4\nclick==7.1.2\ncloudpickle==1.3.0\ncolorama==0.4.3\ncolorful==0.5.4\nconfigparser==5.0.1\ncontextvars==2.4\ncycler==0.10.0\nCython==0.29.21\ndeepdiff==4.3.2\ndill==0.3.2\ndocker-pycreds==0.4.0\ndocopt==0.6.2\nfasteners==0.15\nfilelock==3.0.12\nfuncsigs==1.0.2\nfuture==0.16.0\ngast==0.2.2\ngin==0.1.6\ngin-config==0.3.0\ngitdb==4.0.5\nGitPython==3.1.9\nglfw==1.12.0\ngoogle==3.0.0\ngoogle-api-core==1.22.1\ngoogle-auth==1.21.0\ngoogle-auth-oauthlib==0.4.1\ngoogle-pasta==0.2.0\ngoogleapis-common-protos==1.52.0\ngpustat==0.6.0\ngql==0.2.0\ngraphql-core==1.1\ngrpcio==1.31.0\ngym==0.17.2\nh5py==2.10.0\nhiredis==1.1.0\nidna==2.7\nidna-ssl==1.1.0\nimageio==2.4.1\nimmutables==0.14\nimportlib-metadata==1.7.0\njoblib==0.16.0\njsonnet==0.16.0\njsonpickle==0.9.6\njsonschema==3.2.0\nKeras-Applications==1.0.8\nKeras-Preprocessing==1.1.2\nkiwisolver==1.0.1\nlockfile==0.12.2\nMarkdown==3.1.1\nmatplotlib==3.0.0\nmkl-fft==1.2.0\nmkl-random==1.2.0\nmkl-service==2.3.0\nmock==2.0.0\nmonotonic==1.5\nmore-itertools==4.3.0\nmpi4py==3.0.3\nmpyq==0.2.5\nmsgpack==1.0.0\nmujoco-py==2.0.2.8\nmultidict==4.7.6\nmunch==2.3.2\nnumpy\nnvidia-ml-py3==7.352.0\noauthlib==3.1.0\nopencensus==0.7.10\nopencensus-context==0.1.1\nopencv-python==4.2.0.34\nopt-einsum==3.1.0\nordered-set==4.0.2\npackaging==20.4\npandas==1.1.1\npathlib2==2.3.2\npathtools==0.1.2\npbr==4.3.0\nPillow==5.3.0\npluggy==0.7.1\nportpicker==1.2.0\nprobscale==0.2.3\nprogressbar2==3.53.1\nprometheus-client==0.8.0\npromise==2.3\nprotobuf==3.12.4\npsutil==5.7.2\npy==1.6.0\npy-spy==0.3.3\npyasn1==0.4.8\npyasn1-modules==0.2.8\npycparser==2.20\npygame==1.9.4\npyglet==1.5.0\nPyOpenGL==3.1.5\nPyOpenGL-accelerate==3.1.5\npyparsing==2.2.2\npyrsistent==0.16.0\nPySC2==3.0.0\npytest==3.8.2\npython-dateutil==2.7.3\npython-utils==2.4.0\npytz==2020.1\nPyYAML==3.13\npyzmq==19.0.2\nredis==3.4.1\nrequests==2.24.0\nrequests-oauthlib==1.3.0\nrsa==4.6\ns2clientprotocol==4.10.1.75800.0\ns2protocol==4.11.4.78285.0\nsacred==0.7.2\nscipy==1.4.1\nseaborn==0.10.1\nsentry-sdk==0.18.0\nsetproctitle==1.1.10\nshortuuid==1.0.1\nsix==1.15.0\nsk-video==1.1.10\nsmmap==3.0.4\nsnakeviz==1.0.0\nsoupsieve==2.0.1\nsubprocess32==3.5.4\ntabulate==0.8.7\ntensorboard==2.0.2\ntensorboard-logger==0.1.0\ntensorboard-plugin-wit==1.7.0\ntensorboardX==2.0\ntensorflow==2.0.0\ntensorflow-estimator==2.0.0\ntermcolor==1.1.0\ntorch\ntorchvision\ntornado\ntqdm==4.48.2\ntyping-extensions==3.7.4.3\nurllib3==1.23\nwandb==0.10.5\nwatchdog==0.10.3\nwebsocket-client==0.53.0\nWerkzeug==0.16.1\nwhichcraft==0.5.2\nwrapt==1.12.1\nxmltodict==0.12.0\nyarl==1.5.1\nzipp==3.1.0\nzmq==0.0.0\n"
  }
]