[
  {
    "path": ".gitignore",
    "content": "# Compiled source #\n######################\n*.pyc\n*.so\n*.so.*\n*.dll\n*.o\n*.a\n*.hpp.gch\n\n# Packages #\n######################\n# it's better to unpack these files and commit the raw source\n# git has its own built in compression methods\n*.7z\n*.dmg\n*.gz\n*.iso\n*.jar\n*.rar\n*.tar\n*.zip\n\n# Logs and databases #\n######################\n*.log\n*.sql\n*.sqlite\n*.mat\n\n# Micro$oft Office #\n######################\n*.pptx\n*.PPTX\n*.docx\n*.DOCX\n*.xlsx\n*.XLSX\n\n# LateX\n######################\n*.aux\n*.log\n*.bbl\n*.blg\n*.toc\n*.lof\n*.lot\n*.out\n*.idx\n*.ilg\n*.ind\n*.bag\n*.BAG\n\n# Others #\n######################\n.depend\n\\#*\n*.flv\n*.FLV\n*.mov\n*.MOV\n*.avi\n*.AVI\n*.mp4\n*.MP4\n*.mkv\n*.MKV\n*.mp3\n*.MP3\n*.wmv\n*.WMV\n*.pdf\n*.PDF\n.git/\nauto/\n\n# Learning #\n###################\n#checkpoint\n*.cptk.data-*\n*.cptk.index\n*.cptk.meta\n*.gif\n*.GIF\n\n# Python stuffs\n###################\n__pycache__/\nresults/\n.idea/\nmodel_v1_oIL_no_prune/checkpoint\n.vscode/settings.json\n"
  },
  {
    "path": "Env_Builder.py",
    "content": "import copy\nfrom operator import sub, add\nimport gym\nimport numpy as np\nimport math, time\nimport warnings\nfrom od_mstar3.col_set_addition import OutOfTimeError, NoSolutionError\nfrom od_mstar3 import od_mstar\nfrom od_mstar3 import cpp_mstar\nfrom GroupLock import Lock\nfrom matplotlib.colors import *\nfrom gym.envs.classic_control import rendering\nimport imageio\nfrom gym import spaces\n\n\ndef make_gif(images, fname):\n    gif = imageio.mimwrite(fname, images, subrectangles=True)\n    print(\"wrote gif\")\n    return gif\n\n\ndef opposite_actions(action, isDiagonal=False):\n    if isDiagonal:\n        checking_table = {0: -1, 1: 3, 2: 4, 3: 1, 4: 2}\n        raise NotImplemented\n    else:\n        checking_table = {0: -1, 1: 3, 2: 4, 3: 1, 4: 2}\n    return checking_table[action]\n\n\ndef action2dir(action):\n    checking_table = {0: (0, 0), 1: (0, 1), 2: (1, 0), 3: (0, -1), 4: (-1, 0)}\n    return checking_table[action]\n\n\ndef dir2action(direction):\n    checking_table = {(0, 0): 0, (0, 1): 1, (1, 0): 2, (0, -1): 3, (-1, 0): 4}\n    return checking_table[direction]\n\n\ndef tuple_plus(a, b):\n    \"\"\" a + b \"\"\"\n    return tuple(map(add, a, b))\n\n\ndef tuple_minus(a, b):\n    \"\"\" a - b \"\"\"\n    return tuple(map(sub, a, b))\n\n\ndef _heap(ls, max_length):\n    while True:\n        if len(ls) > max_length:\n            ls.pop(0)\n        else:\n            return ls\n\n\ndef get_key(dict, value):\n    return [k for k, v in dict.items() if v == value]\n\n\ndef getAstarDistanceMap(map: np.array, start: tuple, goal: tuple, isDiagonal: bool = False):\n    \"\"\"\n    returns a numpy array of same dims as map with the distance to the goal from each coord\n    :param map: a n by m np array, where -1 denotes obstacle\n    :param start: start_position\n    :param goal: goal_position\n    :return: optimal distance map\n    \"\"\"\n\n    def lowestF(fScore, openSet):\n        # find entry in openSet with lowest fScore\n        assert 
(len(openSet) > 0)\n        minF = 2 ** 31 - 1\n        minNode = None\n        for (i, j) in openSet:\n            if (i, j) not in fScore: continue\n            if fScore[(i, j)] < minF:\n                minF = fScore[(i, j)]\n                minNode = (i, j)\n        return minNode\n\n    def getNeighbors(node):\n        # return set of neighbors to the given node\n        n_moves = 9 if isDiagonal else 5\n        neighbors = set()\n        for move in range(1, n_moves):  # we dont want to include 0 or it will include itself\n            direction = action2dir(move)\n            dx = direction[0]\n            dy = direction[1]\n            ax = node[0]\n            ay = node[1]\n            if (ax + dx >= map.shape[0] or ax + dx < 0 or ay + dy >= map.shape[\n                1] or ay + dy < 0):  # out of bounds\n                continue\n            if map[ax + dx, ay + dy] == -1:  # collide with static obstacle\n                continue\n            neighbors.add((ax + dx, ay + dy))\n        return neighbors\n\n    # NOTE THAT WE REVERSE THE DIRECTION OF SEARCH SO THAT THE GSCORE WILL BE DISTANCE TO GOAL\n    start, goal = goal, start\n    start, goal = tuple(start), tuple(goal)\n    # The set of nodes already evaluated\n    closedSet = set()\n\n    # The set of currently discovered nodes that are not evaluated yet.\n    # Initially, only the start node is known.\n    openSet = set()\n    openSet.add(start)\n\n    # For each node, which node it can most efficiently be reached from.\n    # If a node can be reached from many nodes, cameFrom will eventually contain the\n    # most efficient previous step.\n    cameFrom = dict()\n\n    # For each node, the cost of getting from the start node to that node.\n    gScore = dict()  # default value infinity\n\n    # The cost of going from start to start is zero.\n    gScore[start] = 0\n\n    # For each node, the total cost of getting from the start node to the goal\n    # by passing by that node. 
That value is partly known, partly heuristic.\n    fScore = dict()  # default infinity\n\n    # our heuristic is euclidean distance to goal\n    heuristic_cost_estimate = lambda x, y: math.hypot(x[0] - y[0], x[1] - y[1])\n\n    # For the first node, that value is completely heuristic.\n    fScore[start] = heuristic_cost_estimate(start, goal)\n\n    while len(openSet) != 0:\n        # current = the node in openSet having the lowest fScore value\n        current = lowestF(fScore, openSet)\n\n        openSet.remove(current)\n        closedSet.add(current)\n        for neighbor in getNeighbors(current):\n            if neighbor in closedSet:\n                continue  # Ignore the neighbor which is already evaluated.\n\n            if neighbor not in openSet:  # Discover a new node\n                openSet.add(neighbor)\n\n            # The distance from start to a neighbor\n            # in our case the distance between is always 1\n            tentative_gScore = gScore[current] + 1\n            if tentative_gScore >= gScore.get(neighbor, 2 ** 31 - 1):\n                continue  # This is not a better path.\n\n            # This path is the best until now. Record it!\n            cameFrom[neighbor] = current\n            gScore[neighbor] = tentative_gScore\n            fScore[neighbor] = gScore[neighbor] + heuristic_cost_estimate(neighbor, goal)\n\n            # parse through the gScores\n    Astar_map = map.copy()\n    for (i, j) in gScore:\n        Astar_map[i, j] = gScore[i, j]\n    return Astar_map\n\n\nclass Agent:\n    \"\"\"\n    The agent object that contains agent's position, direction dict and position dict,\n    currently only supporting 4-connected region.\n    self.distance_map is None here. 
Assign values in upper class.\n    ###########\n    WARNING: direction_history[i] means the action taking from i-1 step, resulting in the state of step i,\n    such that len(direction_history) == len(position_history)\n    ###########\n    \"\"\"\n\n    def __init__(self, isDiagonal=False):\n        self._path_count = -1\n        self.IsDiagonal = isDiagonal\n        self.freeze = 0\n        self.position, self.position_history, self.ID, self.direction, self.direction_history, \\\n        self.action_history, self.goal_pos, self.distanceMap, self.dones, self.status, self.next_goal, self.next_distanceMap \\\n            = None, [], None, None, [(None, None)], [(None, None)], None, None, 0, None, None, None\n\n    def reset(self):\n        self._path_count = -1\n        self.freeze = 0\n        self.position, self.position_history, self.ID, self.direction, self.direction_history, \\\n        self.action_history, self.goal_pos, self.distanceMap, self.dones, self.status, self.next_goal, self.next_distanceMap \\\n            = None, [], None, None, [(None, None)], [(None, None)], None, None, 0, None, None, None\n\n    def move(self, pos, status=None):\n        if pos is None:\n            pos = self.position\n        if self.position is not None:\n            assert pos in [self.position,\n                           tuple_plus(self.position, (0, 1)), tuple_plus(self.position, (0, -1)),\n                           tuple_plus(self.position, (1, 0)), tuple_plus(self.position, (-1, 0)), ], \\\n                \"only 1 step 1 cell allowed. 
Previous pos:\" + str(self.position)\n        self.add_history(pos, status)\n\n    def add_history(self, position, status):\n        assert len(position) == 2\n        self.status = status\n        self._path_count += 1\n        self.position = tuple(position)\n        if self._path_count != 0:\n            direction = tuple_minus(position, self.position_history[-1])\n            action = dir2action(direction)\n            assert action in list(range(4 + 1)), \\\n                \"direction not in actionDir, something going wrong\"\n            self.direction_history.append(direction)\n            self.action_history.append(action)\n        self.position_history.append(tuple(position))\n\n        self.position_history = _heap(self.position_history, 30)\n        self.direction_history = _heap(self.direction_history, 30)\n        self.action_history = _heap(self.action_history, 30)\n\n\nclass World:\n    \"\"\"\n    Include: basic world generation rules, blank map generation and collision checking.\n    reset_world:\n    Do not add action pruning, reward structure or any other routine for training in this class. 
Pls add in upper class MAPFEnv\n    \"\"\"\n\n    def __init__(self, map_generator, num_agents, isDiagonal=False):\n        self.num_agents = num_agents\n        self.manual_world = False\n        self.manual_goal = False\n        self.goal_generate_distance = 2\n\n        self.map_generator = map_generator\n        self.isDiagonal = isDiagonal\n\n        self.agents_init_pos, self.goals_init_pos = None, None\n        self.reset_world()\n        self.init_agents_and_goals()\n\n    def reset_world(self):\n        \"\"\"\n        generate/re-generate a world map, and compute its corridor map\n        \"\"\"\n\n        def scan_for_agents(state_map):\n            agents = {}\n            for i in range(state_map.shape[0]):\n                for j in range(state_map.shape[1]):\n                    if state_map[i, j] > 0:\n                        agentID = state_map[i, j]\n                        agents.update({agentID: (i, j)})\n            return agents\n\n        self.state, self.goals_map = self.map_generator()\n        # detect manual world\n        if (self.state > 0).any():\n            self.manual_world = True\n            self.agents_init_pos = scan_for_agents(self.state)\n            if self.num_agents is not None and self.num_agents != len(self.agents_init_pos.keys()):\n                warnings.warn(\"num_agent does not match the actual agent number in manual map! 
\"\n                              \"num_agent has been set to be consistent with manual map.\")\n            self.num_agents = len(self.agents_init_pos.keys())\n            self.agents = {i: copy.deepcopy(Agent()) for i in range(1, self.num_agents + 1)}\n        else:\n            assert self.num_agents is not None\n            self.agents = {i: copy.deepcopy(Agent()) for i in range(1, self.num_agents + 1)}\n        # detect manual goals_map\n        if self.goals_map is not None:\n            self.manual_goal = True\n            self.goals_init_pos = scan_for_agents(self.goals_map) if self.manual_goal else None\n\n        else:\n            self.goals_map = np.zeros([self.state.shape[0], self.state.shape[1]])\n\n        self.corridor_map = {}\n        self.restrict_init_corridor = True\n        self.visited = []\n        self.corridors = {}\n        self.get_corridors()\n\n    def reset_agent(self):\n        \"\"\"\n        remove all the agents (with their travel history) and goals in the env, rebase the env into a blank one\n        \"\"\"\n        self.agents = {i: copy.deepcopy(Agent()) for i in range(1, self.num_agents + 1)}\n        self.state[self.state > 0] = 0  # remove agents in the map\n\n    def get_corridors(self):\n        \"\"\"\n        in corridor_map , output = list:\n            list[0] : if In corridor, corridor id , else -1 \n            list[1] : If Inside Corridor = 1\n                      If Corridor Endpoint = 2\n                      If Free Cell Outside Corridor = 0   \n                      If Obstacle = -1 \n        \"\"\"\n        corridor_count = 1\n        # Initialize corridor map\n        for i in range(self.state.shape[0]):\n            for j in range(self.state.shape[1]):\n                if self.state[i, j] >= 0:\n                    self.corridor_map[(i, j)] = [-1, 0]\n                else:\n                    self.corridor_map[(i, j)] = [-1, -1]\n        # Compute All Corridors and End-points, store them in self.corridors , 
update corridor_map\n        for i in range(self.state.shape[0]):\n            for j in range(self.state.shape[1]):\n                positions = self.blank_env_valid_neighbor(i, j)\n                if (positions.count(None)) == 2 and (i, j) not in self.visited:\n                    allowed = self.check_for_singular_state(positions)\n                    if not allowed:\n                        continue\n                    self.corridors[corridor_count] = {}\n                    self.corridors[corridor_count]['Positions'] = [(i, j)]\n                    self.corridor_map[(i, j)] = [corridor_count, 1]\n                    self.corridors[corridor_count]['EndPoints'] = []\n                    self.visited.append((i, j))\n                    for num in range(4):\n                        if positions[num] is not None:\n                            self.visit(positions[num][0], positions[num][1], corridor_count)\n                    corridor_count += 1\n        # Get Delta X , Delta Y for the computed corridors ( Delta= Displacement to corridor exit)       \n        for k in range(1, corridor_count):\n            if k in self.corridors:\n                if len(self.corridors[k]['EndPoints']) == 2:\n                    self.corridors[k]['DeltaX'] = {}\n                    self.corridors[k]['DeltaY'] = {}\n                    pos_a = self.corridors[k]['EndPoints'][0]\n                    pos_b = self.corridors[k]['EndPoints'][1]\n                    self.corridors[k]['DeltaX'][pos_a] = (pos_a[0] - pos_b[0])  # / (max(1, abs(pos_a[0] - pos_b[0])))\n                    self.corridors[k]['DeltaX'][pos_b] = -1 * self.corridors[k]['DeltaX'][pos_a]\n                    self.corridors[k]['DeltaY'][pos_a] = (pos_a[1] - pos_b[1])  # / (max(1, abs(pos_a[1] - pos_b[1])))\n                    self.corridors[k]['DeltaY'][pos_b] = -1 * self.corridors[k]['DeltaY'][pos_a]\n            else:\n                print('Weird2')\n\n                # Rearrange the computed corridor list such that 
it becomes easier to iterate over the structure\n        # Basically, sort the self.corridors['Positions'] list in a way that the first element of the list is\n        # adjacent to Endpoint[0] and the last element of the list is adjacent to EndPoint[1] \n        # If there is only 1 endpoint, the sorting doesn't matter since blocking is easy to compute\n        for t in range(1, corridor_count):\n            positions = self.blank_env_valid_neighbor(self.corridors[t]['EndPoints'][0][0],\n                                                      self.corridors[t]['EndPoints'][0][1])\n            for position in positions:\n                if position is not None and self.corridor_map[position][0] == t:\n                    break\n            index = self.corridors[t]['Positions'].index(position)\n\n            if index == 0:\n                pass\n            if index != len(self.corridors[t]['Positions']) - 1:\n                temp_list = self.corridors[t]['Positions'][0:index + 1]\n                temp_list.reverse()\n                temp_end = self.corridors[t]['Positions'][index + 1:]\n                self.corridors[t]['Positions'] = []\n                self.corridors[t]['Positions'].extend(temp_list)\n                self.corridors[t]['Positions'].extend(temp_end)\n\n            elif index == len(self.corridors[t]['Positions']) - 1 and len(self.corridors[t]['EndPoints']) == 2:\n                positions2 = self.blank_env_valid_neighbor(self.corridors[t]['EndPoints'][1][0],\n                                                           self.corridors[t]['EndPoints'][1][1])\n                for position2 in positions2:\n                    if position2 is not None and self.corridor_map[position2][0] == t:\n                        break\n                index2 = self.corridors[t]['Positions'].index(position2)\n                temp_list = self.corridors[t]['Positions'][0:index2 + 1]\n                temp_list.reverse()\n                temp_end = 
self.corridors[t]['Positions'][index2 + 1:]\n                self.corridors[t]['Positions'] = []\n                self.corridors[t]['Positions'].extend(temp_list)\n                self.corridors[t]['Positions'].extend(temp_end)\n                self.corridors[t]['Positions'].reverse()\n            else:\n                if len(self.corridors[t]['EndPoints']) == 2:\n                    print(\"Weird3\")\n\n            self.corridors[t]['StoppingPoints'] = []\n            if len(self.corridors[t]['EndPoints']) == 2:\n                position_first = self.corridors[t]['Positions'][0]\n                position_last = self.corridors[t]['Positions'][-1]\n                self.corridors[t]['StoppingPoints'].append([position_first[0], position_first[1]])\n                self.corridors[t]['StoppingPoints'].append([position_last[0], position_last[1]])\n            else:\n                position_first = self.corridors[t]['Positions'][0]\n                self.corridors[t]['StoppingPoints'].append([position[0], position[1]])\n                self.corridors[t]['StoppingPoints'].append(None)\n        return\n\n    def check_for_singular_state(self, positions):\n        counter = 0\n        for num in range(4):\n            if positions[num] is not None:\n                new_positions = self.blank_env_valid_neighbor(positions[num][0], positions[num][1])\n                if new_positions.count(None) in [2, 3]:\n                    counter += 1\n        return counter > 0\n\n    def visit(self, i, j, corridor_id):\n        positions = self.blank_env_valid_neighbor(i, j)\n        if positions.count(None) in [0, 1]:\n            self.corridors[corridor_id]['EndPoints'].append((i, j))\n            self.corridor_map[(i, j)] = [corridor_id, 2]\n            return\n        elif positions.count(None) in [2, 3]:\n            self.visited.append((i, j))\n            self.corridors[corridor_id]['Positions'].append((i, j))\n            self.corridor_map[(i, j)] = [corridor_id, 1]\n            
for num in range(4):\n                if positions[num] is not None and positions[num] not in self.visited:\n                    self.visit(positions[num][0], positions[num][1], corridor_id)\n        else:\n            print('Weird')\n\n    def blank_env_valid_neighbor(self, i, j):\n        possible_positions = [None, None, None, None]\n        move = [[0, 1], [1, 0], [-1, 0], [0, -1]]\n        if self.state[i, j] == -1:\n            return possible_positions\n        else:\n            for num in range(4):\n                x = i + move[num][0]\n                y = j + move[num][1]\n                if 0 <= x < self.state.shape[0] and 0 <= y < self.state.shape[1]:\n                    if self.state[x, y] != -1:\n                        possible_positions[num] = (x, y)\n                        continue\n        return possible_positions\n\n    def getPos(self, agent_id):\n        return tuple(self.agents[agent_id].position)\n\n    def getDone(self, agentID):\n        # get the number of goals that an agent has finished\n        return self.agents[agentID].dones\n\n    def get_history(self, agent_id, path_id=None):\n        \"\"\"\n        :param: path_id: if None, get the last step\n        :return: past_pos: (x,y), past_direction: int\n        \"\"\"\n\n        if path_id is None:\n            path_id = self.agents[agent_id].path_count - 1 if self.agents[agent_id].path_count > 0 else 0\n        try:\n            return self.agents[agent_id].position_history[path_id], self.agents[agent_id].direction_history[path_id]\n        except IndexError:\n            print(\"you are giving an invalid path_id\")\n\n    def getGoal(self, agent_id):\n        return tuple(self.agents[agent_id].goal_pos)\n\n    def init_agents_and_goals(self):\n        \"\"\"\n        place all agents and goals in the blank env. 
If turning on corridor population restriction, only 1 agent is\n        allowed to be born in each corridor.\n        \"\"\"\n\n        def corridor_restricted_init_poss(state_map, corridor_map, goal_map, id_list=None):\n            \"\"\"\n            generate agent init positions when corridor init population is restricted\n            return a dict of positions {agentID:(x,y), ...}\n            \"\"\"\n            if id_list is None:\n                id_list = list(range(1, self.num_agents + 1))\n\n            free_space1 = list(np.argwhere(state_map == 0))\n            free_space1 = [tuple(pos) for pos in free_space1]\n            corridors_visited = []\n            manual_positions = {}\n            break_completely = False\n            for idx in id_list:\n                if break_completely:\n                    return None\n                pos_set = False\n                agentID = idx\n                while not pos_set:\n                    try:\n                        assert (len(free_space1) > 1)\n                        random_pos = np.random.choice(len(free_space1))\n                    except (AssertionError, ValueError):\n                        print('wrong agent')\n                        self.reset_world()\n                        self.init_agents_and_goals()\n                        break_completely = True\n                        if idx == id_list[-1]:\n                            return None\n                        break\n                    position = free_space1[random_pos]\n                    cell_info = corridor_map[position[0], position[1]][1]\n                    if cell_info in [0, 2]:\n                        if goal_map[position[0], position[1]] != agentID:\n                            manual_positions.update({idx: (position[0], position[1])})\n                            free_space1.remove(position)\n                            pos_set = True\n                    elif cell_info == 1:\n                        corridor_id = 
corridor_map[position[0], position[1]][0]\n                        if corridor_id not in corridors_visited:\n                            if goal_map[position[0], position[1]] != agentID:\n                                manual_positions.update({idx: (position[0], position[1])})\n                                corridors_visited.append(corridor_id)\n                                free_space1.remove(position)\n                                pos_set = True\n                        else:\n                            free_space1.remove(position)\n                    else:\n                        print(\"Very Weird\")\n                        # print('Manual Positions' ,manual_positions)\n            return manual_positions\n\n        # no corridor population restriction\n        if not self.restrict_init_corridor or (self.restrict_init_corridor and self.manual_world):\n            self.put_goals(list(range(1, self.num_agents + 1)), self.goals_init_pos)\n            self._put_agents(list(range(1, self.num_agents + 1)), self.agents_init_pos)\n        # has corridor population restriction\n        else:\n            check = self.put_goals(list(range(1, self.num_agents + 1)), self.goals_init_pos)\n            if check is not None:\n                manual_positions = corridor_restricted_init_poss(self.state, self.corridor_map, self.goals_map)\n                if manual_positions is not None:\n                    self._put_agents(list(range(1, self.num_agents + 1)), manual_positions)\n\n    def _put_agents(self, id_list, manual_pos=None):\n        \"\"\"\n        put some agents in the blank env, saved history data in self.agents and self.state\n        get distance map for the agents\n        :param id_list: a list of agent_id\n                manual_pos: a dict of manual positions {agentID: (x,y),...}\n        \"\"\"\n        if manual_pos is None:\n            # randomly init agents everywhere\n            free_space = np.argwhere(np.logical_or(self.state == 0, 
self.goals_map == 0) == 1)\n            new_idx = np.random.choice(len(free_space), size=len(id_list), replace=False)\n            init_poss = [free_space[idx] for idx in new_idx]\n        else:\n            assert len(manual_pos.keys()) == len(id_list)\n            init_poss = [manual_pos[agentID] for agentID in id_list]\n        assert len(init_poss) == len(id_list)\n        for idx, agentID in enumerate(id_list):\n            self.agents[agentID].ID = agentID\n            self.agents_init_pos = {}\n            if self.state[init_poss[idx][0], init_poss[idx][1]] in [0, agentID] \\\n                    and self.goals_map[init_poss[idx][0], init_poss[idx][1]] != agentID:\n                self.state[init_poss[idx][0], init_poss[idx][1]] = agentID\n                self.agents_init_pos.update({agentID: (init_poss[idx][0], init_poss[idx][1])})\n            else:\n                print(self.state)\n                print(init_poss)\n                raise ValueError('invalid manual_pos for agent' + str(agentID) + ' at: ' + str(init_poss[idx]))\n            self.agents[agentID].move(init_poss[idx])\n            self.agents[agentID].distanceMap = getAstarDistanceMap(self.state, self.agents[agentID].position,\n                                                                   self.agents[agentID].goal_pos)\n\n    def put_goals(self, id_list, manual_pos=None):\n        \"\"\"\n        put a goal of single agent in the env, if the goal already exists, remove that goal and put a new one\n        :param manual_pos: a dict of manual_pos {agentID: (x, y)}\n        :param id_list: a list of agentID\n        :return: an Agent object\n        \"\"\"\n\n        def random_goal_pos(previous_goals=None, distance=None):\n            next_goal_buffer = {agentID: self.agents[agentID].next_goal for agentID in range(1, self.num_agents + 1)}\n            curr_goal_buffer = {agentID: self.agents[agentID].goal_pos for agentID in range(1, self.num_agents + 1)}\n            if previous_goals is 
None:\n                previous_goals = {agentID: None for agentID in id_list}\n            if distance is None:\n                distance = self.goal_generate_distance\n            free_for_all = np.logical_and(self.state == 0, self.goals_map == 0)\n            # print(previous_goals)\n            if not all(previous_goals.values()):  # they are new born agents\n                free_space = np.argwhere(free_for_all == 1)\n                init_idx = np.random.choice(len(free_space), size=len(id_list), replace=False)\n                new_goals = {agentID: tuple(free_space[init_idx[agentID - 1]]) for agentID in id_list}\n                return new_goals\n            else:\n                new_goals = {}\n                for agentID in id_list:\n                    free_on_agents = np.logical_and(self.state > 0, self.state != agentID)\n                    free_spaces_for_previous_goal = np.logical_or(free_on_agents, free_for_all)\n                    # free_spaces_for_previous_goal = np.logical_and(free_spaces_for_previous_goal, self.goals_map==0)\n                    if distance > 0:\n                        previous_x, previous_y = previous_goals[agentID]\n                        x_lower_bound = (previous_x - distance) if (previous_x - distance) > 0 else 0\n                        x_upper_bound = previous_x + distance + 1\n                        y_lower_bound = (previous_y - distance) if (previous_y - distance) > 0 else 0\n                        y_upper_bound = previous_y + distance + 1\n                        free_spaces_for_previous_goal[x_lower_bound:x_upper_bound, y_lower_bound:y_upper_bound] = False\n                    free_spaces_for_previous_goal = list(np.argwhere(free_spaces_for_previous_goal == 1))\n                    free_spaces_for_previous_goal = [pos.tolist() for pos in free_spaces_for_previous_goal]\n\n                    try:\n                        unique = False\n                        counter = 0\n                        while unique == 
False and counter < 500:\n                            init_idx = np.random.choice(len(free_spaces_for_previous_goal))\n                            init_pos = free_spaces_for_previous_goal[init_idx]\n                            unique = True\n                            if tuple(init_pos) in next_goal_buffer.values() or tuple(\n                                    init_pos) in curr_goal_buffer.values() or tuple(init_pos) in new_goals.values():\n                                unique = False\n                            if previous_goals is not None:\n                                if tuple(init_pos) in previous_goals.values():\n                                    unique = False\n                            counter += 1\n                        if counter >= 500:\n                            print('Hard to find Non Conflicting Goal')\n                        new_goals.update({agentID: tuple(init_pos)})\n                    except ValueError:\n                        print('wrong goal')\n                        self.reset_world()\n                        print(self.agents[1].position)\n                        self.init_agents_and_goals()\n                        return None\n                return new_goals\n\n        previous_goals = {agentID: self.agents[agentID].goal_pos for agentID in id_list}\n        if manual_pos is None:\n            new_goals = random_goal_pos(previous_goals, distance=self.goal_generate_distance)\n        else:\n            new_goals = manual_pos\n        if new_goals is not None:  # recursive breaker\n            refresh_distance_map = False\n            for agentID in id_list:\n                if self.state[new_goals[agentID][0], new_goals[agentID][1]] >= 0:\n                    if self.agents[agentID].next_goal is None:  # no next_goal to use\n                        # set goals_map\n                        self.goals_map[new_goals[agentID][0], new_goals[agentID][1]] = agentID\n                        # set agent.goal_pos\n                 
       self.agents[agentID].goal_pos = (new_goals[agentID][0], new_goals[agentID][1])\n                        # set agent.next_goal\n                        new_next_goals = random_goal_pos(new_goals, distance=self.goal_generate_distance)\n                        if new_next_goals is None:\n                            return None\n                        self.agents[agentID].next_goal = (new_next_goals[agentID][0], new_next_goals[agentID][1])\n                        # remove previous goal\n                        if previous_goals[agentID] is not None:\n                            self.goals_map[previous_goals[agentID][0], previous_goals[agentID][1]] = 0\n                    else:  # use next_goal as new goal\n                        # set goals_map\n                        self.goals_map[self.agents[agentID].next_goal[0], self.agents[agentID].next_goal[1]] = agentID\n                        # set agent.goal_pos\n                        self.agents[agentID].goal_pos = self.agents[agentID].next_goal\n                        # set agent.next_goal\n                        self.agents[agentID].next_goal = (\n                            new_goals[agentID][0], new_goals[agentID][1])  # store new goal into next_goal\n                        # remove previous goal\n                        if previous_goals[agentID] is not None:\n                            self.goals_map[previous_goals[agentID][0], previous_goals[agentID][1]] = 0\n                else:\n                    print(self.state)\n                    print(self.goals_map)\n                    raise ValueError('invalid manual_pos for goal' + str(agentID) + ' at: ', str(new_goals[agentID]))\n                if previous_goals[agentID] is not None:  # it has a goal!\n                    if previous_goals[agentID] != self.agents[agentID].position:\n                        print(self.state)\n                        print(self.goals_map)\n                        print(previous_goals)\n                        raise 
RuntimeError(\"agent hasn't finished its goal but asking for a new goal!\")\n\n                    refresh_distance_map = True\n\n                # compute distance map\n                self.agents[agentID].next_distanceMap = getAstarDistanceMap(self.state, self.agents[agentID].goal_pos,\n                                                                            self.agents[agentID].next_goal)\n                if refresh_distance_map:\n                    self.agents[agentID].distanceMap = getAstarDistanceMap(self.state, self.agents[agentID].position,\n                                                                           self.agents[agentID].goal_pos)\n            return 1\n        else:\n            return None\n\n    def CheckCollideStatus(self, movement_dict):\n        \"\"\"\n        WARNING: ONLY NON-DIAGONAL IS IMPLEMENTED\n        return collision status and predicted next positions, do not move agent directly\n        :return:\n         1: action executed, and agents standing on its goal.\n         0: action executed\n        -1: collision with env (obstacles, out of bound)\n        -2: collision with robot, swap\n        -3: collision with robot, cell-wise\n        \"\"\"\n\n        if self.isDiagonal is True:\n            raise NotImplemented\n        Assumed_newPos_dict = {}\n        newPos_dict = {}\n        status_dict = {agentID: None for agentID in range(1, self.num_agents + 1)}\n        not_checked_list = list(range(1, self.num_agents + 1))\n\n        # detect env collision\n        for agentID in range(1, self.num_agents + 1):\n            direction_vector = action2dir(movement_dict[agentID])\n            newPos = tuple_plus(self.getPos(agentID), direction_vector)\n            Assumed_newPos_dict.update({agentID: newPos})\n            if newPos[0] < 0 or newPos[0] > self.state.shape[0] or newPos[1] < 0 \\\n                    or newPos[1] > self.state.shape[1] or self.state[newPos] == -1:\n                status_dict[agentID] = -1\n           
     newPos_dict.update({agentID: self.getPos(agentID)})\n                Assumed_newPos_dict[agentID] = self.getPos(agentID)\n                not_checked_list.remove(agentID)\n                # collide, stay at the same place\n\n        # detect swap collision\n\n        for agentID in copy.deepcopy(not_checked_list):\n            collided_ID = self.state[Assumed_newPos_dict[agentID]]\n            if collided_ID != 0:  # some one is standing on the assumed pos\n                if Assumed_newPos_dict[collided_ID] == self.getPos(agentID):  # he wants to swap\n                    if status_dict[agentID] is None:\n                        status_dict[agentID] = -2\n                        newPos_dict.update({agentID: self.getPos(agentID)})  # stand still\n                        Assumed_newPos_dict[agentID] = self.getPos(agentID)\n                        not_checked_list.remove(agentID)\n                    if status_dict[collided_ID] is None:\n                        status_dict[collided_ID] = -2\n                        newPos_dict.update({collided_ID: self.getPos(collided_ID)})  # stand still\n                        Assumed_newPos_dict[collided_ID] = self.getPos(collided_ID)\n                        not_checked_list.remove(collided_ID)\n\n        # detect cell-wise collision\n        for agentID in copy.deepcopy(not_checked_list):\n            other_agents_dict = copy.deepcopy(Assumed_newPos_dict)\n            other_agents_dict.pop(agentID)\n            if Assumed_newPos_dict[agentID] in newPos_dict.values():\n                status_dict[agentID] = -3\n                newPos_dict.update({agentID: self.getPos(agentID)})  # stand still\n                Assumed_newPos_dict[agentID] = self.getPos(agentID)\n                not_checked_list.remove(agentID)\n            elif Assumed_newPos_dict[agentID] in other_agents_dict.values():\n                other_coming_agents = get_key(Assumed_newPos_dict, Assumed_newPos_dict[agentID])\n                
other_coming_agents.remove(agentID)\n                # if the agentID is the biggest among all other coming agents,\n                # allow it to move. Else, let it stand still\n                if agentID < min(other_coming_agents):\n                    status_dict[agentID] = 1 if Assumed_newPos_dict[agentID] == self.agents[agentID].goal_pos else 0\n                    newPos_dict.update({agentID: Assumed_newPos_dict[agentID]})\n                    not_checked_list.remove(agentID)\n                else:\n                    status_dict[agentID] = -3\n                    newPos_dict.update({agentID: self.getPos(agentID)})  # stand still\n                    Assumed_newPos_dict[agentID] = self.getPos(agentID)\n                    not_checked_list.remove(agentID)\n\n        # the rest are valid actions\n        for agentID in copy.deepcopy(not_checked_list):\n            status_dict[agentID] = 1 if Assumed_newPos_dict[agentID] == self.agents[agentID].goal_pos else 0\n            newPos_dict.update({agentID: Assumed_newPos_dict[agentID]})\n            not_checked_list.remove(agentID)\n        assert not not_checked_list\n\n        return status_dict, newPos_dict\n\n\nclass MAPFEnv(gym.Env):\n    metadata = {\"render.modes\": [\"human\", \"ansi\"]}\n\n    def __init__(self, observer, map_generator, num_agents=None,\n                 IsDiagonal=False, frozen_steps=0, isOneShot=False):\n        self.observer = observer\n        self.map_generator = map_generator\n        self.viewer = None\n\n        self.isOneShot = isOneShot\n        self.frozen_steps = frozen_steps\n        self.num_agents = num_agents\n        self.IsDiagonal = IsDiagonal\n        self.set_world()\n        self.obs_size = self.observer.observation_size\n        self.isStandingOnGoal = {i: False for i in range(1, self.num_agents + 1)}\n\n        self.individual_rewards = {i: 0 for i in range(1, self.num_agents + 1)}\n        self.mutex = Lock()\n        self.GIF_frame = []\n        if IsDiagonal:\n    
        self.action_space = spaces.Tuple([spaces.Discrete(self.num_agents), spaces.Discrete(9)])\n        else:\n            self.action_space = spaces.Tuple([spaces.Discrete(self.num_agents), spaces.Discrete(5)])\n\n        self.ACTION_COST, self.GOAL_REWARD, self.COLLISION_REWARD = -0.3, 5., -2.\n\n    def getObstacleMap(self):\n        return (self.world.state == -1).astype(int)\n\n    def getGoals(self):\n        return {i: self.world.agents[i].goal_pos for i in range(1, self.num_agents + 1)}\n\n    def getStatus(self):\n        return {i: self.world.agents[i].status for i in range(1, self.num_agents + 1)}\n\n    def getPositions(self):\n        return {i: self.world.agents[i].position for i in range(1, self.num_agents + 1)}\n\n    def getLastMovements(self):\n        return {i: self.world.agents[i].position_history(-1) for i in range(1, self.num_agents + 1)}\n\n    def set_world(self):\n\n        self.world = World(self.map_generator, num_agents=self.num_agents, isDiagonal=self.IsDiagonal)\n        self.num_agents = self.world.num_agents\n        self.observer.set_env(self.world)\n\n    def _reset(self, *args, **kwargs):\n        raise NotImplementedError\n\n    def isInCorridor(self, agentID):\n        \"\"\"\n        :param agentID: start from 1 not 0!\n        :return: isIn: bool, corridor_ID: int\n        \"\"\"\n        agent_pos = self.world.getPos(agentID)\n        if self.world.corridor_map[(agent_pos[0], agent_pos[1])][1] in [-1, 2]:\n            return False, None\n        else:\n            return True, self.world.corridor_map[(agent_pos[0], agent_pos[1])][0]\n\n    def _observe(self, handles=None):\n        \"\"\"\n        Returns Dict of observation {agentid:[], ...}\n        \"\"\"\n        if handles is None:\n            self.obs_dict = self.observer.get_many(list(range(1, self.num_agents + 1)))\n        elif handles in list(range(1, self.num_agents + 1)):\n            self.obs_dict = self.observer.get_many([handles])\n        elif set(handles) 
== set(handles) & set(list(range(1, self.num_agents + 1))):\n            self.obs_dict = self.observer.get_many(handles)\n        else:\n            raise ValueError(\"Invalid agent_id given\")\n        return self.obs_dict\n\n    def step_all(self, movement_dict):\n        \"\"\"\n        Agents are forced to freeze self.frozen_steps steps if they are standing on their goals.\n        The new goal will be generated at the FIRST step it remains on its goal.\n\n        :param movement_dict: {agentID_starting_from_1: action:int 0-4, ...}\n                              unmentioned agent will be considered as taking standing still\n        :return: obs_of_all:dict, reward_of_single_step:dict\n        \"\"\"\n\n        for agentID in range(1, self.num_agents + 1):\n            if self.world.agents[agentID].freeze > self.frozen_steps:  # set frozen agents free\n                self.world.agents[agentID].freeze = 0\n\n            if agentID not in movement_dict.keys() or self.world.agents[agentID].freeze:\n                movement_dict.update({agentID: 0})\n            else:\n                assert movement_dict[agentID] in list(range(5)) if self.IsDiagonal else list(range(9)), \\\n                    'action not in action space'\n\n        status_dict, newPos_dict = self.world.CheckCollideStatus(movement_dict)\n        self.world.state[self.world.state > 0] = 0  # remove agents in the map\n        put_goal_list = []\n        freeze_list = []\n        for agentID in range(1, self.num_agents + 1):\n            if self.isOneShot and self.world.getDone(agentID) > 0:\n                continue\n\n            newPos = newPos_dict[agentID]\n            self.world.state[newPos] = agentID\n            self.world.agents[agentID].move(newPos, status_dict[agentID])\n            self.give_moving_reward(agentID)\n            if status_dict[agentID] == 1:\n                if not self.isOneShot:\n                    if self.world.agents[agentID].freeze == 0:\n                        
put_goal_list.append(agentID)\n                    if self.world.agents[agentID].action_history[-1] == 0:  # standing still on goal\n                        freeze_list.append(agentID)\n                    self.world.agents[agentID].freeze += 1\n                else:\n                    self.world.agents[agentID].status = 2\n                    self.world.state[newPos] = 0\n                    self.world.goals_map[newPos] = 0\n        free_agents = list(range(1, self.num_agents + 1))\n\n        if put_goal_list and not self.isOneShot:\n            self.world.put_goals(put_goal_list)\n\n            # remove obs for frozen agents:\n\n            for frozen_agent in freeze_list:\n                free_agents.remove(frozen_agent)\n        return self._observe(free_agents), self.individual_rewards\n\n    def give_moving_reward(self, agentID):\n        raise NotImplementedError\n\n    def listValidActions(self, agent_ID, agent_obs):\n        raise NotImplementedError\n\n    def expert_until_first_goal(self, inflation=2.0, time_limit=60.0):\n        world = self.getObstacleMap()\n        start_positions = []\n        goals = []\n        start_positions_dir = self.getPositions()\n        goals_dir = self.getGoals()\n        for i in range(1, self.world.num_agents + 1):\n            start_positions.append(start_positions_dir[i])\n            goals.append(goals_dir[i])\n        mstar_path = None\n        start_time = time.time()\n        try:\n            mstar_path = cpp_mstar.find_path(world, start_positions, goals, inflation, time_limit / 5.0)\n\n        except OutOfTimeError:\n            # M* timed out\n            print(\"timeout\")\n            print('World', world)\n            print('Start Pos', start_positions)\n            print('Goals', goals)\n        except NoSolutionError:\n            print(\"nosol????\")\n            print('World', world)\n            print('Start Pos', start_positions)\n            print('Goals', goals)\n\n        except:\n            
c_time = time.time() - start_time\n            if c_time > time_limit:\n                return mstar_path  # should be None\n\n            # print(\"cpp_mstar crash most likely... trying python mstar instead\")\n            try:\n                mstar_path = od_mstar.find_path(world, start_positions, goals,\n                                                inflation=inflation, time_limit=time_limit - c_time)\n            except OutOfTimeError:\n                # M* timed out\n                print(\"timeout\")\n                print('World', world)\n                print('Start Pos', start_positions)\n                print('Goals', goals)\n            except NoSolutionError:\n                print(\"nosol????\")\n                print('World', world)\n                print('Start Pos', start_positions)\n                print('Goals', goals)\n            except:\n                print(\"Unknown bug?!\")\n\n        return mstar_path\n\n    def _add_rendering_entry(self, entry, permanent=False):\n        if permanent:\n            self.viewer.add_geom(entry)\n        else:\n            self.viewer.add_onetime(entry)\n\n    def _render(self, mode='human', close=False, screen_width=800, screen_height=800):\n\n        def painter(state_map, agents_dict, goals_dict):\n            def initColors(num_agents):\n                c = {a + 1: hsv_to_rgb(np.array([a / float(num_agents), 1, 1])) for a in range(num_agents)}\n                return c\n\n            def create_rectangle(x, y, width, height, fill):\n                ps = [(x, y), ((x + width), y), ((x + width), (y + height)), (x, (y + height))]\n                rect = rendering.FilledPolygon(ps)\n                rect.set_color(fill[0], fill[1], fill[2])\n                rect.add_attr(rendering.Transform())\n                return rect\n\n            def drawStar(centerX, centerY, diameter, numPoints, color):\n                entry_list = []\n                outerRad = diameter // 2\n                innerRad = 
int(outerRad * 3 / 8)\n                # fill the center of the star\n                angleBetween = 2 * math.pi / numPoints  # angle between star points in radians\n                for i in range(numPoints):\n                    # p1 and p3 are on the inner radius, and p2 is the point\n                    pointAngle = math.pi / 2 + i * angleBetween\n                    p1X = centerX + innerRad * math.cos(pointAngle - angleBetween / 2)\n                    p1Y = centerY - innerRad * math.sin(pointAngle - angleBetween / 2)\n                    p2X = centerX + outerRad * math.cos(pointAngle)\n                    p2Y = centerY - outerRad * math.sin(pointAngle)\n                    p3X = centerX + innerRad * math.cos(pointAngle + angleBetween / 2)\n                    p3Y = centerY - innerRad * math.sin(pointAngle + angleBetween / 2)\n                    # draw the triangle for each tip.\n                    poly = rendering.FilledPolygon([(p1X, p1Y), (p2X, p2Y), (p3X, p3Y)])\n                    poly.set_color(color[0], color[1], color[2])\n                    poly.add_attr(rendering.Transform())\n                    entry_list.append(poly)\n                return entry_list\n\n            def create_circle(x, y, diameter, world_size, fill, resolution=20):\n                c = (x + world_size / 2, y + world_size / 2)\n                dr = math.pi * 2 / resolution\n                ps = []\n                for i in range(resolution):\n                    x = c[0] + math.cos(i * dr) * diameter / 2\n                    y = c[1] + math.sin(i * dr) * diameter / 2\n                    ps.append((x, y))\n                circ = rendering.FilledPolygon(ps)\n                circ.set_color(fill[0], fill[1], fill[2])\n                circ.add_attr(rendering.Transform())\n                return circ\n\n            assert len(goals_dict) == len(agents_dict)\n            num_agents = len(goals_dict)\n            world_shape = state_map.shape\n            world_size = screen_width / 
max(*world_shape)\n            colors = initColors(num_agents)\n            if self.viewer is None:\n                self.viewer = rendering.Viewer(screen_width, screen_height)\n                rect = create_rectangle(0, 0, screen_width, screen_height, (.6, .6, .6))\n                self._add_rendering_entry(rect, permanent=True)\n                for i in range(world_shape[0]):\n                    start = 0\n                    end = 1\n                    scanning = False\n                    write = False\n                    for j in range(world_shape[1]):\n                        if state_map[i, j] != -1 and not scanning:  # free\n                            start = j\n                            scanning = True\n                        if (j == world_shape[1] - 1 or state_map[i, j] == -1) and scanning:\n                            end = j + 1 if j == world_shape[1] - 1 else j\n                            scanning = False\n                            write = True\n                        if write:\n                            x = i * world_size\n                            y = start * world_size\n                            rect = create_rectangle(x, y, world_size, world_size * (end - start), (1, 1, 1))\n                            self._add_rendering_entry(rect, permanent=True)\n                            write = False\n            for agent in range(1, num_agents + 1):\n                i, j = agents_dict[agent]\n                x = i * world_size\n                y = j * world_size\n                color = colors[state_map[i, j]]\n                rect = create_rectangle(x, y, world_size, world_size, color)\n                self._add_rendering_entry(rect)\n\n                i, j = goals_dict[agent]\n                x = i * world_size\n                y = j * world_size\n                color = colors[agent]\n                circ = create_circle(x, y, world_size, world_size, color)\n                self._add_rendering_entry(circ)\n                if 
agents_dict[agent][0] == goals_dict[agent][0] and agents_dict[agent][1] == goals_dict[agent][1]:\n                    color = (0, 0, 0)\n                    circ = create_circle(x, y, world_size, world_size, color)\n                    self._add_rendering_entry(circ)\n            # if self.action_probs is not None:\n            #     n_moves = 9 if self.IsDiagonal else 5\n            #     for agent in range(1, num_agents + 1):\n            #         # take the a_dist from the given data and draw it on the frame\n            #         a_dist = self.action_probs[agent - 1]\n            #         if a_dist is not None:\n            #             for m in range(n_moves):\n            #                 dx, dy = action2dir(m)\n            #                 x = (agents_dict(agent)[0] + dx) * world_size\n            #                 y = (agents_dict(agent)[1] + dy) * world_size\n            #                 circ = create_circle(x, y, world_size, world_size, (0, 0, 0))\n            #                 self._add_rendering_entry(circ)\n            result = self.viewer.render(return_rgb_array=1)\n            return result\n\n        frame = painter(self.world.state, self.getPositions(), self.getGoals())\n        return frame\n\n\nif __name__ == \"__main__\":\n    from Primal2Observer import Primal2Observer\n    from Map_Generator import *\n    from Primal2Env import Primal2Env\n    import numpy as np\n    from tqdm import tqdm\n\n    for _ in tqdm(range(2000)):\n        n_agents = np.random.randint(low=25, high=30)\n        env = Primal2Env(num_agents=n_agents,\n                         observer=Primal2Observer(observation_size=3),\n                         map_generator=maze_generator(env_size=(10, 30),\n                                                      wall_components=(3, 8), obstacle_density=(0.5, 0.7)),\n                         IsDiagonal=False)\n        for agentID in range(1, n_agents + 1):\n            pos = env.world.agents[agentID].position\n            goal = 
env.world.agents[agentID].goal_pos\n            assert agentID == env.world.state[pos]\n            assert agentID == env.world.goals_map[goal]\n        assert len(np.argwhere(env.world.state > 0)) == n_agents\n        assert len(np.argwhere(env.world.goals_map > 0)) == n_agents\n"
  },
  {
    "path": "GroupLock.py",
    "content": "from threading import Lock, Condition\n\nclass GroupLock:\n    '''Queues asynchronus threads by group.\n\n    Args:\n\n        groups (key list list) : a list of lists of keys which represent the keys for each thread in each group.\n\n                                e.g. -> [['thread1','thread2'], ['thread3']]'''\n    def __init__(self, groups):\n        self.groups = groups\n\n        self.activeGroup = 0\n\n        self.hasReleased = [{member:False for member in group} for group in groups]\n        self.numGroups = len(groups)\n\n        self._groupConditions = [{member:Condition(Lock()) for member in group} for group in groups]\n\n    def acquire(self, group, id):\n        '''Acquires the lock, blocks if not the thread's turn yet. A thread can acquire the lock once per group cycle.\n\n        e.g.\n        if GroupLock was initialized with [['thread1','thread2'], ['thread3']], a calls would be\n\n            GroupLock.acquire(1, 'thread3')\n            GroupLock.acquire(0, 'thread2')\n\n\n        Args:\n\n            group (int): number of calling thread's group\n            id    (key): key given to identify the thread (given in init)'''\n        self._groupConditions[group][id].acquire()\n        if self.hasReleased[group][id] or self.activeGroup != group:\n            self._groupConditions[group][id].wait()\n\n    def release(self, group, id):\n        '''Releases the group lock. 
All threads in a lock must release before the next group's turn can begin.\n\n        e.g.\n        if GroupLock was initialized with [['thread1','thread2'], ['thread3']], the calls would be\n\n            GroupLock.release(1, 'thread3')\n            GroupLock.release(0, 'thread2')\n\n\n        Args:\n\n            group (int): number of calling thread's group\n            id    (key): key given to identify the thread (given in init)'''\n        self._groupConditions[group][id].release()\n        self.hasReleased[group][id] = True\n\n        if all(self.hasReleased[group].values()):\n            self.hasReleased[group] = {member:False for member in self.hasReleased[group]}\n            self.activeGroup = (self.activeGroup + 1) % len(self.groups)\n\n            releasedGroup = self.activeGroup\n            for memberCondition in self._groupConditions[releasedGroup]:\n                self._groupConditions[releasedGroup][memberCondition].acquire()\n                self._groupConditions[releasedGroup][memberCondition].notify_all()\n                self._groupConditions[releasedGroup][memberCondition].release()\n\n    def releaseAll(self):\n        releasedGroup = self.activeGroup\n        for memberCondition in self._groupConditions[releasedGroup]:\n            self._groupConditions[releasedGroup][memberCondition].acquire()\n            self._groupConditions[releasedGroup][memberCondition].notify_all()\n            self._groupConditions[releasedGroup][memberCondition].release()\n"
  },
  {
    "path": "LICENSE.md",
    "content": "The MIT License (MIT)\n\nCopyright (c) .NET Foundation and Contributors\n\nAll rights reserved.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "Map_Generator.py",
    "content": "import numpy as np\nimport random\nimport sys\nfrom Env_Builder import World\n\n\ndef isConnected(world0):\n    sys.setrecursionlimit(10000)\n    world0 = world0.copy()\n\n    def firstFree(world0):\n        for x in range(world0.shape[0]):\n            for y in range(world0.shape[1]):\n                if world0[x, y] == 0:\n                    return x, y\n\n    def floodfill(world, i, j):\n        sx, sy = world.shape[0], world.shape[1]\n        if i < 0 or i >= sx or j < 0 or j >= sy:  # out of bounds, return\n            return\n        if world[i, j] == -1: return\n        world[i, j] = -1\n        floodfill(world, i + 1, j)\n        floodfill(world, i, j + 1)\n        floodfill(world, i - 1, j)\n        floodfill(world, i, j - 1)\n\n    i, j = firstFree(world0)\n    floodfill(world0, i, j)\n    if np.any(world0 == 0):\n        return False\n    else:\n        return True\n\n\ndef GetConnectedRegion(world, regions_dict, x, y):\n    sys.setrecursionlimit(1000000)\n    '''returns a list of tuples of connected squares to the given tile\n    this is memorized with a dict'''\n    if (x, y) in regions_dict:\n        return regions_dict[(x, y)]\n    visited = set()\n    sx, sy = world.shape[0], world.shape[1]\n    work_list = [(x, y)]\n    while len(work_list) > 0:\n        (i, j) = work_list.pop()\n        if i < 0 or i >= sx or j < 0 or j >= sy:  # out of bounds, return\n            continue\n        if world[i, j] == -1:\n            continue  # crashes\n        if world[i, j] > 0:\n            regions_dict[(i, j)] = visited\n        if (i, j) in visited: continue\n        visited.add((i, j))\n        work_list.append((i + 1, j))\n        work_list.append((i, j + 1))\n        work_list.append((i - 1, j))\n        work_list.append((i, j - 1))\n    regions_dict[(x, y)] = visited\n    return visited\n\n\ndef maze_generator(env_size=(10, 70), wall_components=(1, 8), obstacle_density=None,\n                   go_straight=0.8):\n    min_size, max_size = 
env_size\n    min_component, max_component = wall_components\n    num_components = np.random.randint(low=min_component, high=max_component + 1)\n    assert min_size > 5\n    # todo: write comments\n    \"\"\"\n    num_agents,\n    IsDiagonal,\n    min_size: min length of the 'radius' of the map,\n    max_size: max length of the 'radius' of the map,\n    complexity,\n    obstacle_density,\n    go_straight,\n    \"\"\"\n    if obstacle_density is None:\n        obstacle_density = [0, 1]\n\n    def maze(h, w, total_density=0):\n        # Only odd shapes\n        assert h > 0 and w > 0, \"You are giving non-positive width and height\"\n        shape = ((h // 2) * 2 + 3, (w // 2) * 2 + 3)\n        # Adjust num_components and density relative to maze world_size\n        # density    = int(density * ((shape[0] // 2) * (shape[1] // 2))) // 20 # world_size of components\n        density = int(shape[0] * shape[1] * total_density // num_components) if num_components != 0 else 0\n\n        # Build actual maze\n        Z = np.zeros(shape, dtype='int')\n        # Fill borders\n        Z[0, :] = Z[-1, :] = 1\n        Z[:, 0] = Z[:, -1] = 1\n        # Make aisles\n        for i in range(density):\n            x, y = np.random.randint(0, shape[1] // 2) * 2, np.random.randint(0, shape[\n                0] // 2) * 2  # pick a random position\n            Z[y, x] = 1\n            last_dir = 0\n            for j in range(num_components):\n                neighbours = []\n                if x > 1:             neighbours.append((y, x - 2))\n                if x < shape[1] - 2:  neighbours.append((y, x + 2))\n                if y > 1:             neighbours.append((y - 2, x))\n                if y < shape[0] - 2:  neighbours.append((y + 2, x))\n                if len(neighbours):\n                    if last_dir == 0:\n                        y_, x_ = neighbours[np.random.randint(0, len(neighbours))]\n                        if Z[y_, x_] == 0:\n                            last_dir = (y_ - 
y, x_ - x)\n                            Z[y_, x_] = 1\n                            Z[y_ + (y - y_) // 2, x_ + (x - x_) // 2] = 1\n                            x, y = x_, y_\n                    else:\n                        index_F = -1\n                        index_B = -1\n                        diff = []\n                        for k in range(len(neighbours)):\n                            diff.append((neighbours[k][0] - y, neighbours[k][1] - x))\n                            if diff[k] == last_dir:\n                                index_F = k\n                            elif diff[k][0] + last_dir[0] == 0 and diff[k][1] + last_dir[1] == 0:\n                                index_B = k\n                        assert (index_B >= 0)\n                        if (index_F + 1):\n                            p = (1 - go_straight) * np.ones(len(neighbours)) / (len(neighbours) - 2)\n                            p[index_B] = 0\n                            p[index_F] = go_straight\n                            # assert(p.sum() == 1)\n                        else:\n                            if len(neighbours) == 1:\n                                p = 1\n                            else:\n                                p = np.ones(len(neighbours)) / (len(neighbours) - 1)\n                                p[index_B] = 0\n                            assert (p.sum() == 1)\n\n                        I = np.random.choice(range(len(neighbours)), p=p)\n                        (y_, x_) = neighbours[I]\n                        if Z[y_, x_] == 0:\n                            last_dir = (y_ - y, x_ - x)\n                            Z[y_, x_] = 1\n                            Z[y_ + (y - y_) // 2, x_ + (x - x_) // 2] = 1\n                            x, y = x_, y_\n        return Z\n\n    def generator():\n        # randomize the world RANDOMIZE THE STATIC OBSTACLES obstacle_density =\n        # np.random.triangular(obstacle_density[0], .33 * obstacle_density[0] + .66 * 
obstacle_density[1],\n        # obstacle_density[1])\n        world_size = np.random.randint(min_size, max_size + 1)\n        world = -maze(int(world_size), int(world_size),\n                      total_density=np.random.uniform(obstacle_density[0], obstacle_density[1]),\n                      ).astype(int)\n        world = np.array(world)\n        return world, None\n\n    return generator\n\n\ndef manual_generator(state_map, goals_map=None):\n    state_map = np.array(state_map)\n\n    assert state_map is not None\n    assert len(state_map.shape) == 2\n    assert min(state_map.shape) >= 5\n    if goals_map is not None:\n        goals_map = np.array(goals_map)\n        assert goals_map.shape[0] == state_map.shape[0] and goals_map.shape[1] == state_map.shape[1]\n\n    def generator():\n        return state_map, goals_map\n\n    return generator\n\n\nif __name__ == \"__main__\":\n    from matplotlib import pyplot as plt\n\n    print(\"testing randomized map generation\")\n    plt.ion()\n    for _ in range(1000):\n        generator = maze_generator()\n        world = generator()\n        plt.imshow(world[0])  # obstacle map\n        plt.pause(0.1)\n    plt.ioff()\n    plt.show()\n"
  },
  {
    "path": "Observer_Builder.py",
    "content": "import numpy as np\n\n\ndef _get_one_hot_for_agent_direction(agent):\n    \"\"\"Retuns the agent's direction to one-hot encoding.\"\"\"\n    direction = np.zeros(4)\n    direction[agent.direction] = 1\n    return direction\n\n\nclass ObservationBuilder:\n    \"\"\"\n    ObservationBuilder base class.\n    \"\"\"\n\n    def __init__(self):\n        self.world = None\n        self.NUM_CHANNELS = None\n\n    def set_env(self, env):\n        self.world = env\n\n    def reset(self):\n        \"\"\"\n        Called after each environment reset.\n        \"\"\"\n        raise NotImplementedError()\n\n    def get_many(self, handles):\n        \"\"\"\n        Called whenever an observation has to be computed for the `env` environment, for each agent with handle\n        in the `handles` list.\n\n        Parameters\n        ----------\n        handles : list of handles, optional\n            List with the handles of the agents for which to compute the observation vector.\n\n        Returns\n        -------\n        function\n            A dictionary of observation structures, specific to the corresponding environment, with handles from\n            `handles` as keys.\n        \"\"\"\n        raise NotImplementedError\n\n\nclass DummyObserver(ObservationBuilder):\n    \"\"\"\n    DummyObservationBuilder class which returns dummy observations\n    This is used in the evaluation service\n    \"\"\"\n\n    def __init__(self):\n        super().__init__()\n        self.observation_size = 1\n\n    def reset(self):\n        pass\n\n    def get_many(self, handles) -> bool:\n        return True\n\n    def get(self, handle: int = 0) -> bool:\n        return True\n"
  },
  {
    "path": "Primal2Env.py",
    "content": "from Env_Builder import *\nfrom od_mstar3.col_set_addition import OutOfTimeError, NoSolutionError\nfrom od_mstar3 import od_mstar\nfrom GroupLock import Lock\nimport random\nfrom gym import spaces\n\n'''\n    Observation: \n    Action space: (Tuple)\n        agent_id: positive integer\n        action: {0:STILL, 1:MOVE_NORTH, 2:MOVE_EAST, 3:MOVE_SOUTH, 4:MOVE_WEST,\n                 5:NE, 6:SE, 7:SW, 8:NW, 5,6,7,8 not used in non-diagonal world}\n    Reward: ACTION_COST for each action, GOAL_REWARD when robot arrives at target\n'''\n\n\nclass Primal2Env(MAPFEnv):\n    metadata = {\"render.modes\": [\"human\", \"ansi\"]}\n\n    def __init__(self, observer, map_generator, num_agents=None,\n                 IsDiagonal=False, frozen_steps=0, isOneShot=False):\n        super(Primal2Env, self).__init__(observer=observer, map_generator=map_generator,\n                                          num_agents=num_agents,\n                                          IsDiagonal=IsDiagonal, frozen_steps=frozen_steps, isOneShot=isOneShot)\n\n    def _reset(self, new_generator=None):\n        if new_generator is None:\n            self.set_world()\n        else:\n            self.map_generator = new_generator\n            self.world = World(self.map_generator, num_agents=self.num_agents, isDiagonal=self.IsDiagonal)\n            self.num_agents = self.world.num_agents\n            self.observer.set_env(self.world)\n\n        self.fresh = True\n        if self.viewer is not None:\n            self.viewer = None\n\n    def give_moving_reward(self, agentID):\n        \"\"\"\n        WARNING: ONLY CALL THIS AFTER MOVING AGENTS!\n        Only the moving agent that encounters the collision is penalized! 
Standing still agents\n        never get punishment.\n        \"\"\"\n        collision_status = self.world.agents[agentID].status\n        if collision_status == 0:\n            reward = self.ACTION_COST\n            self.isStandingOnGoal[agentID] = False\n        elif collision_status == 1:\n            reward = self.ACTION_COST + self.GOAL_REWARD\n            self.isStandingOnGoal[agentID] = True\n            self.world.agents[agentID].dones += 1\n        else:\n            reward = self.ACTION_COST + self.COLLISION_REWARD\n            self.isStandingOnGoal[agentID] = False\n        self.individual_rewards[agentID] = reward\n\n    def listValidActions(self, agent_ID, agent_obs):\n        \"\"\"\n        :return: action:int, pos:(int,int)\n        in non-corridor states:\n            return all valid actions\n        in corridor states:\n            if standing on goal: Only going 'forward' allowed\n            if not standing on goal: only going 'forward' allowed\n        \"\"\"\n\n        def get_last_pos(agentID, position):\n            \"\"\"\n            get the last different position of an agent\n            \"\"\"\n            history_list = copy.deepcopy(self.world.agents[agentID].position_history)\n            history_list.reverse()\n            assert (history_list[0] == self.world.getPos(agentID))\n            history_list.pop(0)\n            if history_list == []:\n                return None\n            for pos in history_list:\n                if pos != position:\n                    return pos\n            return None\n\n        available_actions = []\n        pos = self.world.getPos(agent_ID)\n        # if the agent is inside a corridor\n        if self.world.corridor_map[pos[0], pos[1]][1] == 1:\n            corridor_id = self.world.corridor_map[pos[0], pos[1]][0]\n            if [pos[0], pos[1]] not in self.world.corridors[corridor_id]['StoppingPoints']:\n                possible_moves = self.world.blank_env_valid_neighbor(*pos)\n              
  last_position = get_last_pos(agent_ID, pos)\n                for possible_position in possible_moves:\n                    if possible_position is not None and possible_position != last_position \\\n                            and self.world.state[possible_position[0], possible_position[1]] == 0:\n                        available_actions.append(dir2action(tuple_minus(possible_position, pos)))\n\n                    elif len(self.world.corridors[corridor_id]['EndPoints']) == 1 and possible_position is not None \\\n                            and possible_moves.count(None) == 3:\n                        available_actions.append(dir2action(tuple_minus(possible_position, pos)))\n\n                if not available_actions:\n                    available_actions.append(0)\n            else:\n                possible_moves = self.world.blank_env_valid_neighbor(*pos)\n                last_position = get_last_pos(agent_ID, pos)\n                if last_position in self.world.corridors[corridor_id]['Positions']:\n                    available_actions.append(0)\n                    for possible_position in possible_moves:\n                        if possible_position is not None and possible_position != last_position \\\n                                and self.world.state[possible_position[0], possible_position[1]] == 0:\n                            available_actions.append(dir2action(tuple_minus(possible_position, pos)))\n                else:\n                    for possible_position in possible_moves:\n                        if possible_position is not None \\\n                                and self.world.state[possible_position[0], possible_position[1]] == 0:\n                            available_actions.append(dir2action(tuple_minus(possible_position, pos)))\n                    if not available_actions:\n                        available_actions.append(0)\n        else:\n            available_actions.append(0)  # standing still always allowed \n            
num_actions = 4 + 1 if not self.IsDiagonal else 8 + 1\n            for action in range(1, num_actions):\n                direction = action2dir(action)\n                new_pos = tuple_plus(direction, pos)\n                lastpos = None\n                blocking_valid = self.get_blocking_validity(agent_obs, agent_ID, new_pos)\n                if not blocking_valid:\n                    continue\n                try:\n                    lastpos = self.world.agents[agent_ID].position_history[-2]\n                except:\n                    pass\n                if new_pos == lastpos:\n                    continue\n                if self.world.corridor_map[new_pos[0], new_pos[1]][1] == 1:\n                    valid = self.get_convention_validity(agent_obs, agent_ID, new_pos)\n                    if not valid:\n                        continue\n                if self.world.state[new_pos[0], new_pos[1]] == 0:\n                    available_actions.append(action)\n\n        return available_actions\n\n    def get_blocking_validity(self, observation, agent_ID, pos):\n        top_left = (self.world.getPos(agent_ID)[0] - self.obs_size // 2,\n                    self.world.getPos(agent_ID)[1] - self.obs_size // 2)\n        blocking_map = observation[0][5]\n        if blocking_map[pos[0] - top_left[0], pos[1] - top_left[1]] == 1:\n            return 0\n        return 1\n\n    def get_convention_validity(self, observation, agent_ID, pos):\n        top_left = (self.world.getPos(agent_ID)[0] - self.obs_size // 2,\n                    self.world.getPos(agent_ID)[1] - self.obs_size // 2)\n        blocking_map = observation[0][5]\n        if blocking_map[pos[0] - top_left[0], pos[1] - top_left[1]] == -1:\n            deltay_map = observation[0][7]\n            if deltay_map[pos[0] - top_left[0], pos[1] - top_left[1]] > 0:\n                return 1\n            elif deltay_map[pos[0] - top_left[0], pos[1] - top_left[1]] == 0:\n                deltax_map = observation[0][6]\n    
            if deltax_map[pos[0] - top_left[0], pos[1] - top_left[1]] > 0:\n                    return 1\n                else:\n                    return 0\n            elif deltay_map[pos[0] - top_left[0], pos[1] - top_left[1]] < 0:\n                return 0\n            else:\n                print('Weird')\n        else:\n            return 1\n\n\nclass DummyEnv(Primal2Env):\n    def __init__(self, observer, map_generator, num_agents=None, IsDiagonal=False):\n        super(DummyEnv, self).__init__(observer=observer, map_generator=map_generator,\n                                       num_agents=num_agents,\n                                       IsDiagonal=IsDiagonal)\n\n    def _render(self, mode='human', close=False, screen_width=800, screen_height=800):\n        pass\n\n\nif __name__ == '__main__':\n    from matplotlib import pyplot\n    from Primal2Observer import Primal2Observer\n    from Map_Generator import maze_generator\n    from Map_Generator import manual_generator\n\n    state0 = [[-1, -1, -1, -1, -1, -1, -1],\n              [-1, 1, -1, 0, 0, 0, -1],\n              [-1, 0, -1, -1, -1, 0, -1],\n              [-1, 0, 0, 0, -1, 0, -1],\n              [-1, 0, -1, 0, 0, 0, -1],\n              [-1, 2, -1, 0, 0, 0, -1],\n              [-1, -1, -1, -1, -1, -1, -1]]\n    n_agents = 3\n    env = Primal2Env(num_agents=n_agents,\n                      observer=Primal2Observer(observation_size=5),\n                      map_generator=maze_generator(env_size=(8, 10),\n                                                   wall_components=(3, 8), obstacle_density=(0.3, 0.7)),\n                      IsDiagonal=False)\n    print(env.world.state)\n    print(env.world.goals_map)\n    c = 0\n    a = c\n    b = c\n    for j in range(0, 50):\n          movement = {1: a, 2: b, 3: c, 4: c, 5: c, 6: c, 7: c, 8: c}\n          env.step_all(movement)\n          obs = env._observe()\n\n          print(env.world.state)\n          a = int(input())\n          b = int(input())\n"
  },
  {
    "path": "Primal2Observer.py",
    "content": "from Observer_Builder import ObservationBuilder\nimport numpy as np\nimport copy\nfrom Env_Builder import *\n\nimport time\n\n\nclass Primal2Observer(ObservationBuilder):\n    \"\"\"\n    obs shape: (8 + num_future_steps * obs_size * obs_size )\n    map order: poss_map, goal_map, goals_map, obs_map, pathlength_map, blocking_map, deltax_map, deltay_map, astar maps\n    \"\"\"\n\n    def __init__(self, observation_size=11, num_future_steps=3, printTime=False):\n        super(Primal2Observer, self).__init__()\n        self.observation_size = observation_size\n        self.num_future_steps = num_future_steps\n        self.NUM_CHANNELS = 8 + self.num_future_steps\n        self.printTime = printTime\n\n    def set_world(self, world):\n        super().set_env(world)\n\n    def get_next_positions(self, agent_id):\n        agent_pos = self.world.getPos(agent_id)\n        positions = []\n        current_pos = [agent_pos[0], agent_pos[1]]\n        next_positions = self.world.blank_env_valid_neighbor(current_pos[0], current_pos[1])\n        for position in next_positions:\n            if position is not None and position != agent_pos:\n                positions.append([position[0], position[1]])\n                next_next_positions = self.world.blank_env_valid_neighbor(position[0], position[1])\n                for pos in next_next_positions:\n                    if pos is not None and pos not in positions and pos != agent_pos:\n                        positions.append([pos[0], pos[1]])\n\n        return positions\n\n    def _get(self, agent_id, all_astar_maps):\n\n        start_time = time.time()\n\n        assert (agent_id > 0)\n        agent_pos = self.world.getPos(agent_id)\n        top_left = (agent_pos[0] - self.observation_size // 2,\n                    agent_pos[1] - self.observation_size // 2)\n        bottom_right = (top_left[0] + self.observation_size, top_left[1] + self.observation_size)\n        centre = (self.observation_size - 1) / 2\n        
obs_shape = (self.observation_size, self.observation_size)\n\n        goal_map = np.zeros(obs_shape)\n        poss_map = np.zeros(obs_shape)\n        goals_map = np.zeros(obs_shape)\n        obs_map = np.zeros(obs_shape)\n        astar_map = np.zeros([self.num_future_steps, self.observation_size, self.observation_size])\n        astar_map_unpadded = np.zeros([self.num_future_steps, self.world.state.shape[0], self.world.state.shape[1]])\n        pathlength_map = np.zeros(obs_shape)\n        deltax_map = np.zeros(obs_shape)\n        deltay_map = np.zeros(obs_shape)\n        blocking_map = np.zeros(obs_shape)\n\n        time1 = time.time() - start_time\n        start_time = time.time()\n\n        # concatenate all_astar maps\n        other_agents = list(range(self.world.num_agents))  # needs to be 0-indexed for numpy magic below\n        other_agents.remove(agent_id - 1)  # 0-indexing again\n        astar_map_unpadded = np.zeros([self.num_future_steps, self.world.state.shape[0], self.world.state.shape[1]])\n        astar_map_unpadded[:self.num_future_steps, max(0, top_left[0]):min(bottom_right[0], self.world.state.shape[0]),\n        max(0, top_left[1]):min(bottom_right[1], self.world.state.shape[1])] = \\\n            np.sum(all_astar_maps[other_agents, :self.num_future_steps,\n                   max(0, top_left[0]):min(bottom_right[0], self.world.state.shape[0]),\n                   max(0, top_left[1]):min(bottom_right[1], self.world.state.shape[1])], axis=0)\n\n        time2 = time.time() - start_time\n        start_time = time.time()\n\n        # original layers from PRIMAL1\n        visible_agents = []\n        for i in range(top_left[0], top_left[0] + self.observation_size):\n            for j in range(top_left[1], top_left[1] + self.observation_size):\n                if i >= self.world.state.shape[0] or i < 0 or j >= self.world.state.shape[1] or j < 0:\n                    # out of bounds, just treat as an obstacle\n                    obs_map[i - top_left[0], 
j - top_left[1]] = 1\n                    pathlength_map[i - top_left[0], j - top_left[1]] = -1\n                    continue\n\n                astar_map[:self.num_future_steps, i - top_left[0], j - top_left[1]] = astar_map_unpadded[\n                                                                                      :self.num_future_steps, i, j]\n                if self.world.state[i, j] == -1:\n                    # obstacles\n                    obs_map[i - top_left[0], j - top_left[1]] = 1\n                if self.world.state[i, j] == agent_id:\n                    # agent's position\n                    poss_map[i - top_left[0], j - top_left[1]] = 1\n                    # updated_poss_map[i - top_left[0], j - top_left[1]] = 0\n                if self.world.goals_map[i, j] == agent_id:\n                    # agent's goal\n                    goal_map[i - top_left[0], j - top_left[1]] = 1\n                if self.world.state[i, j] > 0 and self.world.state[i, j] != agent_id:\n                    # other agents' positions\n                    visible_agents.append(self.world.state[i, j])\n                    poss_map[i - top_left[0], j - top_left[1]] = 1\n                    # updated_poss_map[i - top_left[0], j - top_left[1]] = self.world.state[i, j]\n\n                # we can keep this map even if on goal,\n                # since observation is computed after the refresh of new distance map\n                pathlength_map[i - top_left[0], j - top_left[1]] = self.world.agents[agent_id].distanceMap[i, j]\n\n        time3 = time.time() - start_time\n        start_time = time.time()\n\n        for agent in visible_agents:\n            x, y = self.world.getGoal(agent)\n            min_node = (max(top_left[0], min(top_left[0] + self.observation_size - 1, x)),\n                        max(top_left[1], min(top_left[1] + self.observation_size - 1, y)))\n            goals_map[min_node[0] - top_left[0], min_node[1] - top_left[1]] = 1\n\n        dx = 
self.world.getGoal(agent_id)[0] - agent_pos[0]\n        dy = self.world.getGoal(agent_id)[1] - agent_pos[1]\n        mag = (dx ** 2 + dy ** 2) ** .5\n        if mag != 0:\n            dx = dx / mag\n            dy = dy / mag\n        if mag > 60:\n            mag = 60\n\n        time4 = time.time() - start_time\n        start_time = time.time()\n\n        current_corridor_id = -1\n        current_corridor = self.world.corridor_map[self.world.getPos(agent_id)[0], self.world.getPos(agent_id)[1]][1]\n        if current_corridor == 1:\n            current_corridor_id = \\\n                self.world.corridor_map[self.world.getPos(agent_id)[0], self.world.getPos(agent_id)[1]][0]\n\n        positions = self.get_next_positions(agent_id)\n        for position in positions:\n            cell_info = self.world.corridor_map[position[0], position[1]]\n            if cell_info[1] == 1:\n                corridor_id = cell_info[0]\n                if corridor_id != current_corridor_id:\n                    if len(self.world.corridors[corridor_id]['EndPoints']) == 1:\n                        if [position[0], position[1]] == self.world.corridors[corridor_id]['StoppingPoints'][0]:\n                            blocking_map[position[0] - top_left[0], position[1] - top_left[1]] = self.get_blocking(\n                                corridor_id,\n                                0, agent_id,\n                                1)\n                    elif [position[0], position[1]] == self.world.corridors[corridor_id]['StoppingPoints'][0]:\n                        end_point_pos = self.world.corridors[corridor_id]['EndPoints'][0]\n                        deltax_map[position[0] - top_left[0], position[1] - top_left[1]] = (self.world.corridors[\n                            corridor_id]['DeltaX'][(end_point_pos[0], end_point_pos[1])])  # / max(mag, 1)\n                        deltay_map[position[0] - top_left[0], position[1] - top_left[1]] = (self.world.corridors[\n                            
corridor_id]['DeltaY'][(end_point_pos[0], end_point_pos[1])])  # / max(mag, 1)\n                        blocking_map[position[0] - top_left[0], position[1] - top_left[1]] = self.get_blocking(\n                            corridor_id,\n                            0, agent_id,\n                            2)\n                    elif [position[0], position[1]] == self.world.corridors[corridor_id]['StoppingPoints'][1]:\n                        end_point_pos = self.world.corridors[corridor_id]['EndPoints'][1]\n                        deltax_map[position[0] - top_left[0], position[1] - top_left[1]] = (self.world.corridors[\n                            corridor_id]['DeltaX'][(end_point_pos[0], end_point_pos[1])])  # / max(mag, 1)\n                        deltay_map[position[0] - top_left[0], position[1] - top_left[1]] = (self.world.corridors[\n                            corridor_id]['DeltaY'][(end_point_pos[0], end_point_pos[1])])  # / max(mag, 1)\n                        blocking_map[position[0] - top_left[0], position[1] - top_left[1]] = self.get_blocking(\n                            corridor_id,\n                            1, agent_id,\n                            2)\n                    else:\n                        pass\n\n        time5 = time.time() - start_time\n        start_time = time.time()\n\n        free_spaces = list(np.argwhere(pathlength_map > 0))\n        distance_list = []\n        for arg in free_spaces:\n            dist = pathlength_map[arg[0], arg[1]]\n            if dist not in distance_list:\n                distance_list.append(dist)\n        distance_list.sort()\n        step_size = (1 / len(distance_list))\n        for i in range(self.observation_size):\n            for j in range(self.observation_size):\n                dist_mag = pathlength_map[i, j]\n                if dist_mag > 0:\n                    index = distance_list.index(dist_mag)\n                    pathlength_map[i, j] = (index + 1) * step_size\n\n        state = 
np.array([poss_map, goal_map, goals_map, obs_map, pathlength_map, blocking_map, deltax_map,\n                          deltay_map])\n        state = np.concatenate((state, astar_map), axis=0)\n\n        time6 = time.time() - start_time\n        start_time = time.time()\n\n        return state, [dx, dy, mag], np.array([time1, time2, time3, time4, time5, time6])\n\n    def get_many(self, handles=None):\n        observations = {}\n        all_astar_maps = self.get_astar_map()\n        if handles is None:\n            handles = list(range(1, self.world.num_agents + 1))\n\n        times = np.zeros((1, 6))\n\n        for h in handles:\n            state, vector, time = self._get(h, all_astar_maps)\n            observations[h] = [state, vector]\n            times += time\n        if self.printTime:\n            print(times)\n        return observations\n\n    def get_astar_map(self):\n        \"\"\"\n\n        :return: a dict of 3D np arrays. Each astar_maps[agentID] is a num_future_steps * obs_size * obs_size matrix.\n        \"\"\"\n\n        def get_single_astar_path(distance_map, start_position, path_len):\n            \"\"\"\n            :param distance_map:\n            :param start_position:\n            :param path_len:\n            :return: [[(x,y), ...],..] a list of lists of positions from start_position, the length of the return can be\n            smaller than num_future_steps. 
Index of the return: list[step][0-n] = tuple(x, y)\n            \"\"\"\n\n            def get_astar_one_step(position):\n                next_astar_cell = []\n                h = self.world.state.shape[0]\n                w = self.world.state.shape[1]\n                for direction in [(1, 0), (0, 1), (-1, 0), (0, -1)]:\n                    # print(position, direction)\n                    new_pos = tuple_plus(position, direction)\n                    if 0 < new_pos[0] <= h and 0 < new_pos[1] <= w:\n                        if distance_map[new_pos] == distance_map[position] - 1 \\\n                                and distance_map[new_pos] >= 0:\n                            next_astar_cell.append(new_pos)\n                return next_astar_cell\n\n            path_counter = 0\n            astar_list = [[start_position]]\n            while path_counter < path_len:\n                last_step_cells = astar_list[-1]\n                next_step_cells = []\n                for cells_per_step in last_step_cells:\n                    new_cell_list = get_astar_one_step(cells_per_step)\n                    if not new_cell_list:  # no next step, should be standing on goal\n                        astar_list.pop(0)\n                        return astar_list\n                    next_step_cells.extend(new_cell_list)\n                next_step_cells = list(set(next_step_cells))  # remove repeated positions\n                astar_list.append(next_step_cells)\n                path_counter += 1\n\n            astar_list.pop(0)\n            return astar_list\n\n        astar_maps = {}\n        for agentID in range(1, self.world.num_agents + 1):\n            astar_maps.update(\n                {agentID: np.zeros([self.num_future_steps, self.world.state.shape[0], self.world.state.shape[1]])})\n\n            distance_map0, start_pos0 = self.world.agents[agentID].distanceMap, self.world.agents[agentID].position\n\n            astar_path = get_single_astar_path(distance_map0, start_pos0, 
self.num_future_steps)\n\n\n            if not len(astar_path) == self.num_future_steps:  # this agent reaches its goal during future steps\n                distance_map1, start_pos1 = self.world.agents[agentID].next_distanceMap, \\\n                                            self.world.agents[agentID].goal_pos\n                astar_path.extend(\n                    get_single_astar_path(distance_map1, start_pos1, self.num_future_steps - len(astar_path)))\n\n            for i in range(self.num_future_steps - len(astar_path)):  # only happen when min_distance not sufficient\n                astar_path.extend([[astar_path[-1][-1]]])  # stay at the last pos\n\n            assert len(astar_path) == self.num_future_steps\n            for step in range(self.num_future_steps):\n                for cell in astar_path[step]:\n                    astar_maps[agentID][step, cell[0], cell[1]] = 1\n\n        return np.asarray([astar_maps[i] for i in range(1, self.world.num_agents + 1)])\n\n    def get_blocking(self, corridor_id, reverse, agent_id, dead_end):\n        def get_last_pos(agentID, position):\n            history_list = copy.deepcopy(self.world.agents[agentID].position_history)\n            history_list.reverse()\n            assert (history_list[0] == self.world.getPos(agentID))\n            history_list.pop(0)\n            if history_list == []:\n                return None\n            for pos in history_list:\n                if pos != position:\n                    return pos\n            return None\n\n        positions_to_check = copy.deepcopy(self.world.corridors[corridor_id]['Positions'])\n        if reverse:\n            positions_to_check.reverse()\n        idx = -1\n        for position in positions_to_check:\n            idx += 1\n            state = self.world.state[position[0], position[1]]\n            if state > 0 and state != agent_id:\n                if dead_end == 1:\n                    return 1\n                if idx == 0:\n                   
 return 1\n                last_pos = get_last_pos(state, position)\n                if last_pos == None:\n                    return 1\n                if idx == len(positions_to_check) - 1:\n                    if last_pos != positions_to_check[idx - 1]:\n                        return 1\n                    break\n                if last_pos == positions_to_check[idx + 1]:\n                    return 1\n        if dead_end == 2:\n            if not reverse:\n                other_endpoint = self.world.corridors[corridor_id]['EndPoints'][1]\n            else:\n                other_endpoint = self.world.corridors[corridor_id]['EndPoints'][0]\n            state_endpoint = self.world.state[other_endpoint[0], other_endpoint[1]]\n            if state_endpoint > 0 and state_endpoint != agent_id:\n                return -1\n        return 0\n\n\nif __name__ == \"__main__\":\n    pass\n"
  },
  {
    "path": "README.md",
    "content": "# PRIMAL_2: Pathfinding via Reinforcement and Imitation Multi_agent Learning - Lifelong\n\n## Setting up Code\n- cd into the od_mstar3 folder.\n- python3 setup.py build_ext --inplace\n- Check by going back to the root of the git folder, running python3 and \"import cpp_mstar\"\n\n\n## Running Code\n- Pick appropriate number of meta agents via variables `NUM_META_AGENTS` and `NUM_IL_META_AGENTS` in `parameters.py`\n- The number of RL meta-agents is implicity defined by the difference between total meta-agents and IL meta-agents (`NUM_RL_META_AGENTS` = `NUM_META_AGENTS` - `NUM_IL_META_AGENTS`)\n- Name training run via `training_version` in `parameters.py`\n- call `python driver.py`\n\n\n## Frequently asked questions\n1. I got `pyglet.canvas.xlib.NoSuchDisplayException: Cannot connect to \"None\"` when running on a server.\n\nRunning your code starting with `xvfb-run` will solve the problem. You may refer to https://stackoverflow.com/questions/60922076/pyglet-canvas-xlib-nosuchdisplayexception-cannot-connect-to-none-only-happens and relevant issues on StackFlow for help.\n\n2. In one-shot environment, why agent turns black after reaching a goal?\n\nIn the one-shot scenario, agent will 'disappear'(i.e., removed from the env). For visualization we keep it as black. Removal of agent who has achieved its goal is necessary, since a lot of narrow corridors in the map could cause unsolvable block and collision. One-shot scenario per se is just a way to test the optimality of the planner. By contrast we do not remove any agents for any reason in continuous env.\n\n## Key Files\n- `parameters.py` - Training parameters.\n- `driver.py` - Driver of program. Holds global network for A3C.\n- `Runner.py` - Compute node for training. Maintains a single meta agent.\n- `Worker.py` - A single agent in a simulation environment. 
Majority of episode computation, including gradient calculation, occurs here.\n- `Ray_ACNet.py` - Defines network architecture.\n- `Env_Builder.py` - Defines the lower level structure of the Lifelong MAPF environment for PRIMAL2, including the world and agents class.\n- `PRIMAL2Env.py` - Defines the high level environment class. \n- `Map_Generator2.py` - Algorithm used to generate worlds, parameterized by world size, obstacle density and wall components.\n- `PRIMAL2Observer.py` - Defines the decentralized observation of each PRIMAL2 agent.\n- `Observer_Builder.py` - The high level observation class\n\n\n## Other Links\n- fully trained PRIMAL2 model in one-shot environment -  https://www.dropbox.com/s/3nppkpy7psg0j5v/model_PRIMAL2_oneshot_3astarMaps.7z?dl=0\n- fully trained PRIMAL2 model in LMAPF environment - https://www.dropbox.com/s/6wjq2bje4mcjywj/model_PRIMAL2_continuous_3astarMaps.7z?dl=0\n\n\n## Authors\n\n[Mehul Damani](damanimehul24@gmail.com)\n\n[Zhiyao Luo](luozhiyao933@126.com)\n\n[Emerson Wenzel](emersonwenzel@gmail.com)\n\n[Guillaume Sartoretti](guillaume.sartoretti@gmail.com)\n"
  },
  {
    "path": "Ray_ACNet.py",
    "content": "import tensorflow as tf\nimport tensorflow.contrib.layers as layers\nimport numpy as np\n\n# parameters for training\nGRAD_CLIP = 10.0\nKEEP_PROB1 = 1  # was 0.5\nKEEP_PROB2 = 1  # was 0.7\nRNN_SIZE = 512\nGOAL_REPR_SIZE = 12\n\n\n# Used to initialize weights for policy and value output layers (Do we need to use that? Maybe not now)\ndef normalized_columns_initializer(std=1.0):\n    def _initializer(shape, dtype=None, partition_info=None):\n        out = np.random.randn(*shape).astype(np.float32)\n        out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))\n        return tf.constant(out)\n\n    return _initializer\n\n\nclass ACNet:\n    def __init__(self, scope, a_size, trainer, TRAINING, NUM_CHANNEL, OBS_SIZE, GLOBAL_NET_SCOPE, GLOBAL_NETWORK=False):\n        with tf.variable_scope(str(scope) + '/qvalues'):\n            self.trainer = trainer\n            # The input size may require more work to fit the interface.\n            self.inputs = tf.placeholder(shape=[None, NUM_CHANNEL, OBS_SIZE, OBS_SIZE], dtype=tf.float32)\n            self.goal_pos = tf.placeholder(shape=[None, 3], dtype=tf.float32)\n            self.myinput = tf.transpose(self.inputs, perm=[0, 2, 3, 1])\n            self.policy, self.value, self.state_out, self.state_in, self.state_init, self.valids = self._build_net(\n                self.myinput, self.goal_pos, RNN_SIZE, TRAINING, a_size)\n        if TRAINING:\n            self.actions = tf.placeholder(shape=[None], dtype=tf.int32)\n            self.actions_onehot = tf.one_hot(self.actions, a_size, dtype=tf.float32)\n            self.train_valid = tf.placeholder(shape=[None, a_size], dtype=tf.float32)\n            self.target_v = tf.placeholder(tf.float32, [None], 'Vtarget')\n            self.advantages = tf.placeholder(shape=[None], dtype=tf.float32)\n\n            self.responsible_outputs = tf.reduce_sum(self.policy * self.actions_onehot, [1])\n            self.train_value = tf.placeholder(tf.float32, [None])\n      
      \n            self.train_policy = tf.placeholder(tf.float32, [None])\n            \n            self.train_imitation = tf.placeholder(tf.float32, [None]) # NEED THIS\n\n            self.optimal_actions = tf.placeholder(tf.int32, [None]) # NEED THIS\n\n            self.optimal_actions_onehot = tf.one_hot(self.optimal_actions, a_size, dtype=tf.float32) # NEED THIS\n            \n            self.train_valids= tf.placeholder(tf.float32, [None,1])\n\n            # Loss Functions\n            self.value_loss  = 0.1 * tf.reduce_mean(\n                self.train_value * tf.square(self.target_v - tf.reshape(self.value, shape=[-1])))\n            \n            self.entropy     = - tf.reduce_mean(self.policy * tf.log(tf.clip_by_value(self.policy, 1e-10, 1.0)))\n            \n            self.policy_loss = - 0.5 * tf.reduce_mean(self.train_policy*\n                tf.log(tf.clip_by_value(self.responsible_outputs, 1e-15, 1.0)) * self.advantages)\n\n            \n            self.valid_loss  = - 16 * tf.reduce_mean(self.train_valids * tf.log(tf.clip_by_value(self.valids, 1e-10, 1.0)) * \\\n                                                    self.train_valid + tf.log(\n                                 tf.clip_by_value(1 - self.valids, 1e-10, 1.0)) * (1 - self.train_valid))\n            \n\n            self.loss = self.value_loss + self.policy_loss + self.valid_loss - self.entropy * 0.01\n\n\n            # IMPORTANT: 0 * self.value_loss is important so we can\n            #            fetch the gradients properly\n            self.imitation_loss =  0 * self.value_loss + tf.reduce_mean(self.train_imitation*\n               tf.keras.backend.categorical_crossentropy(self.optimal_actions_onehot, self.policy))\n            \n            \n            # Get gradients from local network using local losses and\n            # normalize the gradients using clipping\n            \n            local_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope + '/qvalues')\n        
    self.gradients = tf.gradients(self.loss, local_vars)\n            self.var_norms = tf.global_norm(local_vars)\n            self.grads, self.grad_norms = tf.clip_by_global_norm(self.gradients, GRAD_CLIP)\n\n            # Apply local gradients to global network\n            global_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, GLOBAL_NET_SCOPE + '/qvalues')\n            if self.trainer:\n                self.apply_grads = self.trainer.apply_gradients(zip(self.grads, global_vars))\n\n\n            self.local_vars = local_vars\n            \n            # now the gradients for imitation loss\n            self.i_gradients = tf.gradients(self.imitation_loss, local_vars)\n            self.i_var_norms = tf.global_norm(local_vars)\n            self.i_grads, self.i_grad_norms = tf.clip_by_global_norm(self.i_gradients, GRAD_CLIP)\n\n            # Apply local gradients to global network\n            if self.trainer:\n                self.apply_imitation_grads = self.trainer.apply_gradients(zip(self.i_grads, global_vars))\n\n            \n        if GLOBAL_NETWORK:\n            print(\"\\n\\n\\n\\n is a global network\\n\\n\\n\\n\")\n            weightVars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n            self.tempGradients = [tf.placeholder(shape=w.get_shape(), dtype=tf.float32) for w in weightVars]\n            self.apply_grads = self.trainer.apply_gradients(zip(self.tempGradients, weightVars))\n            #self.clippedGrads, norms = tf.clip_by_global_norm(self.tempGradients, GRAD_CLIP)\n            #self.apply_grads = self.trainer.apply_gradients(zip(self.clippedGrads, weightVars))\n            \n        print(\"Hello World... 
From  \" + str(scope))  # :)\n\n    def _build_net(self, inputs, goal_pos, RNN_SIZE, TRAINING, a_size):\n        def conv_mlp(inputs, kernal_size, output_size):\n            inputs = tf.reshape(inputs, [-1, 1, kernal_size, 1])\n            conv = layers.conv2d(inputs=inputs, padding=\"VALID\", num_outputs=output_size,\n                                 kernel_size=[1, kernal_size], stride=1,\n                                 data_format=\"NHWC\", weights_initializer=w_init, activation_fn=tf.nn.relu)\n\n            return conv\n\n        def VGG_Block(inputs):\n            def conv_2d(inputs, kernal_size, output_size):\n                conv = layers.conv2d(inputs=inputs, padding=\"SAME\", num_outputs=output_size,\n                                     kernel_size=[kernal_size[0], kernal_size[1]], stride=1,\n                                     data_format=\"NHWC\", weights_initializer=w_init, activation_fn=tf.nn.relu)\n\n                return conv\n\n            conv1 = conv_2d(inputs, [3, 3], RNN_SIZE // 4)\n            conv1a = conv_2d(conv1, [3, 3], RNN_SIZE // 4)\n            conv1b = conv_2d(conv1a, [3, 3], RNN_SIZE // 4)\n            pool1 = layers.max_pool2d(inputs=conv1b, kernel_size=[2, 2])\n            return pool1\n\n        w_init = layers.variance_scaling_initializer()\n        vgg1 = VGG_Block(inputs)\n        vgg2 = VGG_Block(vgg1)\n\n        conv3 = layers.conv2d(inputs=vgg2, padding=\"VALID\", num_outputs=RNN_SIZE - GOAL_REPR_SIZE, kernel_size=[2, 2],\n                              stride=1, data_format=\"NHWC\", weights_initializer=w_init, activation_fn=None)\n\n        flat = tf.nn.relu(layers.flatten(conv3))\n        goal_layer = layers.fully_connected(inputs=goal_pos, num_outputs=GOAL_REPR_SIZE)\n        hidden_input = tf.concat([flat, goal_layer], 1)\n        h1 = layers.fully_connected(inputs=hidden_input, num_outputs=RNN_SIZE)\n        d1 = layers.dropout(h1, keep_prob=KEEP_PROB1, is_training=TRAINING)\n        h2 = 
layers.fully_connected(inputs=d1, num_outputs=RNN_SIZE, activation_fn=None)\n        d2 = layers.dropout(h2, keep_prob=KEEP_PROB2, is_training=TRAINING)\n        self.h3 = tf.nn.relu(d2 + hidden_input)\n        # Recurrent network for temporal dependencies\n        lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(RNN_SIZE, state_is_tuple=True)\n        c_init = np.zeros((1, lstm_cell.state_size.c), np.float32)\n        h_init = np.zeros((1, lstm_cell.state_size.h), np.float32)\n        state_init = [c_init, h_init]\n        c_in = tf.placeholder(tf.float32, [1, lstm_cell.state_size.c])\n        h_in = tf.placeholder(tf.float32, [1, lstm_cell.state_size.h])\n        state_in = (c_in, h_in)\n        rnn_in = tf.expand_dims(self.h3, [0])\n        step_size = tf.shape(inputs)[:1]\n        state_in = tf.nn.rnn_cell.LSTMStateTuple(c_in, h_in)\n        lstm_outputs, lstm_state = tf.nn.dynamic_rnn(\n            lstm_cell, rnn_in, initial_state=state_in, sequence_length=step_size,\n            time_major=False)\n        lstm_c, lstm_h = lstm_state\n        state_out = (lstm_c[:1, :], lstm_h[:1, :])\n        self.rnn_out = tf.reshape(lstm_outputs, [-1, RNN_SIZE])\n\n        policy_layer = layers.fully_connected(inputs=self.rnn_out, num_outputs=a_size,\n                                              weights_initializer=normalized_columns_initializer(1. / float(a_size)),\n                                              biases_initializer=None, activation_fn=None)\n        policy = tf.nn.softmax(policy_layer)\n        policy_sig = tf.sigmoid(policy_layer)\n        value = layers.fully_connected(inputs=self.rnn_out, num_outputs=1,\n                                       weights_initializer=normalized_columns_initializer(1.0), biases_initializer=None,\n                                       activation_fn=None)\n\n        return policy, value, state_out, state_in, state_init, policy_sig\n"
  },
  {
    "path": "Runner.py",
    "content": "import tensorflow as tf\nimport threading\nimport numpy as np\nimport ray\nimport os\n\nfrom Ray_ACNet import ACNet\nimport GroupLock\n\nfrom Primal2Env import Primal2Env\nfrom Primal2Observer import Primal2Observer\nfrom Map_Generator import maze_generator\n\nfrom Worker import Worker\nimport scipy.signal as signal\n\nfrom parameters import *\n\n\n\nclass Runner(object):\n    \"\"\"Actor object to start running simulation on workers.\n        Gradient computation is also executed on this object.\"\"\"\n    def __init__(self, metaAgentID):\n        # tensorflow must be imported within the constructor\n        # because this class will be instantiated on a remote ray node\n        import tensorflow as tf\n        \n        num_agents = NUM_THREADS\n        self.env = Primal2Env(num_agents=num_agents,\n                              observer=Primal2Observer(observation_size=OBS_SIZE,\n                                                        num_future_steps=NUM_FUTURE_STEPS),\n                              map_generator=maze_generator(\n                                   env_size=ENVIRONMENT_SIZE,\n                                   wall_components=WALL_COMPONENTS,\n                                   obstacle_density=OBSTACLE_DENSITY),\n                              IsDiagonal=DIAG_MVMT,\n                               isOneShot=False)\n        \n        self.metaAgentID = metaAgentID\n\n        trainer = None\n        self.localNetwork = ACNet(GLOBAL_NET_SCOPE,a_size,trainer,True,NUM_CHANNEL,OBS_SIZE,GLOBAL_NET_SCOPE=GLOBAL_NET_SCOPE, GLOBAL_NETWORK=False)\n        self.currEpisode = int(metaAgentID)\n\n        self.global_step = tf.placeholder(tf.float32)\n        \n        \n        # first `NUM_IL_META_AGENTS` only use IL and don't need gpu/tensorflow\n        if self.metaAgentID < NUM_IL_META_AGENTS:\n            config = tf.ConfigProto(allow_soft_placement=True, device_count={\"GPU\": 0})\n            self.coord = None\n            self.saver = 
None\n\n        else:\n            # set up tf session\n            config = tf.ConfigProto(allow_soft_placement = True)\n            config.gpu_options.per_process_gpu_memory_fraction = 1.0 / (NUM_META_AGENTS - NUM_IL_META_AGENTS + 1)\n            config.gpu_options.allow_growth=True\n\n            self.saver = tf.train.Saver(max_to_keep=1)\n            self.coord = tf.train.Coordinator()\n\n            \n        self.sess = tf.Session(config=config)\n        self.sess.run(tf.global_variables_initializer())\n        \n\n        self.weightVars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n        weights = self.sess.run(self.weightVars)\n        self.weightSetters = [tf.placeholder(shape=w.shape, dtype=tf.float32) for w in weights]\n        self.set_weights_ops = [var.assign(w) for var, w in zip(self.weightVars, self.weightSetters)]\n\n    def set_weights(self, weights):\n        feed_dict = {\n            self.weightSetters[i]: w for i, w in enumerate(weights)\n        }\n        self.sess.run([self.set_weights_ops], feed_dict=feed_dict)\n\n\n        \n    def multiThreadedJob(self, episodeNumber):\n        workers = []\n        worker_threads = []\n        workerNames = [\"worker_\" + str(i+1) for i in range(NUM_THREADS)]\n        groupLock = GroupLock.GroupLock([workerNames, workerNames]) # TODO        \n\n        workersPerMetaAgent = NUM_THREADS\n\n        for a in range(NUM_THREADS):\n            agentID = a + 1\n\n            workers.append(Worker(self.metaAgentID, agentID, workersPerMetaAgent,\n                                  self.env, self.localNetwork,\n                                  self.sess, groupLock, learningAgent=True, global_step=self.global_step))\n\n        for w in workers:\n            groupLock.acquire(0, w.name)\n            worker_work = lambda: w.work(episodeNumber, self.coord, self.saver, self.weightVars)\n            t = threading.Thread(target=(worker_work))\n            t.start()\n            \n            
worker_threads.append(t)\n\n        self.coord.join(worker_threads)\n\n        \n        jobResults = []\n        loss_metrics = []\n        perf_metrics = []\n        is_imitation = None\n        for w in workers:\n            if w.learningAgent:\n                if JOB_TYPE == JOB_OPTIONS.getGradient:\n                    jobResults = jobResults + w.allGradients\n                elif JOB_TYPE == JOB_OPTIONS.getExperience:\n                    jobResults.append(w.experienceBuffer)\n            \n            is_imitation = False # w.is_imitation\n\n            loss_metrics.append(w.loss_metrics)\n            perf_metrics.append(w.perf_metrics)\n\n            \n        avg_loss_metrics = list(np.mean(np.array(loss_metrics), axis=0))\n\n\n        if not is_imitation:\n            # perf_metrics structure:\n            #\n            # w.perf_metrics = [\n            #    episode_step_count,\n            #    episode_values,\n            #    episode_inv_count,\n            #    episode_stop_count,\n            #    episode_reward,\n            #    targets_done\n            # ]\n\n            \n            perf_metrics = np.array(perf_metrics)\n            avg_perf_metrics = np.mean(perf_metrics[:, :4], axis=0)\n            episode_reward = np.sum(perf_metrics[:,4])\n            targets_done = np.sum(perf_metrics[:, 5])\n            avg_perf_metrics = list(avg_perf_metrics) + [episode_reward, targets_done]            \n            all_metrics = avg_loss_metrics + avg_perf_metrics\n        else:\n            all_metrics = avg_loss_metrics\n        \n        return jobResults, all_metrics, is_imitation\n    \n\n    def imitationLearningJob(self, episodeNumber):\n        workersPerMetaAgent = NUM_THREADS\n        agentID=None\n        groupLock = None\n\n        worker = Worker(self.metaAgentID, agentID, workersPerMetaAgent,\n                        self.env, self.localNetwork,\n                        self.sess, None, learningAgent=True, 
global_step=self.global_step)\n\n        \n        gradients, losses = worker.imitation_learning_only(episodeNumber)\n        mean_imitation_loss = [np.mean(losses)]\n\n        is_imitation = True\n\n        return gradients, mean_imitation_loss, is_imitation\n        \n        \n    def job(self, global_weights, episodeNumber):\n        print(\"starting episode {} on metaAgent {}\".format(episodeNumber, self.metaAgentID))\n\n        # set the local weights to the global weight values from the master network\n        self.set_weights(global_weights)\n\n\n        # set first `NUM_IL_META_AGENTS` to perform imitation learning\n        if self.metaAgentID < NUM_IL_META_AGENTS:\n            print(\"running imitation job\")\n            jobResults, metrics, is_imitation = self.imitationLearningJob(episodeNumber)\n\n        elif COMPUTE_TYPE == COMPUTE_OPTIONS.multiThreaded:\n            jobResults, metrics, is_imitation = self.multiThreadedJob(episodeNumber)\n\n        elif COMPUTE_TYPE == COMPUTE_OPTIONS.synchronous:\n            print(\"not implemented\")\n            assert(1==0)\n\n                   \n        \n        # Get the job results from the learning agents\n        # and send them back to the master network        \n        info = {\n            \"id\": self.metaAgentID,\n            \"episode_number\": episodeNumber,\n            \"is_imitation\": is_imitation\n        }\n\n        return jobResults, metrics, info\n\n\n@ray.remote(num_cpus=3, num_gpus= 1.0 / (NUM_META_AGENTS - NUM_IL_META_AGENTS + 1))\nclass RLRunner(Runner):\n    def __init__(self, metaAgentID):        \n        super().__init__(metaAgentID)\n\n\n@ray.remote(num_cpus=1, num_gpus=0)\nclass imitationRunner(Runner):\n    def __init__(self, metaAgentID):        \n        super().__init__(metaAgentID)\n"
  },
  {
    "path": "Worker.py",
    "content": "import scipy.signal as signal\nimport copy\nimport numpy as np\nimport ray\nimport os\nimport imageio\nfrom Env_Builder import *\n\nfrom Map_Generator import maze_generator\n\nfrom parameters import *\n\n\n# helper functions\ndef discount(x, gamma):\n    return signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]\n\n\nclass Worker():\n    def __init__(self, metaAgentID, workerID, workers_per_metaAgent, env, localNetwork, sess, groupLock, learningAgent,\n                 global_step):\n\n        self.metaAgentID = metaAgentID\n        self.agentID = workerID\n        self.name = \"worker_\" + str(workerID)\n        self.num_workers = workers_per_metaAgent\n        self.global_step = global_step\n        self.nextGIF = 0\n\n        self.env = env\n        self.local_AC = localNetwork\n        self.groupLock = groupLock\n        self.learningAgent = learningAgent\n        self.sess = sess\n        self.allGradients = []\n\n    def calculateImitationGradient(self, rollout, episode_count):\n        rollout = np.array(rollout, dtype=object)\n        # we calculate the loss differently for imitation\n        # if imitation=True the rollout is assumed to have different dimensions:\n        # [o[0],o[1],optimal_actions]\n\n        temp_actions = np.stack(rollout[:, 2])\n        rnn_state = self.local_AC.state_init\n        feed_dict = {self.global_step             : episode_count,\n                     self.local_AC.inputs         : np.stack(rollout[:, 0]),\n                     self.local_AC.goal_pos       : np.stack(rollout[:, 1]),\n                     self.local_AC.optimal_actions: np.stack(rollout[:, 2]),\n                     self.local_AC.state_in[0]    : rnn_state[0],\n                     self.local_AC.state_in[1]    : rnn_state[1],\n                     self.local_AC.train_imitation: (rollout[:, 3]),\n                     self.local_AC.target_v       : np.stack(temp_actions),\n                     self.local_AC.train_value    : temp_actions,\n\n 
                    }\n\n        v_l, i_l, i_grads = self.sess.run([self.local_AC.value_loss,\n                                           self.local_AC.imitation_loss,\n                                           self.local_AC.i_grads],\n                                          feed_dict=feed_dict)\n\n        return [i_l], i_grads\n\n    def calculateGradient(self, rollout, bootstrap_value, episode_count, rnn_state0):\n        # ([s,a,r,s1,v[0,0]])\n\n        rollout = np.array(rollout, dtype=object)\n        observations = rollout[:, 0]\n        goals = rollout[:, -3]\n        actions = rollout[:, 1]\n        rewards = rollout[:, 2]\n        values = rollout[:, 4]\n        valids = rollout[:, 5]\n        train_value = rollout[:, -2]\n        train_policy = rollout[:, -1]\n\n        # Here we take the rewards and values from the rollout, and use them to\n        # generate the advantage and discounted returns. (With bootstrapping)\n        # The advantage function uses \"Generalized Advantage Estimation\"\n        self.rewards_plus = np.asarray(rewards.tolist() + [bootstrap_value])\n        discounted_rewards = discount(self.rewards_plus, gamma)[:-1]\n        self.value_plus = np.asarray(values.tolist() + [bootstrap_value])\n        advantages = rewards + gamma * self.value_plus[1:] - self.value_plus[:-1]\n        advantages = discount(advantages, gamma)\n\n        num_samples = min(EPISODE_SAMPLES, len(advantages))\n        sampleInd = np.sort(np.random.choice(advantages.shape[0], size=(num_samples,), replace=False))\n\n        feed_dict = {\n            self.global_step          : episode_count,\n            self.local_AC.target_v    : np.stack(discounted_rewards),\n            self.local_AC.inputs      : np.stack(observations),\n            self.local_AC.goal_pos    : np.stack(goals),\n            self.local_AC.actions     : actions,\n            self.local_AC.train_valid : np.stack(valids),\n            self.local_AC.advantages  : advantages,\n            
self.local_AC.train_value : train_value,\n            self.local_AC.state_in[0] : rnn_state0[0],\n            self.local_AC.state_in[1] : rnn_state0[1],\n            self.local_AC.train_policy: train_policy,\n            self.local_AC.train_valids: np.vstack(train_policy)\n        }\n\n        v_l, p_l, valid_l, e_l, g_n, v_n, grads = self.sess.run([self.local_AC.value_loss,\n                                                                 self.local_AC.policy_loss,\n                                                                 self.local_AC.valid_loss,\n                                                                 self.local_AC.entropy,\n                                                                 self.local_AC.grad_norms,\n                                                                 self.local_AC.var_norms,\n                                                                 self.local_AC.grads],\n                                                                feed_dict=feed_dict)\n\n        return [v_l, p_l, valid_l, e_l, g_n, v_n], grads\n\n    def imitation_learning_only(self, episode_count):\n        self.env._reset()\n        rollouts, targets_done = self.parse_path(episode_count)\n\n        if rollouts is None:\n            return None, 0\n\n        gradients = []\n        losses = []\n        for i in range(self.num_workers):\n            train_buffer = rollouts[i]\n\n            imitation_loss, grads = self.calculateImitationGradient(train_buffer, episode_count)\n\n            gradients.append(grads)\n            losses.append(imitation_loss)\n\n        return gradients, losses\n\n    def run_episode_multithreaded(self, episode_count, coord):\n\n        if self.metaAgentID < NUM_IL_META_AGENTS:\n            assert (1 == 0)\n            # print(\"THIS CODE SHOULD NOT TRIGGER\")\n            self.is_imitation = True\n            self.imitation_learning_only()\n\n        global episode_lengths, episode_mean_values, episode_invalid_ops, 
episode_stop_ops, episode_rewards, episode_finishes\n\n        num_agents = self.num_workers\n\n        with self.sess.as_default(), self.sess.graph.as_default():\n            while self.shouldRun(coord, episode_count):\n                episode_buffer, episode_values = [], []\n                episode_reward = episode_step_count = episode_inv_count = targets_done = episode_stop_count = 0\n\n                # Initial state from the environment\n                if self.agentID == 1:\n                    self.env._reset()\n                    joint_observations[self.metaAgentID] = self.env._observe()\n\n                self.synchronize()  # synchronize starting time of the threads\n\n                # Get Information For Each Agent \n                validActions = self.env.listValidActions(self.agentID,\n                                                         joint_observations[self.metaAgentID][self.agentID])\n\n                s = joint_observations[self.metaAgentID][self.agentID]\n\n                rnn_state = self.local_AC.state_init\n                rnn_state0 = rnn_state\n\n                self.synchronize()  # synchronize starting time of the threads\n                swarm_reward[self.metaAgentID] = 0\n                swarm_targets[self.metaAgentID] = 0\n\n                episode_rewards[self.metaAgentID] = []\n                episode_finishes[self.metaAgentID] = []\n                episode_lengths[self.metaAgentID] = []\n                episode_mean_values[self.metaAgentID] = []\n                episode_invalid_ops[self.metaAgentID] = []\n                episode_stop_ops[self.metaAgentID] = []\n\n                # ===============================start training =======================================================================\n                # RL\n                if True:\n                    # prepare to save GIF\n                    saveGIF = False\n                    global GIFS_FREQUENCY_RL\n                    if OUTPUT_GIFS and self.agentID == 1 
and ((not TRAINING) or (episode_count >= self.nextGIF)):\n                        saveGIF = True\n                        self.nextGIF = episode_count + GIFS_FREQUENCY_RL\n                        GIF_episode = int(episode_count)\n                        GIF_frames = [self.env._render()]\n\n                    # start RL\n                    self.env.finished = False\n                    while not self.env.finished:\n                        a_dist, v, rnn_state = self.sess.run([self.local_AC.policy,\n                                                              self.local_AC.value,\n                                                              self.local_AC.state_out],\n                                                             feed_dict={self.local_AC.inputs     : [s[0]],  # state\n                                                                        self.local_AC.goal_pos   : [s[1]],\n                                                                        # goal vector\n                                                                        self.local_AC.state_in[0]: rnn_state[0],\n                                                                        self.local_AC.state_in[1]: rnn_state[1]})\n\n                        skipping_state = False\n                        train_policy = train_val = 1\n\n                        if not skipping_state:\n                            if not (np.argmax(a_dist.flatten()) in validActions):\n                                episode_inv_count += 1\n                                train_val = 0\n                            train_valid = np.zeros(a_size)\n                            train_valid[validActions] = 1\n\n                            valid_dist = np.array([a_dist[0, validActions]])\n                            valid_dist /= np.sum(valid_dist)\n\n                            a = validActions[np.random.choice(range(valid_dist.shape[1]), p=valid_dist.ravel())]\n                            
joint_actions[self.metaAgentID][self.agentID] = a\n                            if a == 0:\n                                episode_stop_count += 1\n\n                        # Make A Single Agent Gather All Information\n\n                        self.synchronize()\n\n                        if self.agentID == 1:\n                            all_obs, all_rewards = self.env.step_all(joint_actions[self.metaAgentID])\n                            for i in range(1, self.num_workers + 1):\n                                joint_observations[self.metaAgentID][i] = all_obs[i]\n                                joint_rewards[self.metaAgentID][i] = all_rewards[i]\n                                joint_done[self.metaAgentID][i] = (self.env.world.agents[i].status == 1)\n                            if saveGIF and self.agentID == 1:\n                                GIF_frames.append(self.env._render())\n\n                        self.synchronize()  # synchronize threads\n\n                        # Get observation,reward, valid actions for each agent \n                        s1 = joint_observations[self.metaAgentID][self.agentID]\n                        r = copy.deepcopy(joint_rewards[self.metaAgentID][self.agentID])\n                        validActions = self.env.listValidActions(self.agentID, s1)\n\n                        self.synchronize()\n                        # Append to Appropriate buffers \n                        if not skipping_state:\n                            episode_buffer.append(\n                                [s[0], a, joint_rewards[self.metaAgentID][self.agentID], s1, v[0, 0], train_valid, s[1],\n                                 train_val, train_policy])\n                            episode_values.append(v[0, 0])\n                        episode_reward += r\n                        episode_step_count += 1\n\n                        # Update State\n                        s = s1\n\n                        # If the episode hasn't ended, but the experience 
buffer is full, then we\n                        # make an update step using that experience rollout.\n                        if (len(episode_buffer) > 1) and (\n                                (len(episode_buffer) % EXPERIENCE_BUFFER_SIZE == 0) or joint_done[self.metaAgentID][\n                            self.agentID] or episode_step_count == max_episode_length):\n                            # Since we don't know what the true final return is,\n                            # we \"bootstrap\" from our current value estimation.\n                            if len(episode_buffer) >= EXPERIENCE_BUFFER_SIZE:\n                                train_buffer = episode_buffer[-EXPERIENCE_BUFFER_SIZE:]\n                            else:\n                                train_buffer = episode_buffer[:]\n\n                            if joint_done[self.metaAgentID][self.agentID]:\n                                s1Value = 0  # Terminal state\n                                episode_buffer = []\n                                joint_done[self.metaAgentID][self.agentID] = False\n                                targets_done += 1\n\n                            else:\n                                s1Value = self.sess.run(self.local_AC.value,\n                                                        feed_dict={self.local_AC.inputs     : np.array([s[0]]),\n                                                                   self.local_AC.goal_pos   : [s[1]],\n                                                                   self.local_AC.state_in[0]: rnn_state[0],\n                                                                   self.local_AC.state_in[1]: rnn_state[1]})[0, 0]\n\n                            self.loss_metrics, grads = self.calculateGradient(train_buffer, s1Value, episode_count,\n                                                                              rnn_state0)\n\n                            self.allGradients.append(grads)\n\n                            
rnn_state0 = rnn_state\n\n                        self.synchronize()\n\n                        # finish condition: reach max-len or all agents are done under one-shot mode\n                        if episode_step_count >= max_episode_length:\n                            break\n\n                    episode_lengths[self.metaAgentID].append(episode_step_count)\n                    episode_mean_values[self.metaAgentID].append(np.nanmean(episode_values))\n                    episode_invalid_ops[self.metaAgentID].append(episode_inv_count)\n                    episode_stop_ops[self.metaAgentID].append(episode_stop_count)\n                    swarm_reward[self.metaAgentID] += episode_reward\n                    swarm_targets[self.metaAgentID] += targets_done\n\n                    self.synchronize()\n                    if self.agentID == 1:\n                        episode_rewards[self.metaAgentID].append(swarm_reward[self.metaAgentID])\n                        episode_finishes[self.metaAgentID].append(swarm_targets[self.metaAgentID])\n\n                        if saveGIF:\n                            make_gif(np.array(GIF_frames),\n                                     '{}/episode_{:d}_{:d}_{:.1f}.gif'.format(gifs_path, GIF_episode,\n                                                                              episode_step_count,\n                                                                              swarm_reward[self.metaAgentID]))\n\n                    self.synchronize()\n\n                    perf_metrics = np.array([\n                        episode_step_count,\n                        np.nanmean(episode_values),\n                        episode_inv_count,\n                        episode_stop_count,\n                        episode_reward,\n                        targets_done\n                    ])\n\n                    assert len(self.allGradients) > 0, 'Empty gradients at end of RL episode?!'\n                    return perf_metrics\n\n    def 
synchronize(self):\n        # handy thing for keeping track of which to release and acquire\n        if not hasattr(self, \"lock_bool\"):\n            self.lock_bool = False\n        self.groupLock.release(int(self.lock_bool), self.name)\n        self.groupLock.acquire(int(not self.lock_bool), self.name)\n        self.lock_bool = not self.lock_bool\n\n    def work(self, currEpisode, coord, saver, allVariables):\n        '''\n        Interacts with the environment. The agent gets either gradients or experience buffer\n        '''\n        self.currEpisode = currEpisode\n\n        if COMPUTE_TYPE == COMPUTE_OPTIONS.multiThreaded:\n            self.perf_metrics = self.run_episode_multithreaded(currEpisode, coord)\n        else:\n            print(\"not implemented\")\n            assert (1 == 0)\n\n            # gradients are accessed by the runner in self.allGradients\n        return\n\n    # Used for imitation learning\n    def parse_path(self, episode_count):\n        \"\"\"needed function to take the path generated from M* and create the\n        observations and actions for the agent\n        path: the exact path ouput by M*, assuming the correct number of agents\n        returns: the list of rollouts for the \"episode\":\n                list of length num_agents with each sublist a list of tuples\n                (observation[0],observation[1],optimal_action,reward)\"\"\"\n\n        result = [[] for i in range(self.num_workers)]\n        actions = {}\n        o = {}\n        train_imitation = {}\n        targets_done = 0\n        saveGIF = False\n\n        if np.random.rand() < IL_GIF_PROB:\n            saveGIF = True\n        if saveGIF and OUTPUT_IL_GIFS:\n            GIF_frames = [self.env._render()]\n\n        single_done = False\n        new_call = False\n        new_MSTAR_call = False\n\n        all_obs = self.env._observe()\n        for agentID in range(1, self.num_workers + 1):\n            o[agentID] = all_obs[agentID]\n            
train_imitation[agentID] = 1\n        step_count = 0\n        while step_count <= IL_MAX_EP_LENGTH:\n            path = self.env.expert_until_first_goal()\n            if path is None:  # solution not exists\n                if step_count != 0:\n                    return result, targets_done\n                # print('Failed intially')\n                return None, 0\n            none_on_goal = True\n            path_step = 1\n            while none_on_goal and step_count <= IL_MAX_EP_LENGTH:\n                completed_agents = []\n                start_positions = []\n                goals = []\n                for i in range(self.num_workers):\n                    agent_id = i + 1\n                    next_pos = path[path_step][i]\n                    diff = tuple_minus(next_pos, self.env.world.getPos(agent_id))\n                    actions[agent_id] = dir2action(diff)\n\n                all_obs, _ = self.env.step_all(actions)\n                for i in range(self.num_workers):\n                    agent_id = i + 1\n                    result[i].append([o[agent_id][0], o[agent_id][1], actions[agent_id], train_imitation[agent_id]])\n                    if self.env.world.agents[agent_id].status == 1:\n                        completed_agents.append(i)\n                        targets_done += 1\n                        single_done = True\n                        if targets_done % MSTAR_CALL_FREQUENCY == 0:\n                            new_MSTAR_call = True\n                        else:\n                            new_call = True\n                if saveGIF and OUTPUT_IL_GIFS:\n                    GIF_frames.append(self.env._render())\n                if single_done and new_MSTAR_call:\n                    path = self.env.expert_until_first_goal()\n                    if path is None:\n                        return result, targets_done\n                    path_step = 0\n                elif single_done and new_call:\n                    path = path[path_step:]\n   
                 path = [list(state) for state in path]\n                    for finished_agent in completed_agents:\n                        path = merge_plans(path, [None] * len(path), finished_agent)\n                    try:\n                        while path[-1] == path[-2]:\n                            path = path[:-1]\n                    except:\n                        assert (len(path) <= 2)\n                    start_positions_dir = self.env.getPositions()\n                    goals_dir = self.env.getGoals()\n                    for i in range(1, self.env.world.num_agents + 1):\n                        start_positions.append(start_positions_dir[i])\n                        goals.append(goals_dir[i])\n                    world = self.env.getObstacleMap()\n                    # print('OLD PATH', path) # print('CURRENT POSITIONS', start_positions) # print('CURRENT GOALS',goals) # print('WORLD',world)\n                    try:\n                        path = priority_planner(world, tuple(start_positions), tuple(goals), path)\n                    except:\n                        path = self.env.expert_until_first_goal()\n                        if path is None:\n                            return result, targets_done\n                    path_step = 0\n                o = all_obs\n                step_count += 1\n                path_step += 1\n                new_call = False\n                new_MSTAR_call = False\n        if saveGIF and OUTPUT_IL_GIFS:\n            make_gif(np.array(GIF_frames),\n                     '{}/episodeIL_{}.gif'.format(gifs_path, episode_count))\n        return result, targets_done\n\n    def shouldRun(self, coord, episode_count=None):\n        if TRAINING:\n            return not coord.should_stop()\n"
  },
  {
    "path": "driver.py",
    "content": "import numpy as np\nimport tensorflow as tf\nimport os\nimport ray\n\nfrom Ray_ACNet import ACNet\nfrom Runner import imitationRunner, RLRunner\n\nfrom parameters import *\nimport random\n\n\nray.init(num_gpus=1)\n\n\ntf.reset_default_graph()\nprint(\"Hello World\")\n\nconfig = tf.ConfigProto(allow_soft_placement = True)\nconfig.gpu_options.per_process_gpu_memory_fraction = 1.0 / (NUM_META_AGENTS - NUM_IL_META_AGENTS + 1)\nconfig.gpu_options.allow_growth=True\n\n\n\n\n# Create directories\nif not os.path.exists(model_path):\n    os.makedirs(model_path)\nif not os.path.exists(gifs_path):\n    os.makedirs(gifs_path)\n\n\nglobal_step = tf.placeholder(tf.float32)\n        \nif ADAPT_LR:\n    # computes LR_Q/sqrt(ADAPT_COEFF*steps+1)\n    # we need the +1 so that lr at step 0 is defined\n    lr = tf.divide(tf.constant(LR_Q), tf.sqrt(tf.add(1., tf.multiply(tf.constant(ADAPT_COEFF), global_step))))\nelse:\n    lr = tf.constant(LR_Q)\n\n\ndef apply_gradients(global_network, gradients, sess, curr_episode):\n    feed_dict = {\n        global_network.tempGradients[i]: g for i, g in enumerate(gradients)\n    }\n    feed_dict[global_step] = curr_episode\n\n    sess.run([global_network.apply_grads], feed_dict=feed_dict)\n\ndef writeImitationDataToTensorboard(global_summary, metrics, curr_episode):    \n    summary = tf.Summary()\n    summary.value.add(tag='Losses/Imitation loss', simple_value=metrics[0])\n    global_summary.add_summary(summary, curr_episode)\n    global_summary.flush()\n\n\ndef writeEpisodeRatio(global_summary, numIL, numRL, sess, curr_episode):\n    summary = tf.Summary()\n\n    current_learning_rate = sess.run(lr, feed_dict={global_step: curr_episode})\n\n    RL_IL_Ratio = numRL / (numRL + numIL)\n    summary.value.add(tag='Perf/Num IL Ep.', simple_value=numIL)\n    summary.value.add(tag='Perf/Num RL Ep.', simple_value=numRL)\n    summary.value.add(tag='Perf/ RL IL ratio Ep.', simple_value=RL_IL_Ratio)\n    summary.value.add(tag='Perf/Learning 
Rate', simple_value=current_learning_rate)\n    global_summary.add_summary(summary, curr_episode)\n    global_summary.flush()\n\n    \n\ndef writeToTensorBoard(global_summary, tensorboardData, curr_episode, plotMeans=True):\n    # each row in tensorboardData represents an episode\n    # each column is a specific metric\n    \n    if plotMeans == True:\n        tensorboardData = np.array(tensorboardData)\n        tensorboardData = list(np.mean(tensorboardData, axis=0))\n\n        valueLoss, policyLoss, validLoss, entropyLoss, gradNorm, varNorm,\\\n            mean_length, mean_value, mean_invalid, \\\n            mean_stop, mean_reward, mean_finishes = tensorboardData\n        \n    else:\n        firstEpisode = tensorboardData[0]\n        valueLoss, policyLoss, validLoss, entropyLoss, gradNorm, varNorm, \\\n            mean_length, mean_value, mean_invalid, \\\n            mean_stop, mean_reward, mean_finishes = firstEpisode\n\n        \n    summary = tf.Summary()\n    \n    summary.value.add(tag='Perf/Reward', simple_value=mean_reward)\n    summary.value.add(tag='Perf/Targets Done', simple_value=mean_finishes)\n    summary.value.add(tag='Perf/Length', simple_value=mean_length)\n    summary.value.add(tag='Perf/Valid Rate', simple_value=(mean_length - mean_invalid) / mean_length)\n    summary.value.add(tag='Perf/Stop Rate', simple_value=(mean_stop) / mean_length)\n\n    summary.value.add(tag='Losses/Value Loss', simple_value=valueLoss)\n    summary.value.add(tag='Losses/Policy Loss', simple_value=policyLoss)\n    summary.value.add(tag='Losses/Valid Loss', simple_value=validLoss)\n    summary.value.add(tag='Losses/Entropy Loss', simple_value=entropyLoss)\n    summary.value.add(tag='Losses/Grad Norm', simple_value=gradNorm)\n    summary.value.add(tag='Losses/Var Norm', simple_value=varNorm)\n\n    \n    global_summary.add_summary(summary, int(curr_episode - len(tensorboardData)))\n    global_summary.flush()\n\n\n    \ndef main():    \n    with tf.device(\"/gpu:0\"):\n 
       trainer = tf.contrib.opt.NadamOptimizer(learning_rate=lr, use_locking=True)\n        global_network = ACNet(GLOBAL_NET_SCOPE,a_size,trainer,False,NUM_CHANNEL, OBS_SIZE,GLOBAL_NET_SCOPE, GLOBAL_NETWORK=True)\n\n        global_summary = tf.summary.FileWriter(train_path)\n        saver = tf.train.Saver(max_to_keep=1)\n\n    with tf.Session(config=config) as sess:\n        sess.run(tf.global_variables_initializer())\n        if load_model == True:\n            print ('Loading Model...')\n            ckpt = tf.train.get_checkpoint_state(model_path)\n            p=ckpt.model_checkpoint_path\n            p=p[p.find('-')+1:]\n            p=p[:p.find('.')]\n            curr_episode=int(p)\n\n            saver.restore(sess,ckpt.model_checkpoint_path)\n            print(\"curr_episode set to \",curr_episode)\n        else:\n            curr_episode = 0\n\n\n        \n        # launch all of the threads:\n    \n        il_agents = [imitationRunner.remote(i) for i in range(NUM_IL_META_AGENTS)]\n        rl_agents = [RLRunner.remote(i) for i in range(NUM_IL_META_AGENTS, NUM_META_AGENTS)]\n        meta_agents = il_agents + rl_agents\n\n        \n\n        # get the initial weights from the global network\n        weight_names = tf.trainable_variables()\n        weights = sess.run(weight_names) # Gets weights in numpy arrays CHECK\n\n\n        weightVars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n\n        \n        # launch the first job (e.g. 
getGradient) on each runner\n        jobList = [] # Ray ObjectIDs \n        for i, meta_agent in enumerate(meta_agents):\n            jobList.append(meta_agent.job.remote(weights, curr_episode))\n            curr_episode += 1\n\n        tensorboardData = []\n\n\n        IDs = [None] * NUM_META_AGENTS\n\n        numImitationEpisodes = 0\n        numRLEpisodes = 0\n        try:\n            while True:\n                # wait for any job to be completed - unblock as soon as the earliest arrives\n                done_id, jobList = ray.wait(jobList)\n                \n                # get the results of the task from the object store\n                jobResults, metrics, info = ray.get(done_id)[0]\n\n                # imitation episodes write different data to tensorboard\n                if info['is_imitation']:\n                    if jobResults:\n                        writeImitationDataToTensorboard(global_summary, metrics, curr_episode)\n                        numImitationEpisodes += 1\n                else:\n                    if jobResults:\n                        tensorboardData.append(metrics)\n                        numRLEpisodes += 1\n\n\n                # Write ratio of RL to IL episodes to tensorboard\n                writeEpisodeRatio(global_summary, numImitationEpisodes, numRLEpisodes, sess, curr_episode)\n\n                \n                if JOB_TYPE == JOB_OPTIONS.getGradient:\n                    if jobResults:\n                        for gradient in jobResults:\n                            apply_gradients(global_network, gradient, sess, curr_episode)\n\n                    \n                elif JOB_TYPE == JOB_OPTIONS.getExperience:\n                    print(\"not implemented\")\n                    assert(1==0)\n                else:\n                    print(\"not implemented\")\n                    assert(1==0)\n\n\n                # Every `SUMMARY_WINDOW` RL episodes, write RL episodes to tensorboard\n                if 
len(tensorboardData) >= SUMMARY_WINDOW:\n                    writeToTensorBoard(global_summary, tensorboardData, curr_episode)\n                    tensorboardData = []\n                    \n                # get the updated weights from the global network\n                weight_names = tf.trainable_variables()\n                weights = sess.run(weight_names)\n                curr_episode += 1\n\n                # start a new job on the recently completed agent with the updated weights\n                jobList.extend([meta_agents[info['id']].job.remote(weights, curr_episode)])\n\n                \n                if curr_episode % 100 == 0:\n                    print ('Saving Model', end='\\n')\n                    saver.save(sess, model_path+'/model-'+str(int(curr_episode))+'.cptk')\n                    print ('Saved Model', end='\\n')\n\n                \n                    \n        except KeyboardInterrupt:\n            print(\"CTRL-C pressed. killing remote workers\")\n            for a in meta_agents:\n                ray.kill(a)\n\n\nif __name__ == \"__main__\": \n    main()\n"
  },
  {
    "path": "od_mstar3/SortedCollection.py",
    "content": "from bisect import bisect_left, bisect_right\r\n\r\n\r\nclass SortedCollection(object):\r\n    \"\"\"Sequence sorted by a key function.\r\n\r\n    SortedCollection() is much easier to work with than using bisect()\r\n    directly. It supports key functions like those use in sorted(),\r\n    min(), and max(). The result of the key function call is saved so\r\n    that keys can be searched efficiently.\r\n\r\n    Instead of returning an insertion-point which can be hard to\r\n    interpret, the five find-methods return a specific item in the\r\n    sequence. They can scan for exact matches, the last item\r\n    less-than-or-equal to a key, or the first item greater-than-or-equal\r\n    to a key.\r\n\r\n    Once found, an item's ordinal position can be located with the\r\n    index() method. New items can be added with the insert() and\r\n    insert_right() methods.  Old items can be deleted with the remove()\r\n    method.\r\n\r\n    The usual sequence methods are provided to support indexing,\r\n    slicing, length lookup, clearing, copying, forward and reverse\r\n    iteration, contains checking, item counts, item removal, and a nice\r\n    looking repr.\r\n\r\n    Finding and indexing are O(log n) operations while iteration and\r\n    insertion are O(n).  The initial sort is O(n log n).\r\n\r\n    The key function is stored in the 'key' attibute for easy\r\n    introspection or so that you can assign a new key function\r\n    (triggering an automatic re-sort).\r\n\r\n    In short, the class was designed to handle all of the common use\r\n    cases for bisect but with a simpler API and support for key\r\n    functions.\r\n\r\n    >>> from pprint import pprint\r\n    >>> from operator import itemgetter\r\n\r\n    >>> s = SortedCollection(key=itemgetter(2))\r\n    >>> for record in [\r\n    ...         ('roger', 'young', 30),\r\n    ...         ('angela', 'jones', 28),\r\n    ...         ('bill', 'smith', 22),\r\n    ...         
('david', 'thomas', 32)]:\r\n    ...     s.insert(record)\r\n\r\n    >>> pprint(list(s))         # show records sorted by age\r\n    [('bill', 'smith', 22),\r\n     ('angela', 'jones', 28),\r\n     ('roger', 'young', 30),\r\n     ('david', 'thomas', 32)]\r\n\r\n    >>> s.find_le(29)           # find oldest person aged 29 or younger\r\n    ('angela', 'jones', 28)\r\n    >>> s.find_lt(28)           # find oldest person under 28\r\n    ('bill', 'smith', 22)\r\n    >>> s.find_gt(28)           # find youngest person over 28\r\n    ('roger', 'young', 30)\r\n\r\n    >>> r = s.find_ge(32)       # find youngest person aged 32 or older\r\n    >>> s.index(r)              # get the index of their record\r\n    3\r\n    >>> s[3]                    # fetch the record at that index\r\n    ('david', 'thomas', 32)\r\n\r\n    >>> s.key = itemgetter(0)   # now sort by first name\r\n    >>> pprint(list(s))\r\n    [('angela', 'jones', 28),\r\n     ('bill', 'smith', 22),\r\n     ('david', 'thomas', 32),\r\n     ('roger', 'young', 30)]\r\n\r\n    \"\"\"\r\n\r\n    def __init__(self, iterable=(), key=None):\r\n        self._given_key = key\r\n        key = (lambda x: x) if key is None else key\r\n        decorated = sorted((key(item), item) for item in iterable)\r\n        self._keys = [k for k, item in decorated]\r\n        self._items = [item for k, item in decorated]\r\n        self._key = key\r\n\r\n    def _getkey(self):\r\n        return self._key\r\n\r\n    def _setkey(self, key):\r\n        if key is not self._key:\r\n            self.__init__(self._items, key=key)\r\n\r\n    def _delkey(self):\r\n        self._setkey(None)\r\n\r\n    key = property(_getkey, _setkey, _delkey, 'key function')\r\n\r\n    def clear(self):\r\n        self.__init__([], self._key)\r\n\r\n    def copy(self):\r\n        return self.__class__(self, self._key)\r\n\r\n    def __len__(self):\r\n        return len(self._items)\r\n\r\n    def __getitem__(self, i):\r\n        return self._items[i]\r\n\r\n    def 
__iter__(self):\r\n        return iter(self._items)\r\n\r\n    def __reversed__(self):\r\n        return reversed(self._items)\r\n\r\n    def __repr__(self):\r\n        return '%s(%r, key=%s)' % (\r\n            self.__class__.__name__,\r\n            self._items,\r\n            getattr(self._given_key, '__name__', repr(self._given_key))\r\n        )\r\n\r\n    def __reduce__(self):\r\n        return self.__class__, (self._items, self._given_key)\r\n\r\n    def __contains__(self, item):\r\n        \"\"\"So if an item has its key value changed, you are not going to\r\n        be able to recover its value\r\n        \"\"\"\r\n        k = self._key(item)\r\n        i = bisect_left(self._keys, k)\r\n        j = bisect_right(self._keys, k)\r\n        return item in self._items[i:j]\r\n\r\n    def resort(self):\r\n        \"\"\"If all the key values are expected to have changed\r\n        dramatically, resort the items list, and regenerate the internal\r\n        representation\r\n\r\n        Note that this operation is not guaranteed to be stable, as it\r\n        depends on the ordering of a key, item pair, and the ordering of\r\n        the items is effectively arbitrary\r\n        \"\"\"\r\n        decorated = sorted((self.key(item), item) for item in self._items)\r\n        self._keys = [k for k, item in decorated]\r\n        self._items = [item for k, item in decorated]\r\n\r\n    def index(self, item):\r\n        \"\"\"Find the position of an item.  
Raise ValueError if not found.\"\"\"\r\n        k = self._key(item)\r\n        i = bisect_left(self._keys, k)\r\n        j = bisect_right(self._keys, k)\r\n        return self._items[i:j].index(item) + i\r\n\r\n    def count(self, item):\r\n        \"\"\"Return number of occurrences of item\"\"\"\r\n        k = self._key(item)\r\n        i = bisect_left(self._keys, k)\r\n        j = bisect_right(self._keys, k)\r\n        return self._items[i:j].count(item)\r\n\r\n    def insert(self, item):\r\n        \"\"\"Insert a new item.  If equal keys are found, add to the left\"\"\"\r\n        k = self._key(item)\r\n        i = bisect_left(self._keys, k)\r\n        self._keys.insert(i, k)\r\n        self._items.insert(i, item)\r\n\r\n    def insert_right(self, item):\r\n        \"\"\"Insert a new item.  If equal keys are found, add to the right\"\"\"\r\n        k = self._key(item)\r\n        i = bisect_right(self._keys, k)\r\n        self._keys.insert(i, k)\r\n        self._items.insert(i, item)\r\n\r\n    def remove(self, item):\r\n        \"\"\"Remove first occurence of item.\r\n\r\n        Raise ValueError if not found\r\n        \"\"\"\r\n        i = self.index(item)\r\n        del self._keys[i]\r\n        del self._items[i]\r\n\r\n    def pop(self):\r\n        \"\"\"returns the rightmost value (greatest key value)\"\"\"\r\n        del self._keys[-1]\r\n        return self._items.pop()\r\n\r\n    def consistent_pop(self):\r\n        \"\"\"returns the rightmost value (greatest key value) and checks\r\n        whether its cached key value is consistent with its current\r\n        cost.\r\n\r\n        returns:\r\n          value with greatest cached key\r\n          boolean: True if cached key is same as current key\r\n        \"\"\"\r\n        cached_key = self._keys.pop()\r\n        val = self._items.pop()\r\n        return val, self._key(val) == cached_key\r\n\r\n    def find(self, k):\r\n        \"\"\"Return first item with a key == k.\r\n        Will fail if the key 
value of k was changed since it was\r\n        inserted\r\n\r\n        Raise ValueError if not found.\r\n        \"\"\"\r\n        i = bisect_left(self._keys, k)\r\n        if i != len(self) and self._keys[i] == k:\r\n            return self._items[i]\r\n        raise ValueError('No item found with key equal to: %r' % (k, ))\r\n\r\n    def find_le(self, k):\r\n        \"\"\"Return last item with a key <= k.\r\n\r\n        Raise ValueError if not found.\r\n        \"\"\"\r\n        i = bisect_right(self._keys, k)\r\n        if i:\r\n            return self._items[i - 1]\r\n        raise ValueError('No item found with key at or below: %r' % (k, ))\r\n\r\n    def find_lt(self, k):\r\n        \"\"\"Return last item with a key < k.\r\n\r\n        Raise ValueError if not found.\r\n        \"\"\"\r\n        i = bisect_left(self._keys, k)\r\n        if i:\r\n            return self._items[i - 1]\r\n        raise ValueError('No item found with key below: %r' % (k, ))\r\n\r\n    def find_ge(self, k):\r\n        \"\"\"Return first item with a key >= equal to k.\r\n\r\n        Raise ValueError if not found\r\n        \"\"\"\r\n        i = bisect_left(self._keys, k)\r\n        if i != len(self):\r\n            return self._items[i]\r\n        raise ValueError('No item found with key at or above: %r' % (k, ))\r\n\r\n    def find_gt(self, k):\r\n        \"\"\"Return first item with a key > k.\r\n\r\n        Raise ValueError if not found\r\n        \"\"\"\r\n        i = bisect_right(self._keys, k)\r\n        if i != len(self):\r\n            return self._items[i]\r\n        raise ValueError('No item found with key above: %r' % (k, ))\r\n"
  },
  {
    "path": "od_mstar3/col_checker.cpp",
    "content": "#include \"col_checker.hpp\"\n#include \"col_set.hpp\"\n\nusing namespace mstar;\n\n// /**\n//  * Performs simple pebble motion on the graph collision checking\n//  *\n//  * @param c1 source\n//  * @param c2 target\n//  *\n//  * @return collision set of the edge\n//  */\n// template<class T>\n// ColSet simple_edge_check(const T &c1,\n// \t\t\t const T&c2){\n//   ColSet col;\n//   for (uint i = 0; i < c1.size(); i++){\n//     for (uint j = i; j < c1.size(); j++){\n//       if (c2[i] == c2[j] || (c1[i] == c2[j] && c1[j] == c2[i])){\n// \tadd_col_set_in_place({{i, j}}, col);\n//       }\n//     }\n//   }\n//   return col;\n// }\n\n/**\n * Iterator version\n */\ntemplate<class T>\nColSet simple_edge_check(T source_start, T source_end,\n\t\t\t T target_start, T target_end){\n  int size = source_end - source_start;\n  ColSet col;\n  for (uint i = 0; i < size; i++){\n    for (uint j = i + 1; j < size; j++){\n      if (*(target_start + i) == *(target_start + j) ||\n\t  (*(source_start + i) == *(target_start + j) &&\n\t   *(source_start + j) == *(target_start + i))){\n\tadd_col_set_in_place({{i, j}}, col);\n      }\n    }\n  }\n  return col;\n}\n\nColSet SimpleGraphColCheck::check_edge(const OdCoord &c1,\n\t\t\t\t       const OdCoord &c2,\n\t\t\t\t       const std::vector<int> ids) const{\n  if (c2.is_standard()){\n    return simple_edge_check(c1.coord.cbegin(), c1.coord.cend(),\n\t\t\t     c2.coord.cbegin(), c2.coord.cend());\n  }\n  // c2 is an intermediate vertex, so only check for collisions between\n  // robots with an assigned move in c2\n  int size = c2.move_tuple.size();\n  return simple_edge_check(c1.coord.cbegin(), c1.coord.cbegin() + size,\n\t\t\t   c2.move_tuple.cbegin(), c2.move_tuple.cend());\n}\n"
  },
  {
    "path": "od_mstar3/col_checker.hpp",
    "content": "#ifndef MSTAR_COL_CHECKER_H\n#define MSTAR_COL_CHECKER_H\n\n#include \"mstar_type_defs.hpp\"\n\nnamespace mstar{\n\n  class ColChecker{\n  public:\n    virtual ~ColChecker(){};\n    virtual ColSet check_edge(const OdCoord &c1, const OdCoord &c2,\n\t\t\t      const std::vector<int> ids) const = 0;\n  };\n\n  /**\n   * Collision checker for simple bidirected graphs, where no edges overlap\n   *\n   * I.e. for pebble motion on the graph where you only have to worry about\n   * robots swapping positions, and not about diagonals crossing.  Allows\n   * for rotations\n   */\n    class SimpleGraphColCheck: public ColChecker{\n    public:\n      /**\n       * Checks for collision while traversing the edge from c1 to c2\n       *\n       * Finds collisions both while traversing the edge and when at the\n       * goal configuration.\n       *\n       * @param c1 the source coordinate of the edge\n       * @param c2 the target coordinate of the edge\n       * @param ids list of global robot ids.  Necessary for heterogeneous\n       *            robots\n       *\n       * @return the collision set containing the colliding robots\n       */\n      ColSet check_edge(const OdCoord &c1, const OdCoord &c2,\n\t\t\tconst std::vector<int> ids) const;\n  };\n};\n\n#endif\n"
  },
  {
    "path": "od_mstar3/col_set.hpp",
    "content": "#ifndef MSTAR_COL_SET_H\n#define MSTAR_COL_SET_H\n\n#include <algorithm>\n\n/***********************************************************************\n * Provides logic for combining collision sets\n *\n * Assumes that a collision set is of form T<T<int>> where T are\n * collections and the inner collection is sorted\n **********************************************************************/\n\nnamespace mstar{\n  /**\n   * tests if two sets are disjoint\n   * \n   * Currently doesnt try to leverage sorted.  Empty sets will always be\n   * treated as disjoint\n   *\n   * @param s1, s2 The sets to check\n   *\n   * @return True if disjoint, else false\n   */\n  template <class T> bool is_disjoint(const T &s1, const T &s2){\n    for (auto i = s1.cbegin(); i != s1.cend(); ++i){\n      for (auto j = s2.cbegin(); j != s2.cend(); ++j){\n\tif (*i == *j){\n\t  return false;\n\t}\n      }\n    }\n    return true;\n  };\n\n  /**\n   * Tests if s1 is a superset of s2\n   *\n   * Uses == to compare elements.  Does not leverage sorted values\n   *\n   * @param s1 potential superset\n   * @param s2 potential subset\n   *\n   * @return True if s1 is a superset of s2, otherwise false\n   */\n  template <class T> bool is_superset(const T &s1, const T &s2){\n    for (auto j = s2.cbegin(); j != s2.cend(); ++j){\n      bool included = false;\n      for (auto i = s1.cbegin(); i != s1.cend(); ++i){\n\tif (*i == *j){\n\t  included = true;\n\t  break;\n\t}\n      }\n      if (!included){\n\treturn false;\n      }\n    }\n    return true;\n  };\n\n  /**\n   * specialization of is_superset that exploits sorted values\n   */\n  template <class T, class... extra>\n  bool is_superset(const std::set<T, extra...> &s1,\n\t\t   const std::set<T, extra...> &s2){\n    return std::includes(s1.cbegin(), s1.cend(), s2.cbegin(), s2.cend());\n  }\n\n  /**\n   * Merges two sorted sets\n   *\n   * Elements of the set must be sorted.  
Container of the sets must be\n   * resizeable for output\n   *\n   */\n  template <class T> T merge(const T &s1, const T &s2){\n    T out(s1.size() + s2.size());\n    auto it = std::set_union(s1.begin(), s1.end(), s2.begin(), s2.end(),\n\t\t\t     out.begin());\n    out.resize(it - out.begin());\n    return out;\n  }\n\n  template <class T, class... extra>\n  std::set<T, extra...> merge(std::set<T, extra...> s1,\n\t\t\t      const std::set<T, extra...> &s2){\n    s1.insert(s2.cbegin(), s2.cend());\n    return s1;\n  }\n\n  /**\n   * Adds c1 to c2\n   *\n   * Mutates c2\n   *\n   * @param c1 collision set 1\n   * @param c2 collision set 2\n   *\n   * @return true if c2 is changed, else false\n   */\n  template <class T, template<class, class...> class TT, class... args>\n  bool add_col_set_in_place(TT<T, args...> c1, TT<T, args...> &c2){\n    bool changed = false;\n    // TODO: This could be more efficient\n    while (c1.size() > 0){\n      int i = 0;\n      // whether c1[-1] overlaps any element of c2\n      bool found_overlap = false;\n      while (i < c2.size()){\n  \tif (!is_disjoint(c2[i], c1.back())) {\n  \t  // found overlap\n  \t  if (is_superset(c2[i], c1.back())){\n  \t      // current element in c1 contained by the element in c2, so\n  \t      // the c1 element can be dropped\n  \t      c1.pop_back();\n  \t      found_overlap = true;\n  \t      break;\n  \t    }\n  \t  // Non-trivial overlap.  
Need to add the union of the current\n  \t  // elements back to c1 to check if there is any further overlap\n  \t  // with elements of c2\n\t  \n\t  // Could just merge in place, but doubt it really matters\n\t  c1.back().insert(c2[i].cbegin(), c2[i].cend());\n  \t  c2.erase(c2.begin() + i);\n\t  found_overlap = true;\n\t  changed = true;\n\t  break;\n\t} else{\n\t  // no overlap between c1[-1] and c2[i], so check next element\n\t  // of c2\n\t  ++i;\n\t}\n      }\n      if (!found_overlap){\n\t// no overlap between c1[-1] and all elements of c2, so can\n\t// be added to c2 (although this will force checks against\n\tc2.push_back(c1.back());\n\tc1.pop_back();\n\tchanged = true;\n      }\n    }\n    return changed;\n  }\n\n  /**\n   * Adds two collision sets, c1, c2\n   *\n   * The template monstrosity is necessary because std::vectors require two\n   * parameters of which we care about one (the type), and the other is the\n   * allocator.  Other containers may require more\n   *\n   * @param c1 collision set 1\n   * @param c2 collision set 2\n   *\n   * @return A new collision set formed by adding c1 and c2\n   */\n  template <class T, template<class, class...> class TT, class... args>\n  TT<T, args...> add_col_set(TT<T, args...> c1, TT<T, args...> c2){\n    add_col_set_in_place(c1, c2);\n    return c2;\n  }\n\n  /**\n   * Computes the collision set used for expansion\n   *\n   * Based the generating collision set of a vertex, which is the collision\n   * set of the vertex's predecessor when the predecessor was expanded.  It\n   * is useful as it specifies which partial solutions have been cached.\n   * For example, if the generating collision set is {{1, 2}}, then a\n   * subplanner already knows how to get robots 1 and 2 to the goal, and it\n   * is more efficient to directly query that subplanner, rather than set the\n   * collision set to be empty.\n   *\n   * However, you have to account for new collisions, as stored in the\n   * vertex's collision set.  
If a collision set element is a subset of an\n   * element of the generating collision set, use the element form the\n   * generating collision set.  If a generating collision set element has\n   * a non-empty intersection with a element of the collision set that is\n   * not a subset, don't use that generating collision set element\n   *\n   * @param col_set the collision set of the vertex\n   * @param gen_set the generating collision set of the vertex\n   *\n   * @return A new collision set to use when expanding the vertex\n   */\n  template <class T, template<class, class...> class TT, class... args>\n  TT<T, args...> col_set_to_expand(TT<T, args...> col_set,\n\t\t\t\t   TT<T, args...> gen_set){\n    TT<T, args...> ret;\n    while(gen_set.size() > 0){\n      // Check the last element of the generating collision set.  Either it\n      // can be used, or there is a non-superset intersection, and it must\n      // be removed\n\n      // Need to keep any elements of the collision set that are subsets\n      // of the generating collision set element, as a later element of the\n      // collision set may invalidate the generating collision set element\n      TT<T, args...> elements_to_remove;\n\n      uint i = 0;\n\n      bool gen_set_elem_valid = true;\n      while (i < col_set.size()){\n\tif (is_superset(gen_set.back(), col_set[i])){\n\t  elements_to_remove.push_back(col_set[i]);\n\t  col_set.erase(col_set.begin() + i);\n\t} else if (!is_disjoint(gen_set.back(), col_set[i])){\n\t  // generating collision set element has a non-empty intersection\n\t  // with a collision set element that is not a sub-set, so is\n\t  // invalid\n\t  gen_set.pop_back();\n\t  // Need to return any collision set elements that were removed as\n\t  // being subsets of gen_set.back\n\t  col_set.insert(col_set.end(), elements_to_remove.begin(),\n\t\t\t elements_to_remove.end());\n\t  gen_set_elem_valid = false;\n\t  break;\n\t} else{\n\t  i += 1;\n\t}\n      }\n      if 
(gen_set_elem_valid){\n\tret.push_back(gen_set.back());\n\tgen_set.pop_back();\n      }\n    }\n    // Any remaining collision set elements were not contained by any element\n    // of the generating collision set, so should be used directly\n    ret.insert(ret.end(), col_set.begin(), col_set.end());\n    return ret;\n  };\n   \n}\n\n#endif\n"
  },
  {
    "path": "od_mstar3/col_set_addition.py",
    "content": "\"\"\"Encapsulates the basic collision set addition functions, so they can\r\nbe accessible to any code that uses it\r\n\r\nAlso provides exceptions for indicating no solution or out of time\r\n\"\"\"\r\n\r\n\r\ndef add_col_set_recursive(c1, c2):\r\n    \"\"\"Returns a new collision set resulting from adding c1 to c2.  No\r\n    side effecting\r\n\r\n    collision set is done for the recursive case, where\r\n    ({1, 2}, ) + ({3, 4}, ) = ({1, 2}, {3, 4})\r\n\r\n    c1, c2 - tuples of (immutable) sets\r\n\r\n    returns:\r\n    recursive collision set containing c1 and c2\r\n\r\n    \"\"\"\r\n    # Make shallow copies\r\n    c1 = list(c1)\r\n    c2 = list(c2)\r\n    while len(c1) > 0:\r\n        i = 0\r\n        # Whether c1[-1] overlaps with any element of c2\r\n        found_overlap = False\r\n        while i < len(c2):\r\n            if not c2[i].isdisjoint(c1[-1]):\r\n                # Found overlap\r\n                if c2[i].issuperset(c1[-1]):\r\n                    # No change in c2\r\n                    c1.pop()\r\n                    found_overlap = True\r\n                    break\r\n                # Have found a non-trivial overlap.  Need to add the\r\n                # union to  c1 so that we can check if the union has any\r\n                # further overlap with elements of c2\r\n                temp = c2.pop(i)\r\n                # replace c2[i] with the union of c2[i] and c1[-1]\r\n                c1.append(temp.union(c1.pop()))\r\n                found_overlap = True\r\n                break\r\n            else:\r\n                # No overlap between c1[-1] and c2[i], so check next\r\n                # element of c2\r\n                i += 1\r\n        if not found_overlap:\r\n            # c1[-1] has no overlap with any element of c2, so it can be\r\n            # added as is to c2\r\n            c2.append(c1.pop())\r\n    return tuple(c2)\r\n\r\n\r\ndef add_col_set(c1, c2):\r\n    \"\"\"Adds the collision sets c1 to c2.  
c2 is assumed to contain a\r\n    single,\r\n    possibly empty, set\r\n\r\n    c1, c2 - input collision sets\r\n\r\n    returns:\r\n    combined collision set containing c1 and c2\r\n\r\n    \"\"\"\r\n    temp = frozenset([])\r\n    if len(c2) >= 1:\r\n        temp = c2[0]\r\n        assert len(c2) == 1\r\n    for i in c1:\r\n        temp = temp.union(i)\r\n    if len(temp) == 0:\r\n        return ()\r\n    return (temp, )\r\n\r\n\r\ndef col_set_add(c1, c2, recursive):\r\n    \"\"\"Adds two collision sets\r\n\r\n    c1, c2     - input collision sets\r\n    recursive - boolean, whether to perform recursive M* style addition\r\n\r\n    returns:\r\n    collision set containing c1 and c2\r\n\r\n    \"\"\"\r\n    if recursive:\r\n        return add_col_set_recursive(c1, c2)\r\n    else:\r\n        return add_col_set(c1, c2)\r\n\r\n\r\ndef effective_col_set(col_set, prev_col_set):\r\n    \"\"\"Computes the effective collision set to use given the current\r\n    collision set and the collision set used to get to the current node\r\n\r\n    Only makes sense when used with recursive M*\r\n\r\n    The purpose of this code is that in recursive M*, you invoke a\r\n    subplanner to figure out how to get to the goal, which caches the\r\n    entire path to the goal .  The next step, you have an empty\r\n    collision set, so you don't query the subplanner with the cached\r\n    path, and have to find a bunch of collisions before using the cached\r\n    solution.  This is intended for use with a memory of what the\r\n    collision set was when you reached a given node.\r\n\r\n    Computes the \"effecitve collision set\".  Elements of the memorized\r\n    collision set are used if they have no non-empty intersections with\r\n    elements of the current collision set that are not subsets of the\r\n    memorized component.\r\n\r\n    elements of col_set are NOT used if they are contained within some\r\n    element of prev_col_set that is used.  
Elements of prev_col_set are\r\n    used if they completely contain all elements of col_set with which\r\n    they intersect\r\n\r\n    col_set      - current collision set\r\n    prev_col_set - \"memorized\" collision set, i.e. the collision set of\r\n                   the optimal predecessor at the time the path from the\r\n                   optimal predecessor was first found\r\n\r\n    returns:\r\n    effective collision set.  Consists of the elements of the previous\r\n    collision set, which should index subplanners which have cached\r\n    paths available, and elements of the current collision set which\r\n    are not contained within prev_col_set\r\n    \"\"\"\r\n    effective_set = []\r\n    prev_col_set = list(prev_col_set)\r\n    col_set = list(col_set)\r\n    while(len(prev_col_set) > 0):\r\n        # Need to keep around the elements of col_set that won't be\r\n        # used, because the containing element of prev_col_set may be\r\n        # invalidated by a later element of col_set\r\n        col_set_to_remove = []\r\n        j = 0\r\n        while (j < len(col_set)):\r\n            if col_set[j].issubset(prev_col_set[-1]):\r\n                # this element is contained in prev_col_set, so can be\r\n                # skipped unless prev_col_set-1] is invalidated by some\r\n                # later element of col_set\r\n                col_set_to_remove.append(col_set.pop(j))\r\n            elif not col_set[j].isdisjoint(prev_col_set[-1]):\r\n                # this element partially overlaps prev_col_set,\r\n                # invalidating it, so cannot use this element of\r\n                # prev_col_set\r\n                prev_col_set.pop()\r\n                # return the elements of col_set we were going to remove\r\n                col_set.extend(col_set_to_remove)\r\n                break\r\n            else:\r\n                j += 1\r\n        else:\r\n            # Never broke, so prev_col_set can be used as part of the\r\n            # 
effective collision set\r\n            effective_set.append(prev_col_set.pop())\r\n    # Just copy over any elements of col_set that survived\r\n    effective_set.extend(col_set)\r\n    return tuple(effective_set)\r\n\r\n\r\nclass OutOfTimeError(Exception):\r\n    def __init__(self, value=None):\r\n        self.value = value\r\n\r\n    def __str__(self):\r\n        return repr(self.value)\r\n\r\n\r\nclass NoSolutionError(Exception):\r\n    def __init__(self, value=None):\r\n        self.value = value\r\n\r\n    def __str__(self):\r\n        return repr(self.value)\r\n\r\n\r\nclass OutOfScopeError(NoSolutionError):\r\n    def __init__(self, value=None, col_set=()):\r\n        self.value = value\r\n        self.col_set = col_set\r\n\r\n    def __str__(self):\r\n        return repr(self.value)\r\n"
  },
  {
    "path": "od_mstar3/cython_od_mstar.cpp",
    "content": "/* Generated by Cython 0.29.21 */\n\n/* BEGIN: Cython Metadata\n{\n    \"distutils\": {\n        \"depends\": [\n            \"grid_planning.hpp\"\n        ],\n        \"extra_compile_args\": [\n            \"-std=c++11\"\n        ],\n        \"language\": \"c++\",\n        \"name\": \"cpp_mstar\",\n        \"sources\": [\n            \"cython_od_mstar.pyx\",\n            \"policy.cpp\",\n            \"col_checker.cpp\",\n            \"od_mstar.cpp\",\n            \"grid_policy.cpp\",\n            \"grid_planning.cpp\"\n        ]\n    },\n    \"module_name\": \"cpp_mstar\"\n}\nEND: Cython Metadata */\n\n#define PY_SSIZE_T_CLEAN\n#include \"Python.h\"\n#ifndef Py_PYTHON_H\n    #error Python headers needed to compile C extensions, please install development version of Python.\n#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000)\n    #error Cython requires Python 2.6+ or Python 3.3+.\n#else\n#define CYTHON_ABI \"0_29_21\"\n#define CYTHON_HEX_VERSION 0x001D15F0\n#define CYTHON_FUTURE_DIVISION 0\n#include <stddef.h>\n#ifndef offsetof\n  #define offsetof(type, member) ( (size_t) & ((type*)0) -> member )\n#endif\n#if !defined(WIN32) && !defined(MS_WINDOWS)\n  #ifndef __stdcall\n    #define __stdcall\n  #endif\n  #ifndef __cdecl\n    #define __cdecl\n  #endif\n  #ifndef __fastcall\n    #define __fastcall\n  #endif\n#endif\n#ifndef DL_IMPORT\n  #define DL_IMPORT(t) t\n#endif\n#ifndef DL_EXPORT\n  #define DL_EXPORT(t) t\n#endif\n#define __PYX_COMMA ,\n#ifndef HAVE_LONG_LONG\n  #if PY_VERSION_HEX >= 0x02070000\n    #define HAVE_LONG_LONG\n  #endif\n#endif\n#ifndef PY_LONG_LONG\n  #define PY_LONG_LONG LONG_LONG\n#endif\n#ifndef Py_HUGE_VAL\n  #define Py_HUGE_VAL HUGE_VAL\n#endif\n#ifdef PYPY_VERSION\n  #define CYTHON_COMPILING_IN_PYPY 1\n  #define CYTHON_COMPILING_IN_PYSTON 0\n  #define CYTHON_COMPILING_IN_CPYTHON 0\n  #undef CYTHON_USE_TYPE_SLOTS\n  #define CYTHON_USE_TYPE_SLOTS 0\n  #undef 
CYTHON_USE_PYTYPE_LOOKUP\n  #define CYTHON_USE_PYTYPE_LOOKUP 0\n  #if PY_VERSION_HEX < 0x03050000\n    #undef CYTHON_USE_ASYNC_SLOTS\n    #define CYTHON_USE_ASYNC_SLOTS 0\n  #elif !defined(CYTHON_USE_ASYNC_SLOTS)\n    #define CYTHON_USE_ASYNC_SLOTS 1\n  #endif\n  #undef CYTHON_USE_PYLIST_INTERNALS\n  #define CYTHON_USE_PYLIST_INTERNALS 0\n  #undef CYTHON_USE_UNICODE_INTERNALS\n  #define CYTHON_USE_UNICODE_INTERNALS 0\n  #undef CYTHON_USE_UNICODE_WRITER\n  #define CYTHON_USE_UNICODE_WRITER 0\n  #undef CYTHON_USE_PYLONG_INTERNALS\n  #define CYTHON_USE_PYLONG_INTERNALS 0\n  #undef CYTHON_AVOID_BORROWED_REFS\n  #define CYTHON_AVOID_BORROWED_REFS 1\n  #undef CYTHON_ASSUME_SAFE_MACROS\n  #define CYTHON_ASSUME_SAFE_MACROS 0\n  #undef CYTHON_UNPACK_METHODS\n  #define CYTHON_UNPACK_METHODS 0\n  #undef CYTHON_FAST_THREAD_STATE\n  #define CYTHON_FAST_THREAD_STATE 0\n  #undef CYTHON_FAST_PYCALL\n  #define CYTHON_FAST_PYCALL 0\n  #undef CYTHON_PEP489_MULTI_PHASE_INIT\n  #define CYTHON_PEP489_MULTI_PHASE_INIT 0\n  #undef CYTHON_USE_TP_FINALIZE\n  #define CYTHON_USE_TP_FINALIZE 0\n  #undef CYTHON_USE_DICT_VERSIONS\n  #define CYTHON_USE_DICT_VERSIONS 0\n  #undef CYTHON_USE_EXC_INFO_STACK\n  #define CYTHON_USE_EXC_INFO_STACK 0\n#elif defined(PYSTON_VERSION)\n  #define CYTHON_COMPILING_IN_PYPY 0\n  #define CYTHON_COMPILING_IN_PYSTON 1\n  #define CYTHON_COMPILING_IN_CPYTHON 0\n  #ifndef CYTHON_USE_TYPE_SLOTS\n    #define CYTHON_USE_TYPE_SLOTS 1\n  #endif\n  #undef CYTHON_USE_PYTYPE_LOOKUP\n  #define CYTHON_USE_PYTYPE_LOOKUP 0\n  #undef CYTHON_USE_ASYNC_SLOTS\n  #define CYTHON_USE_ASYNC_SLOTS 0\n  #undef CYTHON_USE_PYLIST_INTERNALS\n  #define CYTHON_USE_PYLIST_INTERNALS 0\n  #ifndef CYTHON_USE_UNICODE_INTERNALS\n    #define CYTHON_USE_UNICODE_INTERNALS 1\n  #endif\n  #undef CYTHON_USE_UNICODE_WRITER\n  #define CYTHON_USE_UNICODE_WRITER 0\n  #undef CYTHON_USE_PYLONG_INTERNALS\n  #define CYTHON_USE_PYLONG_INTERNALS 0\n  #ifndef CYTHON_AVOID_BORROWED_REFS\n    #define 
CYTHON_AVOID_BORROWED_REFS 0\n  #endif\n  #ifndef CYTHON_ASSUME_SAFE_MACROS\n    #define CYTHON_ASSUME_SAFE_MACROS 1\n  #endif\n  #ifndef CYTHON_UNPACK_METHODS\n    #define CYTHON_UNPACK_METHODS 1\n  #endif\n  #undef CYTHON_FAST_THREAD_STATE\n  #define CYTHON_FAST_THREAD_STATE 0\n  #undef CYTHON_FAST_PYCALL\n  #define CYTHON_FAST_PYCALL 0\n  #undef CYTHON_PEP489_MULTI_PHASE_INIT\n  #define CYTHON_PEP489_MULTI_PHASE_INIT 0\n  #undef CYTHON_USE_TP_FINALIZE\n  #define CYTHON_USE_TP_FINALIZE 0\n  #undef CYTHON_USE_DICT_VERSIONS\n  #define CYTHON_USE_DICT_VERSIONS 0\n  #undef CYTHON_USE_EXC_INFO_STACK\n  #define CYTHON_USE_EXC_INFO_STACK 0\n#else\n  #define CYTHON_COMPILING_IN_PYPY 0\n  #define CYTHON_COMPILING_IN_PYSTON 0\n  #define CYTHON_COMPILING_IN_CPYTHON 1\n  #ifndef CYTHON_USE_TYPE_SLOTS\n    #define CYTHON_USE_TYPE_SLOTS 1\n  #endif\n  #if PY_VERSION_HEX < 0x02070000\n    #undef CYTHON_USE_PYTYPE_LOOKUP\n    #define CYTHON_USE_PYTYPE_LOOKUP 0\n  #elif !defined(CYTHON_USE_PYTYPE_LOOKUP)\n    #define CYTHON_USE_PYTYPE_LOOKUP 1\n  #endif\n  #if PY_MAJOR_VERSION < 3\n    #undef CYTHON_USE_ASYNC_SLOTS\n    #define CYTHON_USE_ASYNC_SLOTS 0\n  #elif !defined(CYTHON_USE_ASYNC_SLOTS)\n    #define CYTHON_USE_ASYNC_SLOTS 1\n  #endif\n  #if PY_VERSION_HEX < 0x02070000\n    #undef CYTHON_USE_PYLONG_INTERNALS\n    #define CYTHON_USE_PYLONG_INTERNALS 0\n  #elif !defined(CYTHON_USE_PYLONG_INTERNALS)\n    #define CYTHON_USE_PYLONG_INTERNALS 1\n  #endif\n  #ifndef CYTHON_USE_PYLIST_INTERNALS\n    #define CYTHON_USE_PYLIST_INTERNALS 1\n  #endif\n  #ifndef CYTHON_USE_UNICODE_INTERNALS\n    #define CYTHON_USE_UNICODE_INTERNALS 1\n  #endif\n  #if PY_VERSION_HEX < 0x030300F0\n    #undef CYTHON_USE_UNICODE_WRITER\n    #define CYTHON_USE_UNICODE_WRITER 0\n  #elif !defined(CYTHON_USE_UNICODE_WRITER)\n    #define CYTHON_USE_UNICODE_WRITER 1\n  #endif\n  #ifndef CYTHON_AVOID_BORROWED_REFS\n    #define CYTHON_AVOID_BORROWED_REFS 0\n  #endif\n  #ifndef CYTHON_ASSUME_SAFE_MACROS\n    #define 
CYTHON_ASSUME_SAFE_MACROS 1\n  #endif\n  #ifndef CYTHON_UNPACK_METHODS\n    #define CYTHON_UNPACK_METHODS 1\n  #endif\n  #ifndef CYTHON_FAST_THREAD_STATE\n    #define CYTHON_FAST_THREAD_STATE 1\n  #endif\n  #ifndef CYTHON_FAST_PYCALL\n    #define CYTHON_FAST_PYCALL 1\n  #endif\n  #ifndef CYTHON_PEP489_MULTI_PHASE_INIT\n    #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000)\n  #endif\n  #ifndef CYTHON_USE_TP_FINALIZE\n    #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1)\n  #endif\n  #ifndef CYTHON_USE_DICT_VERSIONS\n    #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1)\n  #endif\n  #ifndef CYTHON_USE_EXC_INFO_STACK\n    #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3)\n  #endif\n#endif\n#if !defined(CYTHON_FAST_PYCCALL)\n#define CYTHON_FAST_PYCCALL  (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1)\n#endif\n#if CYTHON_USE_PYLONG_INTERNALS\n  #include \"longintrepr.h\"\n  #undef SHIFT\n  #undef BASE\n  #undef MASK\n  #ifdef SIZEOF_VOID_P\n    enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) };\n  #endif\n#endif\n#ifndef __has_attribute\n  #define __has_attribute(x) 0\n#endif\n#ifndef __has_cpp_attribute\n  #define __has_cpp_attribute(x) 0\n#endif\n#ifndef CYTHON_RESTRICT\n  #if defined(__GNUC__)\n    #define CYTHON_RESTRICT __restrict__\n  #elif defined(_MSC_VER) && _MSC_VER >= 1400\n    #define CYTHON_RESTRICT __restrict\n  #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L\n    #define CYTHON_RESTRICT restrict\n  #else\n    #define CYTHON_RESTRICT\n  #endif\n#endif\n#ifndef CYTHON_UNUSED\n# if defined(__GNUC__)\n#   if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))\n#     define CYTHON_UNUSED __attribute__ ((__unused__))\n#   else\n#     define CYTHON_UNUSED\n#   endif\n# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))\n#   define CYTHON_UNUSED __attribute__ ((__unused__))\n# else\n#   define 
CYTHON_UNUSED\n# endif\n#endif\n#ifndef CYTHON_MAYBE_UNUSED_VAR\n#  if defined(__cplusplus)\n     template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { }\n#  else\n#    define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x)\n#  endif\n#endif\n#ifndef CYTHON_NCP_UNUSED\n# if CYTHON_COMPILING_IN_CPYTHON\n#  define CYTHON_NCP_UNUSED\n# else\n#  define CYTHON_NCP_UNUSED CYTHON_UNUSED\n# endif\n#endif\n#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)\n#ifdef _MSC_VER\n    #ifndef _MSC_STDINT_H_\n        #if _MSC_VER < 1300\n           typedef unsigned char     uint8_t;\n           typedef unsigned int      uint32_t;\n        #else\n           typedef unsigned __int8   uint8_t;\n           typedef unsigned __int32  uint32_t;\n        #endif\n    #endif\n#else\n   #include <stdint.h>\n#endif\n#ifndef CYTHON_FALLTHROUGH\n  #if defined(__cplusplus) && __cplusplus >= 201103L\n    #if __has_cpp_attribute(fallthrough)\n      #define CYTHON_FALLTHROUGH [[fallthrough]]\n    #elif __has_cpp_attribute(clang::fallthrough)\n      #define CYTHON_FALLTHROUGH [[clang::fallthrough]]\n    #elif __has_cpp_attribute(gnu::fallthrough)\n      #define CYTHON_FALLTHROUGH [[gnu::fallthrough]]\n    #endif\n  #endif\n  #ifndef CYTHON_FALLTHROUGH\n    #if __has_attribute(fallthrough)\n      #define CYTHON_FALLTHROUGH __attribute__((fallthrough))\n    #else\n      #define CYTHON_FALLTHROUGH\n    #endif\n  #endif\n  #if defined(__clang__ ) && defined(__apple_build_version__)\n    #if __apple_build_version__ < 7000000\n      #undef  CYTHON_FALLTHROUGH\n      #define CYTHON_FALLTHROUGH\n    #endif\n  #endif\n#endif\n\n#ifndef __cplusplus\n  #error \"Cython files generated with the C++ option must be compiled with a C++ compiler.\"\n#endif\n#ifndef CYTHON_INLINE\n  #if defined(__clang__)\n    #define CYTHON_INLINE __inline__ __attribute__ ((__unused__))\n  #else\n    #define CYTHON_INLINE inline\n  #endif\n#endif\ntemplate<typename T>\nvoid __Pyx_call_destructor(T& 
x) {\n    x.~T();\n}\ntemplate<typename T>\nclass __Pyx_FakeReference {\n  public:\n    __Pyx_FakeReference() : ptr(NULL) { }\n    __Pyx_FakeReference(const T& ref) : ptr(const_cast<T*>(&ref)) { }\n    T *operator->() { return ptr; }\n    T *operator&() { return ptr; }\n    operator T&() { return *ptr; }\n    template<typename U> bool operator ==(U other) { return *ptr == other; }\n    template<typename U> bool operator !=(U other) { return *ptr != other; }\n  private:\n    T *ptr;\n};\n\n#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)\n  #define Py_OptimizeFlag 0\n#endif\n#define __PYX_BUILD_PY_SSIZE_T \"n\"\n#define CYTHON_FORMAT_SSIZE_T \"z\"\n#if PY_MAJOR_VERSION < 3\n  #define __Pyx_BUILTIN_MODULE_NAME \"__builtin__\"\n  #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\\\n          PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\n  #define __Pyx_DefaultClassType PyClass_Type\n#else\n  #define __Pyx_BUILTIN_MODULE_NAME \"builtins\"\n#if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2\n  #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\\\n          PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\n#else\n  #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\\\n          PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\n#endif\n  #define __Pyx_DefaultClassType PyType_Type\n#endif\n#ifndef Py_TPFLAGS_CHECKTYPES\n  #define Py_TPFLAGS_CHECKTYPES 0\n#endif\n#ifndef Py_TPFLAGS_HAVE_INDEX\n  #define Py_TPFLAGS_HAVE_INDEX 0\n#endif\n#ifndef Py_TPFLAGS_HAVE_NEWBUFFER\n  #define Py_TPFLAGS_HAVE_NEWBUFFER 0\n#endif\n#ifndef Py_TPFLAGS_HAVE_FINALIZE\n  #define Py_TPFLAGS_HAVE_FINALIZE 0\n#endif\n#ifndef METH_STACKLESS\n  #define METH_STACKLESS 0\n#endif\n#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL)\n  #ifndef 
METH_FASTCALL\n     #define METH_FASTCALL 0x80\n  #endif\n  typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs);\n  typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args,\n                                                          Py_ssize_t nargs, PyObject *kwnames);\n#else\n  #define __Pyx_PyCFunctionFast _PyCFunctionFast\n  #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords\n#endif\n#if CYTHON_FAST_PYCCALL\n#define __Pyx_PyFastCFunction_Check(func)\\\n    ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)))))\n#else\n#define __Pyx_PyFastCFunction_Check(func) 0\n#endif\n#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)\n  #define PyObject_Malloc(s)   PyMem_Malloc(s)\n  #define PyObject_Free(p)     PyMem_Free(p)\n  #define PyObject_Realloc(p)  PyMem_Realloc(p)\n#endif\n#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1\n  #define PyMem_RawMalloc(n)           PyMem_Malloc(n)\n  #define PyMem_RawRealloc(p, n)       PyMem_Realloc(p, n)\n  #define PyMem_RawFree(p)             PyMem_Free(p)\n#endif\n#if CYTHON_COMPILING_IN_PYSTON\n  #define __Pyx_PyCode_HasFreeVars(co)  PyCode_HasFreeVars(co)\n  #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno)\n#else\n  #define __Pyx_PyCode_HasFreeVars(co)  (PyCode_GetNumFree(co) > 0)\n  #define __Pyx_PyFrame_SetLineNumber(frame, lineno)  (frame)->f_lineno = (lineno)\n#endif\n#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000\n  #define __Pyx_PyThreadState_Current PyThreadState_GET()\n#elif PY_VERSION_HEX >= 0x03060000\n  #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet()\n#elif PY_VERSION_HEX >= 0x03000000\n  #define __Pyx_PyThreadState_Current PyThreadState_GET()\n#else\n  #define __Pyx_PyThreadState_Current _PyThreadState_Current\n#endif\n#if 
PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT)\n#include \"pythread.h\"\n#define Py_tss_NEEDS_INIT 0\ntypedef int Py_tss_t;\nstatic CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) {\n  *key = PyThread_create_key();\n  return 0;\n}\nstatic CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) {\n  Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t));\n  *key = Py_tss_NEEDS_INIT;\n  return key;\n}\nstatic CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) {\n  PyObject_Free(key);\n}\nstatic CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) {\n  return *key != Py_tss_NEEDS_INIT;\n}\nstatic CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) {\n  PyThread_delete_key(*key);\n  *key = Py_tss_NEEDS_INIT;\n}\nstatic CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) {\n  return PyThread_set_key_value(*key, value);\n}\nstatic CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) {\n  return PyThread_get_key_value(*key);\n}\n#endif\n#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized)\n#define __Pyx_PyDict_NewPresized(n)  ((n <= 8) ? 
PyDict_New() : _PyDict_NewPresized(n))\n#else\n#define __Pyx_PyDict_NewPresized(n)  PyDict_New()\n#endif\n#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION\n  #define __Pyx_PyNumber_Divide(x,y)         PyNumber_TrueDivide(x,y)\n  #define __Pyx_PyNumber_InPlaceDivide(x,y)  PyNumber_InPlaceTrueDivide(x,y)\n#else\n  #define __Pyx_PyNumber_Divide(x,y)         PyNumber_Divide(x,y)\n  #define __Pyx_PyNumber_InPlaceDivide(x,y)  PyNumber_InPlaceDivide(x,y)\n#endif\n#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS\n#define __Pyx_PyDict_GetItemStr(dict, name)  _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash)\n#else\n#define __Pyx_PyDict_GetItemStr(dict, name)  PyDict_GetItem(dict, name)\n#endif\n#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)\n  #define CYTHON_PEP393_ENABLED 1\n  #define __Pyx_PyUnicode_READY(op)       (likely(PyUnicode_IS_READY(op)) ?\\\n                                              0 : _PyUnicode_Ready((PyObject *)(op)))\n  #define __Pyx_PyUnicode_GET_LENGTH(u)   PyUnicode_GET_LENGTH(u)\n  #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)\n  #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u)   PyUnicode_MAX_CHAR_VALUE(u)\n  #define __Pyx_PyUnicode_KIND(u)         PyUnicode_KIND(u)\n  #define __Pyx_PyUnicode_DATA(u)         PyUnicode_DATA(u)\n  #define __Pyx_PyUnicode_READ(k, d, i)   PyUnicode_READ(k, d, i)\n  #define __Pyx_PyUnicode_WRITE(k, d, i, ch)  PyUnicode_WRITE(k, d, i, ch)\n  #if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE)\n  #define __Pyx_PyUnicode_IS_TRUE(u)      (0 != (likely(PyUnicode_IS_READY(u)) ? 
PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u)))\n  #else\n  #define __Pyx_PyUnicode_IS_TRUE(u)      (0 != PyUnicode_GET_LENGTH(u))\n  #endif\n#else\n  #define CYTHON_PEP393_ENABLED 0\n  #define PyUnicode_1BYTE_KIND  1\n  #define PyUnicode_2BYTE_KIND  2\n  #define PyUnicode_4BYTE_KIND  4\n  #define __Pyx_PyUnicode_READY(op)       (0)\n  #define __Pyx_PyUnicode_GET_LENGTH(u)   PyUnicode_GET_SIZE(u)\n  #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))\n  #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u)   ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111)\n  #define __Pyx_PyUnicode_KIND(u)         (sizeof(Py_UNICODE))\n  #define __Pyx_PyUnicode_DATA(u)         ((void*)PyUnicode_AS_UNICODE(u))\n  #define __Pyx_PyUnicode_READ(k, d, i)   ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))\n  #define __Pyx_PyUnicode_WRITE(k, d, i, ch)  (((void)(k)), ((Py_UNICODE*)d)[i] = ch)\n  #define __Pyx_PyUnicode_IS_TRUE(u)      (0 != PyUnicode_GET_SIZE(u))\n#endif\n#if CYTHON_COMPILING_IN_PYPY\n  #define __Pyx_PyUnicode_Concat(a, b)      PyNumber_Add(a, b)\n  #define __Pyx_PyUnicode_ConcatSafe(a, b)  PyNumber_Add(a, b)\n#else\n  #define __Pyx_PyUnicode_Concat(a, b)      PyUnicode_Concat(a, b)\n  #define __Pyx_PyUnicode_ConcatSafe(a, b)  ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\\\n      PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))\n#endif\n#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains)\n  #define PyUnicode_Contains(u, s)  PySequence_Contains(u, s)\n#endif\n#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check)\n  #define PyByteArray_Check(obj)  PyObject_TypeCheck(obj, &PyByteArray_Type)\n#endif\n#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format)\n  #define PyObject_Format(obj, fmt)  PyObject_CallMethod(obj, \"__format__\", \"O\", fmt)\n#endif\n#define __Pyx_PyString_FormatSafe(a, b)   ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? 
PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))\n#define __Pyx_PyUnicode_FormatSafe(a, b)  ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))\n#if PY_MAJOR_VERSION >= 3\n  #define __Pyx_PyString_Format(a, b)  PyUnicode_Format(a, b)\n#else\n  #define __Pyx_PyString_Format(a, b)  PyString_Format(a, b)\n#endif\n#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII)\n  #define PyObject_ASCII(o)            PyObject_Repr(o)\n#endif\n#if PY_MAJOR_VERSION >= 3\n  #define PyBaseString_Type            PyUnicode_Type\n  #define PyStringObject               PyUnicodeObject\n  #define PyString_Type                PyUnicode_Type\n  #define PyString_Check               PyUnicode_Check\n  #define PyString_CheckExact          PyUnicode_CheckExact\n#ifndef PyObject_Unicode\n  #define PyObject_Unicode             PyObject_Str\n#endif\n#endif\n#if PY_MAJOR_VERSION >= 3\n  #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)\n  #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)\n#else\n  #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))\n  #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))\n#endif\n#ifndef PySet_CheckExact\n  #define PySet_CheckExact(obj)        (Py_TYPE(obj) == &PySet_Type)\n#endif\n#if PY_VERSION_HEX >= 0x030900A4\n  #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt)\n  #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size)\n#else\n  #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt)\n  #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size)\n#endif\n#if CYTHON_ASSUME_SAFE_MACROS\n  #define __Pyx_PySequence_SIZE(seq)  Py_SIZE(seq)\n#else\n  #define __Pyx_PySequence_SIZE(seq)  PySequence_Size(seq)\n#endif\n#if PY_MAJOR_VERSION >= 3\n  #define PyIntObject                  PyLongObject\n  #define PyInt_Type                   PyLong_Type\n  #define 
PyInt_Check(op)              PyLong_Check(op)\n  #define PyInt_CheckExact(op)         PyLong_CheckExact(op)\n  #define PyInt_FromString             PyLong_FromString\n  #define PyInt_FromUnicode            PyLong_FromUnicode\n  #define PyInt_FromLong               PyLong_FromLong\n  #define PyInt_FromSize_t             PyLong_FromSize_t\n  #define PyInt_FromSsize_t            PyLong_FromSsize_t\n  #define PyInt_AsLong                 PyLong_AsLong\n  #define PyInt_AS_LONG                PyLong_AS_LONG\n  #define PyInt_AsSsize_t              PyLong_AsSsize_t\n  #define PyInt_AsUnsignedLongMask     PyLong_AsUnsignedLongMask\n  #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask\n  #define PyNumber_Int                 PyNumber_Long\n#endif\n#if PY_MAJOR_VERSION >= 3\n  #define PyBoolObject                 PyLongObject\n#endif\n#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY\n  #ifndef PyUnicode_InternFromString\n    #define PyUnicode_InternFromString(s) PyUnicode_FromString(s)\n  #endif\n#endif\n#if PY_VERSION_HEX < 0x030200A4\n  typedef long Py_hash_t;\n  #define __Pyx_PyInt_FromHash_t PyInt_FromLong\n  #define __Pyx_PyInt_AsHash_t   PyInt_AsLong\n#else\n  #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t\n  #define __Pyx_PyInt_AsHash_t   PyInt_AsSsize_t\n#endif\n#if PY_MAJOR_VERSION >= 3\n  #define __Pyx_PyMethod_New(func, self, klass) ((self) ? 
((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func))\n#else\n  #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)\n#endif\n#if CYTHON_USE_ASYNC_SLOTS\n  #if PY_VERSION_HEX >= 0x030500B1\n    #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods\n    #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)\n  #else\n    #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))\n  #endif\n#else\n  #define __Pyx_PyType_AsAsync(obj) NULL\n#endif\n#ifndef __Pyx_PyAsyncMethodsStruct\n    typedef struct {\n        unaryfunc am_await;\n        unaryfunc am_aiter;\n        unaryfunc am_anext;\n    } __Pyx_PyAsyncMethodsStruct;\n#endif\n\n#if defined(WIN32) || defined(MS_WINDOWS)\n  #define _USE_MATH_DEFINES\n#endif\n#include <math.h>\n#ifdef NAN\n#define __PYX_NAN() ((float) NAN)\n#else\nstatic CYTHON_INLINE float __PYX_NAN() {\n  float value;\n  memset(&value, 0xFF, sizeof(value));\n  return value;\n}\n#endif\n#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)\n#define __Pyx_truncl trunc\n#else\n#define __Pyx_truncl truncl\n#endif\n\n#define __PYX_MARK_ERR_POS(f_index, lineno) \\\n    { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; }\n#define __PYX_ERR(f_index, lineno, Ln_error) \\\n    { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; }\n\n#ifndef __PYX_EXTERN_C\n  #ifdef __cplusplus\n    #define __PYX_EXTERN_C extern \"C\"\n  #else\n    #define __PYX_EXTERN_C extern\n  #endif\n#endif\n\n#define __PYX_HAVE__cpp_mstar\n#define __PYX_HAVE_API__cpp_mstar\n/* Early includes */\n#include \"ios\"\n#include \"new\"\n#include \"stdexcept\"\n#include \"typeinfo\"\n#include <vector>\n#include <utility>\n\n    #if __cplusplus > 199711L\n    #include <type_traits>\n\n    namespace cython_std {\n    template <typename T> typename std::remove_reference<T>::type&& move(T& t) noexcept { return std::move(t); }\n    
template <typename T> typename std::remove_reference<T>::type&& move(T&& t) noexcept { return std::move(t); }\n    }\n\n    #endif\n    \n#include \"grid_planning.hpp\"\n#ifdef _OPENMP\n#include <omp.h>\n#endif /* _OPENMP */\n\n#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)\n#define CYTHON_WITHOUT_ASSERTIONS\n#endif\n\ntypedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding;\n                const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;\n\n#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0\n#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0\n#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8)\n#define __PYX_DEFAULT_STRING_ENCODING \"\"\n#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString\n#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize\n#define __Pyx_uchar_cast(c) ((unsigned char)c)\n#define __Pyx_long_cast(x) ((long)x)\n#define __Pyx_fits_Py_ssize_t(v, type, is_signed)  (\\\n    (sizeof(type) < sizeof(Py_ssize_t))  ||\\\n    (sizeof(type) > sizeof(Py_ssize_t) &&\\\n          likely(v < (type)PY_SSIZE_T_MAX ||\\\n                 v == (type)PY_SSIZE_T_MAX)  &&\\\n          (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\\\n                                v == (type)PY_SSIZE_T_MIN)))  ||\\\n    (sizeof(type) == sizeof(Py_ssize_t) &&\\\n          (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\\\n                               v == (type)PY_SSIZE_T_MAX)))  )\nstatic CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) {\n    return (size_t) i < (size_t) limit;\n}\n#if defined (__cplusplus) && __cplusplus >= 201103L\n    #include <cstdlib>\n    #define __Pyx_sst_abs(value) std::abs(value)\n#elif SIZEOF_INT >= SIZEOF_SIZE_T\n    #define __Pyx_sst_abs(value) abs(value)\n#elif SIZEOF_LONG >= SIZEOF_SIZE_T\n    #define __Pyx_sst_abs(value) labs(value)\n#elif defined 
(_MSC_VER)\n    #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value))\n#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L\n    #define __Pyx_sst_abs(value) llabs(value)\n#elif defined (__GNUC__)\n    #define __Pyx_sst_abs(value) __builtin_llabs(value)\n#else\n    #define __Pyx_sst_abs(value) ((value<0) ? -value : value)\n#endif\nstatic CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*);\nstatic CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);\n#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))\n#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)\n#define __Pyx_PyBytes_FromString        PyBytes_FromString\n#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize\nstatic CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);\n#if PY_MAJOR_VERSION < 3\n    #define __Pyx_PyStr_FromString        __Pyx_PyBytes_FromString\n    #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize\n#else\n    #define __Pyx_PyStr_FromString        __Pyx_PyUnicode_FromString\n    #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize\n#endif\n#define __Pyx_PyBytes_AsWritableString(s)     ((char*) PyBytes_AS_STRING(s))\n#define __Pyx_PyBytes_AsWritableSString(s)    ((signed char*) PyBytes_AS_STRING(s))\n#define __Pyx_PyBytes_AsWritableUString(s)    ((unsigned char*) PyBytes_AS_STRING(s))\n#define __Pyx_PyBytes_AsString(s)     ((const char*) PyBytes_AS_STRING(s))\n#define __Pyx_PyBytes_AsSString(s)    ((const signed char*) PyBytes_AS_STRING(s))\n#define __Pyx_PyBytes_AsUString(s)    ((const unsigned char*) PyBytes_AS_STRING(s))\n#define __Pyx_PyObject_AsWritableString(s)    ((char*) __Pyx_PyObject_AsString(s))\n#define __Pyx_PyObject_AsWritableSString(s)    ((signed char*) __Pyx_PyObject_AsString(s))\n#define __Pyx_PyObject_AsWritableUString(s)    ((unsigned char*) 
__Pyx_PyObject_AsString(s))\n#define __Pyx_PyObject_AsSString(s)    ((const signed char*) __Pyx_PyObject_AsString(s))\n#define __Pyx_PyObject_AsUString(s)    ((const unsigned char*) __Pyx_PyObject_AsString(s))\n#define __Pyx_PyObject_FromCString(s)  __Pyx_PyObject_FromString((const char*)s)\n#define __Pyx_PyBytes_FromCString(s)   __Pyx_PyBytes_FromString((const char*)s)\n#define __Pyx_PyByteArray_FromCString(s)   __Pyx_PyByteArray_FromString((const char*)s)\n#define __Pyx_PyStr_FromCString(s)     __Pyx_PyStr_FromString((const char*)s)\n#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)\nstatic CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) {\n    const Py_UNICODE *u_end = u;\n    while (*u_end++) ;\n    return (size_t)(u_end - u - 1);\n}\n#define __Pyx_PyUnicode_FromUnicode(u)       PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))\n#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode\n#define __Pyx_PyUnicode_AsUnicode            PyUnicode_AsUnicode\n#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)\n#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)\nstatic CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b);\nstatic CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);\nstatic CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*);\nstatic CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x);\n#define __Pyx_PySequence_Tuple(obj)\\\n    (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj))\nstatic CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);\nstatic CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);\n#if CYTHON_ASSUME_SAFE_MACROS\n#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? 
PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))\n#else\n#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)\n#endif\n#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))\n#if PY_MAJOR_VERSION >= 3\n#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))\n#else\n#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x))\n#endif\n#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x))\n#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII\nstatic int __Pyx_sys_getdefaultencoding_not_ascii;\nstatic int __Pyx_init_sys_getdefaultencoding_params(void) {\n    PyObject* sys;\n    PyObject* default_encoding = NULL;\n    PyObject* ascii_chars_u = NULL;\n    PyObject* ascii_chars_b = NULL;\n    const char* default_encoding_c;\n    sys = PyImport_ImportModule(\"sys\");\n    if (!sys) goto bad;\n    default_encoding = PyObject_CallMethod(sys, (char*) \"getdefaultencoding\", NULL);\n    Py_DECREF(sys);\n    if (!default_encoding) goto bad;\n    default_encoding_c = PyBytes_AsString(default_encoding);\n    if (!default_encoding_c) goto bad;\n    if (strcmp(default_encoding_c, \"ascii\") == 0) {\n        __Pyx_sys_getdefaultencoding_not_ascii = 0;\n    } else {\n        char ascii_chars[128];\n        int c;\n        for (c = 0; c < 128; c++) {\n            ascii_chars[c] = c;\n        }\n        __Pyx_sys_getdefaultencoding_not_ascii = 1;\n        ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);\n        if (!ascii_chars_u) goto bad;\n        ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);\n        if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {\n            PyErr_Format(\n                PyExc_ValueError,\n                \"This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of 
ascii.\",\n                default_encoding_c);\n            goto bad;\n        }\n        Py_DECREF(ascii_chars_u);\n        Py_DECREF(ascii_chars_b);\n    }\n    Py_DECREF(default_encoding);\n    return 0;\nbad:\n    Py_XDECREF(default_encoding);\n    Py_XDECREF(ascii_chars_u);\n    Py_XDECREF(ascii_chars_b);\n    return -1;\n}\n#endif\n#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3\n#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)\n#else\n#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)\n#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT\nstatic char* __PYX_DEFAULT_STRING_ENCODING;\nstatic int __Pyx_init_sys_getdefaultencoding_params(void) {\n    PyObject* sys;\n    PyObject* default_encoding = NULL;\n    char* default_encoding_c;\n    sys = PyImport_ImportModule(\"sys\");\n    if (!sys) goto bad;\n    default_encoding = PyObject_CallMethod(sys, (char*) (const char*) \"getdefaultencoding\", NULL);\n    Py_DECREF(sys);\n    if (!default_encoding) goto bad;\n    default_encoding_c = PyBytes_AsString(default_encoding);\n    if (!default_encoding_c) goto bad;\n    __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);\n    if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;\n    strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);\n    Py_DECREF(default_encoding);\n    return 0;\nbad:\n    Py_XDECREF(default_encoding);\n    return -1;\n}\n#endif\n#endif\n\n\n/* Test for GCC > 2.95 */\n#if defined(__GNUC__)     && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))\n  #define likely(x)   __builtin_expect(!!(x), 1)\n  #define unlikely(x) __builtin_expect(!!(x), 0)\n#else /* !__GNUC__ or GCC < 2.95 */\n  #define likely(x)   (x)\n  #define unlikely(x) (x)\n#endif /* __GNUC__ */\nstatic CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }\n\nstatic PyObject *__pyx_m = NULL;\nstatic 
PyObject *__pyx_d;\nstatic PyObject *__pyx_b;\nstatic PyObject *__pyx_cython_runtime = NULL;\nstatic PyObject *__pyx_empty_tuple;\nstatic PyObject *__pyx_empty_bytes;\nstatic PyObject *__pyx_empty_unicode;\nstatic int __pyx_lineno;\nstatic int __pyx_clineno = 0;\nstatic const char * __pyx_cfilenm= __FILE__;\nstatic const char *__pyx_filename;\n\n\nstatic const char *__pyx_f[] = {\n  \"cython_od_mstar.pyx\",\n  \"stringsource\",\n};\n\n/*--- Type declarations ---*/\n\n/* --- Runtime support code (head) --- */\n/* Refnanny.proto */\n#ifndef CYTHON_REFNANNY\n  #define CYTHON_REFNANNY 0\n#endif\n#if CYTHON_REFNANNY\n  typedef struct {\n    void (*INCREF)(void*, PyObject*, int);\n    void (*DECREF)(void*, PyObject*, int);\n    void (*GOTREF)(void*, PyObject*, int);\n    void (*GIVEREF)(void*, PyObject*, int);\n    void* (*SetupContext)(const char*, int, const char*);\n    void (*FinishContext)(void**);\n  } __Pyx_RefNannyAPIStruct;\n  static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;\n  static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);\n  #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;\n#ifdef WITH_THREAD\n  #define __Pyx_RefNannySetupContext(name, acquire_gil)\\\n          if (acquire_gil) {\\\n              PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\\\n              __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\\\n              PyGILState_Release(__pyx_gilstate_save);\\\n          } else {\\\n              __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\\\n          }\n#else\n  #define __Pyx_RefNannySetupContext(name, acquire_gil)\\\n          __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)\n#endif\n  #define __Pyx_RefNannyFinishContext()\\\n          __Pyx_RefNanny->FinishContext(&__pyx_refnanny)\n  #define __Pyx_INCREF(r)  __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)\n  #define __Pyx_DECREF(r)  
__Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)\n  #define __Pyx_GOTREF(r)  __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)\n  #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)\n  #define __Pyx_XINCREF(r)  do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)\n  #define __Pyx_XDECREF(r)  do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)\n  #define __Pyx_XGOTREF(r)  do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)\n  #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)\n#else\n  #define __Pyx_RefNannyDeclarations\n  #define __Pyx_RefNannySetupContext(name, acquire_gil)\n  #define __Pyx_RefNannyFinishContext()\n  #define __Pyx_INCREF(r) Py_INCREF(r)\n  #define __Pyx_DECREF(r) Py_DECREF(r)\n  #define __Pyx_GOTREF(r)\n  #define __Pyx_GIVEREF(r)\n  #define __Pyx_XINCREF(r) Py_XINCREF(r)\n  #define __Pyx_XDECREF(r) Py_XDECREF(r)\n  #define __Pyx_XGOTREF(r)\n  #define __Pyx_XGIVEREF(r)\n#endif\n#define __Pyx_XDECREF_SET(r, v) do {\\\n        PyObject *tmp = (PyObject *) r;\\\n        r = v; __Pyx_XDECREF(tmp);\\\n    } while (0)\n#define __Pyx_DECREF_SET(r, v) do {\\\n        PyObject *tmp = (PyObject *) r;\\\n        r = v; __Pyx_DECREF(tmp);\\\n    } while (0)\n#define __Pyx_CLEAR(r)    do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)\n#define __Pyx_XCLEAR(r)   do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)\n\n/* RaiseArgTupleInvalid.proto */\nstatic void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,\n    Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);\n\n/* RaiseDoubleKeywords.proto */\nstatic void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);\n\n/* ParseKeywords.proto */\nstatic int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\\\n    PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\\\n    const char* 
function_name);\n\n/* PyObjectGetAttrStr.proto */\n#if CYTHON_USE_TYPE_SLOTS\nstatic CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name);\n#else\n#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)\n#endif\n\n/* Import.proto */\nstatic PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);\n\n/* PyFunctionFastCall.proto */\n#if CYTHON_FAST_PYCALL\n#define __Pyx_PyFunction_FastCall(func, args, nargs)\\\n    __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL)\n#if 1 || PY_VERSION_HEX < 0x030600B1\nstatic PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs);\n#else\n#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs)\n#endif\n#define __Pyx_BUILD_ASSERT_EXPR(cond)\\\n    (sizeof(char [1 - 2*!(cond)]) - 1)\n#ifndef Py_MEMBER_SIZE\n#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member)\n#endif\n  static size_t __pyx_pyframe_localsplus_offset = 0;\n  #include \"frameobject.h\"\n  #define __Pxy_PyFrame_Initialize_Offsets()\\\n    ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\\\n     (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus)))\n  #define __Pyx_PyFrame_GetLocalsplus(frame)\\\n    (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset))\n#endif\n\n/* PyCFunctionFastCall.proto */\n#if CYTHON_FAST_PYCCALL\nstatic CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs);\n#else\n#define __Pyx_PyCFunction_FastCall(func, args, nargs)  (assert(0), NULL)\n#endif\n\n/* PyObjectCall.proto */\n#if CYTHON_COMPILING_IN_CPYTHON\nstatic CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject 
*kw);\n#else\n#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)\n#endif\n\n/* PyIntCompare.proto */\nstatic CYTHON_INLINE PyObject* __Pyx_PyInt_EqObjC(PyObject *op1, PyObject *op2, long intval, long inplace);\n\n/* GetTopmostException.proto */\n#if CYTHON_USE_EXC_INFO_STACK\nstatic _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate);\n#endif\n\n/* PyThreadStateGet.proto */\n#if CYTHON_FAST_THREAD_STATE\n#define __Pyx_PyThreadState_declare  PyThreadState *__pyx_tstate;\n#define __Pyx_PyThreadState_assign  __pyx_tstate = __Pyx_PyThreadState_Current;\n#define __Pyx_PyErr_Occurred()  __pyx_tstate->curexc_type\n#else\n#define __Pyx_PyThreadState_declare\n#define __Pyx_PyThreadState_assign\n#define __Pyx_PyErr_Occurred()  PyErr_Occurred()\n#endif\n\n/* SaveResetException.proto */\n#if CYTHON_FAST_THREAD_STATE\n#define __Pyx_ExceptionSave(type, value, tb)  __Pyx__ExceptionSave(__pyx_tstate, type, value, tb)\nstatic CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);\n#define __Pyx_ExceptionReset(type, value, tb)  __Pyx__ExceptionReset(__pyx_tstate, type, value, tb)\nstatic CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);\n#else\n#define __Pyx_ExceptionSave(type, value, tb)   PyErr_GetExcInfo(type, value, tb)\n#define __Pyx_ExceptionReset(type, value, tb)  PyErr_SetExcInfo(type, value, tb)\n#endif\n\n/* PyErrExceptionMatches.proto */\n#if CYTHON_FAST_THREAD_STATE\n#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err)\nstatic CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err);\n#else\n#define __Pyx_PyErr_ExceptionMatches(err)  PyErr_ExceptionMatches(err)\n#endif\n\n/* GetException.proto */\n#if CYTHON_FAST_THREAD_STATE\n#define __Pyx_GetException(type, value, tb)  __Pyx__GetException(__pyx_tstate, type, value, tb)\nstatic int 
__Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);\n#else\nstatic int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb);\n#endif\n\n/* PyObjectCallMethO.proto */\n#if CYTHON_COMPILING_IN_CPYTHON\nstatic CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);\n#endif\n\n/* PyObjectCallOneArg.proto */\nstatic CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);\n\n/* IncludeStringH.proto */\n#include <string.h>\n\n/* BytesEquals.proto */\nstatic CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals);\n\n/* UnicodeEquals.proto */\nstatic CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals);\n\n/* StrEquals.proto */\n#if PY_MAJOR_VERSION >= 3\n#define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals\n#else\n#define __Pyx_PyString_Equals __Pyx_PyBytes_Equals\n#endif\n\n/* GetBuiltinName.proto */\nstatic PyObject *__Pyx_GetBuiltinName(PyObject *name);\n\n/* PyDictVersioning.proto */\n#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS\n#define __PYX_DICT_VERSION_INIT  ((PY_UINT64_T) -1)\n#define __PYX_GET_DICT_VERSION(dict)  (((PyDictObject*)(dict))->ma_version_tag)\n#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\\\n    (version_var) = __PYX_GET_DICT_VERSION(dict);\\\n    (cache_var) = (value);\n#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\\\n    static PY_UINT64_T __pyx_dict_version = 0;\\\n    static PyObject *__pyx_dict_cached_value = NULL;\\\n    if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\\\n        (VAR) = __pyx_dict_cached_value;\\\n    } else {\\\n        (VAR) = __pyx_dict_cached_value = (LOOKUP);\\\n        __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\\\n    }\\\n}\nstatic CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj);\nstatic CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj);\nstatic 
CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version);\n#else\n#define __PYX_GET_DICT_VERSION(dict)  (0)\n#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\n#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP)  (VAR) = (LOOKUP);\n#endif\n\n/* GetModuleGlobalName.proto */\n#if CYTHON_USE_DICT_VERSIONS\n#define __Pyx_GetModuleGlobalName(var, name)  {\\\n    static PY_UINT64_T __pyx_dict_version = 0;\\\n    static PyObject *__pyx_dict_cached_value = NULL;\\\n    (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\\\n        (likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\\\n        __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\\\n}\n#define __Pyx_GetModuleGlobalNameUncached(var, name)  {\\\n    PY_UINT64_T __pyx_dict_version;\\\n    PyObject *__pyx_dict_cached_value;\\\n    (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\\\n}\nstatic PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value);\n#else\n#define __Pyx_GetModuleGlobalName(var, name)  (var) = __Pyx__GetModuleGlobalName(name)\n#define __Pyx_GetModuleGlobalNameUncached(var, name)  (var) = __Pyx__GetModuleGlobalName(name)\nstatic CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name);\n#endif\n\n/* PyObjectCallNoArg.proto */\n#if CYTHON_COMPILING_IN_CPYTHON\nstatic CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func);\n#else\n#define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL)\n#endif\n\n/* PyErrFetchRestore.proto */\n#if CYTHON_FAST_THREAD_STATE\n#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL)\n#define __Pyx_ErrRestoreWithState(type, value, tb)  __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)\n#define 
__Pyx_ErrFetchWithState(type, value, tb)    __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)\n#define __Pyx_ErrRestore(type, value, tb)  __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb)\n#define __Pyx_ErrFetch(type, value, tb)    __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb)\nstatic CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);\nstatic CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);\n#if CYTHON_COMPILING_IN_CPYTHON\n#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL))\n#else\n#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)\n#endif\n#else\n#define __Pyx_PyErr_Clear() PyErr_Clear()\n#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)\n#define __Pyx_ErrRestoreWithState(type, value, tb)  PyErr_Restore(type, value, tb)\n#define __Pyx_ErrFetchWithState(type, value, tb)  PyErr_Fetch(type, value, tb)\n#define __Pyx_ErrRestoreInState(tstate, type, value, tb)  PyErr_Restore(type, value, tb)\n#define __Pyx_ErrFetchInState(tstate, type, value, tb)  PyErr_Fetch(type, value, tb)\n#define __Pyx_ErrRestore(type, value, tb)  PyErr_Restore(type, value, tb)\n#define __Pyx_ErrFetch(type, value, tb)  PyErr_Fetch(type, value, tb)\n#endif\n\n/* RaiseException.proto */\nstatic void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);\n\n/* RaiseTooManyValuesToUnpack.proto */\nstatic CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);\n\n/* RaiseNeedMoreValuesToUnpack.proto */\nstatic CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);\n\n/* IterFinish.proto */\nstatic CYTHON_INLINE int __Pyx_IterFinish(void);\n\n/* UnpackItemEndCheck.proto */\nstatic int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected);\n\n/* ListCompAppend.proto */\n#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS\nstatic CYTHON_INLINE 
int __Pyx_ListComp_Append(PyObject* list, PyObject* x) {\n    PyListObject* L = (PyListObject*) list;\n    Py_ssize_t len = Py_SIZE(list);\n    if (likely(L->allocated > len)) {\n        Py_INCREF(x);\n        PyList_SET_ITEM(list, len, x);\n        __Pyx_SET_SIZE(list, len + 1);\n        return 0;\n    }\n    return PyList_Append(list, x);\n}\n#else\n#define __Pyx_ListComp_Append(L,x) PyList_Append(L,x)\n#endif\n\n/* ImportFrom.proto */\nstatic PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name);\n\n/* CLineInTraceback.proto */\n#ifdef CYTHON_CLINE_IN_TRACEBACK\n#define __Pyx_CLineForTraceback(tstate, c_line)  (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0)\n#else\nstatic int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line);\n#endif\n\n/* CodeObjectCache.proto */\ntypedef struct {\n    PyCodeObject* code_object;\n    int code_line;\n} __Pyx_CodeObjectCacheEntry;\nstruct __Pyx_CodeObjectCache {\n    int count;\n    int max_count;\n    __Pyx_CodeObjectCacheEntry* entries;\n};\nstatic struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};\nstatic int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);\nstatic PyCodeObject *__pyx_find_code_object(int code_line);\nstatic void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);\n\n/* AddTraceback.proto */\nstatic void __Pyx_AddTraceback(const char *funcname, int c_line,\n                               int py_line, const char *filename);\n\n/* CppExceptionConversion.proto */\n#ifndef __Pyx_CppExn2PyErr\n#include <new>\n#include <typeinfo>\n#include <stdexcept>\n#include <ios>\nstatic void __Pyx_CppExn2PyErr() {\n  try {\n    if (PyErr_Occurred())\n      ; // let the latest Python exn pass through and ignore the current one\n    else\n      throw;\n  } catch (const std::bad_alloc& exn) {\n    PyErr_SetString(PyExc_MemoryError, exn.what());\n  } catch (const std::bad_cast& exn) {\n    PyErr_SetString(PyExc_TypeError, exn.what());\n  } catch (const 
std::bad_typeid& exn) {\n    PyErr_SetString(PyExc_TypeError, exn.what());\n  } catch (const std::domain_error& exn) {\n    PyErr_SetString(PyExc_ValueError, exn.what());\n  } catch (const std::invalid_argument& exn) {\n    PyErr_SetString(PyExc_ValueError, exn.what());\n  } catch (const std::ios_base::failure& exn) {\n    PyErr_SetString(PyExc_IOError, exn.what());\n  } catch (const std::out_of_range& exn) {\n    PyErr_SetString(PyExc_IndexError, exn.what());\n  } catch (const std::overflow_error& exn) {\n    PyErr_SetString(PyExc_OverflowError, exn.what());\n  } catch (const std::range_error& exn) {\n    PyErr_SetString(PyExc_ArithmeticError, exn.what());\n  } catch (const std::underflow_error& exn) {\n    PyErr_SetString(PyExc_ArithmeticError, exn.what());\n  } catch (const std::exception& exn) {\n    PyErr_SetString(PyExc_RuntimeError, exn.what());\n  }\n  catch (...)\n  {\n    PyErr_SetString(PyExc_RuntimeError, \"Unknown exception\");\n  }\n}\n#endif\n\n/* CIntToPy.proto */\nstatic CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value);\n\n/* CIntFromPy.proto */\nstatic CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);\n\n/* CIntFromPy.proto */\nstatic CYTHON_INLINE size_t __Pyx_PyInt_As_size_t(PyObject *);\n\n/* CIntToPy.proto */\nstatic CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);\n\n/* CIntFromPy.proto */\nstatic CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);\n\n/* FastTypeChecks.proto */\n#if CYTHON_COMPILING_IN_CPYTHON\n#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type)\nstatic CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b);\nstatic CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type);\nstatic CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2);\n#else\n#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)\n#define __Pyx_PyErr_GivenExceptionMatches(err, type) 
PyErr_GivenExceptionMatches(err, type)\n#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2))\n#endif\n#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)\n\n/* CheckBinaryVersion.proto */\nstatic int __Pyx_check_binary_version(void);\n\n/* InitStrings.proto */\nstatic int __Pyx_InitStrings(__Pyx_StringTabEntry *t);\n\n\n/* Module declarations from 'libcpp' */\n\n/* Module declarations from 'libcpp.vector' */\n\n/* Module declarations from 'libcpp.utility' */\n\n/* Module declarations from 'libcpp.pair' */\n\n/* Module declarations from 'cpp_mstar' */\nstatic std::pair<int,int>  __pyx_convert_pair_from_py_int__and_int(PyObject *); /*proto*/\nstatic std::vector<std::pair<int,int> >  __pyx_convert_vector_from_py_std_3a__3a_pair_3c_int_2c_int_3e___(PyObject *); /*proto*/\nstatic PyObject *__pyx_convert_pair_to_py_int____int(std::pair<int,int>  const &); /*proto*/\nstatic PyObject *__pyx_convert_vector_to_py_std_3a__3a_pair_3c_int_2c_int_3e___(const std::vector<std::pair<int,int> >  &); /*proto*/\nstatic PyObject *__pyx_convert_vector_to_py_std_3a__3a_vector_3c_std_3a__3a_pair_3c_int_2c_int_3e____3e___(const std::vector<std::vector<std::pair<int,int> > >  &); /*proto*/\n#define __Pyx_MODULE_NAME \"cpp_mstar\"\nextern int __pyx_module_is_main_cpp_mstar;\nint __pyx_module_is_main_cpp_mstar = 0;\n\n/* Implementation of 'cpp_mstar' */\nstatic PyObject *__pyx_builtin_range;\nstatic const char __pyx_k_e[] = \"e\";\nstatic const char __pyx_k_i[] = \"i\";\nstatic const char __pyx_k_obs[] = \"obs\";\nstatic const char __pyx_k_row[] = \"row\";\nstatic const char __pyx_k_main[] = \"__main__\";\nstatic const char __pyx_k_name[] = \"__name__\";\nstatic const char __pyx_k_temp[] = \"temp\";\nstatic const char __pyx_k_test[] = \"__test__\";\nstatic const char __pyx_k_goals[] = \"goals\";\nstatic const char __pyx_k_range[] = \"range\";\nstatic const char __pyx_k_world[] 
= \"world\";\nstatic const char __pyx_k_import[] = \"__import__\";\nstatic const char __pyx_k_init_pos[] = \"init_pos\";\nstatic const char __pyx_k_resource[] = \"resource\";\nstatic const char __pyx_k_RLIMIT_AS[] = \"RLIMIT_AS\";\nstatic const char __pyx_k_cpp_mstar[] = \"cpp_mstar\";\nstatic const char __pyx_k_find_path[] = \"find_path\";\nstatic const char __pyx_k_inflation[] = \"inflation\";\nstatic const char __pyx_k_setrlimit[] = \"setrlimit\";\nstatic const char __pyx_k_time_limit[] = \"time_limit\";\nstatic const char __pyx_k_No_Solution[] = \"No Solution\";\nstatic const char __pyx_k_Out_of_Time[] = \"Out of Time\";\nstatic const char __pyx_k_OutOfTimeError[] = \"OutOfTimeError\";\nstatic const char __pyx_k_NoSolutionError[] = \"NoSolutionError\";\nstatic const char __pyx_k_cline_in_traceback[] = \"cline_in_traceback\";\nstatic const char __pyx_k_cython_od_mstar_pyx[] = \"cython_od_mstar.pyx\";\nstatic const char __pyx_k_od_mstar3_col_set_addition[] = \"od_mstar3.col_set_addition\";\nstatic PyObject *__pyx_n_s_NoSolutionError;\nstatic PyObject *__pyx_kp_s_No_Solution;\nstatic PyObject *__pyx_n_s_OutOfTimeError;\nstatic PyObject *__pyx_kp_s_Out_of_Time;\nstatic PyObject *__pyx_n_s_RLIMIT_AS;\nstatic PyObject *__pyx_n_s_cline_in_traceback;\nstatic PyObject *__pyx_n_s_cpp_mstar;\nstatic PyObject *__pyx_kp_s_cython_od_mstar_pyx;\nstatic PyObject *__pyx_n_s_e;\nstatic PyObject *__pyx_n_s_find_path;\nstatic PyObject *__pyx_n_s_goals;\nstatic PyObject *__pyx_n_s_i;\nstatic PyObject *__pyx_n_s_import;\nstatic PyObject *__pyx_n_s_inflation;\nstatic PyObject *__pyx_n_s_init_pos;\nstatic PyObject *__pyx_n_s_main;\nstatic PyObject *__pyx_n_s_name;\nstatic PyObject *__pyx_n_s_obs;\nstatic PyObject *__pyx_n_s_od_mstar3_col_set_addition;\nstatic PyObject *__pyx_n_s_range;\nstatic PyObject *__pyx_n_s_resource;\nstatic PyObject *__pyx_n_s_row;\nstatic PyObject *__pyx_n_s_setrlimit;\nstatic PyObject *__pyx_n_s_temp;\nstatic PyObject *__pyx_n_s_test;\nstatic PyObject 
*__pyx_n_s_time_limit;\nstatic PyObject *__pyx_n_s_world;\nstatic PyObject *__pyx_pf_9cpp_mstar_find_path(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_world, PyObject *__pyx_v_init_pos, PyObject *__pyx_v_goals, PyObject *__pyx_v_inflation, PyObject *__pyx_v_time_limit); /* proto */\nstatic PyObject *__pyx_int_1;\nstatic PyObject *__pyx_int_8589934592;\nstatic PyObject *__pyx_tuple_;\nstatic PyObject *__pyx_tuple__2;\nstatic PyObject *__pyx_codeobj__3;\n/* Late includes */\n\n/* \"cython_od_mstar.pyx\":16\n *         double inflation, int time_limit) except +\n * \n * def find_path(world, init_pos, goals, inflation, time_limit):             # <<<<<<<<<<<<<<\n *     \"\"\"Finds a path invoking C++ implementation\n * \n */\n\n/* Python wrapper */\nstatic PyObject *__pyx_pw_9cpp_mstar_1find_path(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/\nstatic char __pyx_doc_9cpp_mstar_find_path[] = \"Finds a path invoking C++ implementation\\n\\n    Uses recursive ODrM* to explore a 4 connected grid\\n\\n    world - matrix specifying obstacles, 1 for obstacle, 0 for free\\n    init_pos  - [[x, y], ...] specifying start position for each robot\\n    goals     - [[x, y], ...] specifying goal position for each robot\\n    inflation - inflation factor for heuristic\\n    time_limit - time until failure in seconds\\n\\n    returns:\\n    [[[x1, y1], ...], [[x2, y2], ...], ...] 
path in the joint\\n    configuration space\\n\\n    raises:\\n    NoSolutionError if problem has no solution\\n    OutOfTimeError if the planner ran out of time\\n    \";\nstatic PyMethodDef __pyx_mdef_9cpp_mstar_1find_path = {\"find_path\", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_9cpp_mstar_1find_path, METH_VARARGS|METH_KEYWORDS, __pyx_doc_9cpp_mstar_find_path};\nstatic PyObject *__pyx_pw_9cpp_mstar_1find_path(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {\n  PyObject *__pyx_v_world = 0;\n  PyObject *__pyx_v_init_pos = 0;\n  PyObject *__pyx_v_goals = 0;\n  PyObject *__pyx_v_inflation = 0;\n  PyObject *__pyx_v_time_limit = 0;\n  int __pyx_lineno = 0;\n  const char *__pyx_filename = NULL;\n  int __pyx_clineno = 0;\n  PyObject *__pyx_r = 0;\n  __Pyx_RefNannyDeclarations\n  __Pyx_RefNannySetupContext(\"find_path (wrapper)\", 0);\n  {\n    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_world,&__pyx_n_s_init_pos,&__pyx_n_s_goals,&__pyx_n_s_inflation,&__pyx_n_s_time_limit,0};\n    PyObject* values[5] = {0,0,0,0,0};\n    if (unlikely(__pyx_kwds)) {\n      Py_ssize_t kw_args;\n      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);\n      switch (pos_args) {\n        case  5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);\n        CYTHON_FALLTHROUGH;\n        case  4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);\n        CYTHON_FALLTHROUGH;\n        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);\n        CYTHON_FALLTHROUGH;\n        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);\n        CYTHON_FALLTHROUGH;\n        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);\n        CYTHON_FALLTHROUGH;\n        case  0: break;\n        default: goto __pyx_L5_argtuple_error;\n      }\n      kw_args = PyDict_Size(__pyx_kwds);\n      switch (pos_args) {\n        case  0:\n        if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_world)) != 0)) kw_args--;\n        else goto __pyx_L5_argtuple_error;\n    
    CYTHON_FALLTHROUGH;\n        case  1:\n        if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_init_pos)) != 0)) kw_args--;\n        else {\n          __Pyx_RaiseArgtupleInvalid(\"find_path\", 1, 5, 5, 1); __PYX_ERR(0, 16, __pyx_L3_error)\n        }\n        CYTHON_FALLTHROUGH;\n        case  2:\n        if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_goals)) != 0)) kw_args--;\n        else {\n          __Pyx_RaiseArgtupleInvalid(\"find_path\", 1, 5, 5, 2); __PYX_ERR(0, 16, __pyx_L3_error)\n        }\n        CYTHON_FALLTHROUGH;\n        case  3:\n        if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_inflation)) != 0)) kw_args--;\n        else {\n          __Pyx_RaiseArgtupleInvalid(\"find_path\", 1, 5, 5, 3); __PYX_ERR(0, 16, __pyx_L3_error)\n        }\n        CYTHON_FALLTHROUGH;\n        case  4:\n        if (likely((values[4] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_time_limit)) != 0)) kw_args--;\n        else {\n          __Pyx_RaiseArgtupleInvalid(\"find_path\", 1, 5, 5, 4); __PYX_ERR(0, 16, __pyx_L3_error)\n        }\n      }\n      if (unlikely(kw_args > 0)) {\n        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, \"find_path\") < 0)) __PYX_ERR(0, 16, __pyx_L3_error)\n      }\n    } else if (PyTuple_GET_SIZE(__pyx_args) != 5) {\n      goto __pyx_L5_argtuple_error;\n    } else {\n      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);\n      values[1] = PyTuple_GET_ITEM(__pyx_args, 1);\n      values[2] = PyTuple_GET_ITEM(__pyx_args, 2);\n      values[3] = PyTuple_GET_ITEM(__pyx_args, 3);\n      values[4] = PyTuple_GET_ITEM(__pyx_args, 4);\n    }\n    __pyx_v_world = values[0];\n    __pyx_v_init_pos = values[1];\n    __pyx_v_goals = values[2];\n    __pyx_v_inflation = values[3];\n    __pyx_v_time_limit = values[4];\n  }\n  goto __pyx_L4_argument_unpacking_done;\n  __pyx_L5_argtuple_error:;\n  __Pyx_RaiseArgtupleInvalid(\"find_path\", 1, 5, 
5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 16, __pyx_L3_error)\n  __pyx_L3_error:;\n  __Pyx_AddTraceback(\"cpp_mstar.find_path\", __pyx_clineno, __pyx_lineno, __pyx_filename);\n  __Pyx_RefNannyFinishContext();\n  return NULL;\n  __pyx_L4_argument_unpacking_done:;\n  __pyx_r = __pyx_pf_9cpp_mstar_find_path(__pyx_self, __pyx_v_world, __pyx_v_init_pos, __pyx_v_goals, __pyx_v_inflation, __pyx_v_time_limit);\n\n  /* function exit code */\n  __Pyx_RefNannyFinishContext();\n  return __pyx_r;\n}\n\nstatic PyObject *__pyx_pf_9cpp_mstar_find_path(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_world, PyObject *__pyx_v_init_pos, PyObject *__pyx_v_goals, PyObject *__pyx_v_inflation, PyObject *__pyx_v_time_limit) {\n  PyObject *__pyx_v_resource = NULL;\n  std::vector<std::vector<bool> >  __pyx_v_obs;\n  std::vector<bool>  __pyx_v_temp;\n  PyObject *__pyx_v_row = NULL;\n  PyObject *__pyx_v_i = NULL;\n  PyObject *__pyx_v_e = NULL;\n  PyObject *__pyx_r = NULL;\n  __Pyx_RefNannyDeclarations\n  PyObject *__pyx_t_1 = NULL;\n  PyObject *__pyx_t_2 = NULL;\n  PyObject *__pyx_t_3 = NULL;\n  PyObject *__pyx_t_4 = NULL;\n  int __pyx_t_5;\n  PyObject *__pyx_t_6 = NULL;\n  Py_ssize_t __pyx_t_7;\n  PyObject *(*__pyx_t_8)(PyObject *);\n  std::vector<bool>  __pyx_t_9;\n  Py_ssize_t __pyx_t_10;\n  PyObject *(*__pyx_t_11)(PyObject *);\n  bool __pyx_t_12;\n  PyObject *__pyx_t_13 = NULL;\n  PyObject *__pyx_t_14 = NULL;\n  PyObject *__pyx_t_15 = NULL;\n  std::vector<std::pair<int,int> >  __pyx_t_16;\n  std::vector<std::pair<int,int> >  __pyx_t_17;\n  double __pyx_t_18;\n  std::vector<std::vector<std::pair<int,int> > >  __pyx_t_19;\n  int __pyx_t_20;\n  PyObject *__pyx_t_21 = NULL;\n  int __pyx_lineno = 0;\n  const char *__pyx_filename = NULL;\n  int __pyx_clineno = 0;\n  __Pyx_RefNannySetupContext(\"find_path\", 0);\n\n  /* \"cython_od_mstar.pyx\":36\n *     \"\"\"\n * \n *     import resource             # <<<<<<<<<<<<<<\n *     resource.setrlimit(resource.RLIMIT_AS, (2**33,2**33)) # 
8Gb\n * \n */\n  __pyx_t_1 = __Pyx_Import(__pyx_n_s_resource, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 36, __pyx_L1_error)\n  __Pyx_GOTREF(__pyx_t_1);\n  __pyx_v_resource = __pyx_t_1;\n  __pyx_t_1 = 0;\n\n  /* \"cython_od_mstar.pyx\":37\n * \n *     import resource\n *     resource.setrlimit(resource.RLIMIT_AS, (2**33,2**33)) # 8Gb             # <<<<<<<<<<<<<<\n * \n *     # convert to boolean.  For some reason coercion doesn't seem to\n */\n  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_resource, __pyx_n_s_setrlimit); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 37, __pyx_L1_error)\n  __Pyx_GOTREF(__pyx_t_2);\n  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_resource, __pyx_n_s_RLIMIT_AS); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 37, __pyx_L1_error)\n  __Pyx_GOTREF(__pyx_t_3);\n  __pyx_t_4 = NULL;\n  __pyx_t_5 = 0;\n  if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {\n    __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2);\n    if (likely(__pyx_t_4)) {\n      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);\n      __Pyx_INCREF(__pyx_t_4);\n      __Pyx_INCREF(function);\n      __Pyx_DECREF_SET(__pyx_t_2, function);\n      __pyx_t_5 = 1;\n    }\n  }\n  #if CYTHON_FAST_PYCALL\n  if (PyFunction_Check(__pyx_t_2)) {\n    PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_t_3, __pyx_tuple_};\n    __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_5, 2+__pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 37, __pyx_L1_error)\n    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;\n    __Pyx_GOTREF(__pyx_t_1);\n    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;\n  } else\n  #endif\n  #if CYTHON_FAST_PYCCALL\n  if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) {\n    PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_t_3, __pyx_tuple_};\n    __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_5, 2+__pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 37, __pyx_L1_error)\n    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;\n    __Pyx_GOTREF(__pyx_t_1);\n    
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;\n  } else\n  #endif\n  {\n    __pyx_t_6 = PyTuple_New(2+__pyx_t_5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 37, __pyx_L1_error)\n    __Pyx_GOTREF(__pyx_t_6);\n    if (__pyx_t_4) {\n      __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_4); __pyx_t_4 = NULL;\n    }\n    __Pyx_GIVEREF(__pyx_t_3);\n    PyTuple_SET_ITEM(__pyx_t_6, 0+__pyx_t_5, __pyx_t_3);\n    __Pyx_INCREF(__pyx_tuple_);\n    __Pyx_GIVEREF(__pyx_tuple_);\n    PyTuple_SET_ITEM(__pyx_t_6, 1+__pyx_t_5, __pyx_tuple_);\n    __pyx_t_3 = 0;\n    __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_6, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 37, __pyx_L1_error)\n    __Pyx_GOTREF(__pyx_t_1);\n    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;\n  }\n  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;\n  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;\n\n  /* \"cython_od_mstar.pyx\":43\n *     cdef vector[vector[bool]] obs\n *     cdef vector[bool] temp\n *     for row in world:             # <<<<<<<<<<<<<<\n *         temp = vector[bool]()\n *         for i in row:\n */\n  if (likely(PyList_CheckExact(__pyx_v_world)) || PyTuple_CheckExact(__pyx_v_world)) {\n    __pyx_t_1 = __pyx_v_world; __Pyx_INCREF(__pyx_t_1); __pyx_t_7 = 0;\n    __pyx_t_8 = NULL;\n  } else {\n    __pyx_t_7 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_v_world); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 43, __pyx_L1_error)\n    __Pyx_GOTREF(__pyx_t_1);\n    __pyx_t_8 = Py_TYPE(__pyx_t_1)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 43, __pyx_L1_error)\n  }\n  for (;;) {\n    if (likely(!__pyx_t_8)) {\n      if (likely(PyList_CheckExact(__pyx_t_1))) {\n        if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_1)) break;\n        #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS\n        __pyx_t_2 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_7); __Pyx_INCREF(__pyx_t_2); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(0, 43, __pyx_L1_error)\n        #else\n        __pyx_t_2 = PySequence_ITEM(__pyx_t_1, __pyx_t_7); 
__pyx_t_7++; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 43, __pyx_L1_error)\n        __Pyx_GOTREF(__pyx_t_2);\n        #endif\n      } else {\n        if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_1)) break;\n        #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS\n        __pyx_t_2 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_7); __Pyx_INCREF(__pyx_t_2); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(0, 43, __pyx_L1_error)\n        #else\n        __pyx_t_2 = PySequence_ITEM(__pyx_t_1, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 43, __pyx_L1_error)\n        __Pyx_GOTREF(__pyx_t_2);\n        #endif\n      }\n    } else {\n      __pyx_t_2 = __pyx_t_8(__pyx_t_1);\n      if (unlikely(!__pyx_t_2)) {\n        PyObject* exc_type = PyErr_Occurred();\n        if (exc_type) {\n          if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();\n          else __PYX_ERR(0, 43, __pyx_L1_error)\n        }\n        break;\n      }\n      __Pyx_GOTREF(__pyx_t_2);\n    }\n    __Pyx_XDECREF_SET(__pyx_v_row, __pyx_t_2);\n    __pyx_t_2 = 0;\n\n    /* \"cython_od_mstar.pyx\":44\n *     cdef vector[bool] temp\n *     for row in world:\n *         temp = vector[bool]()             # <<<<<<<<<<<<<<\n *         for i in row:\n *             temp.push_back(i == 1)\n */\n    try {\n      __pyx_t_9 = std::vector<bool> ();\n    } catch(...) 
{\n      __Pyx_CppExn2PyErr();\n      __PYX_ERR(0, 44, __pyx_L1_error)\n    }\n    __pyx_v_temp = __pyx_t_9;\n\n    /* \"cython_od_mstar.pyx\":45\n *     for row in world:\n *         temp = vector[bool]()\n *         for i in row:             # <<<<<<<<<<<<<<\n *             temp.push_back(i == 1)\n *         obs.push_back(temp)\n */\n    if (likely(PyList_CheckExact(__pyx_v_row)) || PyTuple_CheckExact(__pyx_v_row)) {\n      __pyx_t_2 = __pyx_v_row; __Pyx_INCREF(__pyx_t_2); __pyx_t_10 = 0;\n      __pyx_t_11 = NULL;\n    } else {\n      __pyx_t_10 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_row); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 45, __pyx_L1_error)\n      __Pyx_GOTREF(__pyx_t_2);\n      __pyx_t_11 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 45, __pyx_L1_error)\n    }\n    for (;;) {\n      if (likely(!__pyx_t_11)) {\n        if (likely(PyList_CheckExact(__pyx_t_2))) {\n          if (__pyx_t_10 >= PyList_GET_SIZE(__pyx_t_2)) break;\n          #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS\n          __pyx_t_6 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_10); __Pyx_INCREF(__pyx_t_6); __pyx_t_10++; if (unlikely(0 < 0)) __PYX_ERR(0, 45, __pyx_L1_error)\n          #else\n          __pyx_t_6 = PySequence_ITEM(__pyx_t_2, __pyx_t_10); __pyx_t_10++; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 45, __pyx_L1_error)\n          __Pyx_GOTREF(__pyx_t_6);\n          #endif\n        } else {\n          if (__pyx_t_10 >= PyTuple_GET_SIZE(__pyx_t_2)) break;\n          #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS\n          __pyx_t_6 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_10); __Pyx_INCREF(__pyx_t_6); __pyx_t_10++; if (unlikely(0 < 0)) __PYX_ERR(0, 45, __pyx_L1_error)\n          #else\n          __pyx_t_6 = PySequence_ITEM(__pyx_t_2, __pyx_t_10); __pyx_t_10++; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 45, __pyx_L1_error)\n          __Pyx_GOTREF(__pyx_t_6);\n          #endif\n        }\n      } else {\n        __pyx_t_6 = 
__pyx_t_11(__pyx_t_2);\n        if (unlikely(!__pyx_t_6)) {\n          PyObject* exc_type = PyErr_Occurred();\n          if (exc_type) {\n            if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();\n            else __PYX_ERR(0, 45, __pyx_L1_error)\n          }\n          break;\n        }\n        __Pyx_GOTREF(__pyx_t_6);\n      }\n      __Pyx_XDECREF_SET(__pyx_v_i, __pyx_t_6);\n      __pyx_t_6 = 0;\n\n      /* \"cython_od_mstar.pyx\":46\n *         temp = vector[bool]()\n *         for i in row:\n *             temp.push_back(i == 1)             # <<<<<<<<<<<<<<\n *         obs.push_back(temp)\n *     try:\n */\n      __pyx_t_6 = __Pyx_PyInt_EqObjC(__pyx_v_i, __pyx_int_1, 1, 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 46, __pyx_L1_error)\n      __Pyx_GOTREF(__pyx_t_6);\n      __pyx_t_12 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_12 == ((bool)-1)) && PyErr_Occurred())) __PYX_ERR(0, 46, __pyx_L1_error)\n      __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;\n      try {\n        __pyx_v_temp.push_back(__pyx_t_12);\n      } catch(...) {\n        __Pyx_CppExn2PyErr();\n        __PYX_ERR(0, 46, __pyx_L1_error)\n      }\n\n      /* \"cython_od_mstar.pyx\":45\n *     for row in world:\n *         temp = vector[bool]()\n *         for i in row:             # <<<<<<<<<<<<<<\n *             temp.push_back(i == 1)\n *         obs.push_back(temp)\n */\n    }\n    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;\n\n    /* \"cython_od_mstar.pyx\":47\n *         for i in row:\n *             temp.push_back(i == 1)\n *         obs.push_back(temp)             # <<<<<<<<<<<<<<\n *     try:\n *         return find_grid_path(obs, init_pos, goals, inflation, time_limit)\n */\n    try {\n      __pyx_v_obs.push_back(__pyx_v_temp);\n    } catch(...) 
{\n      __Pyx_CppExn2PyErr();\n      __PYX_ERR(0, 47, __pyx_L1_error)\n    }\n\n    /* \"cython_od_mstar.pyx\":43\n *     cdef vector[vector[bool]] obs\n *     cdef vector[bool] temp\n *     for row in world:             # <<<<<<<<<<<<<<\n *         temp = vector[bool]()\n *         for i in row:\n */\n  }\n  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;\n\n  /* \"cython_od_mstar.pyx\":48\n *             temp.push_back(i == 1)\n *         obs.push_back(temp)\n *     try:             # <<<<<<<<<<<<<<\n *         return find_grid_path(obs, init_pos, goals, inflation, time_limit)\n *     except Exception as e:\n */\n  {\n    __Pyx_PyThreadState_declare\n    __Pyx_PyThreadState_assign\n    __Pyx_ExceptionSave(&__pyx_t_13, &__pyx_t_14, &__pyx_t_15);\n    __Pyx_XGOTREF(__pyx_t_13);\n    __Pyx_XGOTREF(__pyx_t_14);\n    __Pyx_XGOTREF(__pyx_t_15);\n    /*try:*/ {\n\n      /* \"cython_od_mstar.pyx\":49\n *         obs.push_back(temp)\n *     try:\n *         return find_grid_path(obs, init_pos, goals, inflation, time_limit)             # <<<<<<<<<<<<<<\n *     except Exception as e:\n *         if str(e) == \"Out of Time\":\n */\n      __Pyx_XDECREF(__pyx_r);\n      __pyx_t_16 = __pyx_convert_vector_from_py_std_3a__3a_pair_3c_int_2c_int_3e___(__pyx_v_init_pos); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 49, __pyx_L7_error)\n      __pyx_t_17 = __pyx_convert_vector_from_py_std_3a__3a_pair_3c_int_2c_int_3e___(__pyx_v_goals); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 49, __pyx_L7_error)\n      __pyx_t_18 = __pyx_PyFloat_AsDouble(__pyx_v_inflation); if (unlikely((__pyx_t_18 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 49, __pyx_L7_error)\n      __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_v_time_limit); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 49, __pyx_L7_error)\n      try {\n        __pyx_t_19 = mstar::find_grid_path(__pyx_v_obs, __pyx_t_16, __pyx_t_17, __pyx_t_18, __pyx_t_5);\n      } catch(...) 
{\n        __Pyx_CppExn2PyErr();\n        __PYX_ERR(0, 49, __pyx_L7_error)\n      }\n      __pyx_t_1 = __pyx_convert_vector_to_py_std_3a__3a_vector_3c_std_3a__3a_pair_3c_int_2c_int_3e____3e___(__pyx_t_19); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 49, __pyx_L7_error)\n      __Pyx_GOTREF(__pyx_t_1);\n      __pyx_r = __pyx_t_1;\n      __pyx_t_1 = 0;\n      goto __pyx_L11_try_return;\n\n      /* \"cython_od_mstar.pyx\":48\n *             temp.push_back(i == 1)\n *         obs.push_back(temp)\n *     try:             # <<<<<<<<<<<<<<\n *         return find_grid_path(obs, init_pos, goals, inflation, time_limit)\n *     except Exception as e:\n */\n    }\n    __pyx_L7_error:;\n    __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;\n    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;\n    __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;\n    __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;\n    __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;\n\n    /* \"cython_od_mstar.pyx\":50\n *     try:\n *         return find_grid_path(obs, init_pos, goals, inflation, time_limit)\n *     except Exception as e:             # <<<<<<<<<<<<<<\n *         if str(e) == \"Out of Time\":\n *             raise OutOfTimeError()\n */\n    __pyx_t_5 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])));\n    if (__pyx_t_5) {\n      __Pyx_AddTraceback(\"cpp_mstar.find_path\", __pyx_clineno, __pyx_lineno, __pyx_filename);\n      if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_2, &__pyx_t_6) < 0) __PYX_ERR(0, 50, __pyx_L9_except_error)\n      __Pyx_GOTREF(__pyx_t_1);\n      __Pyx_GOTREF(__pyx_t_2);\n      __Pyx_GOTREF(__pyx_t_6);\n      __Pyx_INCREF(__pyx_t_2);\n      __pyx_v_e = __pyx_t_2;\n\n      /* \"cython_od_mstar.pyx\":51\n *         return find_grid_path(obs, init_pos, goals, inflation, time_limit)\n *     except Exception as e:\n *         if str(e) == \"Out of Time\":             # <<<<<<<<<<<<<<\n *             raise OutOfTimeError()\n *         elif str(e) == \"No Solution\":\n */\n      __pyx_t_3 = 
__Pyx_PyObject_CallOneArg(((PyObject *)(&PyString_Type)), __pyx_v_e); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 51, __pyx_L9_except_error)\n      __Pyx_GOTREF(__pyx_t_3);\n      __pyx_t_20 = (__Pyx_PyString_Equals(__pyx_t_3, __pyx_kp_s_Out_of_Time, Py_EQ)); if (unlikely(__pyx_t_20 < 0)) __PYX_ERR(0, 51, __pyx_L9_except_error)\n      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;\n      if (unlikely(__pyx_t_20)) {\n\n        /* \"cython_od_mstar.pyx\":52\n *     except Exception as e:\n *         if str(e) == \"Out of Time\":\n *             raise OutOfTimeError()             # <<<<<<<<<<<<<<\n *         elif str(e) == \"No Solution\":\n *             raise NoSolutionError()\n */\n        __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_OutOfTimeError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 52, __pyx_L9_except_error)\n        __Pyx_GOTREF(__pyx_t_4);\n        __pyx_t_21 = NULL;\n        if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) {\n          __pyx_t_21 = PyMethod_GET_SELF(__pyx_t_4);\n          if (likely(__pyx_t_21)) {\n            PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);\n            __Pyx_INCREF(__pyx_t_21);\n            __Pyx_INCREF(function);\n            __Pyx_DECREF_SET(__pyx_t_4, function);\n          }\n        }\n        __pyx_t_3 = (__pyx_t_21) ? 
__Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_21) : __Pyx_PyObject_CallNoArg(__pyx_t_4);\n        __Pyx_XDECREF(__pyx_t_21); __pyx_t_21 = 0;\n        if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 52, __pyx_L9_except_error)\n        __Pyx_GOTREF(__pyx_t_3);\n        __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;\n        __Pyx_Raise(__pyx_t_3, 0, 0, 0);\n        __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;\n        __PYX_ERR(0, 52, __pyx_L9_except_error)\n\n        /* \"cython_od_mstar.pyx\":51\n *         return find_grid_path(obs, init_pos, goals, inflation, time_limit)\n *     except Exception as e:\n *         if str(e) == \"Out of Time\":             # <<<<<<<<<<<<<<\n *             raise OutOfTimeError()\n *         elif str(e) == \"No Solution\":\n */\n      }\n\n      /* \"cython_od_mstar.pyx\":53\n *         if str(e) == \"Out of Time\":\n *             raise OutOfTimeError()\n *         elif str(e) == \"No Solution\":             # <<<<<<<<<<<<<<\n *             raise NoSolutionError()\n *         else:\n */\n      __pyx_t_3 = __Pyx_PyObject_CallOneArg(((PyObject *)(&PyString_Type)), __pyx_v_e); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 53, __pyx_L9_except_error)\n      __Pyx_GOTREF(__pyx_t_3);\n      __pyx_t_20 = (__Pyx_PyString_Equals(__pyx_t_3, __pyx_kp_s_No_Solution, Py_EQ)); if (unlikely(__pyx_t_20 < 0)) __PYX_ERR(0, 53, __pyx_L9_except_error)\n      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;\n      if (unlikely(__pyx_t_20)) {\n\n        /* \"cython_od_mstar.pyx\":54\n *             raise OutOfTimeError()\n *         elif str(e) == \"No Solution\":\n *             raise NoSolutionError()             # <<<<<<<<<<<<<<\n *         else:\n *             raise e\n */\n        __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_NoSolutionError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 54, __pyx_L9_except_error)\n        __Pyx_GOTREF(__pyx_t_4);\n        __pyx_t_21 = NULL;\n        if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) {\n          __pyx_t_21 = 
PyMethod_GET_SELF(__pyx_t_4);\n          if (likely(__pyx_t_21)) {\n            PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);\n            __Pyx_INCREF(__pyx_t_21);\n            __Pyx_INCREF(function);\n            __Pyx_DECREF_SET(__pyx_t_4, function);\n          }\n        }\n        __pyx_t_3 = (__pyx_t_21) ? __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_21) : __Pyx_PyObject_CallNoArg(__pyx_t_4);\n        __Pyx_XDECREF(__pyx_t_21); __pyx_t_21 = 0;\n        if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 54, __pyx_L9_except_error)\n        __Pyx_GOTREF(__pyx_t_3);\n        __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;\n        __Pyx_Raise(__pyx_t_3, 0, 0, 0);\n        __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;\n        __PYX_ERR(0, 54, __pyx_L9_except_error)\n\n        /* \"cython_od_mstar.pyx\":53\n *         if str(e) == \"Out of Time\":\n *             raise OutOfTimeError()\n *         elif str(e) == \"No Solution\":             # <<<<<<<<<<<<<<\n *             raise NoSolutionError()\n *         else:\n */\n      }\n\n      /* \"cython_od_mstar.pyx\":56\n *             raise NoSolutionError()\n *         else:\n *             raise e             # <<<<<<<<<<<<<<\n */\n      /*else*/ {\n        __Pyx_Raise(__pyx_v_e, 0, 0, 0);\n        __PYX_ERR(0, 56, __pyx_L9_except_error)\n      }\n    }\n    goto __pyx_L9_except_error;\n    __pyx_L9_except_error:;\n\n    /* \"cython_od_mstar.pyx\":48\n *             temp.push_back(i == 1)\n *         obs.push_back(temp)\n *     try:             # <<<<<<<<<<<<<<\n *         return find_grid_path(obs, init_pos, goals, inflation, time_limit)\n *     except Exception as e:\n */\n    __Pyx_XGIVEREF(__pyx_t_13);\n    __Pyx_XGIVEREF(__pyx_t_14);\n    __Pyx_XGIVEREF(__pyx_t_15);\n    __Pyx_ExceptionReset(__pyx_t_13, __pyx_t_14, __pyx_t_15);\n    goto __pyx_L1_error;\n    __pyx_L11_try_return:;\n    __Pyx_XGIVEREF(__pyx_t_13);\n    __Pyx_XGIVEREF(__pyx_t_14);\n    __Pyx_XGIVEREF(__pyx_t_15);\n    __Pyx_ExceptionReset(__pyx_t_13, __pyx_t_14, 
__pyx_t_15);\n    goto __pyx_L0;\n  }\n\n  /* \"cython_od_mstar.pyx\":16\n *         double inflation, int time_limit) except +\n * \n * def find_path(world, init_pos, goals, inflation, time_limit):             # <<<<<<<<<<<<<<\n *     \"\"\"Finds a path invoking C++ implementation\n * \n */\n\n  /* function exit code */\n  __pyx_L1_error:;\n  __Pyx_XDECREF(__pyx_t_1);\n  __Pyx_XDECREF(__pyx_t_2);\n  __Pyx_XDECREF(__pyx_t_3);\n  __Pyx_XDECREF(__pyx_t_4);\n  __Pyx_XDECREF(__pyx_t_6);\n  __Pyx_XDECREF(__pyx_t_21);\n  __Pyx_AddTraceback(\"cpp_mstar.find_path\", __pyx_clineno, __pyx_lineno, __pyx_filename);\n  __pyx_r = NULL;\n  __pyx_L0:;\n  __Pyx_XDECREF(__pyx_v_resource);\n  __Pyx_XDECREF(__pyx_v_row);\n  __Pyx_XDECREF(__pyx_v_i);\n  __Pyx_XDECREF(__pyx_v_e);\n  __Pyx_XGIVEREF(__pyx_r);\n  __Pyx_RefNannyFinishContext();\n  return __pyx_r;\n}\n\n/* \"pair.from_py\":145\n * \n * @cname(\"__pyx_convert_pair_from_py_int__and_int\")\n * cdef pair[X,Y] __pyx_convert_pair_from_py_int__and_int(object o) except *:             # <<<<<<<<<<<<<<\n *     x, y = o\n *     return pair[X,Y](<X>x, <Y>y)\n */\n\nstatic std::pair<int,int>  __pyx_convert_pair_from_py_int__and_int(PyObject *__pyx_v_o) {\n  PyObject *__pyx_v_x = NULL;\n  PyObject *__pyx_v_y = NULL;\n  std::pair<int,int>  __pyx_r;\n  __Pyx_RefNannyDeclarations\n  PyObject *__pyx_t_1 = NULL;\n  PyObject *__pyx_t_2 = NULL;\n  PyObject *__pyx_t_3 = NULL;\n  PyObject *(*__pyx_t_4)(PyObject *);\n  int __pyx_t_5;\n  int __pyx_t_6;\n  int __pyx_lineno = 0;\n  const char *__pyx_filename = NULL;\n  int __pyx_clineno = 0;\n  __Pyx_RefNannySetupContext(\"__pyx_convert_pair_from_py_int__and_int\", 0);\n\n  /* \"pair.from_py\":146\n * @cname(\"__pyx_convert_pair_from_py_int__and_int\")\n * cdef pair[X,Y] __pyx_convert_pair_from_py_int__and_int(object o) except *:\n *     x, y = o             # <<<<<<<<<<<<<<\n *     return pair[X,Y](<X>x, <Y>y)\n * \n */\n  if ((likely(PyTuple_CheckExact(__pyx_v_o))) || (PyList_CheckExact(__pyx_v_o))) 
{\n    PyObject* sequence = __pyx_v_o;\n    Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);\n    if (unlikely(size != 2)) {\n      if (size > 2) __Pyx_RaiseTooManyValuesError(2);\n      else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);\n      __PYX_ERR(1, 146, __pyx_L1_error)\n    }\n    #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS\n    if (likely(PyTuple_CheckExact(sequence))) {\n      __pyx_t_1 = PyTuple_GET_ITEM(sequence, 0); \n      __pyx_t_2 = PyTuple_GET_ITEM(sequence, 1); \n    } else {\n      __pyx_t_1 = PyList_GET_ITEM(sequence, 0); \n      __pyx_t_2 = PyList_GET_ITEM(sequence, 1); \n    }\n    __Pyx_INCREF(__pyx_t_1);\n    __Pyx_INCREF(__pyx_t_2);\n    #else\n    __pyx_t_1 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 146, __pyx_L1_error)\n    __Pyx_GOTREF(__pyx_t_1);\n    __pyx_t_2 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 146, __pyx_L1_error)\n    __Pyx_GOTREF(__pyx_t_2);\n    #endif\n  } else {\n    Py_ssize_t index = -1;\n    __pyx_t_3 = PyObject_GetIter(__pyx_v_o); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 146, __pyx_L1_error)\n    __Pyx_GOTREF(__pyx_t_3);\n    __pyx_t_4 = Py_TYPE(__pyx_t_3)->tp_iternext;\n    index = 0; __pyx_t_1 = __pyx_t_4(__pyx_t_3); if (unlikely(!__pyx_t_1)) goto __pyx_L3_unpacking_failed;\n    __Pyx_GOTREF(__pyx_t_1);\n    index = 1; __pyx_t_2 = __pyx_t_4(__pyx_t_3); if (unlikely(!__pyx_t_2)) goto __pyx_L3_unpacking_failed;\n    __Pyx_GOTREF(__pyx_t_2);\n    if (__Pyx_IternextUnpackEndCheck(__pyx_t_4(__pyx_t_3), 2) < 0) __PYX_ERR(1, 146, __pyx_L1_error)\n    __pyx_t_4 = NULL;\n    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;\n    goto __pyx_L4_unpacking_done;\n    __pyx_L3_unpacking_failed:;\n    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;\n    __pyx_t_4 = NULL;\n    if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index);\n    __PYX_ERR(1, 146, __pyx_L1_error)\n    __pyx_L4_unpacking_done:;\n  }\n  __pyx_v_x = __pyx_t_1;\n  __pyx_t_1 = 0;\n  
__pyx_v_y = __pyx_t_2;\n  __pyx_t_2 = 0;\n\n  /* \"pair.from_py\":147\n * cdef pair[X,Y] __pyx_convert_pair_from_py_int__and_int(object o) except *:\n *     x, y = o\n *     return pair[X,Y](<X>x, <Y>y)             # <<<<<<<<<<<<<<\n * \n * \n */\n  __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_v_x); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 147, __pyx_L1_error)\n  __pyx_t_6 = __Pyx_PyInt_As_int(__pyx_v_y); if (unlikely((__pyx_t_6 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 147, __pyx_L1_error)\n  __pyx_r = std::pair<int,int> (((int)__pyx_t_5), ((int)__pyx_t_6));\n  goto __pyx_L0;\n\n  /* \"pair.from_py\":145\n * \n * @cname(\"__pyx_convert_pair_from_py_int__and_int\")\n * cdef pair[X,Y] __pyx_convert_pair_from_py_int__and_int(object o) except *:             # <<<<<<<<<<<<<<\n *     x, y = o\n *     return pair[X,Y](<X>x, <Y>y)\n */\n\n  /* function exit code */\n  __pyx_L1_error:;\n  __Pyx_XDECREF(__pyx_t_1);\n  __Pyx_XDECREF(__pyx_t_2);\n  __Pyx_XDECREF(__pyx_t_3);\n  __Pyx_AddTraceback(\"pair.from_py.__pyx_convert_pair_from_py_int__and_int\", __pyx_clineno, __pyx_lineno, __pyx_filename);\n  __Pyx_pretend_to_initialize(&__pyx_r);\n  __pyx_L0:;\n  __Pyx_XDECREF(__pyx_v_x);\n  __Pyx_XDECREF(__pyx_v_y);\n  __Pyx_RefNannyFinishContext();\n  return __pyx_r;\n}\n\n/* \"vector.from_py\":45\n * \n * @cname(\"__pyx_convert_vector_from_py_std_3a__3a_pair_3c_int_2c_int_3e___\")\n * cdef vector[X] __pyx_convert_vector_from_py_std_3a__3a_pair_3c_int_2c_int_3e___(object o) except *:             # <<<<<<<<<<<<<<\n *     cdef vector[X] v\n *     for item in o:\n */\n\nstatic std::vector<std::pair<int,int> >  __pyx_convert_vector_from_py_std_3a__3a_pair_3c_int_2c_int_3e___(PyObject *__pyx_v_o) {\n  std::vector<std::pair<int,int> >  __pyx_v_v;\n  PyObject *__pyx_v_item = NULL;\n  std::vector<std::pair<int,int> >  __pyx_r;\n  __Pyx_RefNannyDeclarations\n  PyObject *__pyx_t_1 = NULL;\n  Py_ssize_t __pyx_t_2;\n  PyObject *(*__pyx_t_3)(PyObject *);\n  PyObject 
*__pyx_t_4 = NULL;\n  std::pair<int,int>  __pyx_t_5;\n  int __pyx_lineno = 0;\n  const char *__pyx_filename = NULL;\n  int __pyx_clineno = 0;\n  __Pyx_RefNannySetupContext(\"__pyx_convert_vector_from_py_std_3a__3a_pair_3c_int_2c_int_3e___\", 0);\n\n  /* \"vector.from_py\":47\n * cdef vector[X] __pyx_convert_vector_from_py_std_3a__3a_pair_3c_int_2c_int_3e___(object o) except *:\n *     cdef vector[X] v\n *     for item in o:             # <<<<<<<<<<<<<<\n *         v.push_back(<X>item)\n *     return v\n */\n  if (likely(PyList_CheckExact(__pyx_v_o)) || PyTuple_CheckExact(__pyx_v_o)) {\n    __pyx_t_1 = __pyx_v_o; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0;\n    __pyx_t_3 = NULL;\n  } else {\n    __pyx_t_2 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_v_o); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 47, __pyx_L1_error)\n    __Pyx_GOTREF(__pyx_t_1);\n    __pyx_t_3 = Py_TYPE(__pyx_t_1)->tp_iternext; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 47, __pyx_L1_error)\n  }\n  for (;;) {\n    if (likely(!__pyx_t_3)) {\n      if (likely(PyList_CheckExact(__pyx_t_1))) {\n        if (__pyx_t_2 >= PyList_GET_SIZE(__pyx_t_1)) break;\n        #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS\n        __pyx_t_4 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_4); __pyx_t_2++; if (unlikely(0 < 0)) __PYX_ERR(1, 47, __pyx_L1_error)\n        #else\n        __pyx_t_4 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 47, __pyx_L1_error)\n        __Pyx_GOTREF(__pyx_t_4);\n        #endif\n      } else {\n        if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break;\n        #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS\n        __pyx_t_4 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_4); __pyx_t_2++; if (unlikely(0 < 0)) __PYX_ERR(1, 47, __pyx_L1_error)\n        #else\n        __pyx_t_4 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 47, __pyx_L1_error)\n        
__Pyx_GOTREF(__pyx_t_4);\n        #endif\n      }\n    } else {\n      __pyx_t_4 = __pyx_t_3(__pyx_t_1);\n      if (unlikely(!__pyx_t_4)) {\n        PyObject* exc_type = PyErr_Occurred();\n        if (exc_type) {\n          if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();\n          else __PYX_ERR(1, 47, __pyx_L1_error)\n        }\n        break;\n      }\n      __Pyx_GOTREF(__pyx_t_4);\n    }\n    __Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_4);\n    __pyx_t_4 = 0;\n\n    /* \"vector.from_py\":48\n *     cdef vector[X] v\n *     for item in o:\n *         v.push_back(<X>item)             # <<<<<<<<<<<<<<\n *     return v\n * \n */\n    __pyx_t_5 = __pyx_convert_pair_from_py_int__and_int(__pyx_v_item); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 48, __pyx_L1_error)\n    __pyx_v_v.push_back(((std::pair<int,int> )__pyx_t_5));\n\n    /* \"vector.from_py\":47\n * cdef vector[X] __pyx_convert_vector_from_py_std_3a__3a_pair_3c_int_2c_int_3e___(object o) except *:\n *     cdef vector[X] v\n *     for item in o:             # <<<<<<<<<<<<<<\n *         v.push_back(<X>item)\n *     return v\n */\n  }\n  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;\n\n  /* \"vector.from_py\":49\n *     for item in o:\n *         v.push_back(<X>item)\n *     return v             # <<<<<<<<<<<<<<\n * \n * \n */\n  __pyx_r = __pyx_v_v;\n  goto __pyx_L0;\n\n  /* \"vector.from_py\":45\n * \n * @cname(\"__pyx_convert_vector_from_py_std_3a__3a_pair_3c_int_2c_int_3e___\")\n * cdef vector[X] __pyx_convert_vector_from_py_std_3a__3a_pair_3c_int_2c_int_3e___(object o) except *:             # <<<<<<<<<<<<<<\n *     cdef vector[X] v\n *     for item in o:\n */\n\n  /* function exit code */\n  __pyx_L1_error:;\n  __Pyx_XDECREF(__pyx_t_1);\n  __Pyx_XDECREF(__pyx_t_4);\n  __Pyx_AddTraceback(\"vector.from_py.__pyx_convert_vector_from_py_std_3a__3a_pair_3c_int_2c_int_3e___\", __pyx_clineno, __pyx_lineno, __pyx_filename);\n  __Pyx_pretend_to_initialize(&__pyx_r);\n  
__pyx_L0:;\n  __Pyx_XDECREF(__pyx_v_item);\n  __Pyx_RefNannyFinishContext();\n  return __pyx_r;\n}\n\n/* \"pair.to_py\":158\n * \n * @cname(\"__pyx_convert_pair_to_py_int____int\")\n * cdef object __pyx_convert_pair_to_py_int____int(const pair[X,Y]& p):             # <<<<<<<<<<<<<<\n *     return p.first, p.second\n * \n */\n\nstatic PyObject *__pyx_convert_pair_to_py_int____int(std::pair<int,int>  const &__pyx_v_p) {\n  PyObject *__pyx_r = NULL;\n  __Pyx_RefNannyDeclarations\n  PyObject *__pyx_t_1 = NULL;\n  PyObject *__pyx_t_2 = NULL;\n  PyObject *__pyx_t_3 = NULL;\n  int __pyx_lineno = 0;\n  const char *__pyx_filename = NULL;\n  int __pyx_clineno = 0;\n  __Pyx_RefNannySetupContext(\"__pyx_convert_pair_to_py_int____int\", 0);\n\n  /* \"pair.to_py\":159\n * @cname(\"__pyx_convert_pair_to_py_int____int\")\n * cdef object __pyx_convert_pair_to_py_int____int(const pair[X,Y]& p):\n *     return p.first, p.second             # <<<<<<<<<<<<<<\n * \n * \n */\n  __Pyx_XDECREF(__pyx_r);\n  __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_p.first); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 159, __pyx_L1_error)\n  __Pyx_GOTREF(__pyx_t_1);\n  __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_p.second); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 159, __pyx_L1_error)\n  __Pyx_GOTREF(__pyx_t_2);\n  __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 159, __pyx_L1_error)\n  __Pyx_GOTREF(__pyx_t_3);\n  __Pyx_GIVEREF(__pyx_t_1);\n  PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);\n  __Pyx_GIVEREF(__pyx_t_2);\n  PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2);\n  __pyx_t_1 = 0;\n  __pyx_t_2 = 0;\n  __pyx_r = __pyx_t_3;\n  __pyx_t_3 = 0;\n  goto __pyx_L0;\n\n  /* \"pair.to_py\":158\n * \n * @cname(\"__pyx_convert_pair_to_py_int____int\")\n * cdef object __pyx_convert_pair_to_py_int____int(const pair[X,Y]& p):             # <<<<<<<<<<<<<<\n *     return p.first, p.second\n * \n */\n\n  /* function exit code */\n  __pyx_L1_error:;\n  __Pyx_XDECREF(__pyx_t_1);\n  __Pyx_XDECREF(__pyx_t_2);\n  
__Pyx_XDECREF(__pyx_t_3);\n  __Pyx_AddTraceback(\"pair.to_py.__pyx_convert_pair_to_py_int____int\", __pyx_clineno, __pyx_lineno, __pyx_filename);\n  __pyx_r = 0;\n  __pyx_L0:;\n  __Pyx_XGIVEREF(__pyx_r);\n  __Pyx_RefNannyFinishContext();\n  return __pyx_r;\n}\n\n/* \"vector.to_py\":60\n * \n * @cname(\"__pyx_convert_vector_to_py_std_3a__3a_pair_3c_int_2c_int_3e___\")\n * cdef object __pyx_convert_vector_to_py_std_3a__3a_pair_3c_int_2c_int_3e___(vector[X]& v):             # <<<<<<<<<<<<<<\n *     return [v[i] for i in range(v.size())]\n * \n */\n\nstatic PyObject *__pyx_convert_vector_to_py_std_3a__3a_pair_3c_int_2c_int_3e___(const std::vector<std::pair<int,int> >  &__pyx_v_v) {\n  size_t __pyx_v_i;\n  PyObject *__pyx_r = NULL;\n  __Pyx_RefNannyDeclarations\n  PyObject *__pyx_t_1 = NULL;\n  size_t __pyx_t_2;\n  size_t __pyx_t_3;\n  size_t __pyx_t_4;\n  PyObject *__pyx_t_5 = NULL;\n  int __pyx_lineno = 0;\n  const char *__pyx_filename = NULL;\n  int __pyx_clineno = 0;\n  __Pyx_RefNannySetupContext(\"__pyx_convert_vector_to_py_std_3a__3a_pair_3c_int_2c_int_3e___\", 0);\n\n  /* \"vector.to_py\":61\n * @cname(\"__pyx_convert_vector_to_py_std_3a__3a_pair_3c_int_2c_int_3e___\")\n * cdef object __pyx_convert_vector_to_py_std_3a__3a_pair_3c_int_2c_int_3e___(vector[X]& v):\n *     return [v[i] for i in range(v.size())]             # <<<<<<<<<<<<<<\n * \n * \n */\n  __Pyx_XDECREF(__pyx_r);\n  __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 61, __pyx_L1_error)\n  __Pyx_GOTREF(__pyx_t_1);\n  __pyx_t_2 = __pyx_v_v.size();\n  __pyx_t_3 = __pyx_t_2;\n  for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {\n    __pyx_v_i = __pyx_t_4;\n    __pyx_t_5 = __pyx_convert_pair_to_py_int____int((__pyx_v_v[__pyx_v_i])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 61, __pyx_L1_error)\n    __Pyx_GOTREF(__pyx_t_5);\n    if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(1, 61, __pyx_L1_error)\n    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;\n  }\n  
__pyx_r = __pyx_t_1;\n  __pyx_t_1 = 0;\n  goto __pyx_L0;\n\n  /* \"vector.to_py\":60\n * \n * @cname(\"__pyx_convert_vector_to_py_std_3a__3a_pair_3c_int_2c_int_3e___\")\n * cdef object __pyx_convert_vector_to_py_std_3a__3a_pair_3c_int_2c_int_3e___(vector[X]& v):             # <<<<<<<<<<<<<<\n *     return [v[i] for i in range(v.size())]\n * \n */\n\n  /* function exit code */\n  __pyx_L1_error:;\n  __Pyx_XDECREF(__pyx_t_1);\n  __Pyx_XDECREF(__pyx_t_5);\n  __Pyx_AddTraceback(\"vector.to_py.__pyx_convert_vector_to_py_std_3a__3a_pair_3c_int_2c_int_3e___\", __pyx_clineno, __pyx_lineno, __pyx_filename);\n  __pyx_r = 0;\n  __pyx_L0:;\n  __Pyx_XGIVEREF(__pyx_r);\n  __Pyx_RefNannyFinishContext();\n  return __pyx_r;\n}\n\nstatic PyObject *__pyx_convert_vector_to_py_std_3a__3a_vector_3c_std_3a__3a_pair_3c_int_2c_int_3e____3e___(const std::vector<std::vector<std::pair<int,int> > >  &__pyx_v_v) {\n  size_t __pyx_v_i;\n  PyObject *__pyx_r = NULL;\n  __Pyx_RefNannyDeclarations\n  PyObject *__pyx_t_1 = NULL;\n  size_t __pyx_t_2;\n  size_t __pyx_t_3;\n  size_t __pyx_t_4;\n  PyObject *__pyx_t_5 = NULL;\n  int __pyx_lineno = 0;\n  const char *__pyx_filename = NULL;\n  int __pyx_clineno = 0;\n  __Pyx_RefNannySetupContext(\"__pyx_convert_vector_to_py_std_3a__3a_vector_3c_std_3a__3a_pair_3c_int_2c_int_3e____3e___\", 0);\n\n  /* \"vector.to_py\":61\n * @cname(\"__pyx_convert_vector_to_py_std_3a__3a_vector_3c_std_3a__3a_pair_3c_int_2c_int_3e____3e___\")\n * cdef object __pyx_convert_vector_to_py_std_3a__3a_vector_3c_std_3a__3a_pair_3c_int_2c_int_3e____3e___(vector[X]& v):\n *     return [v[i] for i in range(v.size())]             # <<<<<<<<<<<<<<\n * \n * \n */\n  __Pyx_XDECREF(__pyx_r);\n  __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 61, __pyx_L1_error)\n  __Pyx_GOTREF(__pyx_t_1);\n  __pyx_t_2 = __pyx_v_v.size();\n  __pyx_t_3 = __pyx_t_2;\n  for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {\n    __pyx_v_i = __pyx_t_4;\n    __pyx_t_5 = 
__pyx_convert_vector_to_py_std_3a__3a_pair_3c_int_2c_int_3e___((__pyx_v_v[__pyx_v_i])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 61, __pyx_L1_error)\n    __Pyx_GOTREF(__pyx_t_5);\n    if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(1, 61, __pyx_L1_error)\n    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;\n  }\n  __pyx_r = __pyx_t_1;\n  __pyx_t_1 = 0;\n  goto __pyx_L0;\n\n  /* \"vector.to_py\":60\n * \n * @cname(\"__pyx_convert_vector_to_py_std_3a__3a_vector_3c_std_3a__3a_pair_3c_int_2c_int_3e____3e___\")\n * cdef object __pyx_convert_vector_to_py_std_3a__3a_vector_3c_std_3a__3a_pair_3c_int_2c_int_3e____3e___(vector[X]& v):             # <<<<<<<<<<<<<<\n *     return [v[i] for i in range(v.size())]\n * \n */\n\n  /* function exit code */\n  __pyx_L1_error:;\n  __Pyx_XDECREF(__pyx_t_1);\n  __Pyx_XDECREF(__pyx_t_5);\n  __Pyx_AddTraceback(\"vector.to_py.__pyx_convert_vector_to_py_std_3a__3a_vector_3c_std_3a__3a_pair_3c_int_2c_int_3e____3e___\", __pyx_clineno, __pyx_lineno, __pyx_filename);\n  __pyx_r = 0;\n  __pyx_L0:;\n  __Pyx_XGIVEREF(__pyx_r);\n  __Pyx_RefNannyFinishContext();\n  return __pyx_r;\n}\n\nstatic PyMethodDef __pyx_methods[] = {\n  {0, 0, 0, 0}\n};\n\n#if PY_MAJOR_VERSION >= 3\n#if CYTHON_PEP489_MULTI_PHASE_INIT\nstatic PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/\nstatic int __pyx_pymod_exec_cpp_mstar(PyObject* module); /*proto*/\nstatic PyModuleDef_Slot __pyx_moduledef_slots[] = {\n  {Py_mod_create, (void*)__pyx_pymod_create},\n  {Py_mod_exec, (void*)__pyx_pymod_exec_cpp_mstar},\n  {0, NULL}\n};\n#endif\n\nstatic struct PyModuleDef __pyx_moduledef = {\n    PyModuleDef_HEAD_INIT,\n    \"cpp_mstar\",\n    0, /* m_doc */\n  #if CYTHON_PEP489_MULTI_PHASE_INIT\n    0, /* m_size */\n  #else\n    -1, /* m_size */\n  #endif\n    __pyx_methods /* m_methods */,\n  #if CYTHON_PEP489_MULTI_PHASE_INIT\n    __pyx_moduledef_slots, /* m_slots */\n  #else\n    NULL, /* m_reload */\n  #endif\n    NULL, /* m_traverse 
*/\n    NULL, /* m_clear */\n    NULL /* m_free */\n};\n#endif\n#ifndef CYTHON_SMALL_CODE\n#if defined(__clang__)\n    #define CYTHON_SMALL_CODE\n#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))\n    #define CYTHON_SMALL_CODE __attribute__((cold))\n#else\n    #define CYTHON_SMALL_CODE\n#endif\n#endif\n\nstatic __Pyx_StringTabEntry __pyx_string_tab[] = {\n  {&__pyx_n_s_NoSolutionError, __pyx_k_NoSolutionError, sizeof(__pyx_k_NoSolutionError), 0, 0, 1, 1},\n  {&__pyx_kp_s_No_Solution, __pyx_k_No_Solution, sizeof(__pyx_k_No_Solution), 0, 0, 1, 0},\n  {&__pyx_n_s_OutOfTimeError, __pyx_k_OutOfTimeError, sizeof(__pyx_k_OutOfTimeError), 0, 0, 1, 1},\n  {&__pyx_kp_s_Out_of_Time, __pyx_k_Out_of_Time, sizeof(__pyx_k_Out_of_Time), 0, 0, 1, 0},\n  {&__pyx_n_s_RLIMIT_AS, __pyx_k_RLIMIT_AS, sizeof(__pyx_k_RLIMIT_AS), 0, 0, 1, 1},\n  {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1},\n  {&__pyx_n_s_cpp_mstar, __pyx_k_cpp_mstar, sizeof(__pyx_k_cpp_mstar), 0, 0, 1, 1},\n  {&__pyx_kp_s_cython_od_mstar_pyx, __pyx_k_cython_od_mstar_pyx, sizeof(__pyx_k_cython_od_mstar_pyx), 0, 0, 1, 0},\n  {&__pyx_n_s_e, __pyx_k_e, sizeof(__pyx_k_e), 0, 0, 1, 1},\n  {&__pyx_n_s_find_path, __pyx_k_find_path, sizeof(__pyx_k_find_path), 0, 0, 1, 1},\n  {&__pyx_n_s_goals, __pyx_k_goals, sizeof(__pyx_k_goals), 0, 0, 1, 1},\n  {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1},\n  {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1},\n  {&__pyx_n_s_inflation, __pyx_k_inflation, sizeof(__pyx_k_inflation), 0, 0, 1, 1},\n  {&__pyx_n_s_init_pos, __pyx_k_init_pos, sizeof(__pyx_k_init_pos), 0, 0, 1, 1},\n  {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1},\n  {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1},\n  {&__pyx_n_s_obs, __pyx_k_obs, sizeof(__pyx_k_obs), 0, 0, 1, 1},\n  {&__pyx_n_s_od_mstar3_col_set_addition, __pyx_k_od_mstar3_col_set_addition, 
sizeof(__pyx_k_od_mstar3_col_set_addition), 0, 0, 1, 1},\n  {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1},\n  {&__pyx_n_s_resource, __pyx_k_resource, sizeof(__pyx_k_resource), 0, 0, 1, 1},\n  {&__pyx_n_s_row, __pyx_k_row, sizeof(__pyx_k_row), 0, 0, 1, 1},\n  {&__pyx_n_s_setrlimit, __pyx_k_setrlimit, sizeof(__pyx_k_setrlimit), 0, 0, 1, 1},\n  {&__pyx_n_s_temp, __pyx_k_temp, sizeof(__pyx_k_temp), 0, 0, 1, 1},\n  {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1},\n  {&__pyx_n_s_time_limit, __pyx_k_time_limit, sizeof(__pyx_k_time_limit), 0, 0, 1, 1},\n  {&__pyx_n_s_world, __pyx_k_world, sizeof(__pyx_k_world), 0, 0, 1, 1},\n  {0, 0, 0, 0, 0, 0, 0}\n};\nstatic CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) {\n  __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(1, 61, __pyx_L1_error)\n  return 0;\n  __pyx_L1_error:;\n  return -1;\n}\n\nstatic CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) {\n  __Pyx_RefNannyDeclarations\n  __Pyx_RefNannySetupContext(\"__Pyx_InitCachedConstants\", 0);\n\n  /* \"cython_od_mstar.pyx\":37\n * \n *     import resource\n *     resource.setrlimit(resource.RLIMIT_AS, (2**33,2**33)) # 8Gb             # <<<<<<<<<<<<<<\n * \n *     # convert to boolean.  
For some reason coercion doesn't seem to\n */\n  __pyx_tuple_ = PyTuple_Pack(2, __pyx_int_8589934592, __pyx_int_8589934592); if (unlikely(!__pyx_tuple_)) __PYX_ERR(0, 37, __pyx_L1_error)\n  __Pyx_GOTREF(__pyx_tuple_);\n  __Pyx_GIVEREF(__pyx_tuple_);\n\n  /* \"cython_od_mstar.pyx\":16\n *         double inflation, int time_limit) except +\n * \n * def find_path(world, init_pos, goals, inflation, time_limit):             # <<<<<<<<<<<<<<\n *     \"\"\"Finds a path invoking C++ implementation\n * \n */\n  __pyx_tuple__2 = PyTuple_Pack(11, __pyx_n_s_world, __pyx_n_s_init_pos, __pyx_n_s_goals, __pyx_n_s_inflation, __pyx_n_s_time_limit, __pyx_n_s_resource, __pyx_n_s_obs, __pyx_n_s_temp, __pyx_n_s_row, __pyx_n_s_i, __pyx_n_s_e); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(0, 16, __pyx_L1_error)\n  __Pyx_GOTREF(__pyx_tuple__2);\n  __Pyx_GIVEREF(__pyx_tuple__2);\n  __pyx_codeobj__3 = (PyObject*)__Pyx_PyCode_New(5, 0, 11, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__2, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_cython_od_mstar_pyx, __pyx_n_s_find_path, 16, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__3)) __PYX_ERR(0, 16, __pyx_L1_error)\n  __Pyx_RefNannyFinishContext();\n  return 0;\n  __pyx_L1_error:;\n  __Pyx_RefNannyFinishContext();\n  return -1;\n}\n\nstatic CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) {\n  if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error);\n  __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error)\n  __pyx_int_8589934592 = PyInt_FromString((char *)\"8589934592\", 0, 0); if (unlikely(!__pyx_int_8589934592)) __PYX_ERR(0, 1, __pyx_L1_error)\n  return 0;\n  __pyx_L1_error:;\n  return -1;\n}\n\nstatic CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/\nstatic CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/\nstatic CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); 
/*proto*/\nstatic CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/\nstatic CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/\nstatic CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/\nstatic CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/\n\nstatic int __Pyx_modinit_global_init_code(void) {\n  __Pyx_RefNannyDeclarations\n  __Pyx_RefNannySetupContext(\"__Pyx_modinit_global_init_code\", 0);\n  /*--- Global init code ---*/\n  __Pyx_RefNannyFinishContext();\n  return 0;\n}\n\nstatic int __Pyx_modinit_variable_export_code(void) {\n  __Pyx_RefNannyDeclarations\n  __Pyx_RefNannySetupContext(\"__Pyx_modinit_variable_export_code\", 0);\n  /*--- Variable export code ---*/\n  __Pyx_RefNannyFinishContext();\n  return 0;\n}\n\nstatic int __Pyx_modinit_function_export_code(void) {\n  __Pyx_RefNannyDeclarations\n  __Pyx_RefNannySetupContext(\"__Pyx_modinit_function_export_code\", 0);\n  /*--- Function export code ---*/\n  __Pyx_RefNannyFinishContext();\n  return 0;\n}\n\nstatic int __Pyx_modinit_type_init_code(void) {\n  __Pyx_RefNannyDeclarations\n  __Pyx_RefNannySetupContext(\"__Pyx_modinit_type_init_code\", 0);\n  /*--- Type init code ---*/\n  __Pyx_RefNannyFinishContext();\n  return 0;\n}\n\nstatic int __Pyx_modinit_type_import_code(void) {\n  __Pyx_RefNannyDeclarations\n  __Pyx_RefNannySetupContext(\"__Pyx_modinit_type_import_code\", 0);\n  /*--- Type import code ---*/\n  __Pyx_RefNannyFinishContext();\n  return 0;\n}\n\nstatic int __Pyx_modinit_variable_import_code(void) {\n  __Pyx_RefNannyDeclarations\n  __Pyx_RefNannySetupContext(\"__Pyx_modinit_variable_import_code\", 0);\n  /*--- Variable import code ---*/\n  __Pyx_RefNannyFinishContext();\n  return 0;\n}\n\nstatic int __Pyx_modinit_function_import_code(void) {\n  __Pyx_RefNannyDeclarations\n  __Pyx_RefNannySetupContext(\"__Pyx_modinit_function_import_code\", 0);\n  /*--- Function import code ---*/\n  
__Pyx_RefNannyFinishContext();\n  return 0;\n}\n\n\n#ifndef CYTHON_NO_PYINIT_EXPORT\n#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC\n#elif PY_MAJOR_VERSION < 3\n#ifdef __cplusplus\n#define __Pyx_PyMODINIT_FUNC extern \"C\" void\n#else\n#define __Pyx_PyMODINIT_FUNC void\n#endif\n#else\n#ifdef __cplusplus\n#define __Pyx_PyMODINIT_FUNC extern \"C\" PyObject *\n#else\n#define __Pyx_PyMODINIT_FUNC PyObject *\n#endif\n#endif\n\n\n#if PY_MAJOR_VERSION < 3\n__Pyx_PyMODINIT_FUNC initcpp_mstar(void) CYTHON_SMALL_CODE; /*proto*/\n__Pyx_PyMODINIT_FUNC initcpp_mstar(void)\n#else\n__Pyx_PyMODINIT_FUNC PyInit_cpp_mstar(void) CYTHON_SMALL_CODE; /*proto*/\n__Pyx_PyMODINIT_FUNC PyInit_cpp_mstar(void)\n#if CYTHON_PEP489_MULTI_PHASE_INIT\n{\n  return PyModuleDef_Init(&__pyx_moduledef);\n}\nstatic CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) {\n    #if PY_VERSION_HEX >= 0x030700A1\n    static PY_INT64_T main_interpreter_id = -1;\n    PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp);\n    if (main_interpreter_id == -1) {\n        main_interpreter_id = current_id;\n        return (unlikely(current_id == -1)) ? 
-1 : 0;\n    } else if (unlikely(main_interpreter_id != current_id))\n    #else\n    static PyInterpreterState *main_interpreter = NULL;\n    PyInterpreterState *current_interpreter = PyThreadState_Get()->interp;\n    if (!main_interpreter) {\n        main_interpreter = current_interpreter;\n    } else if (unlikely(main_interpreter != current_interpreter))\n    #endif\n    {\n        PyErr_SetString(\n            PyExc_ImportError,\n            \"Interpreter change detected - this module can only be loaded into one interpreter per process.\");\n        return -1;\n    }\n    return 0;\n}\nstatic CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) {\n    PyObject *value = PyObject_GetAttrString(spec, from_name);\n    int result = 0;\n    if (likely(value)) {\n        if (allow_none || value != Py_None) {\n            result = PyDict_SetItemString(moddict, to_name, value);\n        }\n        Py_DECREF(value);\n    } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) {\n        PyErr_Clear();\n    } else {\n        result = -1;\n    }\n    return result;\n}\nstatic CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) {\n    PyObject *module = NULL, *moddict, *modname;\n    if (__Pyx_check_single_interpreter())\n        return NULL;\n    if (__pyx_m)\n        return __Pyx_NewRef(__pyx_m);\n    modname = PyObject_GetAttrString(spec, \"name\");\n    if (unlikely(!modname)) goto bad;\n    module = PyModule_NewObject(modname);\n    Py_DECREF(modname);\n    if (unlikely(!module)) goto bad;\n    moddict = PyModule_GetDict(module);\n    if (unlikely(!moddict)) goto bad;\n    if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, \"loader\", \"__loader__\", 1) < 0)) goto bad;\n    if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, \"origin\", \"__file__\", 1) < 0)) goto bad;\n    if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, 
\"parent\", \"__package__\", 1) < 0)) goto bad;\n    if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, \"submodule_search_locations\", \"__path__\", 0) < 0)) goto bad;\n    return module;\nbad:\n    Py_XDECREF(module);\n    return NULL;\n}\n\n\nstatic CYTHON_SMALL_CODE int __pyx_pymod_exec_cpp_mstar(PyObject *__pyx_pyinit_module)\n#endif\n#endif\n{\n  PyObject *__pyx_t_1 = NULL;\n  PyObject *__pyx_t_2 = NULL;\n  int __pyx_lineno = 0;\n  const char *__pyx_filename = NULL;\n  int __pyx_clineno = 0;\n  __Pyx_RefNannyDeclarations\n  #if CYTHON_PEP489_MULTI_PHASE_INIT\n  if (__pyx_m) {\n    if (__pyx_m == __pyx_pyinit_module) return 0;\n    PyErr_SetString(PyExc_RuntimeError, \"Module 'cpp_mstar' has already been imported. Re-initialisation is not supported.\");\n    return -1;\n  }\n  #elif PY_MAJOR_VERSION >= 3\n  if (__pyx_m) return __Pyx_NewRef(__pyx_m);\n  #endif\n  #if CYTHON_REFNANNY\n__Pyx_RefNanny = __Pyx_RefNannyImportAPI(\"refnanny\");\nif (!__Pyx_RefNanny) {\n  PyErr_Clear();\n  __Pyx_RefNanny = __Pyx_RefNannyImportAPI(\"Cython.Runtime.refnanny\");\n  if (!__Pyx_RefNanny)\n      Py_FatalError(\"failed to import 'refnanny' module\");\n}\n#endif\n  __Pyx_RefNannySetupContext(\"__Pyx_PyMODINIT_FUNC PyInit_cpp_mstar(void)\", 0);\n  if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error)\n  #ifdef __Pxy_PyFrame_Initialize_Offsets\n  __Pxy_PyFrame_Initialize_Offsets();\n  #endif\n  __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error)\n  __pyx_empty_bytes = PyBytes_FromStringAndSize(\"\", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error)\n  __pyx_empty_unicode = PyUnicode_FromStringAndSize(\"\", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error)\n  #ifdef __Pyx_CyFunction_USED\n  if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)\n  #endif\n  #ifdef __Pyx_FusedFunction_USED\n  if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)\n 
 #endif\n  #ifdef __Pyx_Coroutine_USED\n  if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)\n  #endif\n  #ifdef __Pyx_Generator_USED\n  if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)\n  #endif\n  #ifdef __Pyx_AsyncGen_USED\n  if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)\n  #endif\n  #ifdef __Pyx_StopAsyncIteration_USED\n  if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)\n  #endif\n  /*--- Library function declarations ---*/\n  /*--- Threads initialization code ---*/\n  #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS\n  #ifdef WITH_THREAD /* Python build with threading support? */\n  PyEval_InitThreads();\n  #endif\n  #endif\n  /*--- Module creation code ---*/\n  #if CYTHON_PEP489_MULTI_PHASE_INIT\n  __pyx_m = __pyx_pyinit_module;\n  Py_INCREF(__pyx_m);\n  #else\n  #if PY_MAJOR_VERSION < 3\n  __pyx_m = Py_InitModule4(\"cpp_mstar\", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);\n  #else\n  __pyx_m = PyModule_Create(&__pyx_moduledef);\n  #endif\n  if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error)\n  #endif\n  __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error)\n  Py_INCREF(__pyx_d);\n  __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error)\n  Py_INCREF(__pyx_b);\n  __pyx_cython_runtime = PyImport_AddModule((char *) \"cython_runtime\"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error)\n  Py_INCREF(__pyx_cython_runtime);\n  if (PyObject_SetAttrString(__pyx_m, \"__builtins__\", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error);\n  /*--- Initialize various global constants etc. 
---*/\n  if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error)\n  #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)\n  if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error)\n  #endif\n  if (__pyx_module_is_main_cpp_mstar) {\n    if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error)\n  }\n  #if PY_MAJOR_VERSION >= 3\n  {\n    PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error)\n    if (!PyDict_GetItemString(modules, \"cpp_mstar\")) {\n      if (unlikely(PyDict_SetItemString(modules, \"cpp_mstar\", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error)\n    }\n  }\n  #endif\n  /*--- Builtin init code ---*/\n  if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error)\n  /*--- Constants init code ---*/\n  if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error)\n  /*--- Global type/function init code ---*/\n  (void)__Pyx_modinit_global_init_code();\n  (void)__Pyx_modinit_variable_export_code();\n  (void)__Pyx_modinit_function_export_code();\n  (void)__Pyx_modinit_type_init_code();\n  (void)__Pyx_modinit_type_import_code();\n  (void)__Pyx_modinit_variable_import_code();\n  (void)__Pyx_modinit_function_import_code();\n  /*--- Execution code ---*/\n  #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)\n  if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error)\n  #endif\n\n  /* \"cython_od_mstar.pyx\":7\n * from libcpp.pair cimport pair\n * \n * from od_mstar3.col_set_addition import OutOfTimeError, NoSolutionError             # <<<<<<<<<<<<<<\n * \n * cdef extern from \"grid_planning.hpp\" namespace \"mstar\":\n */\n  __pyx_t_1 = PyList_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7, __pyx_L1_error)\n  __Pyx_GOTREF(__pyx_t_1);\n  __Pyx_INCREF(__pyx_n_s_OutOfTimeError);\n  __Pyx_GIVEREF(__pyx_n_s_OutOfTimeError);\n  PyList_SET_ITEM(__pyx_t_1, 0, 
__pyx_n_s_OutOfTimeError);\n  __Pyx_INCREF(__pyx_n_s_NoSolutionError);\n  __Pyx_GIVEREF(__pyx_n_s_NoSolutionError);\n  PyList_SET_ITEM(__pyx_t_1, 1, __pyx_n_s_NoSolutionError);\n  __pyx_t_2 = __Pyx_Import(__pyx_n_s_od_mstar3_col_set_addition, __pyx_t_1, -1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7, __pyx_L1_error)\n  __Pyx_GOTREF(__pyx_t_2);\n  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;\n  __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_OutOfTimeError); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7, __pyx_L1_error)\n  __Pyx_GOTREF(__pyx_t_1);\n  if (PyDict_SetItem(__pyx_d, __pyx_n_s_OutOfTimeError, __pyx_t_1) < 0) __PYX_ERR(0, 7, __pyx_L1_error)\n  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;\n  __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_NoSolutionError); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7, __pyx_L1_error)\n  __Pyx_GOTREF(__pyx_t_1);\n  if (PyDict_SetItem(__pyx_d, __pyx_n_s_NoSolutionError, __pyx_t_1) < 0) __PYX_ERR(0, 7, __pyx_L1_error)\n  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;\n  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;\n\n  /* \"cython_od_mstar.pyx\":16\n *         double inflation, int time_limit) except +\n * \n * def find_path(world, init_pos, goals, inflation, time_limit):             # <<<<<<<<<<<<<<\n *     \"\"\"Finds a path invoking C++ implementation\n * \n */\n  __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_9cpp_mstar_1find_path, NULL, __pyx_n_s_cpp_mstar); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16, __pyx_L1_error)\n  __Pyx_GOTREF(__pyx_t_2);\n  if (PyDict_SetItem(__pyx_d, __pyx_n_s_find_path, __pyx_t_2) < 0) __PYX_ERR(0, 16, __pyx_L1_error)\n  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;\n\n  /* \"cython_od_mstar.pyx\":1\n * # distutils: language = c++             # <<<<<<<<<<<<<<\n * # distutils: sources = policy.cpp col_checker.cpp od_mstar.cpp grid_policy.cpp grid_planning.cpp\n * from libcpp cimport bool\n */\n  __pyx_t_2 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1, __pyx_L1_error)\n  __Pyx_GOTREF(__pyx_t_2);\n  if 
(PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_2) < 0) __PYX_ERR(0, 1, __pyx_L1_error)\n  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;\n\n  /* \"vector.to_py\":60\n * \n * @cname(\"__pyx_convert_vector_to_py_std_3a__3a_vector_3c_std_3a__3a_pair_3c_int_2c_int_3e____3e___\")\n * cdef object __pyx_convert_vector_to_py_std_3a__3a_vector_3c_std_3a__3a_pair_3c_int_2c_int_3e____3e___(vector[X]& v):             # <<<<<<<<<<<<<<\n *     return [v[i] for i in range(v.size())]\n * \n */\n\n  /*--- Wrapped vars code ---*/\n\n  goto __pyx_L0;\n  __pyx_L1_error:;\n  __Pyx_XDECREF(__pyx_t_1);\n  __Pyx_XDECREF(__pyx_t_2);\n  if (__pyx_m) {\n    if (__pyx_d) {\n      __Pyx_AddTraceback(\"init cpp_mstar\", __pyx_clineno, __pyx_lineno, __pyx_filename);\n    }\n    Py_CLEAR(__pyx_m);\n  } else if (!PyErr_Occurred()) {\n    PyErr_SetString(PyExc_ImportError, \"init cpp_mstar\");\n  }\n  __pyx_L0:;\n  __Pyx_RefNannyFinishContext();\n  #if CYTHON_PEP489_MULTI_PHASE_INIT\n  return (__pyx_m != NULL) ? 0 : -1;\n  #elif PY_MAJOR_VERSION >= 3\n  return __pyx_m;\n  #else\n  return;\n  #endif\n}\n\n/* --- Runtime support code --- */\n/* Refnanny */\n#if CYTHON_REFNANNY\nstatic __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {\n    PyObject *m = NULL, *p = NULL;\n    void *r = NULL;\n    m = PyImport_ImportModule(modname);\n    if (!m) goto end;\n    p = PyObject_GetAttrString(m, \"RefNannyAPI\");\n    if (!p) goto end;\n    r = PyLong_AsVoidPtr(p);\nend:\n    Py_XDECREF(p);\n    Py_XDECREF(m);\n    return (__Pyx_RefNannyAPIStruct *)r;\n}\n#endif\n\n/* RaiseArgTupleInvalid */\nstatic void __Pyx_RaiseArgtupleInvalid(\n    const char* func_name,\n    int exact,\n    Py_ssize_t num_min,\n    Py_ssize_t num_max,\n    Py_ssize_t num_found)\n{\n    Py_ssize_t num_expected;\n    const char *more_or_less;\n    if (num_found < num_min) {\n        num_expected = num_min;\n        more_or_less = \"at least\";\n    } else {\n        num_expected = num_max;\n        more_or_less = \"at 
most\";\n    }\n    if (exact) {\n        more_or_less = \"exactly\";\n    }\n    PyErr_Format(PyExc_TypeError,\n                 \"%.200s() takes %.8s %\" CYTHON_FORMAT_SSIZE_T \"d positional argument%.1s (%\" CYTHON_FORMAT_SSIZE_T \"d given)\",\n                 func_name, more_or_less, num_expected,\n                 (num_expected == 1) ? \"\" : \"s\", num_found);\n}\n\n/* RaiseDoubleKeywords */\nstatic void __Pyx_RaiseDoubleKeywordsError(\n    const char* func_name,\n    PyObject* kw_name)\n{\n    PyErr_Format(PyExc_TypeError,\n        #if PY_MAJOR_VERSION >= 3\n        \"%s() got multiple values for keyword argument '%U'\", func_name, kw_name);\n        #else\n        \"%s() got multiple values for keyword argument '%s'\", func_name,\n        PyString_AsString(kw_name));\n        #endif\n}\n\n/* ParseKeywords */\nstatic int __Pyx_ParseOptionalKeywords(\n    PyObject *kwds,\n    PyObject **argnames[],\n    PyObject *kwds2,\n    PyObject *values[],\n    Py_ssize_t num_pos_args,\n    const char* function_name)\n{\n    PyObject *key = 0, *value = 0;\n    Py_ssize_t pos = 0;\n    PyObject*** name;\n    PyObject*** first_kw_arg = argnames + num_pos_args;\n    while (PyDict_Next(kwds, &pos, &key, &value)) {\n        name = first_kw_arg;\n        while (*name && (**name != key)) name++;\n        if (*name) {\n            values[name-argnames] = value;\n            continue;\n        }\n        name = first_kw_arg;\n        #if PY_MAJOR_VERSION < 3\n        if (likely(PyString_Check(key))) {\n            while (*name) {\n                if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key))\n                        && _PyString_Eq(**name, key)) {\n                    values[name-argnames] = value;\n                    break;\n                }\n                name++;\n            }\n            if (*name) continue;\n            else {\n                PyObject*** argname = argnames;\n                while (argname != first_kw_arg) {\n    
                if ((**argname == key) || (\n                            (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key))\n                             && _PyString_Eq(**argname, key))) {\n                        goto arg_passed_twice;\n                    }\n                    argname++;\n                }\n            }\n        } else\n        #endif\n        if (likely(PyUnicode_Check(key))) {\n            while (*name) {\n                int cmp = (**name == key) ? 0 :\n                #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3\n                    (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :\n                #endif\n                    PyUnicode_Compare(**name, key);\n                if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;\n                if (cmp == 0) {\n                    values[name-argnames] = value;\n                    break;\n                }\n                name++;\n            }\n            if (*name) continue;\n            else {\n                PyObject*** argname = argnames;\n                while (argname != first_kw_arg) {\n                    int cmp = (**argname == key) ? 0 :\n                    #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3\n                        (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 
1 :\n                    #endif\n                        PyUnicode_Compare(**argname, key);\n                    if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;\n                    if (cmp == 0) goto arg_passed_twice;\n                    argname++;\n                }\n            }\n        } else\n            goto invalid_keyword_type;\n        if (kwds2) {\n            if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;\n        } else {\n            goto invalid_keyword;\n        }\n    }\n    return 0;\narg_passed_twice:\n    __Pyx_RaiseDoubleKeywordsError(function_name, key);\n    goto bad;\ninvalid_keyword_type:\n    PyErr_Format(PyExc_TypeError,\n        \"%.200s() keywords must be strings\", function_name);\n    goto bad;\ninvalid_keyword:\n    PyErr_Format(PyExc_TypeError,\n    #if PY_MAJOR_VERSION < 3\n        \"%.200s() got an unexpected keyword argument '%.200s'\",\n        function_name, PyString_AsString(key));\n    #else\n        \"%s() got an unexpected keyword argument '%U'\",\n        function_name, key);\n    #endif\nbad:\n    return -1;\n}\n\n/* PyObjectGetAttrStr */\n#if CYTHON_USE_TYPE_SLOTS\nstatic CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {\n    PyTypeObject* tp = Py_TYPE(obj);\n    if (likely(tp->tp_getattro))\n        return tp->tp_getattro(obj, attr_name);\n#if PY_MAJOR_VERSION < 3\n    if (likely(tp->tp_getattr))\n        return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));\n#endif\n    return PyObject_GetAttr(obj, attr_name);\n}\n#endif\n\n/* Import */\nstatic PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {\n    PyObject *empty_list = 0;\n    PyObject *module = 0;\n    PyObject *global_dict = 0;\n    PyObject *empty_dict = 0;\n    PyObject *list;\n    #if PY_MAJOR_VERSION < 3\n    PyObject *py_import;\n    py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);\n    if (!py_import)\n        goto bad;\n    #endif\n    if (from_list)\n       
 list = from_list;\n    else {\n        empty_list = PyList_New(0);\n        if (!empty_list)\n            goto bad;\n        list = empty_list;\n    }\n    global_dict = PyModule_GetDict(__pyx_m);\n    if (!global_dict)\n        goto bad;\n    empty_dict = PyDict_New();\n    if (!empty_dict)\n        goto bad;\n    {\n        #if PY_MAJOR_VERSION >= 3\n        if (level == -1) {\n            if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) {\n                module = PyImport_ImportModuleLevelObject(\n                    name, global_dict, empty_dict, list, 1);\n                if (!module) {\n                    if (!PyErr_ExceptionMatches(PyExc_ImportError))\n                        goto bad;\n                    PyErr_Clear();\n                }\n            }\n            level = 0;\n        }\n        #endif\n        if (!module) {\n            #if PY_MAJOR_VERSION < 3\n            PyObject *py_level = PyInt_FromLong(level);\n            if (!py_level)\n                goto bad;\n            module = PyObject_CallFunctionObjArgs(py_import,\n                name, global_dict, empty_dict, list, py_level, (PyObject *)NULL);\n            Py_DECREF(py_level);\n            #else\n            module = PyImport_ImportModuleLevelObject(\n                name, global_dict, empty_dict, list, level);\n            #endif\n        }\n    }\nbad:\n    #if PY_MAJOR_VERSION < 3\n    Py_XDECREF(py_import);\n    #endif\n    Py_XDECREF(empty_list);\n    Py_XDECREF(empty_dict);\n    return module;\n}\n\n/* PyFunctionFastCall */\n#if CYTHON_FAST_PYCALL\nstatic PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na,\n                                               PyObject *globals) {\n    PyFrameObject *f;\n    PyThreadState *tstate = __Pyx_PyThreadState_Current;\n    PyObject **fastlocals;\n    Py_ssize_t i;\n    PyObject *result;\n    assert(globals != NULL);\n    /* XXX Perhaps we should create a specialized\n       PyFrame_New() that doesn't take 
locals, but does\n       take builtins without sanity checking them.\n       */\n    assert(tstate != NULL);\n    f = PyFrame_New(tstate, co, globals, NULL);\n    if (f == NULL) {\n        return NULL;\n    }\n    fastlocals = __Pyx_PyFrame_GetLocalsplus(f);\n    for (i = 0; i < na; i++) {\n        Py_INCREF(*args);\n        fastlocals[i] = *args++;\n    }\n    result = PyEval_EvalFrameEx(f,0);\n    ++tstate->recursion_depth;\n    Py_DECREF(f);\n    --tstate->recursion_depth;\n    return result;\n}\n#if 1 || PY_VERSION_HEX < 0x030600B1\nstatic PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) {\n    PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func);\n    PyObject *globals = PyFunction_GET_GLOBALS(func);\n    PyObject *argdefs = PyFunction_GET_DEFAULTS(func);\n    PyObject *closure;\n#if PY_MAJOR_VERSION >= 3\n    PyObject *kwdefs;\n#endif\n    PyObject *kwtuple, **k;\n    PyObject **d;\n    Py_ssize_t nd;\n    Py_ssize_t nk;\n    PyObject *result;\n    assert(kwargs == NULL || PyDict_Check(kwargs));\n    nk = kwargs ? 
PyDict_Size(kwargs) : 0;\n    if (Py_EnterRecursiveCall((char*)\" while calling a Python object\")) {\n        return NULL;\n    }\n    if (\n#if PY_MAJOR_VERSION >= 3\n            co->co_kwonlyargcount == 0 &&\n#endif\n            likely(kwargs == NULL || nk == 0) &&\n            co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) {\n        if (argdefs == NULL && co->co_argcount == nargs) {\n            result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals);\n            goto done;\n        }\n        else if (nargs == 0 && argdefs != NULL\n                 && co->co_argcount == Py_SIZE(argdefs)) {\n            /* function called with no arguments, but all parameters have\n               a default value: use default values as arguments .*/\n            args = &PyTuple_GET_ITEM(argdefs, 0);\n            result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals);\n            goto done;\n        }\n    }\n    if (kwargs != NULL) {\n        Py_ssize_t pos, i;\n        kwtuple = PyTuple_New(2 * nk);\n        if (kwtuple == NULL) {\n            result = NULL;\n            goto done;\n        }\n        k = &PyTuple_GET_ITEM(kwtuple, 0);\n        pos = i = 0;\n        while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) {\n            Py_INCREF(k[i]);\n            Py_INCREF(k[i+1]);\n            i += 2;\n        }\n        nk = i / 2;\n    }\n    else {\n        kwtuple = NULL;\n        k = NULL;\n    }\n    closure = PyFunction_GET_CLOSURE(func);\n#if PY_MAJOR_VERSION >= 3\n    kwdefs = PyFunction_GET_KW_DEFAULTS(func);\n#endif\n    if (argdefs != NULL) {\n        d = &PyTuple_GET_ITEM(argdefs, 0);\n        nd = Py_SIZE(argdefs);\n    }\n    else {\n        d = NULL;\n        nd = 0;\n    }\n#if PY_MAJOR_VERSION >= 3\n    result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL,\n                               args, (int)nargs,\n                               k, (int)nk,\n                               d, (int)nd, kwdefs, 
closure);\n#else\n    result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL,\n                               args, (int)nargs,\n                               k, (int)nk,\n                               d, (int)nd, closure);\n#endif\n    Py_XDECREF(kwtuple);\ndone:\n    Py_LeaveRecursiveCall();\n    return result;\n}\n#endif\n#endif\n\n/* PyCFunctionFastCall */\n#if CYTHON_FAST_PYCCALL\nstatic CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) {\n    PyCFunctionObject *func = (PyCFunctionObject*)func_obj;\n    PyCFunction meth = PyCFunction_GET_FUNCTION(func);\n    PyObject *self = PyCFunction_GET_SELF(func);\n    int flags = PyCFunction_GET_FLAGS(func);\n    assert(PyCFunction_Check(func));\n    assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)));\n    assert(nargs >= 0);\n    assert(nargs == 0 || args != NULL);\n    /* _PyCFunction_FastCallDict() must not be called with an exception set,\n       because it may clear it (directly or indirectly) and so the\n       caller loses its exception */\n    assert(!PyErr_Occurred());\n    if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) {\n        return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL);\n    } else {\n        return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs);\n    }\n}\n#endif\n\n/* PyObjectCall */\n#if CYTHON_COMPILING_IN_CPYTHON\nstatic CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {\n    PyObject *result;\n    ternaryfunc call = func->ob_type->tp_call;\n    if (unlikely(!call))\n        return PyObject_Call(func, arg, kw);\n    if (unlikely(Py_EnterRecursiveCall((char*)\" while calling a Python object\")))\n        return NULL;\n    result = (*call)(func, arg, kw);\n    Py_LeaveRecursiveCall();\n    if (unlikely(!result) && unlikely(!PyErr_Occurred())) {\n        
PyErr_SetString(\n            PyExc_SystemError,\n            \"NULL result without error in PyObject_Call\");\n    }\n    return result;\n}\n#endif\n\n/* PyIntCompare */\nstatic CYTHON_INLINE PyObject* __Pyx_PyInt_EqObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, CYTHON_UNUSED long inplace) {\n    if (op1 == op2) {\n        Py_RETURN_TRUE;\n    }\n    #if PY_MAJOR_VERSION < 3\n    if (likely(PyInt_CheckExact(op1))) {\n        const long b = intval;\n        long a = PyInt_AS_LONG(op1);\n        if (a == b) Py_RETURN_TRUE; else Py_RETURN_FALSE;\n    }\n    #endif\n    #if CYTHON_USE_PYLONG_INTERNALS\n    if (likely(PyLong_CheckExact(op1))) {\n        int unequal;\n        unsigned long uintval;\n        Py_ssize_t size = Py_SIZE(op1);\n        const digit* digits = ((PyLongObject*)op1)->ob_digit;\n        if (intval == 0) {\n            if (size == 0) Py_RETURN_TRUE; else Py_RETURN_FALSE;\n        } else if (intval < 0) {\n            if (size >= 0)\n                Py_RETURN_FALSE;\n            intval = -intval;\n            size = -size;\n        } else {\n            if (size <= 0)\n                Py_RETURN_FALSE;\n        }\n        uintval = (unsigned long) intval;\n#if PyLong_SHIFT * 4 < SIZEOF_LONG*8\n        if (uintval >> (PyLong_SHIFT * 4)) {\n            unequal = (size != 5) || (digits[0] != (uintval & (unsigned long) PyLong_MASK))\n                 | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[2] != ((uintval >> (2 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[3] != ((uintval >> (3 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[4] != ((uintval >> (4 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK));\n        } else\n#endif\n#if PyLong_SHIFT * 3 < SIZEOF_LONG*8\n        if (uintval >> (PyLong_SHIFT * 3)) {\n            unequal = (size != 4) || (digits[0] != (uintval & (unsigned long) PyLong_MASK))\n                 | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & 
(unsigned long) PyLong_MASK)) | (digits[2] != ((uintval >> (2 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[3] != ((uintval >> (3 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK));\n        } else\n#endif\n#if PyLong_SHIFT * 2 < SIZEOF_LONG*8\n        if (uintval >> (PyLong_SHIFT * 2)) {\n            unequal = (size != 3) || (digits[0] != (uintval & (unsigned long) PyLong_MASK))\n                 | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[2] != ((uintval >> (2 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK));\n        } else\n#endif\n#if PyLong_SHIFT * 1 < SIZEOF_LONG*8\n        if (uintval >> (PyLong_SHIFT * 1)) {\n            unequal = (size != 2) || (digits[0] != (uintval & (unsigned long) PyLong_MASK))\n                 | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK));\n        } else\n#endif\n            unequal = (size != 1) || (((unsigned long) digits[0]) != (uintval & (unsigned long) PyLong_MASK));\n        if (unequal == 0) Py_RETURN_TRUE; else Py_RETURN_FALSE;\n    }\n    #endif\n    if (PyFloat_CheckExact(op1)) {\n        const long b = intval;\n        double a = PyFloat_AS_DOUBLE(op1);\n        if ((double)a == (double)b) Py_RETURN_TRUE; else Py_RETURN_FALSE;\n    }\n    return (\n        PyObject_RichCompare(op1, op2, Py_EQ));\n}\n\n/* GetTopmostException */\n#if CYTHON_USE_EXC_INFO_STACK\nstatic _PyErr_StackItem *\n__Pyx_PyErr_GetTopmostException(PyThreadState *tstate)\n{\n    _PyErr_StackItem *exc_info = tstate->exc_info;\n    while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) &&\n           exc_info->previous_item != NULL)\n    {\n        exc_info = exc_info->previous_item;\n    }\n    return exc_info;\n}\n#endif\n\n/* SaveResetException */\n#if CYTHON_FAST_THREAD_STATE\nstatic CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {\n    #if CYTHON_USE_EXC_INFO_STACK\n    
_PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate);\n    *type = exc_info->exc_type;\n    *value = exc_info->exc_value;\n    *tb = exc_info->exc_traceback;\n    #else\n    *type = tstate->exc_type;\n    *value = tstate->exc_value;\n    *tb = tstate->exc_traceback;\n    #endif\n    Py_XINCREF(*type);\n    Py_XINCREF(*value);\n    Py_XINCREF(*tb);\n}\nstatic CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {\n    PyObject *tmp_type, *tmp_value, *tmp_tb;\n    #if CYTHON_USE_EXC_INFO_STACK\n    _PyErr_StackItem *exc_info = tstate->exc_info;\n    tmp_type = exc_info->exc_type;\n    tmp_value = exc_info->exc_value;\n    tmp_tb = exc_info->exc_traceback;\n    exc_info->exc_type = type;\n    exc_info->exc_value = value;\n    exc_info->exc_traceback = tb;\n    #else\n    tmp_type = tstate->exc_type;\n    tmp_value = tstate->exc_value;\n    tmp_tb = tstate->exc_traceback;\n    tstate->exc_type = type;\n    tstate->exc_value = value;\n    tstate->exc_traceback = tb;\n    #endif\n    Py_XDECREF(tmp_type);\n    Py_XDECREF(tmp_value);\n    Py_XDECREF(tmp_tb);\n}\n#endif\n\n/* PyErrExceptionMatches */\n#if CYTHON_FAST_THREAD_STATE\nstatic int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {\n    Py_ssize_t i, n;\n    n = PyTuple_GET_SIZE(tuple);\n#if PY_MAJOR_VERSION >= 3\n    for (i=0; i<n; i++) {\n        if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;\n    }\n#endif\n    for (i=0; i<n; i++) {\n        if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1;\n    }\n    return 0;\n}\nstatic CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) {\n    PyObject *exc_type = tstate->curexc_type;\n    if (exc_type == err) return 1;\n    if (unlikely(!exc_type)) return 0;\n    if (unlikely(PyTuple_Check(err)))\n        return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err);\n    return 
__Pyx_PyErr_GivenExceptionMatches(exc_type, err);\n}\n#endif\n\n/* GetException */\n#if CYTHON_FAST_THREAD_STATE\nstatic int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb)\n#else\nstatic int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb)\n#endif\n{\n    PyObject *local_type, *local_value, *local_tb;\n#if CYTHON_FAST_THREAD_STATE\n    PyObject *tmp_type, *tmp_value, *tmp_tb;\n    local_type = tstate->curexc_type;\n    local_value = tstate->curexc_value;\n    local_tb = tstate->curexc_traceback;\n    tstate->curexc_type = 0;\n    tstate->curexc_value = 0;\n    tstate->curexc_traceback = 0;\n#else\n    PyErr_Fetch(&local_type, &local_value, &local_tb);\n#endif\n    PyErr_NormalizeException(&local_type, &local_value, &local_tb);\n#if CYTHON_FAST_THREAD_STATE\n    if (unlikely(tstate->curexc_type))\n#else\n    if (unlikely(PyErr_Occurred()))\n#endif\n        goto bad;\n    #if PY_MAJOR_VERSION >= 3\n    if (local_tb) {\n        if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0))\n            goto bad;\n    }\n    #endif\n    Py_XINCREF(local_tb);\n    Py_XINCREF(local_type);\n    Py_XINCREF(local_value);\n    *type = local_type;\n    *value = local_value;\n    *tb = local_tb;\n#if CYTHON_FAST_THREAD_STATE\n    #if CYTHON_USE_EXC_INFO_STACK\n    {\n        _PyErr_StackItem *exc_info = tstate->exc_info;\n        tmp_type = exc_info->exc_type;\n        tmp_value = exc_info->exc_value;\n        tmp_tb = exc_info->exc_traceback;\n        exc_info->exc_type = local_type;\n        exc_info->exc_value = local_value;\n        exc_info->exc_traceback = local_tb;\n    }\n    #else\n    tmp_type = tstate->exc_type;\n    tmp_value = tstate->exc_value;\n    tmp_tb = tstate->exc_traceback;\n    tstate->exc_type = local_type;\n    tstate->exc_value = local_value;\n    tstate->exc_traceback = local_tb;\n    #endif\n    Py_XDECREF(tmp_type);\n    Py_XDECREF(tmp_value);\n    Py_XDECREF(tmp_tb);\n#else\n    
PyErr_SetExcInfo(local_type, local_value, local_tb);\n#endif\n    return 0;\nbad:\n    *type = 0;\n    *value = 0;\n    *tb = 0;\n    Py_XDECREF(local_type);\n    Py_XDECREF(local_value);\n    Py_XDECREF(local_tb);\n    return -1;\n}\n\n/* PyObjectCallMethO */\n#if CYTHON_COMPILING_IN_CPYTHON\nstatic CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) {\n    PyObject *self, *result;\n    PyCFunction cfunc;\n    cfunc = PyCFunction_GET_FUNCTION(func);\n    self = PyCFunction_GET_SELF(func);\n    if (unlikely(Py_EnterRecursiveCall((char*)\" while calling a Python object\")))\n        return NULL;\n    result = cfunc(self, arg);\n    Py_LeaveRecursiveCall();\n    if (unlikely(!result) && unlikely(!PyErr_Occurred())) {\n        PyErr_SetString(\n            PyExc_SystemError,\n            \"NULL result without error in PyObject_Call\");\n    }\n    return result;\n}\n#endif\n\n/* PyObjectCallOneArg */\n#if CYTHON_COMPILING_IN_CPYTHON\nstatic PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) {\n    PyObject *result;\n    PyObject *args = PyTuple_New(1);\n    if (unlikely(!args)) return NULL;\n    Py_INCREF(arg);\n    PyTuple_SET_ITEM(args, 0, arg);\n    result = __Pyx_PyObject_Call(func, args, NULL);\n    Py_DECREF(args);\n    return result;\n}\nstatic CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {\n#if CYTHON_FAST_PYCALL\n    if (PyFunction_Check(func)) {\n        return __Pyx_PyFunction_FastCall(func, &arg, 1);\n    }\n#endif\n    if (likely(PyCFunction_Check(func))) {\n        if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) {\n            return __Pyx_PyObject_CallMethO(func, arg);\n#if CYTHON_FAST_PYCCALL\n        } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) {\n            return __Pyx_PyCFunction_FastCall(func, &arg, 1);\n#endif\n        }\n    }\n    return __Pyx__PyObject_CallOneArg(func, arg);\n}\n#else\nstatic CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject 
*func, PyObject *arg) {\n    PyObject *result;\n    PyObject *args = PyTuple_Pack(1, arg);\n    if (unlikely(!args)) return NULL;\n    result = __Pyx_PyObject_Call(func, args, NULL);\n    Py_DECREF(args);\n    return result;\n}\n#endif\n\n/* BytesEquals */\nstatic CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) {\n#if CYTHON_COMPILING_IN_PYPY\n    return PyObject_RichCompareBool(s1, s2, equals);\n#else\n    if (s1 == s2) {\n        return (equals == Py_EQ);\n    } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) {\n        const char *ps1, *ps2;\n        Py_ssize_t length = PyBytes_GET_SIZE(s1);\n        if (length != PyBytes_GET_SIZE(s2))\n            return (equals == Py_NE);\n        ps1 = PyBytes_AS_STRING(s1);\n        ps2 = PyBytes_AS_STRING(s2);\n        if (ps1[0] != ps2[0]) {\n            return (equals == Py_NE);\n        } else if (length == 1) {\n            return (equals == Py_EQ);\n        } else {\n            int result;\n#if CYTHON_USE_UNICODE_INTERNALS\n            Py_hash_t hash1, hash2;\n            hash1 = ((PyBytesObject*)s1)->ob_shash;\n            hash2 = ((PyBytesObject*)s2)->ob_shash;\n            if (hash1 != hash2 && hash1 != -1 && hash2 != -1) {\n                return (equals == Py_NE);\n            }\n#endif\n            result = memcmp(ps1, ps2, (size_t)length);\n            return (equals == Py_EQ) ? 
(result == 0) : (result != 0);\n        }\n    } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) {\n        return (equals == Py_NE);\n    } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) {\n        return (equals == Py_NE);\n    } else {\n        int result;\n        PyObject* py_result = PyObject_RichCompare(s1, s2, equals);\n        if (!py_result)\n            return -1;\n        result = __Pyx_PyObject_IsTrue(py_result);\n        Py_DECREF(py_result);\n        return result;\n    }\n#endif\n}\n\n/* UnicodeEquals */\nstatic CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) {\n#if CYTHON_COMPILING_IN_PYPY\n    return PyObject_RichCompareBool(s1, s2, equals);\n#else\n#if PY_MAJOR_VERSION < 3\n    PyObject* owned_ref = NULL;\n#endif\n    int s1_is_unicode, s2_is_unicode;\n    if (s1 == s2) {\n        goto return_eq;\n    }\n    s1_is_unicode = PyUnicode_CheckExact(s1);\n    s2_is_unicode = PyUnicode_CheckExact(s2);\n#if PY_MAJOR_VERSION < 3\n    if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) {\n        owned_ref = PyUnicode_FromObject(s2);\n        if (unlikely(!owned_ref))\n            return -1;\n        s2 = owned_ref;\n        s2_is_unicode = 1;\n    } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) {\n        owned_ref = PyUnicode_FromObject(s1);\n        if (unlikely(!owned_ref))\n            return -1;\n        s1 = owned_ref;\n        s1_is_unicode = 1;\n    } else if (((!s2_is_unicode) & (!s1_is_unicode))) {\n        return __Pyx_PyBytes_Equals(s1, s2, equals);\n    }\n#endif\n    if (s1_is_unicode & s2_is_unicode) {\n        Py_ssize_t length;\n        int kind;\n        void *data1, *data2;\n        if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0))\n            return -1;\n        length = __Pyx_PyUnicode_GET_LENGTH(s1);\n        if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) {\n            goto return_ne;\n        }\n#if 
CYTHON_USE_UNICODE_INTERNALS\n        {\n            Py_hash_t hash1, hash2;\n        #if CYTHON_PEP393_ENABLED\n            hash1 = ((PyASCIIObject*)s1)->hash;\n            hash2 = ((PyASCIIObject*)s2)->hash;\n        #else\n            hash1 = ((PyUnicodeObject*)s1)->hash;\n            hash2 = ((PyUnicodeObject*)s2)->hash;\n        #endif\n            if (hash1 != hash2 && hash1 != -1 && hash2 != -1) {\n                goto return_ne;\n            }\n        }\n#endif\n        kind = __Pyx_PyUnicode_KIND(s1);\n        if (kind != __Pyx_PyUnicode_KIND(s2)) {\n            goto return_ne;\n        }\n        data1 = __Pyx_PyUnicode_DATA(s1);\n        data2 = __Pyx_PyUnicode_DATA(s2);\n        if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) {\n            goto return_ne;\n        } else if (length == 1) {\n            goto return_eq;\n        } else {\n            int result = memcmp(data1, data2, (size_t)(length * kind));\n            #if PY_MAJOR_VERSION < 3\n            Py_XDECREF(owned_ref);\n            #endif\n            return (equals == Py_EQ) ? 
(result == 0) : (result != 0);\n        }\n    } else if ((s1 == Py_None) & s2_is_unicode) {\n        goto return_ne;\n    } else if ((s2 == Py_None) & s1_is_unicode) {\n        goto return_ne;\n    } else {\n        int result;\n        PyObject* py_result = PyObject_RichCompare(s1, s2, equals);\n        #if PY_MAJOR_VERSION < 3\n        Py_XDECREF(owned_ref);\n        #endif\n        if (!py_result)\n            return -1;\n        result = __Pyx_PyObject_IsTrue(py_result);\n        Py_DECREF(py_result);\n        return result;\n    }\nreturn_eq:\n    #if PY_MAJOR_VERSION < 3\n    Py_XDECREF(owned_ref);\n    #endif\n    return (equals == Py_EQ);\nreturn_ne:\n    #if PY_MAJOR_VERSION < 3\n    Py_XDECREF(owned_ref);\n    #endif\n    return (equals == Py_NE);\n#endif\n}\n\n/* GetBuiltinName */\nstatic PyObject *__Pyx_GetBuiltinName(PyObject *name) {\n    PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);\n    if (unlikely(!result)) {\n        PyErr_Format(PyExc_NameError,\n#if PY_MAJOR_VERSION >= 3\n            \"name '%U' is not defined\", name);\n#else\n            \"name '%.200s' is not defined\", PyString_AS_STRING(name));\n#endif\n    }\n    return result;\n}\n\n/* PyDictVersioning */\n#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS\nstatic CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) {\n    PyObject *dict = Py_TYPE(obj)->tp_dict;\n    return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0;\n}\nstatic CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) {\n    PyObject **dictptr = NULL;\n    Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset;\n    if (offset) {\n#if CYTHON_COMPILING_IN_CPYTHON\n        dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj);\n#else\n        dictptr = _PyObject_GetDictPtr(obj);\n#endif\n    }\n    return (dictptr && *dictptr) ? 
__PYX_GET_DICT_VERSION(*dictptr) : 0;\n}\nstatic CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) {\n    PyObject *dict = Py_TYPE(obj)->tp_dict;\n    if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict)))\n        return 0;\n    return obj_dict_version == __Pyx_get_object_dict_version(obj);\n}\n#endif\n\n/* GetModuleGlobalName */\n#if CYTHON_USE_DICT_VERSIONS\nstatic PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value)\n#else\nstatic CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name)\n#endif\n{\n    PyObject *result;\n#if !CYTHON_AVOID_BORROWED_REFS\n#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1\n    result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash);\n    __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)\n    if (likely(result)) {\n        return __Pyx_NewRef(result);\n    } else if (unlikely(PyErr_Occurred())) {\n        return NULL;\n    }\n#else\n    result = PyDict_GetItem(__pyx_d, name);\n    __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)\n    if (likely(result)) {\n        return __Pyx_NewRef(result);\n    }\n#endif\n#else\n    result = PyObject_GetItem(__pyx_d, name);\n    __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)\n    if (likely(result)) {\n        return __Pyx_NewRef(result);\n    }\n    PyErr_Clear();\n#endif\n    return __Pyx_GetBuiltinName(name);\n}\n\n/* PyObjectCallNoArg */\n#if CYTHON_COMPILING_IN_CPYTHON\nstatic CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) {\n#if CYTHON_FAST_PYCALL\n    if (PyFunction_Check(func)) {\n        return __Pyx_PyFunction_FastCall(func, NULL, 0);\n    }\n#endif\n#ifdef __Pyx_CyFunction_USED\n    if (likely(PyCFunction_Check(func) || __Pyx_CyFunction_Check(func)))\n#else\n    if 
(likely(PyCFunction_Check(func)))\n#endif\n    {\n        if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) {\n            return __Pyx_PyObject_CallMethO(func, NULL);\n        }\n    }\n    return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL);\n}\n#endif\n\n/* PyErrFetchRestore */\n#if CYTHON_FAST_THREAD_STATE\nstatic CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {\n    PyObject *tmp_type, *tmp_value, *tmp_tb;\n    tmp_type = tstate->curexc_type;\n    tmp_value = tstate->curexc_value;\n    tmp_tb = tstate->curexc_traceback;\n    tstate->curexc_type = type;\n    tstate->curexc_value = value;\n    tstate->curexc_traceback = tb;\n    Py_XDECREF(tmp_type);\n    Py_XDECREF(tmp_value);\n    Py_XDECREF(tmp_tb);\n}\nstatic CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {\n    *type = tstate->curexc_type;\n    *value = tstate->curexc_value;\n    *tb = tstate->curexc_traceback;\n    tstate->curexc_type = 0;\n    tstate->curexc_value = 0;\n    tstate->curexc_traceback = 0;\n}\n#endif\n\n/* RaiseException */\n#if PY_MAJOR_VERSION < 3\nstatic void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,\n                        CYTHON_UNUSED PyObject *cause) {\n    __Pyx_PyThreadState_declare\n    Py_XINCREF(type);\n    if (!value || value == Py_None)\n        value = NULL;\n    else\n        Py_INCREF(value);\n    if (!tb || tb == Py_None)\n        tb = NULL;\n    else {\n        Py_INCREF(tb);\n        if (!PyTraceBack_Check(tb)) {\n            PyErr_SetString(PyExc_TypeError,\n                \"raise: arg 3 must be a traceback or None\");\n            goto raise_error;\n        }\n    }\n    if (PyType_Check(type)) {\n#if CYTHON_COMPILING_IN_PYPY\n        if (!value) {\n            Py_INCREF(Py_None);\n            value = Py_None;\n        }\n#endif\n        PyErr_NormalizeException(&type, &value, &tb);\n    } else {\n        if 
(value) {\n            PyErr_SetString(PyExc_TypeError,\n                \"instance exception may not have a separate value\");\n            goto raise_error;\n        }\n        value = type;\n        type = (PyObject*) Py_TYPE(type);\n        Py_INCREF(type);\n        if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {\n            PyErr_SetString(PyExc_TypeError,\n                \"raise: exception class must be a subclass of BaseException\");\n            goto raise_error;\n        }\n    }\n    __Pyx_PyThreadState_assign\n    __Pyx_ErrRestore(type, value, tb);\n    return;\nraise_error:\n    Py_XDECREF(value);\n    Py_XDECREF(type);\n    Py_XDECREF(tb);\n    return;\n}\n#else\nstatic void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {\n    PyObject* owned_instance = NULL;\n    if (tb == Py_None) {\n        tb = 0;\n    } else if (tb && !PyTraceBack_Check(tb)) {\n        PyErr_SetString(PyExc_TypeError,\n            \"raise: arg 3 must be a traceback or None\");\n        goto bad;\n    }\n    if (value == Py_None)\n        value = 0;\n    if (PyExceptionInstance_Check(type)) {\n        if (value) {\n            PyErr_SetString(PyExc_TypeError,\n                \"instance exception may not have a separate value\");\n            goto bad;\n        }\n        value = type;\n        type = (PyObject*) Py_TYPE(value);\n    } else if (PyExceptionClass_Check(type)) {\n        PyObject *instance_class = NULL;\n        if (value && PyExceptionInstance_Check(value)) {\n            instance_class = (PyObject*) Py_TYPE(value);\n            if (instance_class != type) {\n                int is_subclass = PyObject_IsSubclass(instance_class, type);\n                if (!is_subclass) {\n                    instance_class = NULL;\n                } else if (unlikely(is_subclass == -1)) {\n                    goto bad;\n                } else {\n                    type = instance_class;\n                }\n          
  }\n        }\n        if (!instance_class) {\n            PyObject *args;\n            if (!value)\n                args = PyTuple_New(0);\n            else if (PyTuple_Check(value)) {\n                Py_INCREF(value);\n                args = value;\n            } else\n                args = PyTuple_Pack(1, value);\n            if (!args)\n                goto bad;\n            owned_instance = PyObject_Call(type, args, NULL);\n            Py_DECREF(args);\n            if (!owned_instance)\n                goto bad;\n            value = owned_instance;\n            if (!PyExceptionInstance_Check(value)) {\n                PyErr_Format(PyExc_TypeError,\n                             \"calling %R should have returned an instance of \"\n                             \"BaseException, not %R\",\n                             type, Py_TYPE(value));\n                goto bad;\n            }\n        }\n    } else {\n        PyErr_SetString(PyExc_TypeError,\n            \"raise: exception class must be a subclass of BaseException\");\n        goto bad;\n    }\n    if (cause) {\n        PyObject *fixed_cause;\n        if (cause == Py_None) {\n            fixed_cause = NULL;\n        } else if (PyExceptionClass_Check(cause)) {\n            fixed_cause = PyObject_CallObject(cause, NULL);\n            if (fixed_cause == NULL)\n                goto bad;\n        } else if (PyExceptionInstance_Check(cause)) {\n            fixed_cause = cause;\n            Py_INCREF(fixed_cause);\n        } else {\n            PyErr_SetString(PyExc_TypeError,\n                            \"exception causes must derive from \"\n                            \"BaseException\");\n            goto bad;\n        }\n        PyException_SetCause(value, fixed_cause);\n    }\n    PyErr_SetObject(type, value);\n    if (tb) {\n#if CYTHON_COMPILING_IN_PYPY\n        PyObject *tmp_type, *tmp_value, *tmp_tb;\n        PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb);\n        Py_INCREF(tb);\n        
PyErr_Restore(tmp_type, tmp_value, tb);\n        Py_XDECREF(tmp_tb);\n#else\n        PyThreadState *tstate = __Pyx_PyThreadState_Current;\n        PyObject* tmp_tb = tstate->curexc_traceback;\n        if (tb != tmp_tb) {\n            Py_INCREF(tb);\n            tstate->curexc_traceback = tb;\n            Py_XDECREF(tmp_tb);\n        }\n#endif\n    }\nbad:\n    Py_XDECREF(owned_instance);\n    return;\n}\n#endif\n\n/* RaiseTooManyValuesToUnpack */\nstatic CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {\n    PyErr_Format(PyExc_ValueError,\n                 \"too many values to unpack (expected %\" CYTHON_FORMAT_SSIZE_T \"d)\", expected);\n}\n\n/* RaiseNeedMoreValuesToUnpack */\nstatic CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {\n    PyErr_Format(PyExc_ValueError,\n                 \"need more than %\" CYTHON_FORMAT_SSIZE_T \"d value%.1s to unpack\",\n                 index, (index == 1) ? \"\" : \"s\");\n}\n\n/* IterFinish */\nstatic CYTHON_INLINE int __Pyx_IterFinish(void) {\n#if CYTHON_FAST_THREAD_STATE\n    PyThreadState *tstate = __Pyx_PyThreadState_Current;\n    PyObject* exc_type = tstate->curexc_type;\n    if (unlikely(exc_type)) {\n        if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) {\n            PyObject *exc_value, *exc_tb;\n            exc_value = tstate->curexc_value;\n            exc_tb = tstate->curexc_traceback;\n            tstate->curexc_type = 0;\n            tstate->curexc_value = 0;\n            tstate->curexc_traceback = 0;\n            Py_DECREF(exc_type);\n            Py_XDECREF(exc_value);\n            Py_XDECREF(exc_tb);\n            return 0;\n        } else {\n            return -1;\n        }\n    }\n    return 0;\n#else\n    if (unlikely(PyErr_Occurred())) {\n        if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) {\n            PyErr_Clear();\n            return 0;\n        } else {\n            return -1;\n        }\n    }\n    return 
0;\n#endif\n}\n\n/* UnpackItemEndCheck */\nstatic int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) {\n    if (unlikely(retval)) {\n        Py_DECREF(retval);\n        __Pyx_RaiseTooManyValuesError(expected);\n        return -1;\n    } else {\n        return __Pyx_IterFinish();\n    }\n    return 0;\n}\n\n/* ImportFrom */\nstatic PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) {\n    PyObject* value = __Pyx_PyObject_GetAttrStr(module, name);\n    if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) {\n        PyErr_Format(PyExc_ImportError,\n        #if PY_MAJOR_VERSION < 3\n            \"cannot import name %.230s\", PyString_AS_STRING(name));\n        #else\n            \"cannot import name %S\", name);\n        #endif\n    }\n    return value;\n}\n\n/* CLineInTraceback */\n#ifndef CYTHON_CLINE_IN_TRACEBACK\nstatic int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) {\n    PyObject *use_cline;\n    PyObject *ptype, *pvalue, *ptraceback;\n#if CYTHON_COMPILING_IN_CPYTHON\n    PyObject **cython_runtime_dict;\n#endif\n    if (unlikely(!__pyx_cython_runtime)) {\n        return c_line;\n    }\n    __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback);\n#if CYTHON_COMPILING_IN_CPYTHON\n    cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime);\n    if (likely(cython_runtime_dict)) {\n        __PYX_PY_DICT_LOOKUP_IF_MODIFIED(\n            use_cline, *cython_runtime_dict,\n            __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback))\n    } else\n#endif\n    {\n      PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback);\n      if (use_cline_obj) {\n        use_cline = PyObject_Not(use_cline_obj) ? 
Py_False : Py_True;\n        Py_DECREF(use_cline_obj);\n      } else {\n        PyErr_Clear();\n        use_cline = NULL;\n      }\n    }\n    if (!use_cline) {\n        c_line = 0;\n        PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False);\n    }\n    else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) {\n        c_line = 0;\n    }\n    __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback);\n    return c_line;\n}\n#endif\n\n/* CodeObjectCache */\nstatic int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {\n    int start = 0, mid = 0, end = count - 1;\n    if (end >= 0 && code_line > entries[end].code_line) {\n        return count;\n    }\n    while (start < end) {\n        mid = start + (end - start) / 2;\n        if (code_line < entries[mid].code_line) {\n            end = mid;\n        } else if (code_line > entries[mid].code_line) {\n             start = mid + 1;\n        } else {\n            return mid;\n        }\n    }\n    if (code_line <= entries[mid].code_line) {\n        return mid;\n    } else {\n        return mid + 1;\n    }\n}\nstatic PyCodeObject *__pyx_find_code_object(int code_line) {\n    PyCodeObject* code_object;\n    int pos;\n    if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {\n        return NULL;\n    }\n    pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);\n    if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {\n        return NULL;\n    }\n    code_object = __pyx_code_cache.entries[pos].code_object;\n    Py_INCREF(code_object);\n    return code_object;\n}\nstatic void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {\n    int pos, i;\n    __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;\n    if (unlikely(!code_line)) {\n        return;\n    }\n    if 
(unlikely(!entries)) {\n        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));\n        if (likely(entries)) {\n            __pyx_code_cache.entries = entries;\n            __pyx_code_cache.max_count = 64;\n            __pyx_code_cache.count = 1;\n            entries[0].code_line = code_line;\n            entries[0].code_object = code_object;\n            Py_INCREF(code_object);\n        }\n        return;\n    }\n    pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);\n    if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {\n        PyCodeObject* tmp = entries[pos].code_object;\n        entries[pos].code_object = code_object;\n        Py_DECREF(tmp);\n        return;\n    }\n    if (__pyx_code_cache.count == __pyx_code_cache.max_count) {\n        int new_max = __pyx_code_cache.max_count + 64;\n        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(\n            __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry));\n        if (unlikely(!entries)) {\n            return;\n        }\n        __pyx_code_cache.entries = entries;\n        __pyx_code_cache.max_count = new_max;\n    }\n    for (i=__pyx_code_cache.count; i>pos; i--) {\n        entries[i] = entries[i-1];\n    }\n    entries[pos].code_line = code_line;\n    entries[pos].code_object = code_object;\n    __pyx_code_cache.count++;\n    Py_INCREF(code_object);\n}\n\n/* AddTraceback */\n#include \"compile.h\"\n#include \"frameobject.h\"\n#include \"traceback.h\"\nstatic PyCodeObject* __Pyx_CreateCodeObjectForTraceback(\n            const char *funcname, int c_line,\n            int py_line, const char *filename) {\n    PyCodeObject *py_code = 0;\n    PyObject *py_srcfile = 0;\n    PyObject *py_funcname = 0;\n    #if PY_MAJOR_VERSION < 3\n    py_srcfile = PyString_FromString(filename);\n    #else\n    py_srcfile = 
PyUnicode_FromString(filename);\n    #endif\n    if (!py_srcfile) goto bad;\n    if (c_line) {\n        #if PY_MAJOR_VERSION < 3\n        py_funcname = PyString_FromFormat( \"%s (%s:%d)\", funcname, __pyx_cfilenm, c_line);\n        #else\n        py_funcname = PyUnicode_FromFormat( \"%s (%s:%d)\", funcname, __pyx_cfilenm, c_line);\n        #endif\n    }\n    else {\n        #if PY_MAJOR_VERSION < 3\n        py_funcname = PyString_FromString(funcname);\n        #else\n        py_funcname = PyUnicode_FromString(funcname);\n        #endif\n    }\n    if (!py_funcname) goto bad;\n    py_code = __Pyx_PyCode_New(\n        0,\n        0,\n        0,\n        0,\n        0,\n        __pyx_empty_bytes, /*PyObject *code,*/\n        __pyx_empty_tuple, /*PyObject *consts,*/\n        __pyx_empty_tuple, /*PyObject *names,*/\n        __pyx_empty_tuple, /*PyObject *varnames,*/\n        __pyx_empty_tuple, /*PyObject *freevars,*/\n        __pyx_empty_tuple, /*PyObject *cellvars,*/\n        py_srcfile,   /*PyObject *filename,*/\n        py_funcname,  /*PyObject *name,*/\n        py_line,\n        __pyx_empty_bytes  /*PyObject *lnotab*/\n    );\n    Py_DECREF(py_srcfile);\n    Py_DECREF(py_funcname);\n    return py_code;\nbad:\n    Py_XDECREF(py_srcfile);\n    Py_XDECREF(py_funcname);\n    return NULL;\n}\nstatic void __Pyx_AddTraceback(const char *funcname, int c_line,\n                               int py_line, const char *filename) {\n    PyCodeObject *py_code = 0;\n    PyFrameObject *py_frame = 0;\n    PyThreadState *tstate = __Pyx_PyThreadState_Current;\n    if (c_line) {\n        c_line = __Pyx_CLineForTraceback(tstate, c_line);\n    }\n    py_code = __pyx_find_code_object(c_line ? -c_line : py_line);\n    if (!py_code) {\n        py_code = __Pyx_CreateCodeObjectForTraceback(\n            funcname, c_line, py_line, filename);\n        if (!py_code) goto bad;\n        __pyx_insert_code_object(c_line ? 
-c_line : py_line, py_code);\n    }\n    py_frame = PyFrame_New(\n        tstate,            /*PyThreadState *tstate,*/\n        py_code,           /*PyCodeObject *code,*/\n        __pyx_d,    /*PyObject *globals,*/\n        0                  /*PyObject *locals*/\n    );\n    if (!py_frame) goto bad;\n    __Pyx_PyFrame_SetLineNumber(py_frame, py_line);\n    PyTraceBack_Here(py_frame);\nbad:\n    Py_XDECREF(py_code);\n    Py_XDECREF(py_frame);\n}\n\n/* CIntFromPyVerify */\n#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\\\n    __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0)\n#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\\\n    __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1)\n#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\\\n    {\\\n        func_type value = func_value;\\\n        if (sizeof(target_type) < sizeof(func_type)) {\\\n            if (unlikely(value != (func_type) (target_type) value)) {\\\n                func_type zero = 0;\\\n                if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\\\n                    return (target_type) -1;\\\n                if (is_unsigned && unlikely(value < zero))\\\n                    goto raise_neg_overflow;\\\n                else\\\n                    goto raise_overflow;\\\n            }\\\n        }\\\n        return (target_type) value;\\\n    }\n\n/* CIntToPy */\nstatic CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) {\n    const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0;\n    const int is_unsigned = neg_one > const_zero;\n    if (is_unsigned) {\n        if (sizeof(int) < sizeof(long)) {\n            return PyInt_FromLong((long) value);\n        } else if (sizeof(int) <= sizeof(unsigned long)) {\n            return PyLong_FromUnsignedLong((unsigned long) value);\n#ifdef HAVE_LONG_LONG\n        } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {\n          
  return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);\n#endif\n        }\n    } else {\n        if (sizeof(int) <= sizeof(long)) {\n            return PyInt_FromLong((long) value);\n#ifdef HAVE_LONG_LONG\n        } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {\n            return PyLong_FromLongLong((PY_LONG_LONG) value);\n#endif\n        }\n    }\n    {\n        int one = 1; int little = (int)*(unsigned char *)&one;\n        unsigned char *bytes = (unsigned char *)&value;\n        return _PyLong_FromByteArray(bytes, sizeof(int),\n                                     little, !is_unsigned);\n    }\n}\n\n/* CIntFromPy */\nstatic CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {\n    const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0;\n    const int is_unsigned = neg_one > const_zero;\n#if PY_MAJOR_VERSION < 3\n    if (likely(PyInt_Check(x))) {\n        if (sizeof(int) < sizeof(long)) {\n            __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x))\n        } else {\n            long val = PyInt_AS_LONG(x);\n            if (is_unsigned && unlikely(val < 0)) {\n                goto raise_neg_overflow;\n            }\n            return (int) val;\n        }\n    } else\n#endif\n    if (likely(PyLong_Check(x))) {\n        if (is_unsigned) {\n#if CYTHON_USE_PYLONG_INTERNALS\n            const digit* digits = ((PyLongObject*)x)->ob_digit;\n            switch (Py_SIZE(x)) {\n                case  0: return (int) 0;\n                case  1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0])\n                case 2:\n                    if (8 * sizeof(int) > 1 * PyLong_SHIFT) {\n                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {\n                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))\n                        } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) {\n                            return (int) (((((int)digits[1]) << 
PyLong_SHIFT) | (int)digits[0]));\n                        }\n                    }\n                    break;\n                case 3:\n                    if (8 * sizeof(int) > 2 * PyLong_SHIFT) {\n                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {\n                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))\n                        } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) {\n                            return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));\n                        }\n                    }\n                    break;\n                case 4:\n                    if (8 * sizeof(int) > 3 * PyLong_SHIFT) {\n                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {\n                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))\n                        } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) {\n                            return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));\n                        }\n                    }\n                    break;\n            }\n#endif\n#if CYTHON_COMPILING_IN_CPYTHON\n            if (unlikely(Py_SIZE(x) < 0)) {\n                goto raise_neg_overflow;\n            }\n#else\n            {\n                int result = PyObject_RichCompareBool(x, Py_False, Py_LT);\n                if (unlikely(result < 0))\n                    return (int) -1;\n                if (unlikely(result == 1))\n                    goto raise_neg_overflow;\n            }\n#endif\n            if (sizeof(int) <= sizeof(unsigned 
long)) {\n                __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x))\n#ifdef HAVE_LONG_LONG\n            } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {\n                __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))\n#endif\n            }\n        } else {\n#if CYTHON_USE_PYLONG_INTERNALS\n            const digit* digits = ((PyLongObject*)x)->ob_digit;\n            switch (Py_SIZE(x)) {\n                case  0: return (int) 0;\n                case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0]))\n                case  1: __PYX_VERIFY_RETURN_INT(int,  digit, +digits[0])\n                case -2:\n                    if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) {\n                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {\n                            __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))\n                        } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {\n                            return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));\n                        }\n                    }\n                    break;\n                case 2:\n                    if (8 * sizeof(int) > 1 * PyLong_SHIFT) {\n                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {\n                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))\n                        } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {\n                            return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));\n                        }\n                    }\n                    break;\n                case -3:\n                    if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {\n                        if (8 * sizeof(unsigned long) > 3 * 
PyLong_SHIFT) {\n                            __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))\n                        } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {\n                            return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));\n                        }\n                    }\n                    break;\n                case 3:\n                    if (8 * sizeof(int) > 2 * PyLong_SHIFT) {\n                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {\n                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))\n                        } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {\n                            return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));\n                        }\n                    }\n                    break;\n                case -4:\n                    if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {\n                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {\n                            __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))\n                        } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {\n                            return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));\n                        }\n                    }\n                    break;\n                case 4:\n                    if (8 * sizeof(int) > 3 * 
PyLong_SHIFT) {\n                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {\n                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))\n                        } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {\n                            return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));\n                        }\n                    }\n                    break;\n            }\n#endif\n            if (sizeof(int) <= sizeof(long)) {\n                __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x))\n#ifdef HAVE_LONG_LONG\n            } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {\n                __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x))\n#endif\n            }\n        }\n        {\n#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)\n            PyErr_SetString(PyExc_RuntimeError,\n                            \"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers\");\n#else\n            int val;\n            PyObject *v = __Pyx_PyNumber_IntOrLong(x);\n #if PY_MAJOR_VERSION < 3\n            if (likely(v) && !PyLong_Check(v)) {\n                PyObject *tmp = v;\n                v = PyNumber_Long(tmp);\n                Py_DECREF(tmp);\n            }\n #endif\n            if (likely(v)) {\n                int one = 1; int is_little = (int)*(unsigned char *)&one;\n                unsigned char *bytes = (unsigned char *)&val;\n                int ret = _PyLong_AsByteArray((PyLongObject *)v,\n                                              bytes, sizeof(val),\n                                              is_little, !is_unsigned);\n                Py_DECREF(v);\n                if (likely(!ret))\n            
        return val;\n            }\n#endif\n            return (int) -1;\n        }\n    } else {\n        int val;\n        PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);\n        if (!tmp) return (int) -1;\n        val = __Pyx_PyInt_As_int(tmp);\n        Py_DECREF(tmp);\n        return val;\n    }\nraise_overflow:\n    PyErr_SetString(PyExc_OverflowError,\n        \"value too large to convert to int\");\n    return (int) -1;\nraise_neg_overflow:\n    PyErr_SetString(PyExc_OverflowError,\n        \"can't convert negative value to int\");\n    return (int) -1;\n}\n\n/* CIntFromPy */\nstatic CYTHON_INLINE size_t __Pyx_PyInt_As_size_t(PyObject *x) {\n    const size_t neg_one = (size_t) ((size_t) 0 - (size_t) 1), const_zero = (size_t) 0;\n    const int is_unsigned = neg_one > const_zero;\n#if PY_MAJOR_VERSION < 3\n    if (likely(PyInt_Check(x))) {\n        if (sizeof(size_t) < sizeof(long)) {\n            __PYX_VERIFY_RETURN_INT(size_t, long, PyInt_AS_LONG(x))\n        } else {\n            long val = PyInt_AS_LONG(x);\n            if (is_unsigned && unlikely(val < 0)) {\n                goto raise_neg_overflow;\n            }\n            return (size_t) val;\n        }\n    } else\n#endif\n    if (likely(PyLong_Check(x))) {\n        if (is_unsigned) {\n#if CYTHON_USE_PYLONG_INTERNALS\n            const digit* digits = ((PyLongObject*)x)->ob_digit;\n            switch (Py_SIZE(x)) {\n                case  0: return (size_t) 0;\n                case  1: __PYX_VERIFY_RETURN_INT(size_t, digit, digits[0])\n                case 2:\n                    if (8 * sizeof(size_t) > 1 * PyLong_SHIFT) {\n                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {\n                            __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))\n                        } else if (8 * sizeof(size_t) >= 2 * PyLong_SHIFT) {\n                            return (size_t) (((((size_t)digits[1]) << 
PyLong_SHIFT) | (size_t)digits[0]));\n                        }\n                    }\n                    break;\n                case 3:\n                    if (8 * sizeof(size_t) > 2 * PyLong_SHIFT) {\n                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {\n                            __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))\n                        } else if (8 * sizeof(size_t) >= 3 * PyLong_SHIFT) {\n                            return (size_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));\n                        }\n                    }\n                    break;\n                case 4:\n                    if (8 * sizeof(size_t) > 3 * PyLong_SHIFT) {\n                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {\n                            __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))\n                        } else if (8 * sizeof(size_t) >= 4 * PyLong_SHIFT) {\n                            return (size_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));\n                        }\n                    }\n                    break;\n            }\n#endif\n#if CYTHON_COMPILING_IN_CPYTHON\n            if (unlikely(Py_SIZE(x) < 0)) {\n                goto raise_neg_overflow;\n            }\n#else\n            {\n                int result = PyObject_RichCompareBool(x, Py_False, Py_LT);\n                if (unlikely(result < 0))\n                    return (size_t) -1;\n                if (unlikely(result == 1))\n                    goto raise_neg_overflow;\n            
}\n#endif\n            if (sizeof(size_t) <= sizeof(unsigned long)) {\n                __PYX_VERIFY_RETURN_INT_EXC(size_t, unsigned long, PyLong_AsUnsignedLong(x))\n#ifdef HAVE_LONG_LONG\n            } else if (sizeof(size_t) <= sizeof(unsigned PY_LONG_LONG)) {\n                __PYX_VERIFY_RETURN_INT_EXC(size_t, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))\n#endif\n            }\n        } else {\n#if CYTHON_USE_PYLONG_INTERNALS\n            const digit* digits = ((PyLongObject*)x)->ob_digit;\n            switch (Py_SIZE(x)) {\n                case  0: return (size_t) 0;\n                case -1: __PYX_VERIFY_RETURN_INT(size_t, sdigit, (sdigit) (-(sdigit)digits[0]))\n                case  1: __PYX_VERIFY_RETURN_INT(size_t,  digit, +digits[0])\n                case -2:\n                    if (8 * sizeof(size_t) - 1 > 1 * PyLong_SHIFT) {\n                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {\n                            __PYX_VERIFY_RETURN_INT(size_t, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))\n                        } else if (8 * sizeof(size_t) - 1 > 2 * PyLong_SHIFT) {\n                            return (size_t) (((size_t)-1)*(((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])));\n                        }\n                    }\n                    break;\n                case 2:\n                    if (8 * sizeof(size_t) > 1 * PyLong_SHIFT) {\n                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {\n                            __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))\n                        } else if (8 * sizeof(size_t) - 1 > 2 * PyLong_SHIFT) {\n                            return (size_t) ((((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])));\n                        }\n                    }\n                    break;\n                case -3:\n              
      if (8 * sizeof(size_t) - 1 > 2 * PyLong_SHIFT) {\n                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {\n                            __PYX_VERIFY_RETURN_INT(size_t, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))\n                        } else if (8 * sizeof(size_t) - 1 > 3 * PyLong_SHIFT) {\n                            return (size_t) (((size_t)-1)*(((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])));\n                        }\n                    }\n                    break;\n                case 3:\n                    if (8 * sizeof(size_t) > 2 * PyLong_SHIFT) {\n                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {\n                            __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))\n                        } else if (8 * sizeof(size_t) - 1 > 3 * PyLong_SHIFT) {\n                            return (size_t) ((((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])));\n                        }\n                    }\n                    break;\n                case -4:\n                    if (8 * sizeof(size_t) - 1 > 3 * PyLong_SHIFT) {\n                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {\n                            __PYX_VERIFY_RETURN_INT(size_t, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))\n                        } else if (8 * sizeof(size_t) - 1 > 4 * PyLong_SHIFT) {\n                            return (size_t) (((size_t)-1)*(((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << 
PyLong_SHIFT) | (size_t)digits[0])));\n                        }\n                    }\n                    break;\n                case 4:\n                    if (8 * sizeof(size_t) > 3 * PyLong_SHIFT) {\n                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {\n                            __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))\n                        } else if (8 * sizeof(size_t) - 1 > 4 * PyLong_SHIFT) {\n                            return (size_t) ((((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])));\n                        }\n                    }\n                    break;\n            }\n#endif\n            if (sizeof(size_t) <= sizeof(long)) {\n                __PYX_VERIFY_RETURN_INT_EXC(size_t, long, PyLong_AsLong(x))\n#ifdef HAVE_LONG_LONG\n            } else if (sizeof(size_t) <= sizeof(PY_LONG_LONG)) {\n                __PYX_VERIFY_RETURN_INT_EXC(size_t, PY_LONG_LONG, PyLong_AsLongLong(x))\n#endif\n            }\n        }\n        {\n#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)\n            PyErr_SetString(PyExc_RuntimeError,\n                            \"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers\");\n#else\n            size_t val;\n            PyObject *v = __Pyx_PyNumber_IntOrLong(x);\n #if PY_MAJOR_VERSION < 3\n            if (likely(v) && !PyLong_Check(v)) {\n                PyObject *tmp = v;\n                v = PyNumber_Long(tmp);\n                Py_DECREF(tmp);\n            }\n #endif\n            if (likely(v)) {\n                int one = 1; int is_little = (int)*(unsigned char *)&one;\n                unsigned char *bytes = (unsigned char *)&val;\n                int ret = 
_PyLong_AsByteArray((PyLongObject *)v,\n                                              bytes, sizeof(val),\n                                              is_little, !is_unsigned);\n                Py_DECREF(v);\n                if (likely(!ret))\n                    return val;\n            }\n#endif\n            return (size_t) -1;\n        }\n    } else {\n        size_t val;\n        PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);\n        if (!tmp) return (size_t) -1;\n        val = __Pyx_PyInt_As_size_t(tmp);\n        Py_DECREF(tmp);\n        return val;\n    }\nraise_overflow:\n    PyErr_SetString(PyExc_OverflowError,\n        \"value too large to convert to size_t\");\n    return (size_t) -1;\nraise_neg_overflow:\n    PyErr_SetString(PyExc_OverflowError,\n        \"can't convert negative value to size_t\");\n    return (size_t) -1;\n}\n\n/* CIntToPy */\nstatic CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {\n    const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0;\n    const int is_unsigned = neg_one > const_zero;\n    if (is_unsigned) {\n        if (sizeof(long) < sizeof(long)) {\n            return PyInt_FromLong((long) value);\n        } else if (sizeof(long) <= sizeof(unsigned long)) {\n            return PyLong_FromUnsignedLong((unsigned long) value);\n#ifdef HAVE_LONG_LONG\n        } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {\n            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);\n#endif\n        }\n    } else {\n        if (sizeof(long) <= sizeof(long)) {\n            return PyInt_FromLong((long) value);\n#ifdef HAVE_LONG_LONG\n        } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {\n            return PyLong_FromLongLong((PY_LONG_LONG) value);\n#endif\n        }\n    }\n    {\n        int one = 1; int little = (int)*(unsigned char *)&one;\n        unsigned char *bytes = (unsigned char *)&value;\n        return _PyLong_FromByteArray(bytes, sizeof(long),\n                           
          little, !is_unsigned);\n    }\n}\n\n/* CIntFromPy */\nstatic CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {\n    const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0;\n    const int is_unsigned = neg_one > const_zero;\n#if PY_MAJOR_VERSION < 3\n    if (likely(PyInt_Check(x))) {\n        if (sizeof(long) < sizeof(long)) {\n            __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x))\n        } else {\n            long val = PyInt_AS_LONG(x);\n            if (is_unsigned && unlikely(val < 0)) {\n                goto raise_neg_overflow;\n            }\n            return (long) val;\n        }\n    } else\n#endif\n    if (likely(PyLong_Check(x))) {\n        if (is_unsigned) {\n#if CYTHON_USE_PYLONG_INTERNALS\n            const digit* digits = ((PyLongObject*)x)->ob_digit;\n            switch (Py_SIZE(x)) {\n                case  0: return (long) 0;\n                case  1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0])\n                case 2:\n                    if (8 * sizeof(long) > 1 * PyLong_SHIFT) {\n                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {\n                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))\n                        } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) {\n                            return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));\n                        }\n                    }\n                    break;\n                case 3:\n                    if (8 * sizeof(long) > 2 * PyLong_SHIFT) {\n                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {\n                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))\n                        } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) {\n            
                return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));\n                        }\n                    }\n                    break;\n                case 4:\n                    if (8 * sizeof(long) > 3 * PyLong_SHIFT) {\n                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {\n                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))\n                        } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) {\n                            return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));\n                        }\n                    }\n                    break;\n            }\n#endif\n#if CYTHON_COMPILING_IN_CPYTHON\n            if (unlikely(Py_SIZE(x) < 0)) {\n                goto raise_neg_overflow;\n            }\n#else\n            {\n                int result = PyObject_RichCompareBool(x, Py_False, Py_LT);\n                if (unlikely(result < 0))\n                    return (long) -1;\n                if (unlikely(result == 1))\n                    goto raise_neg_overflow;\n            }\n#endif\n            if (sizeof(long) <= sizeof(unsigned long)) {\n                __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x))\n#ifdef HAVE_LONG_LONG\n            } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {\n                __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))\n#endif\n            }\n        } else {\n#if CYTHON_USE_PYLONG_INTERNALS\n            const digit* digits = ((PyLongObject*)x)->ob_digit;\n            switch (Py_SIZE(x)) {\n                case  0: return (long) 0;\n                case -1: 
__PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0]))\n                case  1: __PYX_VERIFY_RETURN_INT(long,  digit, +digits[0])\n                case -2:\n                    if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) {\n                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {\n                            __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))\n                        } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {\n                            return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));\n                        }\n                    }\n                    break;\n                case 2:\n                    if (8 * sizeof(long) > 1 * PyLong_SHIFT) {\n                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {\n                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))\n                        } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {\n                            return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));\n                        }\n                    }\n                    break;\n                case -3:\n                    if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {\n                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {\n                            __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))\n                        } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {\n                            return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));\n                        }\n                    }\n                    break;\n     
           case 3:\n                    if (8 * sizeof(long) > 2 * PyLong_SHIFT) {\n                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {\n                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))\n                        } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {\n                            return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));\n                        }\n                    }\n                    break;\n                case -4:\n                    if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {\n                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {\n                            __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))\n                        } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {\n                            return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));\n                        }\n                    }\n                    break;\n                case 4:\n                    if (8 * sizeof(long) > 3 * PyLong_SHIFT) {\n                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {\n                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))\n                        } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {\n                            return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | 
(long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));\n                        }\n                    }\n                    break;\n            }\n#endif\n            if (sizeof(long) <= sizeof(long)) {\n                __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x))\n#ifdef HAVE_LONG_LONG\n            } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {\n                __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x))\n#endif\n            }\n        }\n        {\n#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)\n            PyErr_SetString(PyExc_RuntimeError,\n                            \"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers\");\n#else\n            long val;\n            PyObject *v = __Pyx_PyNumber_IntOrLong(x);\n #if PY_MAJOR_VERSION < 3\n            if (likely(v) && !PyLong_Check(v)) {\n                PyObject *tmp = v;\n                v = PyNumber_Long(tmp);\n                Py_DECREF(tmp);\n            }\n #endif\n            if (likely(v)) {\n                int one = 1; int is_little = (int)*(unsigned char *)&one;\n                unsigned char *bytes = (unsigned char *)&val;\n                int ret = _PyLong_AsByteArray((PyLongObject *)v,\n                                              bytes, sizeof(val),\n                                              is_little, !is_unsigned);\n                Py_DECREF(v);\n                if (likely(!ret))\n                    return val;\n            }\n#endif\n            return (long) -1;\n        }\n    } else {\n        long val;\n        PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);\n        if (!tmp) return (long) -1;\n        val = __Pyx_PyInt_As_long(tmp);\n        Py_DECREF(tmp);\n        return val;\n    }\nraise_overflow:\n    PyErr_SetString(PyExc_OverflowError,\n        \"value too large to convert to long\");\n    return (long) -1;\nraise_neg_overflow:\n    
PyErr_SetString(PyExc_OverflowError,\n        \"can't convert negative value to long\");\n    return (long) -1;\n}\n\n/* FastTypeChecks */\n#if CYTHON_COMPILING_IN_CPYTHON\nstatic int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) {\n    while (a) {\n        a = a->tp_base;\n        if (a == b)\n            return 1;\n    }\n    return b == &PyBaseObject_Type;\n}\nstatic CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) {\n    PyObject *mro;\n    if (a == b) return 1;\n    mro = a->tp_mro;\n    if (likely(mro)) {\n        Py_ssize_t i, n;\n        n = PyTuple_GET_SIZE(mro);\n        for (i = 0; i < n; i++) {\n            if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b)\n                return 1;\n        }\n        return 0;\n    }\n    return __Pyx_InBases(a, b);\n}\n#if PY_MAJOR_VERSION == 2\nstatic int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) {\n    PyObject *exception, *value, *tb;\n    int res;\n    __Pyx_PyThreadState_declare\n    __Pyx_PyThreadState_assign\n    __Pyx_ErrFetch(&exception, &value, &tb);\n    res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0;\n    if (unlikely(res == -1)) {\n        PyErr_WriteUnraisable(err);\n        res = 0;\n    }\n    if (!res) {\n        res = PyObject_IsSubclass(err, exc_type2);\n        if (unlikely(res == -1)) {\n            PyErr_WriteUnraisable(err);\n            res = 0;\n        }\n    }\n    __Pyx_ErrRestore(exception, value, tb);\n    return res;\n}\n#else\nstatic CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) {\n    int res = exc_type1 ? 
__Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0;\n    if (!res) {\n        res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2);\n    }\n    return res;\n}\n#endif\nstatic int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {\n    Py_ssize_t i, n;\n    assert(PyExceptionClass_Check(exc_type));\n    n = PyTuple_GET_SIZE(tuple);\n#if PY_MAJOR_VERSION >= 3\n    for (i=0; i<n; i++) {\n        if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;\n    }\n#endif\n    for (i=0; i<n; i++) {\n        PyObject *t = PyTuple_GET_ITEM(tuple, i);\n        #if PY_MAJOR_VERSION < 3\n        if (likely(exc_type == t)) return 1;\n        #endif\n        if (likely(PyExceptionClass_Check(t))) {\n            if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1;\n        } else {\n        }\n    }\n    return 0;\n}\nstatic CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) {\n    if (likely(err == exc_type)) return 1;\n    if (likely(PyExceptionClass_Check(err))) {\n        if (likely(PyExceptionClass_Check(exc_type))) {\n            return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type);\n        } else if (likely(PyTuple_Check(exc_type))) {\n            return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type);\n        } else {\n        }\n    }\n    return PyErr_GivenExceptionMatches(err, exc_type);\n}\nstatic CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) {\n    assert(PyExceptionClass_Check(exc_type1));\n    assert(PyExceptionClass_Check(exc_type2));\n    if (likely(err == exc_type1 || err == exc_type2)) return 1;\n    if (likely(PyExceptionClass_Check(err))) {\n        return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2);\n    }\n    return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2));\n}\n#endif\n\n/* 
CheckBinaryVersion */\nstatic int __Pyx_check_binary_version(void) {\n    char ctversion[4], rtversion[4];\n    PyOS_snprintf(ctversion, 4, \"%d.%d\", PY_MAJOR_VERSION, PY_MINOR_VERSION);\n    PyOS_snprintf(rtversion, 4, \"%s\", Py_GetVersion());\n    if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {\n        char message[200];\n        PyOS_snprintf(message, sizeof(message),\n                      \"compiletime version %s of module '%.100s' \"\n                      \"does not match runtime version %s\",\n                      ctversion, __Pyx_MODULE_NAME, rtversion);\n        return PyErr_WarnEx(NULL, message, 1);\n    }\n    return 0;\n}\n\n/* InitStrings */\nstatic int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {\n    while (t->p) {\n        #if PY_MAJOR_VERSION < 3\n        if (t->is_unicode) {\n            *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);\n        } else if (t->intern) {\n            *t->p = PyString_InternFromString(t->s);\n        } else {\n            *t->p = PyString_FromStringAndSize(t->s, t->n - 1);\n        }\n        #else\n        if (t->is_unicode | t->is_str) {\n            if (t->intern) {\n                *t->p = PyUnicode_InternFromString(t->s);\n            } else if (t->encoding) {\n                *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);\n            } else {\n                *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);\n            }\n        } else {\n            *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);\n        }\n        #endif\n        if (!*t->p)\n            return -1;\n        if (PyObject_Hash(*t->p) == -1)\n            return -1;\n        ++t;\n    }\n    return 0;\n}\n\nstatic CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {\n    return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str));\n}\nstatic CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) {\n    Py_ssize_t ignore;\n    return 
__Pyx_PyObject_AsStringAndSize(o, &ignore);\n}\n#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT\n#if !CYTHON_PEP393_ENABLED\nstatic const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {\n    char* defenc_c;\n    PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);\n    if (!defenc) return NULL;\n    defenc_c = PyBytes_AS_STRING(defenc);\n#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII\n    {\n        char* end = defenc_c + PyBytes_GET_SIZE(defenc);\n        char* c;\n        for (c = defenc_c; c < end; c++) {\n            if ((unsigned char) (*c) >= 128) {\n                PyUnicode_AsASCIIString(o);\n                return NULL;\n            }\n        }\n    }\n#endif\n    *length = PyBytes_GET_SIZE(defenc);\n    return defenc_c;\n}\n#else\nstatic CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {\n    if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL;\n#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII\n    if (likely(PyUnicode_IS_ASCII(o))) {\n        *length = PyUnicode_GET_LENGTH(o);\n        return PyUnicode_AsUTF8(o);\n    } else {\n        PyUnicode_AsASCIIString(o);\n        return NULL;\n    }\n#else\n    return PyUnicode_AsUTF8AndSize(o, length);\n#endif\n}\n#endif\n#endif\nstatic CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {\n#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT\n    if (\n#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII\n            __Pyx_sys_getdefaultencoding_not_ascii &&\n#endif\n            PyUnicode_Check(o)) {\n        return __Pyx_PyUnicode_AsStringAndSize(o, length);\n    } else\n#endif\n#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))\n    if (PyByteArray_Check(o)) {\n        *length = PyByteArray_GET_SIZE(o);\n        return PyByteArray_AS_STRING(o);\n    } 
else\n#endif\n    {\n        char* result;\n        int r = PyBytes_AsStringAndSize(o, &result, length);\n        if (unlikely(r < 0)) {\n            return NULL;\n        } else {\n            return result;\n        }\n    }\n}\nstatic CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {\n   int is_true = x == Py_True;\n   if (is_true | (x == Py_False) | (x == Py_None)) return is_true;\n   else return PyObject_IsTrue(x);\n}\nstatic CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) {\n    int retval;\n    if (unlikely(!x)) return -1;\n    retval = __Pyx_PyObject_IsTrue(x);\n    Py_DECREF(x);\n    return retval;\n}\nstatic PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) {\n#if PY_MAJOR_VERSION >= 3\n    if (PyLong_Check(result)) {\n        if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1,\n                \"__int__ returned non-int (type %.200s).  \"\n                \"The ability to return an instance of a strict subclass of int \"\n                \"is deprecated, and may be removed in a future version of Python.\",\n                Py_TYPE(result)->tp_name)) {\n            Py_DECREF(result);\n            return NULL;\n        }\n        return result;\n    }\n#endif\n    PyErr_Format(PyExc_TypeError,\n                 \"__%.4s__ returned non-%.4s (type %.200s)\",\n                 type_name, type_name, Py_TYPE(result)->tp_name);\n    Py_DECREF(result);\n    return NULL;\n}\nstatic CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) {\n#if CYTHON_USE_TYPE_SLOTS\n  PyNumberMethods *m;\n#endif\n  const char *name = NULL;\n  PyObject *res = NULL;\n#if PY_MAJOR_VERSION < 3\n  if (likely(PyInt_Check(x) || PyLong_Check(x)))\n#else\n  if (likely(PyLong_Check(x)))\n#endif\n    return __Pyx_NewRef(x);\n#if CYTHON_USE_TYPE_SLOTS\n  m = Py_TYPE(x)->tp_as_number;\n  #if PY_MAJOR_VERSION < 3\n  if (m && m->nb_int) {\n    name = \"int\";\n    res = m->nb_int(x);\n  }\n  else if (m && m->nb_long) {\n    name = 
\"long\";\n    res = m->nb_long(x);\n  }\n  #else\n  if (likely(m && m->nb_int)) {\n    name = \"int\";\n    res = m->nb_int(x);\n  }\n  #endif\n#else\n  if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) {\n    res = PyNumber_Int(x);\n  }\n#endif\n  if (likely(res)) {\n#if PY_MAJOR_VERSION < 3\n    if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) {\n#else\n    if (unlikely(!PyLong_CheckExact(res))) {\n#endif\n        return __Pyx_PyNumber_IntOrLongWrongResultType(res, name);\n    }\n  }\n  else if (!PyErr_Occurred()) {\n    PyErr_SetString(PyExc_TypeError,\n                    \"an integer is required\");\n  }\n  return res;\n}\nstatic CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {\n  Py_ssize_t ival;\n  PyObject *x;\n#if PY_MAJOR_VERSION < 3\n  if (likely(PyInt_CheckExact(b))) {\n    if (sizeof(Py_ssize_t) >= sizeof(long))\n        return PyInt_AS_LONG(b);\n    else\n        return PyInt_AsSsize_t(b);\n  }\n#endif\n  if (likely(PyLong_CheckExact(b))) {\n    #if CYTHON_USE_PYLONG_INTERNALS\n    const digit* digits = ((PyLongObject*)b)->ob_digit;\n    const Py_ssize_t size = Py_SIZE(b);\n    if (likely(__Pyx_sst_abs(size) <= 1)) {\n        ival = likely(size) ? 
digits[0] : 0;\n        if (size == -1) ival = -ival;\n        return ival;\n    } else {\n      switch (size) {\n         case 2:\n           if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {\n             return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));\n           }\n           break;\n         case -2:\n           if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {\n             return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));\n           }\n           break;\n         case 3:\n           if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {\n             return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));\n           }\n           break;\n         case -3:\n           if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {\n             return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));\n           }\n           break;\n         case 4:\n           if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {\n             return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));\n           }\n           break;\n         case -4:\n           if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {\n             return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));\n           }\n           break;\n      }\n    }\n    #endif\n    return PyLong_AsSsize_t(b);\n  }\n  x = PyNumber_Index(b);\n  if (!x) return -1;\n  ival = PyInt_AsSsize_t(x);\n  Py_DECREF(x);\n  return ival;\n}\nstatic CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) {\n  return b ? 
__Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False);\n}\nstatic CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {\n    return PyInt_FromSize_t(ival);\n}\n\n\n#endif /* Py_PYTHON_H */\n"
  },
  {
    "path": "od_mstar3/cython_od_mstar.pyx",
    "content": "# distutils: language = c++\n# distutils: sources = policy.cpp col_checker.cpp od_mstar.cpp grid_policy.cpp grid_planning.cpp\nfrom libcpp cimport bool\nfrom libcpp.vector cimport vector\nfrom libcpp.pair cimport pair\n\nfrom od_mstar3.col_set_addition import OutOfTimeError, NoSolutionError\n\ncdef extern from \"grid_planning.hpp\" namespace \"mstar\":\n    vector[vector[pair[int, int]]] find_grid_path(\n        const vector[vector[bool]] &obstacles,\n        const vector[pair[int, int]] &init_pos,\n        const vector[pair[int, int]] &goals,\n        double inflation, int time_limit) except +\n\ndef find_path(world, init_pos, goals, inflation, time_limit):\n    \"\"\"Finds a path invoking C++ implementation\n\n    Uses recursive ODrM* to explore a 4 connected grid\n\n    world - matrix specifying obstacles, 1 for obstacle, 0 for free\n    init_pos  - [[x, y], ...] specifying start position for each robot\n    goals     - [[x, y], ...] specifying goal position for each robot\n    inflation - inflation factor for heuristic\n    time_limit - time until failure in seconds\n\n    returns:\n    [[[x1, y1], ...], [[x2, y2], ...], ...] path in the joint\n    configuration space\n\n    raises:\n    NoSolutionError if problem has no solution\n    OutOfTimeError if the planner ran out of time\n    \"\"\"\n\n    import resource\n    resource.setrlimit(resource.RLIMIT_AS, (2**33,2**33)) # 8Gb\n\n    # convert to boolean.  
For some reason coercion doesn't seem to\n    # work properly\n    cdef vector[vector[bool]] obs\n    cdef vector[bool] temp\n    for row in world:\n        temp = vector[bool]()\n        for i in row:\n            temp.push_back(i == 1)\n        obs.push_back(temp)\n    try:\n        return find_grid_path(obs, init_pos, goals, inflation, time_limit)\n    except Exception as e:\n        if str(e) == \"Out of Time\":\n            raise OutOfTimeError()\n        elif str(e) == \"No Solution\":\n            raise NoSolutionError()\n        else:\n            raise e\n"
  },
  {
    "path": "od_mstar3/grid_planning.cpp",
    "content": "#include <vector>\n#include <utility>\n#include <memory>\n\n#include \"grid_planning.hpp\"\n#include \"grid_policy.hpp\"\n#include \"od_mstar.hpp\"\n#include \"mstar_type_defs.hpp\"\n\nusing namespace mstar;\n\n/**\n * Converts from (row, column) coordinates to vertex index\n */\nOdCoord to_internal(std::vector<std::pair<int, int>> coord,\n\t\t\t\t  int cols){\n  std::vector<RobCoord> out;\n  for (auto &c: coord){\n    out.push_back(c.first * cols + c.second);\n  }\n  return OdCoord(out, {});\n};\n\n/**\n * Converts from vertex index to (row, column) format\n */\nstd::vector<std::pair<int, int>> from_internal(OdCoord coord,\n\t\t\t\t\t       int cols){\n  std::vector<std::pair<int, int>> out;\n  for (auto &c: coord.coord){\n    out.push_back({c / cols, c % cols});\n  }\n  return out;\n};\n\nstd::vector<std::vector<std::pair<int, int>>> mstar::find_grid_path(\n  const std::vector<std::vector<bool>> &obstacles,\n  const std::vector<std::pair<int, int>> &init_pos,\n  const std::vector<std::pair<int, int>> &goals,\n  double inflation, int time_limit){\n  // compute time limit first, as the policies fully compute \n  // Need to convert time limit to std::chrono format\n  time_point t = std::chrono::system_clock::now();\n  t += Clock::duration(std::chrono::seconds(time_limit));\n\n  int cols = (int) obstacles[0].size();\n  OdCoord _init = to_internal(init_pos, cols);\n  OdCoord _goal = to_internal(goals, cols);\n  std::vector<std::shared_ptr<Policy>> policies = {};\n  for (const auto &goal: goals){\n    policies.push_back(std::shared_ptr<Policy>(\n\t\t\t grid_policy_ptr(obstacles, goal)));\n  }\n  OdMstar planner(policies, _goal, inflation, t,\n\t\t  std::shared_ptr<ColChecker>(new SimpleGraphColCheck()));\n  OdPath path = planner.find_path(_init);\n  std::vector<std::vector<std::pair<int, int>>> out;\n  for (auto &coord: path){\n    out.push_back(from_internal(coord, cols));\n  }\n  return out;\n}\n"
  },
  {
    "path": "od_mstar3/grid_planning.hpp",
    "content": "#ifndef MSTAR_GRID_PLANNING_H\n#define MSTAR_GRID_PLANNING_H\n\n#include <vector>\n#include <utility>\n\n/*********************************************************************\n * Provides convienence functions for planning on 4-connected graphs\n ********************************************************************/\n\nnamespace mstar{\n  /**\n   * Helper function for finding paths in 4 connected paths\n   *\n   * The world is specified as a matrix where true indicates the presence\n   * of obstacles and false indicates a clear space.  Coordinates for\n   * individual robots are indicated as (row, column)\n   *\n   * @param obstacles matrix indicating obstacle positions.  True is obstacle\n   * @param init_pos list of (row, column) pairs definining the initial\n   *                 position of the robots\n   * @param goals list of (row, column) pairs defining the goal configuration\n   *              of the robots\n   * @param inflation inflation factor used to weight the heuristic\n   * @param time_limit seconds until the code declares failure\n   *\n   * @return Path in the joint configuration space.  Each configuration is\n   *         a vector of (row, col) pairs specifying the position of\n   *         individual robots\n   */\n  std::vector<std::vector<std::pair<int, int> > > find_grid_path(\n    const std::vector<std::vector<bool> > &obstacles,\n    const std::vector<std::pair<int, int> > &init_pos,\n    const std::vector<std::pair<int, int> > &goals,\n    double inflation, int time_limit);\n}\n\n#endif\n\n\n\n\n\n\n\n"
  },
  {
    "path": "od_mstar3/grid_policy.cpp",
    "content": "#include \"grid_policy.hpp\"\n\nusing namespace mstar;\n\nGraph get_graph(const std::vector<std::vector<bool>> &world_map,\n\t\tconst std::pair<int, int> &goal){\n  int rows = (int) world_map.size();\n  int columns = (int) world_map[0].size();\n  typedef std::pair<int,int> E;\n  std::vector<E> edges;\n  std::vector<double> weights;\n\n  std::vector<std::pair<int, int>> offsets = {{-1, 0}, {0, 1}, {1, 0},\n\t\t\t\t\t      {0, -1}, {0, 0}};\n  for (int row = 0; row < rows; ++row){\n    for (int col = 0; col < columns; ++col){\n      if (world_map[row][col]){\n\tcontinue;\n      }\n      for (auto &off: offsets){\n\tint r = row + off.first;\n\tint c = col + off.second;\n\tif( r >= 0 && r < rows && c >= 0 && c < columns && ! world_map[r][c]){\n\t  // edge from (row, col) to (r, c)\n\t  // should be a more direct way, but boost is hating me\n\t  edges.push_back({row * columns + col, r * columns + c});\n\t  if (row == r && col == c && r == goal.first && c == goal.second){\n\t    weights.push_back(0.);\n\t  }else{\n\t    weights.push_back(1.);\n\t  }\n\t}\n      }\n    }\n  }\n  return Graph(edges.begin(), edges.end(), weights.begin(), rows * columns);\n}\n\n/**\n * Generates a policy for a 4 connected grid\n *\n * The internal coordinates are of the form row * num_columns + col\n * Allows for weighting at the goal for free\n *\n * @param world_map matrix of values describing grid true for obstacle,\n *                  false for clear\n * @param goal (row, column) of goal\n *\n * @return Policy object describing problem\n */\nPolicy mstar::grid_policy(const std::vector<std::vector<bool>> &world_map,\n\t\t   const std::pair<int, int> &goal){\n  int columns = (int) world_map[0].size();\n  return Policy(get_graph(world_map, goal), goal.first * columns + goal.second);\n}\n\nPolicy* mstar::grid_policy_ptr(const std::vector<std::vector<bool>> &world_map,\n\t\t\tconst std::pair<int, int> &goal){\n  int columns = (int) world_map[0].size();\n  return new Policy(get_graph(world_map, goal),\n\t\t    goal.first * columns + goal.second);\n}\n"
  },
  {
    "path": "od_mstar3/grid_policy.hpp",
    "content": "#ifndef MSTAR_GRID_POLICY_H\n#define MSTAR_GRID_POLICY_H\n\n/**************************************************************************\n * Generates policy for grid maps\n **************************************************************************/\n\n#include <vector>\n#include <utility>\n\n#include \"mstar_type_defs.hpp\"\n#include \"policy.hpp\"\n#include <boost/graph/adjacency_list.hpp>\n#include <boost/graph/graph_traits.hpp>\n\nnamespace mstar{\n\n  /**\n   * Generates a policy for a 4 connected grid\n   *\n   * The internal coordinates are of the form row * num_columns + col\n   * Allows for weighting at the goal for free\n   *\n   * @param world_map matrix of values describing grid true for obstacle,\n   *                  false for clear\n   * @param goal (row, column) of goal\n   *\n   * @return Policy object describing problem\n   */\n  Policy grid_policy(const std::vector<std::vector<bool>> &world_map,\n\t\t     const std::pair<int, int> &goal);\n\n  Policy* grid_policy_ptr(const std::vector<std::vector<bool>> &world_map,\n\t\t\t  const std::pair<int, int> &goal);\n}\n\n#endif\n"
  },
  {
    "path": "od_mstar3/interface.py",
    "content": "\"\"\"This module defines interfaces for the low-level graphs and\r\npolicies used in Mstar. In general terms, these classes represent:\r\n\r\n    1.  Graphs representing the configuration space.  These graphs are\r\n        structured so that each node in the graph represents a\r\n        configuration, and each edge represents a permissible transition\r\n        between two different configurations.\r\n\r\n        *All of these graphs subclass the Graph_Interface class\r\n\r\n    2.  Policies, which define paths in a configuration space from an\r\n        initial configuration to a goal configuration.  Policies are\r\n        comprised of nodes, each of which represents a configuration\r\n        in the configuration space.  Each node in a policy has a pointer\r\n        to its optimal neighbor, i.e., the next node in the optimal path\r\n        to the goal node.  Policy classes compute optimal paths by using\r\n        some search algorithm to search the graphs generated in the\r\n        classes described above.\r\n\r\n        *All of these graphs subclass the Policy_Interface class\r\n\r\n    3.  Configuration graph edge checking, which determines whether\r\n        moving between two configurations is permissible.  For example,\r\n        configuration graph edge checking should not allow a robot to\r\n        move out of bounds of the workspace.\r\n\r\n    4.  Planner edge checking, which determines whether moving between\r\n        two states of robot positions will result in any collisions.\r\n        For example, planner edge checking should check to see if two\r\n        robots pass through each other as they move between positions.\r\n\"\"\"\r\n\r\n\r\nclass Graph_Interface(object):\r\n\r\n    \"\"\"Interface for configuration space generators\r\n\r\n    This graph interface enumerates the methods that any\r\n    configuration space generator should implement.  
These graphs are\r\n    used by policy graphs such as A*.\r\n    \"\"\"\r\n\r\n    def get_edge_cost(self, coord1, coord2):\r\n        \"\"\"Returns edge_cost of going from coord1 to coord2.\"\"\"\r\n        raise NotImplementedError\r\n\r\n    def get_neighbors(self, coord):\r\n        \"\"\"Returns the collision free neighbors of the specified coord.\r\n\r\n        Return value is a list of tuples each of which are a coordinate\r\n        \"\"\"\r\n        raise NotImplementedError\r\n\r\n    # This is a function to return the in neighbors of a coordinate.\r\n    # Designed by default to handle un-directed graphs\r\n    get_in_neighbors = get_neighbors\r\n\r\n\r\nclass Policy_Interface(object):\r\n\r\n    \"\"\"Interface showing required implemented functions for all policies\r\n\r\n    This interface enumerates the functions that must be exposed by\r\n    policies for M* to function correctly. A policy object with this\r\n    interface provides a route for a single robot.  Underneath the policy\r\n    interface is a graph object which describes the configuration space\r\n    through which robots can move.  
The underlying graph object does all\r\n    of the work of calculating the configuration space based on the\r\n    actual environment in which the robot is moving\r\n\r\n    **All config inputs must be hashable**\r\n    \"\"\"\r\n\r\n    def get_cost(self, config):\r\n        \"\"\"Returns the cost of moving from given position to goal\"\"\"\r\n        raise NotImplementedError\r\n\r\n    def get_edge_cost(self, config1, config2):\r\n        \"\"\"Returns the cost of traversing an edge in the underlying\r\n        graph\r\n        \"\"\"\r\n        raise NotImplementedError\r\n\r\n    def get_step(self, config):\r\n        \"\"\"Returns the configurations of the optimal neighbor of config\"\"\"\r\n        raise NotImplementedError\r\n\r\n    def get_neighbors(self, config):\r\n        \"\"\"Returns neighboring configurations of config\r\n\r\n        This function returns the configurations which are next to\r\n        config\r\n\r\n        Return list of tuples, each of which is a coordinate\r\n        \"\"\"\r\n        raise NotImplementedError\r\n\r\n    def get_graph_size(self, correct_for_size=True):\r\n        \"\"\"Returns number of nodes in graph\"\"\"\r\n        raise NotImplementedError\r\n\r\n    def get_limited_offset_neighbors(self, config, max_offset, min_offset=0):\r\n        \"\"\"Returns set of neighbors between the offset arguments\"\"\"\r\n        raise NotImplementedError\r\n\r\n    def get_offset_neighbors(self, config, offset):\r\n        \"\"\"Returns neighbors of coord with offset specified by argument\"\"\"\r\n        raise NotImplementedError\r\n\r\n    def get_offsets(self, config):\r\n        \"\"\"Return the offsets of the neighbors\"\"\"\r\n        raise NotImplementedError\r\n\r\n\r\nclass Config_Edge_Checker(object):\r\n    \"\"\"Checks robot collisions with objects and edges of workspace\"\"\"\r\n\r\n    def col_check(self, state, recursive):\r\n        \"\"\"Checks for collisions at a single state\r\n\r\n        state     - list of 
coordinates of robots\r\n        recursive - generate collisions sets for rM*\r\n\r\n        Returns:\r\n        M* collision set in type set if recursive false\r\n        rM* collision set in type set if recursive true\r\n        \"\"\"\r\n        raise NotImplementedError\r\n\r\n\r\nclass Planner_Edge_Checker(object):\r\n    \"\"\"Checks for robot collisions on an edge in a planner's graph\r\n\r\n    Currently, no methods have to be implemented because the collision\r\n    methods change based on the graph.\r\n    \"\"\"\r\n\r\n    def pass_through(self, state1, state2, recursive=False):\r\n        \"\"\"Detects pass through collisions\r\n\r\n        state1 - list of robot coordinates describing initial state\r\n        state2 - list of robot coordinates describing final state,\r\n\r\n        Returns:\r\n            M* collision set in type set if recursive false\r\n            rM* collision set in type set if recursive true\r\n        \"\"\"\r\n        raise NotImplementedError\r\n\r\n    def col_check(self, state, recursive):\r\n        \"\"\"Checks for collisions at a single state\r\n\r\n        state     - list of coordinates of robots\r\n        recursive - generate collisions sets for rM*\r\n\r\n        Returns:\r\n            M* collision set in type set if recursive false\r\n            rM* collision set in type set if recursive true\r\n        \"\"\"\r\n        raise NotImplementedError\r\n\r\n    def cross_over(self, state1, state2, recursive=False):\r\n        \"\"\"Detects cross over and pass through collisions\r\n\r\n\r\n        state1 - list of robot coordinates describing initial state\r\n        state2 - list of robot coordinates describing final state\r\n\r\n        Returns:\r\n            M* collision set in type set if recursive false\r\n            rM* collision set in type set if recursive true\r\n        \"\"\"\r\n        raise NotImplementedError\r\n\r\n    def simple_pass_through(self, state1, state2):\r\n        \"\"\"Check for pass 
through collisions\r\n\r\n        state1 - list of robot coordinates describing initial state\r\n        state2 - list of robot coordinates describing final state\r\n\r\n        Returns:\r\n        True if pass through collision\r\n        False otherwise\r\n        \"\"\"\r\n        raise NotImplementedError\r\n\r\n    def simple_col_check(self, state):\r\n        \"\"\"Checks for robot-robot collisions at state,\r\n\r\n        state - list of robot coordinates\r\n\r\n        returns:\r\n        True if collision\r\n        False otherwise\r\n        \"\"\"\r\n        raise NotImplementedError\r\n\r\n    def simple_cross_over(self, state1, state2):\r\n        \"\"\"Check for cross over collisions in 8-connected worlds\r\n\r\n        state1 - list of robot coordinates describing initial state\r\n        state2 - list of robot coordinates describing final state\r\n\r\n        returns:\r\n        True if collision exists\r\n        False otherwise\r\n        \"\"\"\r\n        raise NotImplementedError\r\n\r\n    def simple_incremental_cross_over(self, state1, state2):\r\n        \"\"\"Check for cross over collisions in 8-connected worlds.\r\n\r\n        Assumes that collision checking has been performed for everything\r\n        but the last robot in the coordinates.  To be used to save a bit\r\n        of time for partial expansion approaches\r\n\r\n        state1 - list of robot coordinates describing initial state\r\n        state2 - list of robot coordinates describing final state\r\n\r\n        returns:\r\n        True if collision exists\r\n        False otherwise\r\n        \"\"\"\r\n        raise NotImplementedError\r\n\r\n    def simple_incremental_col_check(self, state1):\r\n        \"\"\"Checks for robot-robot collisions at c1,\r\n\r\n        Assumes that collision checking has been performed for everything\r\n        but the last robot in the coordinates.  
To be used to save a bit\r\n        of time for partial expansion approaches\r\n\r\n        state1 - list of robot coordinates\r\n\r\n        returns:\r\n        True if collision exists\r\n        False otherwise\r\n        \"\"\"\r\n        raise NotImplementedError\r\n\r\n    def single_bot_outpath_check(self, cur_coord, prev_coord, cur_t, paths):\r\n        \"\"\"Tests for collisions from prev_coord to cur_coord\r\n\r\n        Checks for cross over collisions and collisions at the same\r\n        location when moving from cur_coord to prev_coord while robots\r\n        are moving in paths\r\n\r\n        cur_coord - position of a single robot\r\n\r\n        Returns:\r\n\r\n        True if collision exists\r\n        False otherwise\r\n        \"\"\"\r\n        raise NotImplementedError\r\n\r\n    def simple_prio_col_check(self, coord, t, paths, pcoord=None,\r\n                              conn_8=False):\r\n        \"\"\"Returns true, if collision is detected, false otherwise\r\n        at the moment only used to check the obstacle collisions, but\r\n        didn't want to reject the other code already\r\n\r\n        coord - coord of potential new neighbor\r\n        t - current time step\r\n        paths - previously found paths\r\n        pcoord - previous coordinate of the path\r\n\r\n        Returns:\r\n        True if collision exists\r\n        False otherwise\r\n        \"\"\"\r\n        raise NotImplementedError\r\n\r\n    def incremental_col_check(self, state, recursive):\r\n        \"\"\"Checks for robot-robot collisions in state\r\n\r\n        state     - list of coordinates of robots\r\n        recursive - generate collisions sets for rM*\r\n\r\n        Only checks whether the last robot is\r\n        involved in a collision, for use with incremental methods\r\n\r\n        Returns:\r\n            M* collision set in type set if recursive false\r\n            rM* collision set in type set if recursive true\r\n        \"\"\"\r\n        raise 
NotImplementedError\r\n\r\n    def incremental_cross_over(self, state1, state2, recursive=False):\r\n        \"\"\"Detects cross over collisions as well as pass through\r\n        collisions.\r\n\r\n        Only checks if the last robot is involved in a collision, for use\r\n        with partial expansion approaches.\r\n\r\n        state1 - list of robot coordinates describing initial state\r\n        state2 - list of robot coordinates describing final state,\r\n\r\n        Returns:\r\n            M* collision set in type set if recursive false\r\n            rM* collision set in type set if recursive true\r\n        \"\"\"\r\n        raise NotImplementedError\r\n\r\n    def single_bot_cross_over(self, coord1, pcoord1, coord2, pcoord2):\r\n        \"\"\"Checks for cross-over and collisions between robots 1 and 2\r\n\r\n        Robots are moving from pcoord to coord\r\n\r\n        pcoord1 - first position of first robot\r\n        coord1  - second position of first robot\r\n        pcoord2 - first position of second robot\r\n        coord2  - second position of second robot\r\n\r\n        Returns:\r\n        True if collision\r\n        False otherwise\r\n        \"\"\"\r\n        raise NotImplementedError\r\n\r\n    def prio_col_check(self, coord, pcoord, t, paths=None, conn_8=False,\r\n                       recursive=False):\r\n        \"\"\"Collision checking with paths passed as constraints\r\n\r\n        coord  - current node\r\n        pcoord - previous node\r\n        t      - timestep\r\n        paths  - paths that need to be avoided\r\n\r\n        Returns: (collision sets are of type set)\r\n            M* collision set if collision exists and recursive is false\r\n            rM* collision set if collision exists and recursive is true\r\n            None if no collision exists\r\n        \"\"\"\r\n        raise NotImplementedError\r\n"
  },
  {
    "path": "od_mstar3/mstar_type_defs.hpp",
    "content": "#ifndef MSTAR_TYPE_DEFS\n#define MSTAR_TYPE_DEFS\n\n/**************************************************************************\n * Provides type defs that are used in multiple files\n *************************************************************************/\n\n#include <boost/graph/graph_traits.hpp>\n#include <boost/graph/adjacency_list.hpp>\n#include <vector>\n#include <chrono>\n\nnamespace mstar{\n  /**\n   * Defines the graph type for individual robots.\n   *\n   * Assumes robot positions are indicated by integers, costs by doubles,\n   * and assumes that the edge_weight property is filled\n   */\n  typedef boost::adjacency_list<\n    boost::vecS, boost::vecS, boost::bidirectionalS,  boost::no_property,\n    boost::property<boost::edge_weight_t, double>> Graph;\n\n  // type that defines the position of the robot\n  typedef int RobCoord;\n\n  // represents the coordinate of an OD node, also used to index graphs\n  struct OdCoord{\n    std::vector<RobCoord> coord, move_tuple;\n\n    OdCoord(std::vector<RobCoord> in_coord, std::vector<RobCoord> in_move){\n      coord = in_coord;\n      move_tuple = in_move;\n    }\n\n    OdCoord(): coord(), move_tuple(){}\n\n    bool operator==(const OdCoord &other) const{\n      return (coord == other.coord) && (move_tuple == other.move_tuple);\n    }\n\n    bool is_standard() const{\n      return move_tuple.size() == 0;\n    }\n  };\n\n  // Holds a path in the joint configuration space\n  typedef std::vector<OdCoord> OdPath;\n\n  // defines a single set of mutually colliding robots.\n  // Must be sorted in order of increasing value for logic to hold\n  typedef std::set<uint> ColSetElement;\n\n  // Defines a full collision set\n  typedef std::vector<ColSetElement> ColSet;\n\n  // defines times for checking purposes\n  typedef std::chrono::system_clock Clock;\n  typedef Clock::time_point time_point;\n}\n\n#endif\n"
  },
  {
    "path": "od_mstar3/mstar_utils.hpp",
    "content": "#ifndef MSTAR_UTILS_H\n#define MSTAR_UTILS_H\n\n/**\n * Defines convenience functions for testing or other purposes not directly\n * related to the actual planning\n */\n\n#include <iostream>\n\n#include \"mstar_type_defs.hpp\"\n\nnamespace mstar{\n  void print_od_path(const OdPath &path){\n    for (const OdCoord &pos: path){\n      std::cout << \"{\";\n      for (const RobCoord &i: pos.coord){\n\tstd::cout << i << \" \";\n      }\n      std::cout << \"}\" << std::endl;\n    }\n  };\n\n  void print_path(const std::vector<std::vector<std::pair<int, int>>> &path){\n    for (const auto &coord: path){\n      std::cout << \"{\";\n      for (const auto &c: coord){\n\tstd::cout << \"(\" << c.first << \", \" << c.second << \") \";\n      }\n      std::cout << \"}\" << std::endl;\n    }\n  };\n};\n\n#endif\n"
  },
  {
    "path": "od_mstar3/od_mstar.cpp",
    "content": "#include <chrono>\n#include <cassert>\n\n#include \"od_mstar.hpp\"\n\nusing namespace mstar;\n\nOdMstar::OdMstar(std::vector<std::shared_ptr<Policy>> policies,\n\t\t OdCoord goals, double inflation,\n\t\t time_point end_time, std::shared_ptr<ColChecker> col_checker){\n  subplanners_ = new std::unordered_map<ColSetElement,\n\t\t\t\t\tstd::shared_ptr<OdMstar>>();\n  policies_ = policies;\n  // top-level planner, so construct a set of all robot ids\n  for (int i = 0; i < (int) goals.coord.size(); ++i){\n    ids_.push_back(i);\n  }\n  goals_ = goals;\n  end_time_ = end_time;\n  inflation_ = inflation;\n  planning_iter_ = 0;\n  num_bots_ = (int) ids_.size();\n  col_checker_ = col_checker;\n  top_level_ = true;\n}\n\nOdMstar::OdMstar(const ColSetElement &robots, OdMstar &parent){\n  subplanners_ = parent.subplanners_;\n  policies_ = parent.policies_;\n  for (int i: robots){\n    ids_.push_back(parent.ids_[i]);\n    goals_.coord.push_back(parent.goals_.coord[i]);\n  }\n  end_time_ = parent.end_time_;\n  inflation_ = parent.inflation_;\n  planning_iter_ = 0;\n  num_bots_ = (int) ids_.size();\n  col_checker_ = parent.col_checker_;\n  top_level_ = false;\n}\n\nOdMstar::~OdMstar(){\n  if (top_level_){\n    delete subplanners_;\n  }\n}\n\nOdPath OdMstar::find_path(OdCoord init_pos){\n  reset();\n\n  // Configure the initial vertex\n  // identified by setting the back_ptr to itself\n  OdVertex *first = get_vertex(init_pos);\n  first->reset(planning_iter_);\n  first->back_ptr = first;\n  first->cost = 0;\n  first->open = true;\n\n  OpenList open_list;\n  open_list.push(first);\n\n  while (open_list.size() > 0){\n    if (std::chrono::system_clock::now() > end_time_){\n      throw OutOfTimeError();\n    }\n\n    OdVertex *vert = open_list.top();\n    open_list.pop();\n    vert->open = false;\n    if (vert->closed){\n      continue;\n    }\n\n    // check if this is the goal vertex\n    if (vert->coord == goals_){\n      vert->forwards_ptr = vert;\n    }\n    if 
(vert->forwards_ptr != nullptr){\n      // Either the goal or on a previous found path to the goal\n      return trace_path(vert);\n    }\n\n    expand(vert, open_list);\n  }\n  throw NoSolutionError();\n}\n\nvoid OdMstar::reset(){\n  planning_iter_++;\n}\n\ndouble OdMstar::heuristic(const OdCoord &coord){\n  // Heuristic is computed from the assigned move for elements of the\n  // move tuple, and from the base coordinate for all others\n  double h = 0;\n  uint i = 0;\n  while (i < coord.move_tuple.size()){\n    h += policies_[ids_[i]]->get_cost(coord.move_tuple[i]);\n    ++i;\n  }\n  while (i < coord.coord.size()){\n    h += policies_[ids_[i]]->get_cost(coord.coord[i]);\n    ++i;\n  }\n  return h * inflation_;\n}\n\nOdVertex* OdMstar::get_vertex(const OdCoord &coord){\n  // returns a pair with the first element an interator to a <key, vertex>\n  // pair and the second to a bool which is true if there was not a\n  // preexisting value\n  auto p = graph_.emplace(coord, coord);\n  p.first->second.reset(planning_iter_);\n  if (p.second){\n    // new vertex, so need to set heuristic\n    p.first->second.h = heuristic(coord);\n  }\n  return &p.first->second;\n}\n\nOdCoord get_vertex_step(OdVertex * vert){\n  assert(vert != nullptr);\n  while (1){\n    if (vert->forwards_ptr->coord.is_standard()){\n      return vert->forwards_ptr->coord;\n    }\n    vert = vert->forwards_ptr;\n    assert(vert != nullptr);\n  }\n}\n\nOdCoord OdMstar::get_step(const OdCoord &init_pos){\n  OdVertex* vert = OdMstar::get_vertex(init_pos);\n  if (vert->forwards_ptr != nullptr){\n    return get_vertex_step(vert);\n  }\n  find_path(init_pos);\n  return get_vertex_step(vert);\n}\n\nvoid OdMstar::expand(OdVertex *vertex, OpenList &open_list){\n  vertex->closed = true;\n  ColSet gen_set = col_set_to_expand(vertex->col_set, vertex->gen_set);\n  if (gen_set.size() == 1 && (int) gen_set[0].size() == num_bots_){\n    // the generating collision set contains all robots, so no caching\n    // would be 
possible.  Therefore, don't use\n    gen_set = vertex->col_set;\n  }\n\n  std::vector<OdCoord> neighbors = get_neighbors(vertex->coord, gen_set);\n\n  // accumulates the collision sets that occur while trying to move to\n  // any of the neighbors\n  ColSet col_set;\n  for (OdCoord &new_coord: neighbors){\n    ColSet new_col = col_checker_->check_edge(vertex->coord, new_coord, ids_);\n    if (!new_col.empty()){\n      // State not accessible due to collisions\n      add_col_set_in_place(new_col, col_set);\n      continue;\n    }\n    \n    OdVertex *new_vert = get_vertex(new_coord);\n    new_vert->back_prop_set.insert(vertex);\n    // Always need to add the collision set of any vertex we can reach\n    // to its successors, as otherwise we would need to wait for another\n    // robot to collide downstream before triggering back propagation\n    add_col_set_in_place(new_vert->col_set, col_set);\n\n    if (new_vert->closed){\n      continue;\n    }\n\n    double new_cost = vertex->cost + edge_cost(vertex->coord, new_coord);\n    if (new_cost >= new_vert->cost){\n      continue;\n    }\n    new_vert->cost = new_cost;\n    new_vert->back_ptr = vertex;\n    new_vert->open = true;\n    new_vert->gen_set = gen_set;\n    open_list.push(new_vert);\n\n    // Add an intermediate vertex's parent's col_set to its col_set, so\n    // moves for later robots can be explored.  
Not necessary, but should\n    // reduce thrashing\n    if (!new_vert->coord.is_standard()){\n      add_col_set_in_place(vertex->col_set, new_vert->col_set);\n    }\n  }\n  back_prop_col_set(vertex, col_set, open_list);\n}\n\nstd::vector<OdCoord> OdMstar::get_neighbors(const OdCoord &coord,\n\t\t\t\t\t    const ColSet &col_set){\n  // If the collision set contains all robots, invoke the non-recursive\n  // base case\n  if (col_set.size() == 1 && (int) col_set[0].size() == num_bots_){\n    return get_all_neighbors(coord);\n  }\n  \n  assert(coord.is_standard());\n\n  // Generate the step along the joint policy\n  std::vector<RobCoord> policy_step;\n  for (int i = 0; i < num_bots_; i++){\n    policy_step.push_back(policies_[ids_[i]]->get_step(coord.coord[i]));\n  }\n\n  // Iterate over colliding sets of robots, and integrate the results\n  // of the sub planning for each set\n  for (const ColSetElement &elem: col_set){\n    // The collision set contains the local ids (relative to the robots in\n    // this subplanner) of the robots in collision\n    // To properly index child subplanners, need to convert to global robot\n    // ids, so that the subplanners will be properly globally accessible\n    ColSetElement global_col;\n    for (auto &local_id: elem){\n      global_col.insert(ids_[local_id]);\n    }\n    // Get, and if necessary construct, the appropriate subplanner.\n    // returns a pair <p, bool> where bool is true if a new subplanner\n    // was generated, and p is an iterator to a pair <key, val>\n    if (subplanners_->find(global_col) == subplanners_->end()){\n      subplanners_->insert(\n\t{global_col, std::shared_ptr<OdMstar>(new OdMstar(elem, *this))});\n    }\n    OdMstar *planner = subplanners_->at(global_col).get();\n    // create the query point\n    std::vector<RobCoord> new_base;\n    for (const int &i: elem){\n      new_base.push_back(coord.coord[i]);\n    }\n\n    OdCoord step;\n    try{\n      step = planner->get_step(OdCoord(new_base, {}));\n   
 } catch(NoSolutionError &e){\n      // no solution for that subset of robots, so return no neighbors\n      // only likely to be relevant on directed graphs\n      return {};\n    }\n\n    int elem_dex = 0;\n    // now need to copy into the relevant positions in policy_step\n    for (auto i: elem){\n      policy_step[i] = step.coord[elem_dex];\n      ++elem_dex; // could play with post appending, but don't want to\n    }\n  }\n  return {OdCoord({policy_step}, {})};\n}\n\nstd::vector<OdCoord> OdMstar::get_all_neighbors(const OdCoord &coord){\n  // get the coordinate of the robot to assign a new move\n  uint move_index = coord.move_tuple.size();\n  std::vector<std::vector<RobCoord>> new_moves;\n  for (RobCoord &move: policies_[ids_[move_index]]->get_out_neighbors(\n\t coord.coord[move_index])){\n    std::vector<RobCoord> new_move(coord.move_tuple);\n    new_move.push_back(move);\n    new_moves.push_back(new_move);\n  }\n  std::vector<OdCoord> ret;\n  if (move_index + 1 < coord.coord.size()){\n    // generating intermediate vertices\n    for (auto &move_tuple: new_moves){\n      ret.push_back(OdCoord(coord.coord, move_tuple));\n    }\n  } else {\n    // generating standard vertices\n    for (auto &move_tuple: new_moves){\n      ret.push_back(OdCoord(move_tuple, {}));\n    }\n  }\n  return ret;\n}\n\ndouble OdMstar::edge_cost(const OdCoord &source, const OdCoord &target){\n  if (source.is_standard() && target.is_standard()){\n    // transition between standard vertex, so all robots are assigned moves and\n    // incur costs\n    double cost = 0;\n    for (int i = 0; i < num_bots_; ++i){\n      cost += policies_[ids_[i]]->get_edge_cost(source.coord[i],\n\t\t\t\t\t\ttarget.coord[i]);\n    }\n    return cost;\n  } else {\n    // transition from intermediate vertex, so only one robot is assigned\n    // a move and incurs cost\n    uint move_index = source.move_tuple.size();\n    if (target.is_standard()){\n      return 
policies_[ids_[move_index]]->get_edge_cost(\n\tsource.coord[move_index], target.coord[move_index]);\n    } else{\n      return policies_[ids_[move_index]]->get_edge_cost(\n\tsource.coord[move_index], target.move_tuple[move_index]);\n    }\n  }\n}\n\nOdPath OdMstar::trace_path(OdVertex *vert){\n  OdPath path;\n  back_trace_path(vert, vert->forwards_ptr, path);\n  forwards_trace_path(vert, path);\n  return path;\n}\n\nvoid OdMstar::back_trace_path(OdVertex *vert, OdVertex *successor,\n\t\t\t      OdPath &path){\n  vert->forwards_ptr = successor;\n  // check if this is the final, terminal state, which is not required\n  // to have a zero-cost self loop, so could get problems\n  if (vert != successor){\n    vert->h = successor->h + edge_cost(vert->coord, successor->coord);\n  } else{\n    vert->h = 0;\n  }\n  if (vert->coord.is_standard()){\n    path.insert(path.begin(), vert->coord);\n  }\n  if (vert->back_ptr != vert){\n    back_trace_path(vert->back_ptr, vert, path);\n  }\n}\n\nvoid OdMstar::forwards_trace_path(OdVertex *vert, OdPath &path){\n  if (vert->forwards_ptr != vert){\n    if (vert->forwards_ptr->coord.is_standard()){\n      path.push_back(vert->forwards_ptr->coord);\n    }\n    forwards_trace_path(vert->forwards_ptr, path);\n  }\n}\n\nvoid OdMstar::back_prop_col_set(OdVertex *vert, const ColSet &col_set,\n\t\t\t\tOpenList &open_list){\n  bool further = add_col_set_in_place(col_set, vert->col_set);\n  if (further){\n    vert->closed = false;\n    if (! vert->open){\n      vert->open = true;\n      open_list.push(vert);\n    }\n\n    for(OdVertex *predecessor: vert->back_prop_set){\n      back_prop_col_set(predecessor, vert->col_set, open_list);\n    }\n  }\n}\n"
  },
  {
    "path": "od_mstar3/od_mstar.hpp",
    "content": "#ifndef MSTAR_OD_MSTAR_H\n#define MSTAR_OD_MSTAR_H\n\n#include <unordered_map>\n#include <functional>\n#include <queue>\n#include <memory>\n#include <exception>\n\n#include <boost/functional/hash_fwd.hpp>\n\n#include \"mstar_type_defs.hpp\"\n#include \"col_set.hpp\"\n#include \"od_vertex.hpp\"\n#include \"col_checker.hpp\"\n#include \"policy.hpp\"\n\nnamespace std{\n  template <> struct hash<mstar::OdCoord>{\n    size_t operator()(const mstar::OdCoord &val) const{\n      size_t hash = boost::hash_range(val.coord.cbegin(), val.coord.cend());\n      boost::hash_combine<size_t>(\n\thash,\n\tboost::hash_range(val.move_tuple.cbegin(), val.move_tuple.cend()));\n      return hash;\n    }\n  };\n\n  template <> struct hash<std::vector<int>>{\n    size_t operator()(const std::vector<int> &val) const{\n      return boost::hash_range(val.cbegin(), val.cend());\n    }\n  };\n\n  template <> struct hash<mstar::ColSetElement>{\n    size_t operator()(const mstar::ColSetElement &val) const{\n      return boost::hash_range(val.cbegin(), val.cend());\n    }\n  };\n}\n\n\nnamespace mstar{\n\n  struct greater_cost{\n    bool operator()(const mstar::OdVertex *x, const mstar::OdVertex *y) const{\n      if (x == nullptr || y == nullptr){\n\treturn true;\n      }\n      return *x > *y;\n    }\n  };\n\n  // Sort in decreasing order to give cheap access to the cheapest elements\n  typedef std::priority_queue<OdVertex*, std::vector<OdVertex*>,\n\t\t\t      greater_cost> OpenList;\n\n  class OdMstar {    \n  public:\n    /**\n     * Constructs a new, top level M* planner\n     *\n     * @param policies pointer to vector of policies.\n     *                 OdMstar does not take ownership\n     * @param goals goal configuration of entire system\n     * @param inflation inflation factor\n     * @param end_time time at which M* will declare failure\n     * @param checker collision checking object\n     */\n    OdMstar(\n      std::vector<std::shared_ptr<Policy>> policies,\n      
OdCoord goals, double inflation, time_point end_time,\n      std::shared_ptr<ColChecker> col_checker);\n\n    /**\n     * Creates a subplanner for a subset of the robots\n     *\n     * robots is a collision set element in the frame of parent, not global\n     * robot ids\n     */\n    OdMstar(const ColSetElement &robots, OdMstar &parent);\n\n    ~OdMstar();\n\n    /**\n     * Computes the optimal path to the goal from init_pos\n     *\n     * @param init_pos coordinate of the initial joint configuration\n     *\n     * @return the path in the joint configuration graph to the goal\n     *\n     * @throws OutOfTimeError ran out of planning time\n     * @throws NoSolutionError no path to goal from init_pos\n     */\n    OdPath find_path(OdCoord init_pos);\n\n  private:\n    /**TODO: fix\n     * This is kind of horrifying, but I cannot store the OdMstar objects\n     * directly in the unordered map, as I get ungodly errors that look\n     * like they come from an allocator.  Adding copy constructor and\n     * assignment operator doesn't work, so its something involved about\n     * STL.  Think this works, but annoying\n     */\n    std::unordered_map<ColSetElement, std::shared_ptr<OdMstar>> *subplanners_;\n    std::vector<std::shared_ptr<Policy>> policies_;\n    // ids of the robots this planner handles.  
Assumed to be in ascending\n    // order\n    std::vector<int> ids_;\n    OdCoord goals_;\n    // holds the nodes in the joint configuration space\n    std::unordered_map<OdCoord, OdVertex> graph_;\n    time_point end_time_; // When planning will be halted\n    double inflation_; // inflation factor for heuristic\n    int planning_iter_; // current planning iteration\n    int num_bots_;\n    std::shared_ptr<ColChecker> col_checker_;\n    bool top_level_; // tracks if the top level planner\n\n    OdMstar(const OdMstar &that) = delete;\n\n    /**\n     * Resets planning for a new planning iteration.\n     *\n     * Does not reset forwards_ptrs, as those should be valid across\n     * iterations\n     */\n    void reset();\n\n    /**\n     * Computes the heuristic value of a vertex at a given coordinate\n     *\n     * @param coord coordinate for which to compute a heuristic value\n     *\n     * @return the (inflated) heuristic value\n     */\n    double heuristic(const OdCoord &coord);\n\n    /**\n     * Returns a reference to the vertex at a given coordinate\n     *\n     * this->graph retains ownership of the vertex.  Will create the vertex\n     * if it does not already exist.\n     *\n     * @param coord coordinate of the desired vertex\n     *\n     * @return pointer to the vertex at coord.\n     */\n    OdVertex* get_vertex(const OdCoord &coord);\n\n    /**\n     * Returns the optimal next step from init_pos\n     *\n     * Will compute the full path if necessary, but preferentially uses\n     * cached results in forwards_ptrs.  
Expected to only be called from\n     * a standard coordinate, and to only return a standard coordinate\n     *\n     * @param init_pos coordinate to compute the optimal next step from\n     *\n     * @returns the coordinate of the optimal next step towards the goal\n     */\n    OdCoord get_step(const OdCoord &init_pos);\n\n    /**\n     * Generates the neighbors of vertex and add them to the open list\n     *\n     * @param vertex OdVertex to expand\n     * @param open_list the sorted open list being used\n     */\n    void expand(OdVertex *vertex, OpenList &open_list);\n\n    /**\n     * Returns the limited neighbors of coord using recursive calculation\n     *\n     * @param coord Coordinates of vertex to generate neighbor thereof\n     * @param col_set collision set of vertex to generate neighbors\n     *\n     * @return list of limited neighbors\n     */\n    std::vector<OdCoord> get_neighbors(\n      const OdCoord &coord, const ColSet &col_set);\n\n    /**\n     * Returns the limited neighbors of coord using non-recursive computation\n     *\n     * Called when the collision set contains all of the robots, as a base\n     * case for get_neighbors, thus always generate all possible neighbors\n     *\n     * @param coord Coordinates of vertex to generate neighbor thereof\n     *\n     * @return list of limited neighbors\n     */\n     std::vector<OdCoord> get_all_neighbors(\n       const OdCoord &coord);\n\n    /**\n     * Returns the cost of traversing a given edge\n     *\n     * @param source coordinate of the source vertex\n     * @param target coordinate of the target vertex\n     *\n     * @return the cost of the edge\n     */\n    double edge_cost(const OdCoord &source, const OdCoord &target);\n\n    /**\n     * Returns the path through a vertex\n     *\n     * Assumes that back_ptr and forwards_ptr are set and non-none at vert\n     * Identifies each end of the path by looking for a back_ptr/forwards_ptr\n     * pointed at the holder\n     *\n     * 
@param vert the vertex to trace a path through\n     *\n     * @return the path passing through vert containing only standard vertices\n     */\n    OdPath trace_path(OdVertex *vert);\n\n    /**\n     * Generates the path to the specified vertex\n     *\n     * Sets forward_ptrs to cache the path, and updates the heuristic\n     * values of the vertices on the path so we can end the moment a\n     * vertex on a cached path is expanded.\n     *\n     * TODO: double check that making the heuristic inconsistent in this\n     * fashion is OK.\n     *\n     * @param vert the vertex to trace the path to\n     * @param successor the successor of vert on the path\n     * @param path place to construct path\n     */\n    void back_trace_path(OdVertex *vert, OdVertex *successor, OdPath &path);\n\n    /**\n     * Generates the path from the specified vertex to the goal\n     *\n     * Non-trivial only if vert lies on a previously cached path\n     *\n     * @param vert the vertex to trace the path from\n     * @param path place to construct path\n     */\n    void forwards_trace_path(OdVertex *vert, OdPath &path);\n\n    /**\n     * Backpropagates collision set information to all predecessors of a\n     * vertex.\n     *\n     * Adds vertices whose collision set changes back to the open list\n     *\n     * @param vertex pointer to the vertex to back propagate from\n     * @param col_set the collision set that triggered backpropagation\n     * @param open_list the current open list\n     */\n    void back_prop_col_set(OdVertex *vert, const ColSet &col_set,\n\t\t\t   OpenList &open_list);\n  };  \n\n  struct OutOfTimeError : public std::exception{\n    const char * what () const throw(){\n      return \"Out of Time\";\n    }\n  };\n\n  struct NoSolutionError : public std::exception{\n    const char * what () const throw(){\n      return \"No Solution\";\n    }\n  };\n\n};\n\n#endif\n"
  },
  {
    "path": "od_mstar3/od_mstar.py",
    "content": "\"\"\"Implementation of subdimensional expansion using operator\ndecomposition instead of vanilla A*, with better graph abstraction.\nAll coordinates are to be tuples and all collision sets are to be lists\nof immutable sets (frozenset). This partial rewrite will focus on\nconverting everything that can be immutable into an immutable structure\n\nIntended to support both mstar and rMstar.\"\"\"\n\n\nfrom od_mstar3 import workspace_graph\nimport sys\nimport time as timer  # So that we can use the time command in ipython\nfrom od_mstar3 import SortedCollection\nfrom od_mstar3.col_set_addition import add_col_set_recursive, add_col_set\nfrom od_mstar3.col_set_addition import effective_col_set\nfrom od_mstar3.col_set_addition import OutOfTimeError, NoSolutionError, col_set_add\ntry:\n    import ipdb as pdb\nexcept ImportError:\n    # Default to pdb\n    import pdb\n\n\nMAX_COST = workspace_graph.MAX_COST\nPER_ROBOT_COST = 1  # Cost a robot accrues for not being at its goal position\nPOSITION = 0\nMOVE_TUPLE = 1  # Tuple of destination coordinate tuples for each robot's move\n\nglobal_move_list = []  # Used for visualization\n\n\ndef find_path(obs_map, init_pos, goals, recursive=True, inflation=1.0,\n              time_limit=5 * 60.0, astar=False, get_obj=False, connect_8=False,\n              full_space=False, return_memory=False, flood_fill_policy=False,\n              col_checker=None, epemstar=False, makespan=False,\n              col_set_memory=True):\n    \"\"\"Finds a path in the specified obstacle environment from the\n    initial position to the goal.\n\n    obs_map           - obstacle map,  matrix with 0 for free,  1 for\n                        obstacle\n    init_pos          - ((x1, y1), (x2, y2), ...) coordinates of the\n                        initial state, should be tuples\n    goals             - ((x1, y1), (x2, y2), ...) 
coordinates of the goal\n                        should be tuples\n    recursive         - True for rM*,  false for basic M*\n    inflation         - factor by which the metric will be inflated\n    time_limit        - how long to run before raising an error\n                        (declaring timeout)\n    astar             - use basic A* instead of operator decomposition to\n                        search the graph produced by M* (i.e. run M* not\n                        ODM*)\n    get_obj           - Return the Od_Mstar instance used in path\n                        planning, default False\n    connect_8         - True (default) for 8 connected graph,  False for\n                        4 connected graph\n    full_space        - If True,  run pure A* or OD (depending on the\n                        astar flag) instead of subdimensional expansion.\n                        Default False\n    return_memory     - Returns information on memory useage.\n                        Default False\n    flood_fill_policy - compute policy with flood fill instead of\n                        resumable A*\n    col_checker       - Optional custom collision checker object,  used\n                        for searching non-grid graphs.  Default None\n    epemstar          - Use EPEA* to search the graph rather than A* or\n                        OD\n    makespan          - minimize makespan (time to solution),\n                        instead of minimizing time robots spend away\n                        from their robots\n    col_set_memory    - remember previous step collision set, intended\n                        to provide more efficient cached path\n                        utillization.  
True by default\n    \"\"\"\n    global global_move_list\n    if (col_checker is None or isinstance(col_checker,\n                                          workspace_graph.Edge_Checker)):\n        goals = tuple(map(tuple, goals))\n        init_pos = tuple(map(tuple, init_pos))\n    global_move_list = []\n    o = Od_Mstar(obs_map, goals, recursive=recursive, inflation=inflation,\n                 astar=astar, connect_8=connect_8, full_space=full_space,\n                 flood_fill_policy=flood_fill_policy, col_checker=col_checker,\n                 epeastar=epemstar, makespan=makespan,\n                 col_set_memory=col_set_memory)\n    # Need to make sure that the recursion limit is great enough to\n    # actually construct the path\n    longest = max([o.sub_search[(i, )].get_cost(init_pos[i])\n                   for i in range(len(init_pos))])\n    # Guess that the longest path will not be any longer than 5 times the\n    # longest individual robot path\n    sys.setrecursionlimit(max(sys.getrecursionlimit(), longest * 5 *\n                              len(init_pos)))\n    path = o.find_path(init_pos, time_limit=time_limit)\n    num_nodes = o.get_memory_useage(False)\n    corrected_mem = o.get_memory_useage(True)\n    if get_obj:\n        return path, o\n    if return_memory:\n        return path, num_nodes, corrected_mem\n    return path\n\n\nclass Od_Mstar(object):\n    \"\"\"Implements M* and rM* using operator decomposition instead of\n    basic M* as the base computation.\n\n    \"\"\"\n    def __init__(self, obs_map, goals, recursive, sub_search=None,\n                 col_checker=None, rob_id=None, inflation=1.0,\n                 end_time=10 ** 15, connect_8=False, astar=False,\n                 full_space=False, flood_fill_policy=False, epeastar=False,\n                 offset_increment=1, makespan=False, col_set_memory=False):\n        \"\"\"\n        obs_map           - obstacle map,  matrix with 0 for free,  1\n                            for 
obstacle\n        goals             - ((x1, y1), (x2, y2), ...) coordinates of the\n                            goal, should be tuples\n        recursive         - True for rM*, false for basic M*\n        sub_search        - Sub planners, should be None for the full\n                            configuration space\n        col_checker       - object to handle robot-robot collision\n                            checking.  Should implement the same\n                            interface as workspace_graph.Edge_Checker\n        rob_id            - maps local robot identity to full\n                            configuration space identity,  should be\n                            None for the full configuration space\n                            instance\n        inflation         - how much the metric should be inflated by\n        end_time          - when the search should be terminated\n        connect_8         - True for 8 connected graph,  False for 4\n                            connected graph\n        astar             - use basic A* instead of operator\n                            decomposition\n        full_space        - whether to perform a full configuration\n                            space search\n        flood_fill_policy - compute policy with flood fill instead of\n                            resumable A*\n        epeastar          - Uses EPEA* instead of OD or A* for graph\n                            search\n        offset_increment  - how much to increase the EPEA* offset after\n                            every expansion\n        makespan          - minimize makespan (time to solution),\n                            instead of minimizing time robots spend away\n                            from their robots\n        col_set_memory    - remember previous step collision set,\n                            intended to provide more efficient cached\n                            path utillization.  
False by default\n        \"\"\"\n        # visualize - turn on visualization code - DISABLED\n        self.obs_map = obs_map\n        self.recursive = recursive\n        self.sub_search = sub_search\n        # Stores the global ids of the robots in order of their position\n        # in coord\n        self.rob_id = rob_id\n        self.goals = goals\n        # Graph that holds the graph representing the joint configuration space\n        self.graph = {}\n        self.end_time = end_time\n        self.inflation = float(inflation)\n        self.connect_8 = connect_8\n        self.astar = astar\n        self.epeastar = epeastar\n        self.offset_increment = offset_increment\n        self._makespan = makespan\n\n        # Store some useful values\n        self.updated = 0\n        self.num_bots = len(goals)\n        # self.visualize = visualize\n        self.full_space = full_space\n        # Need a different key incorporating the offset for EPEM*\n        if self.epeastar:\n            self.open_list_key = lambda x: (-x.cost - x.h * self.inflation -\n                                            x.offset)\n        else:\n            self.open_list_key = lambda x: -x.cost - x.h * self.inflation\n        if self.rob_id is None:\n            self.rob_id = tuple(range(len(goals)))\n        self.col_checker = col_checker\n        if self.col_checker is None:\n            self.col_checker = workspace_graph.Edge_Checker()\n        self.flood_fill_policy = flood_fill_policy\n        # Making everything that can be immutable,  immutable\n        self._col_set_memory = col_set_memory\n        self.gen_policy_planners(sub_search, self.obs_map, self.goals)\n\n    def gen_policy_planners(self, sub_search, obs_map, goals):\n        \"\"\"Creates the sub-planners and necessary policy keys.  
This is\n        because pretty much every sub-planner I've made requires\n        adjusting the graph used to create the policies and passing\n        around dummy sub_searches\n\n        side effects to generate self.sub_search and self.policy_keys\n        \"\"\"\n        self.policy_keys = tuple([(i, ) for i in self.rob_id])\n        self.sub_search = sub_search\n        if self.sub_search is None:\n            self.sub_search = {}\n            # Wrapping the robot number in a tuple so we can use the same\n            # structure for planners for compound robots\n            if self.flood_fill_policy:\n                for dex, key in enumerate(self.policy_keys):\n                    self.sub_search[key] = workspace_graph.Workspace_Graph(\n                        obs_map, goals[dex], connect_8=self.connect_8)\n            else:\n                for dex, key in enumerate(self.policy_keys):\n                    self.sub_search[key] = workspace_graph.Astar_Graph(\n                        obs_map, goals[dex], connect_8=self.connect_8,\n                        makespan=self._makespan)\n\n    def get_graph_size(self, correct_for_size=True):\n        \"\"\"Returns the number of nodes in the current graph\"\"\"\n        if correct_for_size:\n            return len(self.graph) * len(self.rob_id)\n        return len(self.graph)\n\n    def get_memory_useage(self, correct_for_size=True):\n        \"\"\"Returns the total number of nodes allocated in this planner\n        and any subplanners.\n        \"\"\"\n        temp_sum = self.get_graph_size(correct_for_size)\n        for i in self.sub_search:\n            temp_sum += self.sub_search[i].get_graph_size()\n        return temp_sum\n\n    def reset(self):\n        \"\"\"resets the map for later searches,  does not remove\n        forwards_pointer\n        \"\"\"\n        self.updated += 1\n\n    def heuristic(self, coord, standard_node):\n        \"\"\"Returns the heuristic value of the specified coordinate.\n\n        Does 
not handle inflation naturally so we can update the\n        heuristic properly\n\n        coord         - coordinate of the node at which to compute the\n                        heuristic\n        standard_node - whether this is a standard node\n        \"\"\"\n        if standard_node:\n            cost = sum(self.sub_search[key].get_cost(coord[dex])\n                       for dex, key in enumerate(self.policy_keys))\n            # return self.inflation * cost\n            return cost\n        else:\n            # Compute heuristic for robots which have moved\n            cost = sum(self.sub_search[key].get_cost(coord[MOVE_TUPLE][dex])\n                       for dex, key in enumerate(\n                           self.policy_keys[:len(coord[MOVE_TUPLE])]))\n            # compute heuristic for robots which have not moved\n            cost += sum(self.sub_search[key].get_cost(\n                coord[POSITION][dex + len(coord[MOVE_TUPLE])])\n                for dex, key in enumerate(self.policy_keys[len(coord[\n                    MOVE_TUPLE]):]))\n            return cost\n\n    def pass_through(self, coord1, coord2):\n        \"\"\"Tests for a collision during transition from coord 1 to coord\n        2.\n\n        coord1, coord2 - joint coordinates of multirobot system\n\n        returns:\n\n        collision set for the edge,  empty list if there are no\n        collisions\n        \"\"\"\n        # return self.col_checker.pass_through(coord1, coord2, self.recursive)\n        return self.col_checker.cross_over(coord1, coord2, self.recursive)\n\n    def incremental_col_check(self, start_coord, new_coord):\n        \"\"\"Performs an incremental collision check for new coord.\n\n        Assumes that the position of a single new robot has been added to\n        a list of coordinates of robots.  Checks whether adding this new\n        robot will lead to a collision.  
Start coord is the joint state\n        before the action being built in new_coord,  and may contain more\n        robots than new_coord. counts on the implementation of the\n        incremental collision checks to be intelligent to avoid issues\n\n        start_coord - coordinate at which the system starts\n        new_coord   - coordinate to which the system moves\n\n        returns:\n\n        collision_set formed from the colliding robots during the move\n        \"\"\"\n        col_set = self.col_checker.incremental_cross_over(\n            start_coord, new_coord, self.recursive)\n        if col_set:\n            return col_set\n        return self.col_checker.incremental_col_check(\n            new_coord, self.recursive)\n\n    def get_node(self, coord, standard_node):\n        \"\"\"Returns the node at the specified coordinates.\n\n        Remember intermediate nodes are of the form\n        (base_coord, move_tuple)\n\n        coord         - coordinates of the node,  potentially an\n                        intermediate node\n        standard_node - whether this is a standard node or an\n                        intermediate node\n        \"\"\"\n        if coord in self.graph:\n            # Node already exists.  
reset if necessary\n            t_node = self.graph[coord]\n            t_node.reset(self.updated)\n            return t_node\n        # Need to instantiate the node\n        if standard_node:\n            col = self.col_checker.col_check(coord, self.recursive)\n        else:\n            # Only check for collisions between robots whose move has\n            # been determined\n            col = self.col_checker.col_check(coord[MOVE_TUPLE], self.recursive)\n        free = (len(col) == 0)\n        t_node = mstar_node(coord, free, self.recursive, standard_node)\n        # Cache the resultant col_set\n        t_node.col_set = col\n        t_node.updated = self.updated\n        t_node.h = self.heuristic(coord, standard_node)\n        # Add the node to the graph\n        self.graph[coord] = t_node\n        return t_node\n\n    def get_step(self, init_pos, standard_node=True):\n        \"\"\"Get the optimal step from init_pos.\n\n        Computes the entire optimal path if necessary, but preferentially\n        relying on the cached paths stored in mstar_node.forwards_ptr.\n\n        init_pos      - coordinate of the node to compute the step from\n        standard_node - standard_node whether init_pos represents a\n                        standard node\n\n        returns:\n\n        coordinate of the optimal step towards the goal\n        \"\"\"\n        cur_node = self.get_node(init_pos, standard_node)\n        temp = cur_node.get_step()\n        if temp is not None:\n            return temp\n        # Use a zero time limit,  so the end time will not be modified\n        path = self.find_path(init_pos, time_limit=-1)\n        return cur_node.get_step()\n\n    def gen_init_nodes(self, init_pos):\n        \"\"\"Generate the initial search nodes.\n\n        Potentially more than one node is generated, but in practice\n        will usually just one will be generated\n\n        init_pos - initial position\n\n        returns:\n\n        list of initial nodes\n        \"\"\"\n  
      first = self.get_node(init_pos, True)\n        first.open = True\n        first.cost = 0\n        first.back_ptr = first\n        return [first]\n\n    def find_path(self, init_pos, time_limit=5 * 60):\n        \"\"\"Finds a path from init_pos to the goal specified when self\n        was instantiated.\n\n        init_pos   - ((x1, y1), (x2, y2), ...) coordinates of initial\n                     position\n        time_limit - time allocated to find a solution.  Will raise an\n                     exception if a path cannot be found within this time\n                     period\n        \"\"\"\n        self.reset()\n        if time_limit > 0:\n            self.end_time = timer.time() + time_limit\n            # For replanning to work correctly, need to update the end\n            # time for all subplanners.  Otherwise, the end time of the\n            # subplanners will never be updated, so if you make a query\n            # more than the original time_limit seconds after the first\n            # query to this object, you will always get a timeout,\n            # regardless of the time limit used on the second query\n            for planner in self.sub_search.values():\n                if hasattr(planner, 'end_time'):\n                    planner.end_time = self.end_time\n\n        # Configure the goal node\n        goal_node = self.get_node(self.goals, True)\n        goal_node.forwards_ptr = goal_node\n        # Use the negation of the cost,  so SortedCollection will put the\n        # lowest value item at the right of its internal list\n        init_nodes = self.gen_init_nodes(init_pos)\n        open_list = SortedCollection.SortedCollection(init_nodes,\n                                                      key=self.open_list_key)\n\n        while len(open_list) > 0:\n            if timer.time() > self.end_time:\n                raise OutOfTimeError(timer.time())\n            node, consistent = open_list.consistent_pop()\n            if not consistent:\n       
         continue\n            node.open = False\n            if self.solution_condition(node):\n                path = node.get_path()\n                return tuple(path)\n            self.expand(node, open_list)\n        raise NoSolutionError()\n\n    def solution_condition(self, node):\n        \"\"\"Checks whether we have finished finding a path when node has\n        been reached\n\n        Checks whether node.forwards_ptr indicates that a path to the\n        goal has been found\n\n        node - node to check for indicating a path to the goal\n\n        returns:\n\n        True if goal has been reached or a cached path to the goal has\n        been reached, else False\n        \"\"\"\n        if node.forwards_ptr is not None:\n            return True\n\n        return False\n\n    def expand(self, node, open_list):\n        \"\"\"Handles the expansion of the given node and the addition of\n        its neighbors to the open list\n\n        node      - node to expand\n        open_list - open list used during the search\n        \"\"\"\n        node.closed = True\n        # ASSUMES THAT get_neighbors HANDLES UPDATING NEIGHBOR COST,\n        # AND DOES NOT RETURN NEIGHBORS FOR WHICH THERE IS ALREADY A\n        # PATH AT LEAST AS GOOD\n        if self.recursive:\n            neighbors,  col_set = self.get_neighbors_recursive(node)\n        else:\n            neighbors,  col_set = self.get_neighbors_nonrecursive(node)\n\n        # node is the only element in the backpropagation sets of\n        # neighbors that has changed,  so we can backpropagate from here\n        old_col_set = node.col_set\n        if not self.full_space:\n            node.back_prop_col_set(col_set, open_list, epeastar=self.epeastar)\n        for i in neighbors:\n            i.back_ptr = node\n            # Even if the node is already in the open list,  removing if\n            # from its old position (given by the old cost value) is too\n            # expensive, requiring an O(N) operation 
to delete.  Simply\n            # add the new value and reject the old copy (which will be\n            # marked as closed),  when you come to it\n            i.open = True\n            open_list.insert_right(i)\n        if self.epeastar:\n            # if running epeastar\n            if old_col_set == node.col_set:\n                # If the collision set changed,  then adding the node\n                # back to the open list with properly updated collision\n                # set has been handled by the backprop function\n                node.offset += self.offset_increment\n                open_list.insert(node)\n\n    def od_mstar_neighbors(self, node):\n        \"\"\"Generates the free neighbors of the given node for the\n        non-recursive case, using operator decomposition\n\n        Also returns the associated collision set due to neighbors\n        which are non-free due to robot-robot collisions.  Only returns\n        nodes which can be most cheaply reached through node\n\n        node - node to determine neighbors\n\n        returns:\n\n        (neighbors, col_set)\n        neighbors - collision free neighbors which can most efficiently\n                    be reached from node\n        col_set   - collision set for neighbors which are not collision\n                    free\n        \"\"\"\n        col_set = ()\n        if not node.free:\n            # Can't have an out neighbor for a node in collision\n            return col_set, node.col_set\n        rob_dex = 0  # Keeps track of the robot to move in this step\n\n        # split the coordinates into the start coordinate and the move\n        # list if the node is standard,  doing this so variables are\n        # initialized in  the preferred namespace,  which is probably not\n        # necessary\n        move_list = ()\n        start_coord = node.coord\n        if not node.standard_node:\n            start_coord = node.coord[POSITION]\n            move_list = node.coord[MOVE_TUPLE]\n            
rob_dex = len(node.coord[MOVE_TUPLE])\n        if ((len(node.col_set) > 0 and rob_dex in node.col_set[0]) or\n                self.full_space):\n            # This robot is in the collision set,  so consider all\n            # possible neighbors\n            neighbors = self.sub_search[\n                self.policy_keys[rob_dex]].get_neighbors(start_coord[rob_dex])\n        else:\n            neighbors = [self.sub_search[self.policy_keys[rob_dex]].get_step(\n                start_coord[rob_dex])]\n        # check if this is the last robot to be moved\n        filled = (rob_dex == (self.num_bots - 1))\n\n        new_neighbors = []\n        # visualize_holder = []\n        for i in neighbors:\n            # Generate the move list with the new robot position\n            new_moves = list(move_list)\n            new_moves.append(i)\n            new_moves = tuple(new_moves)\n            # Check for collisions in the transition to the new\n            # position, only need to consider the robots in the move list\n            # pass through\n            pass_col = self.pass_through(start_coord[:rob_dex + 1], new_moves)\n            if len(pass_col) > 0:\n                # Have robot-robot collisions\n                col_set = col_set_add(pass_col, col_set, self.recursive)\n                continue\n            # Need to branch on whether we have filled the move list\n            if filled:\n                # Generate a standard node.  
Static collisions are found\n                # in self.get_node()\n                new_node = self.get_node(new_moves, True)\n            else:\n                # Generate an intermediate node\n                new_node = self.get_node((start_coord, new_moves), False)\n            if node not in new_node.back_prop_set:\n                new_node.back_prop_set.append(node)\n            # Always need to add the col_set of any vertex that we can\n            # actually reach,  as otherwise,  we would need to wait for\n            # another robot to collide downstream of the reached vertex\n            # before that vertex would back propagate its col_set\n            col_set = col_set_add(new_node.col_set, col_set, self.recursive)\n            if not new_node.free:\n                continue\n            # Skip if closed\n            if new_node.closed:\n                continue\n            # Handle costs, which depends solely on the move list,\n            # function to allow for alternate cost functions\n            temp_cost = self.od_mstar_transition_cost(start_coord, node.cost,\n                                                      i, rob_dex)\n            if temp_cost >= new_node.cost:\n                continue\n            new_node.cost = temp_cost\n            new_neighbors.append(new_node)\n            # Set the intermediate node's col_set equal to its parent,\n            # so later elements will actually be explored.  
Not\n            # technically required but will cut back on thrashing\n            if not new_node.standard_node:\n                new_node.add_col_set(node.col_set)\n        return new_neighbors, col_set\n\n    def od_mstar_transition_cost(self, start_coord, prev_cost, neighbor,\n                                 rob_dex):\n        \"\"\"Computes the transition cost for a single robot in od_mstar\n        neighbor generation\n\n        start_coord - base position of robots (prior to move assignment)\n        prev_cost   - cost of base node\n        neighbor    - proposed move assignment\n        rob_dex     - robot move is assigned to\n\n        returns:\n\n        cost of a single robot transitioning state\n        \"\"\"\n        prev_cost += self.sub_search[self.policy_keys[rob_dex]].get_edge_cost(\n            start_coord[rob_dex], neighbor)\n        return prev_cost\n\n    def gen_epeastar_coords(self, node):\n        \"\"\"Helper function for generating neighbors of a node using EPEA*\n\n        Uses a two step process. First the incremental costs are\n        computed, then the neighbors fitting those incremental costs.\n        More directly matches what was done in the EPEA* paper.  
Performs\n        incremental collision checking during the generation of\n        neighbors,  to prune out as many invalid nodes as early as\n        possible\n\n        node - node for which to generate neighbors\n        \"\"\"\n        adder = add_col_set\n        if self.recursive:\n            adder = add_col_set_recursive\n        offset = node.offset\n        coord = node.coord\n        if len(node.col_set) == 0:\n            # have empty collision set\n            new_coord = tuple(\n                self.sub_search[self.policy_keys[dex]].get_step(\n                    coord[dex]) for dex in range(self.num_bots))\n            pass_col = self.pass_through(coord, new_coord)\n            if pass_col:\n                return [], pass_col\n            col = self.col_checker.col_check(new_coord, self.recursive)\n            if col:\n                return [], col\n            return [new_coord], []\n        search_list = [(0, ())]\n        assert len(node.col_set) == 1\n        node_col = node.col_set[0]\n        for rob_dex in range(self.num_bots):\n            if rob_dex in node_col:\n                offsets = self.sub_search[\n                    self.policy_keys[rob_dex]].get_offsets(coord[rob_dex])\n            else:\n                offsets = (0, )\n            new_list = []\n            for cost, pos in search_list:\n                for off in offsets:\n                    if rob_dex < self.num_bots - 1:\n                        if off + cost <= offset:\n                            new_list.append((off + cost, pos + (off, )))\n                    elif off + cost == offset:\n                        # For the last robot,  only want to keep costs which\n                        # match perfectly\n                        new_list.append((off + cost, pos + (off, )))\n                search_list = new_list\n        neighbors = []\n        col_set = []\n        for offset, costs in search_list:\n            gen_list = [()]\n            for dex, c in 
enumerate(costs):\n                if dex in node_col:\n                    neib = (self.sub_search[\n                            self.policy_keys[dex]].get_offset_neighbors(\n                            coord[dex], c))\n                else:\n                    neib = ((0, self.sub_search[\n                        self.policy_keys[dex]].get_step(coord[dex])),)\n                new_list = []\n                for _, n in neib:\n                    for old in gen_list:\n                        new_coord = old + (n, )\n                        # Perform collision checking\n                        tcol = self.incremental_col_check(coord, new_coord)\n                        if tcol:\n                            col_set = adder(col_set, tcol)\n                            continue\n                        new_list.append(new_coord)\n                gen_list = new_list\n            neighbors.extend(gen_list)\n        return neighbors, col_set\n\n    def get_epeastar_neighbors(self, node):\n        \"\"\"Generates the free neighbors of the given node for the\n        non-recursive case.\n\n        Also returns the associated collision set due to neighbors\n        which are non-free due to robot-robot collisions.  
Only returns\n        nodes which can be most cheaply reached through node\n\n        node - node to be expanded\n\n        returns:\n        (neighbors, col_set)\n        neighbors - neighbors that can most be efficiently reached from\n                    node, that are collision free\n        col_set   - collisions incurred when trying to reach\n                    non-collision free nodes\n        \"\"\"\n        if not node.free:\n            # Can't have an out neighbor for a node in collision\n            return [], node.col_set\n        start_coord = node.coord\n        neighbor_coords, col_set = self.gen_epeastar_coords(node)\n        neighbors = []\n        for i in neighbor_coords:\n            new_node = self.get_node(i, True)\n            if node not in new_node.back_prop_set:\n                new_node.back_prop_set.append(node)\n            if not new_node.free:\n                continue\n            # update costs\n            if new_node.closed:\n                continue\n            t_cost = self.epeastar_transition_cost(start_coord, node.cost, i)\n            if t_cost < new_node.cost:\n                new_node.cost = t_cost\n                new_node.back_ptr = node\n                neighbors.append(new_node)\n        return neighbors, col_set\n\n    def epeastar_transition_cost(self, start_coord, prev_cost, new_coord):\n        \"\"\"Computes the cost of a new node at the specified coordinates,\n        starting from the given position and cost\n\n        start_coord - node at which the system starts\n        prev_cost   - cost of the node at start_coord\n        new_coord   - destination node\n        \"\"\"\n        for dex, (source, target) in enumerate(zip(start_coord, new_coord)):\n            prev_cost += self.sub_search[self.policy_keys[dex]].get_edge_cost(\n                source, target)\n        return prev_cost\n\n    def get_neighbors_nonrecursive(self, node):\n        \"\"\"Generates neighbors using a non-recursive method.  
Note that\n        collision sets will still be generated in the style specified by\n        self.recursive\n\n        node - node for which to generate neighbors\n        \"\"\"\n        if self.astar:\n            return self.get_astar_neighbors(node)\n        elif self.epeastar:\n            return self.get_epeastar_neighbors(node)\n        return self.od_mstar_neighbors(node)\n\n    def create_sub_search(self, new_goals, rob_id):\n        \"\"\"Creates a new instance of a subsearch for recursive search\n\n        new_goals - goals for the subset of the robots\n        rob_ids   - ids of the robots involved in the subsearch\n\n        returns:\n\n        new OD_Mstar instance to perform search for the specified subset\n        of robots\"\"\"\n        return Od_Mstar(self.obs_map, new_goals, self.recursive,\n                        sub_search=self.sub_search,\n                        col_checker=self.col_checker, rob_id=rob_id,\n                        inflation=self.inflation,\n                        end_time=self.end_time, connect_8=self.connect_8,\n                        astar=self.astar, full_space=self.full_space,\n                        epeastar=self.epeastar, makespan=self._makespan,\n                        col_set_memory=self._col_set_memory)\n\n    def get_subplanner_keys(self, col_set):\n        \"\"\"Returns keys to subplanners required for planning for some\n        subset of robots.\n\n        col_set - collision set to be solved\n\n        returns:\n\n        keys for the necessary subplanners in self.sub_search\n        \"\"\"\n        # Convert the collision sets into the global indicies,  and\n        # convert to tuples.  
Assumes self.rob_id is sorted\n        global_col = list(map(lambda y: tuple(map(lambda x: self.rob_id[x], y)),\n                         col_set))\n        # generate the sub planners,  if necessary\n        for dex, gc in enumerate(global_col):\n            if gc not in self.sub_search:\n                t_goals = tuple([self.goals[k] for k in col_set[dex]])\n                self.sub_search[gc] = self.create_sub_search(t_goals, gc)\n        return global_col\n\n    def get_neighbors_recursive(self, node):\n        \"\"\"Get the neighbors of node for recursive M*.\n\n        Uses operator decomposition style expansion when necessary,  may\n        fail when called on an intermediate node\n\n        node - node for which to generate neighbors\n\n\n        returns:\n        (neighbors, col_set)\n        neighbors - list of coordinates for neighboring, reachable\n                    nodes\n        col_set   - collisions generated by trying to transition to\n                    non-reachable neighbors\n        \"\"\"\n        # Handle collision set memory if necessary\n        # use_memory = False\n        if self._col_set_memory:\n            col_set = effective_col_set(node.col_set, node.prev_col_set)\n            effective_set = col_set\n            # if set(col_set) != set(node.col_set):\n            #     # using memory\n            #     use_memory = True\n            # Sort the collision set,  which also converts them into\n            # lists\n            col_set = list(map(sorted, col_set))\n        else:\n            # Sort the collision set,  which also converts them into lists\n            col_set = list(map(sorted, node.col_set))\n        # Use standard operator decomposition,  if appropriate\n        if len(col_set) == 1 and len(col_set[0]) == self.num_bots:\n            # At base of recursion case\n            return self.get_neighbors_nonrecursive(node)\n        start_coord = node.coord\n        if not node.standard_node:\n            assert False\n     
   # Generate subplanners for new coupled groups of robots and get\n        # their sub_search keys\n        coupled_keys = self.get_subplanner_keys(col_set)\n        # Generate the individually optimal step\n        new_coord = [self.sub_search[self.policy_keys[i]].get_step(\n            start_coord[i]) for i in range(self.num_bots)]\n        # Iterate over the colliding sets of robots,  and integrate the\n        # results of the sup planning for each set\n        for i in range(len(col_set)):\n            # if use_memory and frozenset(col_set[i]) in node.prev_col_set:\n                # assert self.sub_search[\n                #     coupled_keys[i]].graph[\n                #         tuple([start_coord[j]\n                #                for j in col_set[i]])].forwards_ptr != None\n            try:\n                new_step = self.sub_search[coupled_keys[i]].get_step(\n                    tuple([start_coord[j] for j in col_set[i]]))\n            except NoSolutionError:\n                # Can't get to the goal from here\n                return [], []\n            # Copy the step into position\n            for j in range(len(col_set[i])):\n                new_coord[col_set[i][j]] = new_step[j]\n\n        new_coord = tuple(new_coord)\n        # process the neighbor\n        pass_col = self.pass_through(start_coord, new_coord)\n        if len(pass_col) > 0:\n            # Have collisions before reaching node\n            return [], pass_col\n        new_node = self.get_node(new_coord, True)\n        if node not in new_node.back_prop_set:\n            new_node.back_prop_set.append(node)\n        if not new_node.free:\n            return [],  new_node.col_set\n        # Skip if closed\n        if new_node.closed:\n            return [],  new_node.col_set\n        # Compute the costs. 
THIS MAY NOT WORK IF node IS AN INTERMEDIATE\n        # NODE\n        t_cost = self.get_node(start_coord, True).cost\n        t_cost = self.od_rmstar_transition_cost(start_coord, t_cost,\n                                                new_node.coord)\n        if t_cost < new_node.cost:\n            new_node.cost = t_cost\n            if self._col_set_memory:\n                new_node.prev_col_set = effective_set\n            return [new_node], new_node.col_set\n        return [], new_node.col_set\n\n    def od_rmstar_transition_cost(self, start_coord, prev_cost, new_coord):\n        \"\"\"Computes the transition cost for a single robot in od_rmstar\n        neighbor generation\n\n        start_coord - base position of robots (prior to move assignment)\n        prev_cost   - cost of base node\n        new_coord    - proposed move assignmetn\n\n        returns:\n\n        total cost of reaching new_coord via start_coord\n        \"\"\"\n        for dex, (source, target) in enumerate(zip(start_coord, new_coord)):\n            prev_cost += self.sub_search[self.policy_keys[dex]].get_edge_cost(\n                source, target)\n        return prev_cost\n\n    def alt_get_astar_neighbors(self, node):\n        \"\"\"Gets neighbors of a specified node using the standard A*\n        approach.\n\n\n        assumes working with standard nodes\n\n        node - node for which to generate neighbors\n\n        returns:\n        (neighbors, col_set)\n        neighbors - coordinates of collision free neighboring nodes\n        col_set   - collisions resulting from trying to reach\n                    non-collision free neighbors\n        \"\"\"\n        start_coord = node.coord\n        # Generate the individually optimal setp\n        base_coord = [self.sub_search[self.policy_keys[i]].get_step(\n            start_coord[i]) for i in range(self.num_bots)]\n        old_coords = [base_coord]\n        assert len(node.col_set) <= 1\n        to_explore = node.col_set\n        if 
self.full_space:\n            to_explore = [range(self.num_bots)]\n        for i in to_explore:\n            for bot in i:\n                new_coords = []\n                neighbors = self.sub_search[self.policy_keys[bot]]\\\n                                .get_neighbors(start_coord[bot])\n                for neigh in neighbors:\n                    for k in old_coords:\n                        temp = k[:]\n                        temp[bot] = neigh\n                        new_coords.append(temp)\n                old_coords = new_coords\n        col_set = []\n        neighbors = []\n        old_coords = list(map(tuple, old_coords))\n        for i in old_coords:\n            # Check if we can get there\n            pass_col = self.pass_through(start_coord, i)\n            if len(pass_col) > 0:\n                col_set = col_set_add(pass_col, col_set, self.recursive)\n                continue\n            new_node = self.get_node(i, True)\n            col_set = col_set_add(new_node.col_set, col_set, self.recursive)\n            if node not in new_node.back_prop_set:\n                new_node.back_prop_set.append(node)\n            if not new_node.free:\n                continue\n            # update costs\n            if new_node.closed:\n                continue\n            t_cost = node.cost\n            for j in range(len(start_coord)):\n                t_cost += self.sub_search[self.policy_keys[j]].get_edge_cost(\n                    start_coord[i], new_node.coord[j])\n            if t_cost < new_node.cost:\n                new_node.cost = t_cost\n                new_node.back_ptr = node\n                neighbors.append(new_node)\n        return neighbors, col_set\n\n    def get_astar_neighbors(self, node):\n        \"\"\"Gets neighbors of a specified node using the standard A*\n        approach,\n\n        assumes working with standard nodes\n\n        node - node for which to generate neighbors\n\n        returns:\n        (neighbors, col_set)\n        
neighbors - coordinates of collision free neighboring nodes\n        col_set   - collisions resulting from trying to reach\n                    non-collision free neighbors\n        \"\"\"\n        start_coord = node.coord\n        # Generate the individually optimal setp\n        base_coord = [self.sub_search[self.policy_keys[i]].get_step(\n            start_coord[i]) for i in range(self.num_bots)]\n        old_coords = [base_coord]\n        assert len(node.col_set) <= 1\n        to_explore = node.col_set\n        if self.full_space:\n            to_explore = [range(self.num_bots)]\n        for i in to_explore:\n            for bot in i:\n                new_coords = []\n                neighbors = self.sub_search[self.policy_keys[bot]]\\\n                                .get_neighbors(start_coord[bot])\n                for neigh in neighbors:\n                    for k in old_coords:\n                        temp = k[:]\n                        temp[bot] = neigh\n                        new_coords.append(temp)\n                old_coords = new_coords\n        col_set = []\n        neighbors = []\n        old_coords = list(map(tuple, old_coords))\n        for i in old_coords:\n            # First check if this path is relevant.  I.e. 
if there is already a\n            # better path to the node,  then the search will never try to use\n            # that route,  so we don't need to consider collisions\n            new_node = self.get_node(i, True)\n            if node.free:\n                t_cost = node.cost\n                for j in range(len(start_coord)):\n                    t_cost += self.sub_search[\n                        self.policy_keys[j]].get_edge_cost(start_coord[j],\n                                                           i[j])\n                if t_cost >= new_node.cost:\n                    continue\n            # Check if we can get there\n            pass_col = self.pass_through(start_coord, i)\n            if len(pass_col) > 0:\n                col_set = col_set_add(pass_col, col_set, self.recursive)\n                continue\n            new_node = self.get_node(i, True)\n            col_set = col_set_add(new_node.col_set, col_set, self.recursive)\n            if node not in new_node.back_prop_set:\n                new_node.back_prop_set.append(node)\n            if not new_node.free:\n                continue\n            # update costs\n            if new_node.closed:\n                continue\n            if t_cost < new_node.cost:\n                new_node.cost = t_cost\n                new_node.back_ptr = node\n                neighbors.append(new_node)\n        return neighbors, col_set\n\n\nclass mstar_node(object):\n    \"\"\"Holds the data needed for a single node in operator decomposition\n    m* coord should be a tuple of tuples.  
Standard nodes have\n    coordinates of the form ((x1, y1), (x2, y2), ...),  while\n    intermediate nodes have coordinates of the form (((x1, y1), ...),\n    move_tuple)\n    \"\"\"\n\n    __slots__ = ['free', 'coord', 'updated', 'open', 'closed', 'standard_node',\n                 'h', 'cost', 'back_ptr', 'back_prop_set', 'col_set',\n                 'recursive', 'forwards_ptr', 'assignment', 'colset_changed',\n                 'offset', 'prev_col_set']\n\n    def __init__(self, coord, free, recursive, standard_node, back_ptr=None,\n                 forwards_ptr=None):\n        \"\"\"Constructor for mstar_node\n\n        Assumes the col_set is empty by default\n\n        coord         - tuple giving coordinates,  may store partial\n                        moves if not standard node\n        free          - part of the free configuration space\n        standard_node - represents a standard node,  and not a partial\n                        move\n        back_ptr      - pointer to best node to get to self\n        forwards_ptr  - pointer along the best path to the goal\n        \"\"\"\n        self.free = free\n        self.coord = coord\n        self.updated = -1\n        # Whether already in the open list\n        self.open = False\n\n        # Whether this has been expanded.  Note that a node can be added\n        # back to the open list after it has been expanded,  but will\n        # still be marked as closed.  It cannot have its cost changed,\n        # but it can add neighbors, but not be added as a neighbor\n        self.closed = False\n        self.standard_node = standard_node\n        # Heuristic cost to go,  None to ensure it will be properly\n        # calculated\n        self.h = None\n        # Cost to reach\n        self.cost = MAX_COST\n\n        # Optimal way to reach this node.  
Point to self to indicate the\n        # initial position\n        self.back_ptr = back_ptr\n        self.back_prop_set = []  # Ways found to reach this node\n        self.col_set = ()\n        # store the collision set of back_ptr when the path from\n        # back_ptr to self was first found.  Used for hopefully more\n        # efficient cached path access\n        self.prev_col_set = ()\n        self.recursive = recursive\n\n        # Keeps track of solutions that have already been found,\n        # replace forwards_tree.  Denote the goal node by pointing\n        # forwards_ptr\n        # to itself\n        self.forwards_ptr = forwards_ptr\n        self.assignment = None  # Used for multiassignment mstar\n\n        # Used to track whether new assignments need to be generated for\n        # MURTY  mstar\n        self.colset_changed = False\n        # Tracks current offset for multiple re-expansion a la EPEA*\n        self.offset = 0\n\n    def reset(self, t):\n        \"\"\"Resets if t > last update time\"\"\"\n        if t > self.updated:\n            self.updated = t\n            self.open = False\n            self.closed = False\n            self.cost = MAX_COST\n            self.back_ptr = None\n            self.back_prop_set = []\n            self.offset = 0\n\n    def get_path(self):\n        \"\"\"Gets the path passing through path to the goal,  assumes that\n        self is either the goal node,  or a node connected to the goal\n        node through forwards_pointers\n        \"\"\"\n        path = self.backtrack_path()\n        return self.forwards_extend_path(path)\n\n    def backtrack_path(self, path=None, prev=None):\n        \"\"\"Finds the path that leads up to this node,  updating\n        forwards_ptr so that we can recover this path quickly,  only\n        returns standard nodes\n\n        path - current reconstructed path for use in recusion, must\n               start as None\n        prev - pointer to the last node visited by backtrack_path, 
used\n               to update forwards_ptr to record the best paths to the\n               goal\n        \"\"\"\n        if path is None:\n            path = []\n        if prev is not None:\n            self.forwards_ptr = prev\n            if isinstance(self.h, tuple):\n                # Needed for constrained od_mstar,  and don't feel like\n                # coming up with a better solution for now\n                self.h = (prev.h[0] + prev.cost[0] - self.cost[0], self.h[1])\n            else:\n                self.h = prev.h + (prev.cost - self.cost)\n        if self.standard_node:\n            assert self.coord not in path\n            path.insert(0, self.coord)\n        if self.back_ptr == self:\n            # Done so that it cannot terminate on a node that wasn't\n            # properly initialized\n            return path\n        return self.back_ptr.backtrack_path(path, self)\n\n    def forwards_extend_path(self, path):\n        \"\"\"Extends the path from self to the goal node,  following\n        forwards pointers,  only includes standard nodes\n\n        path - current path to extend towards the goal, as list of joint\n               configuration space coordinates\n        \"\"\"\n        if self.forwards_ptr == self:\n            return path\n        if self.forwards_ptr.standard_node:\n            path.append(self.forwards_ptr.coord)\n        return self.forwards_ptr.forwards_extend_path(path)\n\n    def add_col_set(self, c):\n        \"\"\"Adds the contents of c to self.col_set.\n\n        c - collision set to add to the current node's collision set\n\n        returns:\n\n        True if modifications were made, else False\n        \"\"\"\n        if len(c) == 0:\n            return False\n        if self.recursive:\n            temp = add_col_set_recursive(c, self.col_set)\n        else:\n            temp = add_col_set(c, self.col_set)\n        modified = (temp != self.col_set)\n        if modified:\n            self.col_set = temp\n            
return True\n        return False\n\n    def back_prop_col_set(self, new_col, open_list, epeastar=False):\n        \"\"\"Propagates the collision dependencies found by its children\n        to the parent,  which adds any new dependencies to this col_set\n\n        new_col   - the new collision set to add\n        open_list - the open list to which nodes with changed collisoin\n                    sets are added,  assumed to be a SortedCollection\n        \"\"\"\n        further = self.add_col_set(new_col)\n        if further:\n            self.colset_changed = True\n            if not self.open:\n                # assert self.closed\n                self.open = True\n                # self.closed = False\n                self.offset = 0\n\n                # Inserting to the left of any node with the same key\n                # value,  to encourage exploring closer to the collison\n                open_list.insert(self)\n            elif epeastar and self.offset != 0:\n                # Need to reset the offset and reinsert to allow a path\n                # to be found even if the node is already in the open\n                # list\n                self.offset = 0\n                # Inserting to the left of any node with the same key\n                # value, to encourage exploring closer to the collison\n                open_list.insert(self)\n            for j in self.back_prop_set:\n                j.back_prop_col_set(self.col_set, open_list, epeastar=epeastar)\n\n    def get_step(self):\n        \"\"\"Returns the coordinate of the next standard node in the path,\n\n        returns:\n\n        None if no such thing\n        \"\"\"\n        if self.forwards_ptr is None:\n            return None\n        if self.forwards_ptr.standard_node:\n            return self.forwards_ptr.coord\n        else:\n            return self.forwards_ptr.get_step()\n\n\ndef individually_optimal_paths(obs_map, init_pos, goals):\n    \"\"\"Returns the individually optimal paths for a 
system\"\"\"\n\n    path = []\n    for i in range(len(init_pos)):\n        path.append(find_path(obs_map, [init_pos[i]], [goals[i]]))\n    # Need to convert to full space\n    max_length = max(list(map(len, path)))\n    for i in path:\n        while len(i) < max_length:\n            i.append(i[-1])\n    jpath = []\n    for i in range(max_length):\n        temp = []\n        for j in path:\n            temp.append(j[i][0])\n        jpath.append(temp)\n    return jpath\n\n\ndef find_path_limited_graph(obs_map, init_pos, goals, recursive=True,\n                            inflation=1.0, time_limit=5 * 60.0, astar=False,\n                            get_obj=False, connect_8=False, full_space=False,\n                            return_memory=False, flood_fill_policy=False,\n                            pruning_passes=5):\n    global global_move_list\n    global_move_list = []\n    o = Od_Mstar(obs_map, goals, recursive=recursive, inflation=inflation,\n                 astar=astar, connect_8=connect_8, full_space=full_space,\n                 flood_fill_policy=flood_fill_policy)\n    import prune_graph\n    G = prune_graph.to_networkx_graph(obs_map)\n    for i in range(pruning_passes):\n        G = prune_graph.prune_opposing_edge(G, num_edges=5)\n    # Replace the individual policies with limited graphs\n    for i in range(len(o.goals)):\n        o.sub_search[(i, )] = workspace_graph.Networkx_Graph(\n            obs_map, goals[i], graph=G, connect_8=connect_8)\n    # Need to make sure that the recursion limit is great enough to\n    # actually construct the path\n    longest = max([o.sub_search[(i, )].get_cost(init_pos[i])\n                   for i in range(len(init_pos))])\n    # Guess that the longest path will not be any longer than 5 times the\n    # longest individual robot path\n    sys.setrecursionlimit(max(sys.getrecursionlimit(), longest * 5 *\n                              len(init_pos)))\n    path = o.find_path(init_pos, time_limit=time_limit)\n    num_nodes = 
o.get_memory_useage(False)\n    corrected_mem = o.get_memory_useage(True)\n    if get_obj:\n        return path, o\n    # if visualize:\n    #     return path,  global_move_list\n    if return_memory:\n        return path, num_nodes, corrected_mem\n    return path\n"
  },
  {
    "path": "od_mstar3/od_vertex.hpp",
    "content": "#ifndef MSTAR_OD_VERTEX_H\n#define MSTAR_OD_VERTEX_H\n\n#include <limits>\n\n#include \"mstar_type_defs.hpp\"\n\nnamespace mstar{\n\n  struct OdVertex{\n    OdCoord coord;\n    ColSet col_set, gen_set; // Collision set and generating collision set\n    int updated; // last planning iteration used\n    bool closed, open;\n    double cost, h;\n    OdVertex* back_ptr; // optimal way to reach this\n    std::set<OdVertex*> back_prop_set; // all explored ways to reach this\n    OdVertex* forwards_ptr; // way to goal from this\n\n    OdVertex(OdCoord coord):\n      coord(coord), col_set(), updated(0), closed(false), open(false),\n      cost(std::numeric_limits<double>::max()), h(),\n      back_ptr(nullptr), back_prop_set(), forwards_ptr(nullptr)\n      {};\n\n    bool operator>=(const OdVertex &other) const{\n      return cost + h >= other.cost + other.h;\n    }\n\n    bool operator>(const OdVertex &other) const{\n      return cost + h > other.cost + other.h;\n    }\n\n    bool operator<=(const OdVertex &other) const{\n      return cost + h <= other.cost + other.h;\n    }\n\n    bool operator<(const OdVertex &other) const{\n      return cost + h < other.cost + other.h;\n    }\n\n    /**\n     * Resets a vertex used in a previous planning iteration\n     *\n     * @param t Current planning iteration\n     */\n    void reset(int t){\n      if (t > updated){\n\tupdated = t;\n\topen = false;\n\tclosed = false;\n\tcost = std::numeric_limits<double>::max();\n\tback_ptr = nullptr;\n\tback_prop_set = std::set<OdVertex *>();\n      }\n    }\n  };\n\n}\n\n#endif\n"
  },
  {
    "path": "od_mstar3/policy.cpp",
    "content": "#include <boost/graph/dijkstra_shortest_paths.hpp>\n#include <boost/graph/reverse_graph.hpp>\n\n#include \"policy.hpp\"\n\nusing namespace mstar;\n\n\nPolicy::Policy(const Graph &g, const RobCoord goal){\n  g_ = g;\n  costs_ = std::vector<double>(num_vertices(g_));\n  predecessors_.resize(boost::num_vertices(g_));\n\n  boost::dijkstra_shortest_paths(\n    boost::make_reverse_graph(g_), goal,\n    boost::predecessor_map(&predecessors_[0]).distance_map(&costs_[0]));\n  edge_weight_map_ = boost::get(boost::edge_weight_t(), g_);\n}\n\n\ndouble Policy::get_cost(RobCoord coord){\n  return costs_[coord];\n}\n\n\ndouble Policy::get_edge_cost(RobCoord u, RobCoord v){\n  // boost::edge returns pair<edge_descriptor, bool>\n  return boost::get(edge_weight_map_, boost::edge(u, v, g_).first);\n}\n\n\nstd::vector<RobCoord> Policy::get_out_neighbors(RobCoord coord){\n  std::vector<RobCoord> out;\n  for (auto adj_verts = boost::adjacent_vertices(coord, g_);\n       adj_verts.first != adj_verts.second; adj_verts.first++){\n    out.push_back(*(adj_verts.first));\n  }\n  return out;\n}\n\nRobCoord Policy::get_step(RobCoord coord){\n  return predecessors_[coord];\n}\n"
  },
  {
    "path": "od_mstar3/policy.hpp",
    "content": "#ifndef MSTAR_POLICY_H\n#define MSTAR_POLICY_H\n\n/****************************************************************************\n * Provides a wrapper for the Boost graphs\n ***************************************************************************/\n\n#include <iostream>\n#include <boost/graph/graph_traits.hpp>\n#include <boost/graph/adjacency_list.hpp>\n\n#include \"mstar_type_defs.hpp\"\n\n\nnamespace mstar{\n  /**\n   * Generates an individual policy for a robot to reach a specified goal\n   */\n  class Policy{\n  private:\n    Graph g_; // the boost graph this wraps\n    std::vector<double> costs_; // holds cost to goal from every configuration\n    boost::property_map<Graph, boost::edge_weight_t>::type edge_weight_map_;\n    std::vector<int> predecessors_;\n\n  public:\n    /**\n     * @param g The graph describing the workspace\n     * @param goal The goal coordinate of the robot\n     */\n    Policy(const Graph &g, const RobCoord goal);\n\n    /**\n     * Returns the cost-to-go from a vertex\n     * @param vert Vertex to query cost from\n     *\n     * @return the cost to go until the goal is reached\n     */\n    double get_cost(RobCoord coord);\n\n    /**\n     * Returns cost of traversing the edge (u, v)\n     *\n     * Does not check whether the edge exists\n     *\n     * @param u Source vertex of the edge\n     * @param v Destination vertex of the dge\n     *\n     * @return the cost of the edge\n     */\n    double get_edge_cost(RobCoord u, RobCoord v);\n\n    /**\n     * Returns the out-neighbors of a given coordinate\n     * @param coord Vertex to get out neighbors of\n     */\n    std::vector<RobCoord> get_out_neighbors(RobCoord coord);\n\n    /**\n     * Returns the successor of the specified coordinate\n     *\n     * @param coord coordinate to compute the successor thereof\n     *\n     * @return coordinate of next step\n     */\n    RobCoord get_step(RobCoord coord);\n  };\n}\n\n#endif\n"
  },
  {
    "path": "od_mstar3/prune_graph.py",
    "content": "from od_mstar3 import workspace_graph\nimport networkx as nx #Python network analysis module\n\n\n\ndef to_networkx_graph(obs_map):\n    '''Reads in a standard obs_map list and converts it to a networkx\n    digraph\n    obs_map - list of lists, 0 for empty cell, 1 for obstacle'''\n    #Create a workspace_graph object to generate neighbors\n    g = workspace_graph.Astar_Graph(obs_map,[0,0])\n    G = nx.DiGraph() #Creates the graph object\n    #Populate graph with nodes\n    for x in range(len(obs_map)):\n        for y in range(len(obs_map[x])):\n            if obs_map[x][y] == 0:\n                G.add_node((x,y))\n    #Add edges\n    for i in G.nodes():\n        #Stored nodes by their coordinates in G\n        for j in g.get_neighbors(i):\n            G.add_edge(i,j)\n    return G\n\ndef prune_opposing_edge(G,num_edges=1):\n    '''Reads in a networkx digraph and prunes the edge opposing the most\n    between (i.e. edge on the most shortest path connections).  If this edge\n    doesn't have an opposing edge, or if the removal of said edge would\n    reduce the connectivity of the space, the next most between edge is pruned\n    instead.  
Since computing completeness can be expensive, allows multiple\n    edges to be pruned before computing the impact of said prunning on\n    completeness is computed'''\n    #Get the current number of strongly connected components, can't decrease\n    #without preventing some paths from being found\n    num_components = nx.number_strongly_connected_components(G)\n    pruned = 0\n    # print 'computing betweeness'\n    betweenness = nx.edge_betweenness_centrality(G)\n    # print 'betweenness computed'\n    while pruned < num_edges:\n        max_bet = max(betweenness.values())\n        if max_bet <= 0:\n            #Set betweeness to -1 if can't prune, set to 0 not between\n            return G\n        edge = betweenness.keys()[betweenness.values().index(max_bet)]\n        if not (edge[1],edge[0]) in G.edges():\n            #Already been pruned\n            betweenness[edge] = -1\n            # print 'no edge'\n            continue\n        #Test if pruning the edge will break connectivity\n        temp_graph = G.copy()\n        temp_graph.remove_edge(edge[1],edge[0])\n        if num_components == nx.number_strongly_connected_components(temp_graph):\n            #Can safely prune this edge\n            G = temp_graph\n            pruned+=1\n            betweenness[edge] = -1\n            betweenness.pop((edge[1],edge[0]))\n            # print 'pruned'\n            #Need to prevent further edges from being pruned from this vertex\n            for neighbor in G.neighbors(edge[1]):\n                betweenness[(edge[1],neighbor)] = -1\n        else:\n            betweenness[edge] = -1\n            # print 'breaks con %s' %(str(edge))\n    return G\n"
  },
  {
    "path": "od_mstar3/setup.py",
    "content": "from distutils.core import setup, Extension\nfrom Cython.Build import cythonize\n\nsetup(ext_modules = cythonize(Extension(\n           \"cpp_mstar\",                                \n           sources=[\"cython_od_mstar.pyx\"], \n           extra_compile_args=[\"-std=c++11\"]\n      )))\n"
  },
  {
    "path": "od_mstar3/workspace_graph.py",
    "content": "\"\"\"\nworkspace_graph.py\n\nThis module defines all of the classes for the low-level graphs and\npolicies used in Mstar. In general terms, these classes represent:\n\n    1.  Graphs representing the configuration space.  These graphs are\n        structured so that each node in the graph represents a\n        configuration, and each edge represents a permissible transition\n        between two different configurations.\n\n        *All of these graphs subclass the Graph_Interface class\n\n    2.  Policies, which define paths in a configuration space from an\n        initial configuration to a goal configuration.  Policies are\n        comprised of nodes, each of which represents a configuration\n        in the configuration space.  Each node in a policy has a pointer\n        to its optimal neighbor, i.e., the next node in the optimal path\n        to the goal node.  Policy classes compute optimal paths by using\n        some search algorithm to search the graphs generated in the\n        classes described above.\n\n        *All of these graphs subclass the Policy_Interface class\n\nThere are specific implementations of policies and classes within this\nmodule.  These are:\n\n    1.  Grid_Graph and Grid_Graph_Conn_8: These subclass Graph_Interface\n        and are used to represent simple configuration spaces in a\n        2-dimensional grid.  Each point on the grid is delegated with\n        either a zero or a one, to represent a free space or an\n        object in that location, respectively.  Grid_Graph specifies\n        a configuration space with 4 connectivity; i.e., each robot\n        can only go to the space immediately above its current position,\n        below its current position, or to the left or right of its\n        current position.  Grid_Graph_Conn_8 specifies a configuration\n        space with all the moves described in Grid_Graph, but with\n        additional options options of moving diagonally.\n\n    2.  
Flood_Fill_Policy:  This subclasses Policy_Interface and\n        generates an optimal path to a goal configuration by using a\n        flood fill.  This method of policy generation relies on a series\n        of pointers between nodes to generate a policy.  It starts\n        with the goal node on an open list.  At each step, the\n        algorithm pops a node off of the open list and calculates its\n        neighbors, appending them to the open list.  It iterates through\n        the generated neighbors and checks to see if they should point\n        to the popped node, based on the popped node's cost and their\n        own cost.  If they should, their pointer is changed and cost is\n        updated.  Eventually, the algorithm finds the starting node, and\n        an optimal policy has been generated.\n\n        2.1 To reduce the amount of code that has to be copied each\n            time a new workspace is generated, actions that deal with\n            the workspace itself (rather than the configuration graph)\n            are passed into Flood_Fill_Policy as functions\n\n    3.  Astar_Policy:  This subclasses Policy_Interface and\n        generates  an optimal policy to a goal configuration by using\n        the A* search algorithm.  A* uses a Best-First Search approach\n        to generate optimal paths in lower-order average time than flood\n        fill.\n\n        3.1 To reduce code that needs to copied for each new workspace,\n            a scheme similar to that described in 2.1 has also\n            been implemented in Astar.\n\n    4.  Priority_Graph:  This subclasses Policy_Interface and\n        generates an optimal policy to a goal configuration using\n        an Astar_Policy graph.  However, Priority_Graph also adds\n        a time slot to each coordinate.  This way, routes can be planned\n        for time in addition to space.\n\n    5.  
Back_Priority_Graph:  This subclasses Priority_Graph and\n        generates an optimal policy to a goal configuration. Differs\n        from Priority Graph in that time dynamics are configured for\n        planning backwards in time.\n\nFinally, an Edge_Checker class is implemented in the bottom of this\nmodule.  This class checks for collisions occurring when two robots\nattempt to move past each other.\n\nModule currently assumes that all actions have equal cost (including\ndiagonal vs non-diagonal move)\n\"\"\"\n\nfrom od_mstar3.col_set_addition import add_col_set_recursive, add_col_set\nfrom od_mstar3.col_set_addition import NoSolutionError\nfrom od_mstar3 import SortedCollection\nfrom collections import defaultdict\nfrom functools import wraps\ntry:\n    import ipdb as pdb\nexcept ImportError:\n    import pdb\nfrom od_mstar3 import interface\nimport math\n\n# Define values delegated to free spaces and spaces with obstacles\n# in the matrix of the workspace descriptor\nFREE = 0\nOBS = 1\n# Actions for 4 connected graph\nCONNECTED_4 = ((0, 0), (1, 0), (0, 1), (-1, 0), (0, -1))\n# Actions for 8 connected graph\nCONNECTED_8 = ((0, 0), (1, 0), (1, 1), (0, 1), (-1, 1), (-1, 0), (-1, -1),\n               (0, -1), (1, -1))\nMAX_COST = 1000000\n# DIAGONAL_COST, note that team policies imports this value as well\n#DIAGONAL_COST = 2 ** .5\nDIAGONAL_COST = 1.4\n\n\nclass wrk_node(object):\n\n    \"\"\"Holds information about a node in a policy's graph\n\n    Defines __slots__ to decrease program memory usage by allocating\n    a fixed amount of space to wrk_node object, rather than a\n    dictionary holding all attributes\n\n    Public interface of instance variables defined below:\n\n    coord         - coordinate representing configuration\n                    corresponding to this node in the configuration\n                    space\n    policy        - coordinate of neighboring configuration which is\n                    optimal for the policy to get to the goal\n       
             configuration\n    opt_neighbors - list of all neighbors which lead to paths that are\n                    considered optimal by the policy (more than one path\n                    can be optimal)\n    h             - heuristic cost of configuration specified by coord\n    closed, open  - specify when a policy is finalized\n    iteration     - current step of policy\n    \"\"\"\n    __slots__ = ['coord', 'policy', 'opt_neighbors', 'cost', 'h', 'closed',\n                 'iteration', 'open']\n\n    def __init__(self, coord):\n        \"\"\"Initialization function for nodes of astar policy graph.\n\n        coord - coordinate of configuration which wrk_node represents\n                in astar_policy graph\n        \"\"\"\n        self.coord = coord  # Want to store as tuples\n        self.policy = None  # Holds coordinate of next neighbor to visit\n        # Holds all optimal neighbors, intended to make replanning the\n        # policy to find an optimal, collision avoiding path easier\n        self.opt_neighbors = []  # currently only generated by _road_rules,\n        # also used to store neighbor offsets for EPEA*\n        self.cost = MAX_COST  # Cost to goal\n        # Used for extension easier to running resumable A* search\n        self.h = 0\n        # Used to determine when a policy is finalized\n        self.closed = False\n        self.iteration = -1\n        self.open = False\n\n\n# Simple memoization decorator, can be used for any function\n# Although no code in this module has been effectively sped up with this\n# decorator yet, this will hopefully be useful with more complex graphs\n# and configuration spaces in the future\ndef memoize(f):\n    memo = {}\n\n    @wraps(f)\n    def inner(*args, **kwargs):\n        try:\n            return memo[args]\n        except KeyError:\n            z = memo[args] = f(*args)\n            return z\n    return inner\n\n\ndef node_cmp(n1, n2):\n    \"\"\" Sort nodes by cost \"\"\"\n    if n1.cost < 
n2.cost:\n        return -1\n    elif n1.cost > n2.cost:\n        return 1\n    # Returning 0 allows for stable sorting, i.e. equal objects stay in\n    # the same order, which should provide a bit of a performance boost,\n    # as well as a bit of consistency\n    return 0\n\n\nclass Networkx_DiGraph(interface.Graph_Interface):\n    \"\"\"Simple wrapper for networkx graphs, in particular, supports\n    digraphs.\n\n    Requires a modified policy which can account for DiGraphs, because\n    the forward and backword neighbors are not the same thing\n    \"\"\"\n\n    def __init__(self, graph):\n        \"\"\"graph - networkx.DiGraph specifying the configuration space.\n                   assumes, cost is stored in the cost parameter\n        \"\"\"\n        self.graph = graph\n\n    def get_edge_cost(self, coord1, coord2):\n        \"\"\"Returns edge_cost of going from coord1 to coord2\n\n        coord1, coord2 - node identification\n\n        returns:\n        edge cost\n        \"\"\"\n        return self.graph[coord1][coord2]['cost']\n\n    def get_neighbors(self, coord):\n        \"\"\"Returns the out-neighbors of the specified node\n\n        coord - identifier of the node to query\n\n        returns:\n        list of node identifiers of neighboring nodes\n        \"\"\"\n        return self.graph.neighbors(coord)\n\n    def get_in_neighbors(self, coord):\n        \"\"\"Returns the in-neighbors of the specified node\n\n        coord - identifier of the node to query\n\n        returns:\n        list of node identifiers of in-neighbors\n        \"\"\"\n        return [c[0] for c in self.graph.in_edges(coord)]\n\n\nclass Grid_Graph(interface.Graph_Interface):\n    \"\"\" Represents configuration space for grid workspace\n\n    This graph serves to generate the configuration graph for a\n    gridded workspace.  
This workspace must be 4-connected, so\n    that a robot can go to the grid spaces located one space\n    up, one space to the left, one space to the right, and one\n    space down from its current coordinate.\n    \"\"\"\n\n    def __init__(self, world_descriptor, diagonal_cost=False):\n        \"\"\"Initialization for grid graph\n\n        world_descriptor - Rectangular matrix, 0 for free cell, 1 for obstacle\n        diagonal_cost    - Boolean, apply 2**.5 for diagonal cost\n        \"\"\"\n        self.world_descriptor = world_descriptor\n        self.width = len(world_descriptor)\n        self.height = len(world_descriptor[0])\n        self.actions = CONNECTED_4\n        if diagonal_cost:\n            self._diagonal_cost = DIAGONAL_COST\n        else:\n            self._diagonal_cost = 0\n\n    def get_edge_cost(self, coord1, coord2):\n        \"\"\"Retrieves config edge cost between two configurations\n\n        Grid_Graph has a fixed edge cost of one, effectively optimizing\n        make-span\n\n        coord1 - coordinate of source vertex\n        coord2 - coordinate of target vertex\n\n        Returns edge_cost of going from coord1 to coord2.\n        \"\"\"\n        if (self._diagonal_cost and coord1[0] != coord2[0] and\n                coord1[1] != coord2[1]):\n            return self._diagonal_cost\n        return 1\n\n    def get_neighbors(self, coord):\n        \"\"\"Returns collision free neighbors of the specified coordinate.\n\n        coord - (x, y) coordinate of the node for which neighbors are\n                being generated\n\n        Return value in form of list of (x, y) tuples giving coordinates\n        of neighbors, including self\n        \"\"\"\n        neighbors = []\n        min_cost = MAX_COST\n        for i in self.actions:\n            new_coord = (i[0] + coord[0], i[1] + coord[1])\n            # check if points to a coordinate in the graph\n            if (new_coord[0] < 0 or new_coord[0] >= self.width or\n                    
new_coord[1] < 0 or new_coord[1] >= self.height):\n                continue\n            if self.world_descriptor[new_coord[0]][new_coord[1]] == OBS:\n                # Points to obstacle\n                continue\n            # Valid single robot action\n            neighbors.append(new_coord)\n\n        return neighbors\n\n    def get_in_neighbors(self, coord):\n        \"\"\"Returns the collision free in-neighbors of the specified\n        coordinate.\n\n        Equivalent to get_neighbors, because the graph is undirected\n\n        coord - (x, y) coordinate of vertex for which to return the\n                in-neighbors\n\n        Returns:\n        List of coordinates of in-neighbors\n        \"\"\"\n        return self.get_neighbors(coord)\n\n\nclass Grid_Graph_Conn_8(Grid_Graph):\n    \"\"\" Configuration graph for gridded workspace with 8 connection\n\n    This graph serves to generate the configuration graph for gridded\n    workspace where each point in the grid has eight neighbors.\n    \"\"\"\n\n    def __init__(self, world_descriptor, diagonal_cost=False):\n        \"\"\"Initialization for grid graph with 8 connectivity\n\n        world_descriptor    - Rectangular matrix, 0 for free cell, 1 for\n                              obstacle\n        \"\"\"\n        super(Grid_Graph_Conn_8, self).__init__(world_descriptor,\n                                                diagonal_cost=diagonal_cost)\n        self.actions = CONNECTED_8\n\n\nclass GridGraphConn4WaitAtGoal(Grid_Graph):\n    \"\"\"Variant of workspace_graph.Grid_Graph that allows for the robot\n    to wait at its goal with reduced cost\n    Note: this can not be used directly for CMS to allow reduced waiting\n    cost when a team is not ready to be formed, as cost should not be\n    reduced when a team is ready to be formed.\n    \"\"\"\n\n    def __init__(self, world_descriptor, goal, wait_cost=.0,\n                 diagonal_cost=False):\n        \"\"\"Initialization for grid graph\n\n        
world_descriptor - Rectangular matrix, 0 for free cell, 1 for\n                           obstacle\n        goal             - goal of the robot\n        wait_cost        - cost to incur for waiting at the goal\n                           configuration\n        diagonal_cost    - incur DIAGONAL_COST for moving diagonally,\n                           1 otherwise. included to support subclasses\n        \"\"\"\n        super(GridGraphConn4WaitAtGoal, self).__init__(\n            world_descriptor, diagonal_cost=diagonal_cost)\n        self._goal = goal\n        self._wait_cost = wait_cost\n\n    def get_edge_cost(self, coord1, coord2):\n        \"\"\"Retrieves edge cost between two configurations\n\n        Waiting at the goal incurs cost self._wait_cost, while any other\n        action incurs the normal edge cost of the parent class\n\n        coord1 - coordinate of source vertex\n        coord2 - coordinate of target vertex\n\n        Returns edge_cost of going from coord1 to coord2.\n        \"\"\"\n        if coord1 == self._goal and coord2 == self._goal:\n            return self._wait_cost\n        return super(GridGraphConn4WaitAtGoal, self).get_edge_cost(coord1,\n                                                                   coord2)\n\n\nclass GridGraphConn8WaitAtGoal(GridGraphConn4WaitAtGoal):\n    \"\"\"Variant of workspace_graph.Grid_Graph_Conn_8 that allows for the\n    robot to wait at its goal with reduced cost\n    Note: this can not be used directly for CMS to allow reduced waiting\n    cost when a team is not ready to be formed, as cost should not be\n    reduced when a team is ready to be formed.\n    \"\"\"\n\n    def __init__(self, world_descriptor, goal, wait_cost=.0,\n                 diagonal_cost=False):\n        \"\"\"Initialization for grid graph\n\n        world_descriptor - Rectangular matrix, 0 for free cell, 1 for\n                           obstacle\n        goal             - goal of the robot\n        wait_cost        - cost to incur for waiting at the 
goal\n                           configuration\n        diagonal_cost    - incur DIAGONAL_COST for moving diagonally if True,\n                           incur 1 if False\n        \"\"\"\n        super(GridGraphConn8WaitAtGoal, self).__init__(\n            world_descriptor, goal, wait_cost=wait_cost,\n            diagonal_cost=diagonal_cost)\n        self.actions = CONNECTED_8\n\n\ndef Workspace_Graph(world_descriptor, goal=None, connect_8=False,\n                    road_rules=True):\n    \"\"\"Wrapper function for returning Flood_Fill_Policy objects\n\n    Function returns objects with different args depending on the\n    connect_8 flag\n\n    world_descriptor - two-dimensional matrix representing the space in\n                       which the robot can travel.  A value of 1 in the\n                       space represents an obstacle, and a value of 0\n                       represents an open space\n    goal             - position [x,y] of the goal of the policy\n    connect_8        - boolean determining whether Grid_Graph or\n                       Grid_Graph_Conn_8 is used\n    road_rules       - boolean supplied to policy object to determine if\n                       rightmost neighbor node should always be used\n    \"\"\"\n    if connect_8:\n        return Flood_Fill_Policy(world_descriptor, Grid_Graph_Conn_8,\n                                 goal, road_rules)\n    return Flood_Fill_Policy(world_descriptor, Grid_Graph, goal,\n                             road_rules)\n\n\ndef compute_heuristic_conn_8(init_pos, coord):\n    \"\"\"Returns a heuristic for distance between coord and init_pos\n\n    init_pos - coordinate of position of goal configuration\n    coord    - coordinate of configuration for which heuristic is\n               being computed\n\n    Returns the heuristic distance to goal\n    \"\"\"\n    return max(map(lambda x, y: abs(x - y), coord, init_pos))\n\n\ndef compute_heuristic_conn_8_diagonal(init_pos, coord):\n    \"\"\"Returns a heuristic 
for distance between coord and init_pos\n\n    Used when moving diagonally costs DIAGONAL_COST instead of 1\n\n    init_pos - coordinate of position of goal configuration\n    coord    - coordinate of configuration for which heuristic is\n               being computed\n\n    Returns the heuristic distance to goal\n    \"\"\"\n    x_diff = abs(init_pos[0] - coord[0])\n    y_diff = abs(init_pos[1] - coord[1])\n    min_dist = min(x_diff, y_diff)\n    max_dist = max(x_diff, y_diff)\n    return DIAGONAL_COST * min_dist + (max_dist - min_dist)\n\n\ndef compute_heuristic_conn_4(init_pos, coord):\n    \"\"\"Returns Manhattan heuristic for distance from coord to init_pos\n\n    init_pos - coordinate of position of goal configuration\n    coord    - coordinate of configuration for which heursitic is\n               being computed\n\n    Returns the heuristic distance to goal through a\n    Manhattan metric calculation.\n    \"\"\"\n    return sum(map(lambda x, y: abs(x - y), coord, init_pos))\n\n\ndef Astar_Graph(world_descriptor, goal=None, connect_8=False,\n                diagonal_cost=False, makespan=False, wait_cost=0.):\n    \"\"\"Wrapper function for returning Astar_Policy objects\n\n    Different heuristic functions are given to Astar_Policy object\n    depending on whether the gridworld is 8 connected or not\n\n    world_descriptor - two-dimensional matrix which describes the\n                       gridworld with obstacles. 
Each point in the\n                       matrix is either a zero (no obstacle) or a\n                       one (obstacle)\n    goal             - position (x, y) of the goal of the policy\n    connect_8        - boolean determining whether each coordinate\n                       in the gridworld has eight neighbors\n                       (including all diagonal neighbors) or only\n                       four (cardinal neighbors)\n    diagonal_cost    - boolean, apply DIAGONAL_COST for diagonal costs if True,\n                       apply 1 if False\n    makespan         - minimize makespan instead of minimizing time\n    wait_cost        - cost of waiting at the goal\n    \"\"\"\n    if makespan:\n        if connect_8:\n            if diagonal_cost:\n                h_func = compute_heuristic_conn_8_diagonal\n            else:\n                h_func = compute_heuristic_conn_8\n            return Astar_Policy(\n                world_descriptor,\n                lambda x: Grid_Graph_Conn_8(x, diagonal_cost=diagonal_cost),\n                goal=goal, compute_heuristic=h_func)\n        else:\n            return Astar_Policy(world_descriptor, Grid_Graph, goal=goal,\n                                compute_heuristic=compute_heuristic_conn_4)\n    if connect_8:\n        if diagonal_cost:\n            h_func = compute_heuristic_conn_8_diagonal\n        else:\n            h_func = compute_heuristic_conn_8\n        return Astar_Policy(\n            world_descriptor,\n            lambda x: GridGraphConn8WaitAtGoal(x, goal,\n                                               wait_cost=wait_cost,\n                                               diagonal_cost=diagonal_cost,\n                                               ),\n            goal, h_func)\n    return Astar_Policy(world_descriptor,\n                        lambda x: GridGraphConn4WaitAtGoal(\n                            x, goal, wait_cost=wait_cost,\n                            diagonal_cost=diagonal_cost),\n           
             goal, compute_heuristic_conn_4)\n\n\nclass Astar_Policy(interface.Policy_Interface):\n\n    \"\"\"Class that implements Astar to search config space\n\n    Uses resumable A* search instead of the flood fill used in\n    workspace graph, as the optimal policy computation is dominating\n    the time required for rM*  when inflated.\n\n    To avoid copying large amounts of code for each new workspace,\n    all functions interacting with the workspace are passed into this\n    class as arguments.\n    \"\"\"\n    def __init__(self, world_descriptor, config_graph, goal=None,\n                 compute_heuristic=compute_heuristic_conn_4):\n        \"\"\"Initialization function for Astar_Policy\n\n        world_descriptor  - two-dimensional matrix which describes the\n                            gridworld with obstacles. Each point in the\n                            matrix is either a zero (no obstacle) or a\n                            one (obstacle)\n        config_graph      - a callable that takes a single argument, the\n                            world descriptor, and returns an object that\n                            represents the configuration graph, which\n                            implements the methods defined by\n                            Graph_Interface\n        goal              - (x, y)  target, optional, if not supplied,\n                            will not generate policy\n        compute_heuristic - helper function used to calculate the\n                            heuristic distance to the goal. 
Passed in\n                            because it interacts with the workspace\n        \"\"\"\n        self.cspace = config_graph(world_descriptor)\n        self.graph = {}\n        self.iteration = 0\n        self.goal = goal\n        self.init_pos = self.goal\n        self.compute_heuristic = compute_heuristic\n        self.goal_node = self._get_node(self.goal)\n        # We implicitly assume a self loop by setting the goal node's\n        # policy to be its own coordinate\n        self.goal_node.policy = self.goal_node.coord\n        self.goal_node.cost = 0\n        self.goal_node.open = True\n        self.open_list = SortedCollection.SortedCollection(\n            [self.goal_node], key=lambda x: -x.cost - x.h)\n\n    def _get_node(self, coord):\n        \"\"\"Returns node specified by coord\n\n        In addition, updates its heuristic and iteration values.  If no\n        such node exists, it is created.\n\n        coord - coordinate of node to return\n        \"\"\"\n        try:\n            node = self.graph[coord]\n        except KeyError:\n            node = self.graph[coord] = wrk_node(coord)\n\n        if self.iteration > node.iteration:\n            node.iteration = self.iteration\n            node.h = self.compute_heuristic(self.init_pos, coord)\n        return node\n\n    def _compute_path(self, coord):\n        \"\"\"Extends the search to reach the specified node\n\n        coord - (x,y) coordinate of targeted configuration\n\n        Tries to compute path from coord to goal.  If successful,\n        returns next coordinate in path to goal from coord.  
If not\n        successful, raises an NoSolutionError.\n        \"\"\"\n        if self.init_pos == self.goal:\n            self.init_pos = coord\n            # First need to update the heuristic for nodes in the open\n            # list\n        # Only change the heuristic for the intial coordinate, when the\n        # open list is empty, so don't actually have to resort the open\n        # list\n        # Open list may be empty if trying after trying to find paths to\n        # two unreachable nodes.  This will only be done my\n        # multi_assignment_mstar while trying to compute the assignment\n        # cost matrix.  Besides which, this will trigger a\n        # NoSolutionError in case such a situtation is not supposed to\n        # be found assert len(self.open_list) > 0\n        while len(self.open_list) > 0:\n            node = self.open_list.pop()\n            if node.closed:\n                continue\n            node.closed = True\n            node.open = False\n            # Need to add the neighbors before checking if this is the\n            # goal, so search can be resumed without being blocked by\n            # this position\n            neighbors = self.get_neighbors(node.coord)\n            for i in neighbors:\n                tnode = self._get_node(i)\n                if (tnode.closed or tnode.cost <= node.cost +\n                        self.get_edge_cost(i, node.coord)):\n                    continue\n                tnode.cost = node.cost + self.get_edge_cost(\n                    i, node.coord)\n                tnode.policy = node.coord\n                tnode.open = True\n                # Can add tnode directly, and will just skip any\n                # inconsistent copies\n                self.open_list.insert_right(tnode)\n            if node.coord == coord:\n                # Done, so return the next step\n                return node.policy\n        raise NoSolutionError('Couldn\\'t finish individual policy')\n\n    def get_step(self, 
coord):\n        \"\"\"Gets the policy for the given coordinate\n\n        If no policy exists, extends planning to reach the coordinate\n\n        coord - (x, y) configuration\n\n        Returns a coordinate of the next node in the policy\n        \"\"\"\n        node = self._get_node(coord)\n        if node.closed:\n            # Have already computed the optimal policy here\n            return node.policy\n        self.iteration += 1\n        try:\n            return self._compute_path(coord)\n        except NoSolutionError:\n            # Couldn't find a path to goal, so return None\n            return None\n\n    def get_cost(self, coord):\n        \"\"\"Returns the cost of moving from given position to goal\n\n        Cost is for moving from coordinate specified at coord\n        to the goal configuration.\n\n        coord - (x, y) configuration\n        \"\"\"\n        node = self._get_node(coord)\n        if node.closed:\n            return node.cost\n        self.iteration += 1\n        self._compute_path(coord)\n        assert node.closed\n        return node.cost\n\n    def get_edge_cost(self, coord1, coord2):\n        \"\"\"Returns cost of config transition from coord1 to coord2\n\n        Wrapper function for returning the config space's\n        get_edge_cost from coord1 to coord2\n\n        coord1 - initial coordinate in transition\n        coord2 - final coordinate in transition\n\n        returns:\n        edge cost of going from coord1 to coord2\n        \"\"\"\n        return self.cspace.get_edge_cost(coord1, coord2)\n\n    def _gen_limited_offset_neighbors(self, coord):\n        \"\"\"Stores the neighbors of a node by changes in f-value\n\n        f-value - the sum of cost to reach and cost to go.\n\n        coord - (x, y) configuration for which limited offset neighbors\n                are generated\n        \"\"\"\n        # Repurposing a preexisting field, so need to change to a\n        # defaultdict\n        node = self._get_node(coord)\n  
      node.opt_neighbors = defaultdict(lambda: [])\n        base_cost = self.get_cost(coord)\n        # Need to compute offsets\n        for neib in self.get_neighbors(coord):\n            # difference in path cost using different paths, need to\n            # handle staying at the goal separately\n            if neib == self.goal and neib == coord:\n                offset = 0\n            else:\n                offset = self.get_cost(neib) - base_cost + 1\n            node.opt_neighbors[offset].append((offset, neib))\n        node.opt_neighbors = dict(node.opt_neighbors)\n\n    def get_limited_offset_neighbors(self, coord, max_offset, min_offset=0):\n        \"\"\"Returns set of neighbors specified by the offsets\n\n        More specifically, returns the set of neighbors for which the\n        maximum difference in path cost if passed through is less than\n        the specified value.\n\n        (i.e. if you are forced to pass through coordinate x, instead of\n        the optimal step, what is the difference in cost)?\n\n        coord - coordinates of the node to find neighbors of\n        max_offset - the maximum increase in path cost to incur in\n                     choice of neighbors\n        min_offset - minimum increase in path cost to incur in a neighbor\n\n        returns:\n        a list of tuples of the form (offset, coordinate)\n        \"\"\"\n        node = self._get_node(coord)\n        if not node.opt_neighbors:\n            self._gen_limited_offset_neighbors(coord)\n        # Have already pre-computed the results\n        out = []\n        # Iterate in ascending offset order; dict.iteritems() does not\n        # exist in Python 3, and the early return below requires a\n        # sorted traversal to be correct\n        for offset, neighbors in sorted(node.opt_neighbors.items()):\n            if offset < min_offset:\n                continue\n            if offset > max_offset:\n                return out\n            out.extend(neighbors)\n        return out\n\n    def get_offset_neighbors(self, coord, offset):\n        \"\"\"Generates offset neighbors for node specified by coord\n\n        If no offset neighbors exist, they are 
created\n\n        Only offset neighbors at a certain offset are returned\n\n        coord  - (x,y) configuration for which neighbors are being\n                 generated\n        offset - value of offset determining which neighbors are\n                 included in return value\n\n        returns:\n        list of tuples of form (offset, neighbor)\n        \"\"\"\n        node = self._get_node(coord)\n        if not node.opt_neighbors:\n            self._gen_limited_offset_neighbors(coord)\n        return node.opt_neighbors[offset]\n\n    def get_offsets(self, coord):\n        \"\"\"Return the possible offsets of the neighbors.\n\n        The offset of a neighbor is the difference in the cost of the\n        optimal path from coord to the cost of the best path constrained\n        to pass through a specific neighbor.  Used in EPEA*\n\n        coord - (x,y) configuration for which neighbors are being\n                generated and their offsets returned\n\n        Returns list of offsets of all neighbor nodes to coord\n        \"\"\"\n        node = self._get_node(coord)\n        if not node.opt_neighbors:\n            self._gen_limited_offset_neighbors(coord)\n        return node.opt_neighbors.keys()\n\n    def get_neighbors(self, coord, opt=False):\n        \"\"\"Wrapper function for get_neighbors function of underlying\n        config_space graph.\n\n        opt - only optimal neighbors are returned\n        coord - configuration for which neighbors are being returned\n\n        Returns list of tuples, where each tuple is a coordinate\n        \"\"\"\n        neighbors = self.cspace.get_neighbors(coord)\n        if not opt:\n            return neighbors\n        # min_cost must be initialized before the scan; it was\n        # previously read before assignment (NameError when opt=True)\n        min_cost = MAX_COST\n        for i in neighbors:\n            cost = self.get_cost(i)\n            if cost < min_cost:\n                min_cost = cost\n        opt_neighbors = []\n        for i in neighbors:\n            if self.get_cost(i) == min_cost:\n                opt_neighbors.append(i)\n      
  return opt_neighbors\n\n    def get_graph_size(self, correct_for_size=True):\n        \"\"\"Gets the size of the graph\n\n        correct_for_size - just intended to match signatures\n\n        Returns the number of nodes used for this graph\n        \"\"\"\n        return sum(map(len, self.graph))\n\n\nclass Astar_DiGraph_Policy(Astar_Policy):\n\n    \"\"\"Class that implements Astar to search configuration spaces that\n    are represented as a di graph\n\n    Differs slightly from Astar_Policy in using the get_in_neighbors\n    function when computing a policy, to explicitly plan back in time\n\n    Uses resumable A* search instead of the flood fill used in\n    workspace graph, as the optimal policy computation is dominating\n    the time required for rM*  when inflated.\n\n    To avoid copying large amounts of code for each new workspace,\n    all functions interacting with the workspace are passed into this\n    class as arguments.\n    \"\"\"\n    def __init__(self, world_descriptor, config_graph, goal=None,\n                 compute_heuristic=compute_heuristic_conn_4):\n        \"\"\"Initialization function for Astar_Policy\n\n        world_descriptor  - two-dimensional matrix which describes the\n                            gridworld with obstacles. Each point in the\n                            matrix is either a zero (no obstacle) or a\n                            one (obstacle)\n        config_graph      - a class which is used to represent the\n                            config space of the robot\n        goal              - (x, y)  target, optional, if not supplied,\n                            will not generate policy\n        compute_heuristic - helper function used to calculate the\n                            heuristic distance to the goal. 
Passed in\n                            because it interacts with the workspace\n        \"\"\"\n        super(Astar_DiGraph_Policy, self).__init__(\n            world_descriptor, config_graph, goal=goal,\n            compute_heuristic=compute_heuristic)\n\n    def _compute_path(self, coord):\n        \"\"\"Extends the search to reach the specified node\n\n        Explicitly plans in reverse from the goal to the target, using\n        get_in_neighbors to compute node expansion, instead of\n        Astar_Graph, which uses the get_neighbors function.\n\n        coord - (x,y) coordinate of targeted configuration\n\n        Tries to compute path from coord to goal.  If successful,\n        returns next coordinate in path to goal from coord.  If not\n        successful, raises an NoSolutionError.\n        \"\"\"\n        if self.init_pos == self.goal:\n            self.init_pos = coord\n            # First need to update the heuristic for nodes in the open\n            # list\n        # Only change the heuristic for the intial coordinate, when the\n        # open list is empty, so don't actually have to resort the open\n        # list\n        # Open list may be empty if trying after trying to find paths to\n        # two unreachable nodes.  This will only be done my\n        # multi_assignment_mstar while trying to compute the assignment\n        # cost matrix.  
Besides which, this will trigger a\n        # NoSolutionError in case such a situtation is not supposed to\n        # be found assert len(self.open_list) > 0\n        while len(self.open_list) > 0:\n            node = self.open_list.pop()\n            if node.closed:\n                continue\n            node.closed = True\n            node.open = False\n            # Need to add the neighbors before checking if this is the\n            # goal, so search can be resumed without being blocked by\n            # this position\n            neighbors = self.get_in_neighbors(node.coord)\n            for i in neighbors:\n                tnode = self._get_node(i)\n                if (tnode.closed or tnode.cost <= node.cost +\n                        self.get_edge_cost(i, node.coord)):\n                    continue\n                tnode.cost = node.cost + self.get_edge_cost(\n                    i, node.coord)\n                tnode.policy = node.coord\n                tnode.open = True\n                # Can add tnode directly, and will just skip any\n                # inconsistent copies\n                self.open_list.insert_right(tnode)\n            if node.coord == coord:\n                # Done, so return the next step\n                return node.policy\n        raise NoSolutionError('Couldn\\'t finish individual policy')\n\n    def get_in_neighbors(self, coord):\n        \"\"\"Wraper for the get_in_neighbors function of the underlying\n        config_space graph\n\n        coord - coordinate of whom the predecessors (in neighbors) are\n                returned\n\n        returns:\n        list of coordinates of the predecessors of coord\n        \"\"\"\n        return self.cspace.get_in_neighbors(coord)\n\n\nclass Priority_Graph(interface.Policy_Interface):\n    \"\"\"Simple wrapper for A* graph that uses priority planning.\n\n    Adds/removes a time coordinate to allow for priority planning.\n    Implemented this way to make Indpendence_Detection happier, as it\n  
  makes use both of basic Astar_Policy and priority planners of\n    various forms. This way, any work done by the Astar_Policy can be\n    leveraged for the priority planner, and vice versa\n    \"\"\"\n    def __init__(self, astar_policy, max_t=None):\n        \"\"\"initialization for Priority_Graph\n\n        astar_policy       - the graph to wrap\n\n        max_t - greatest t - value allowed\n        \"\"\"\n        self.astar_policy = astar_policy\n        self.max_t = max_t\n\n    def get_step(self, coord):\n        \"\"\"Gets the policy for the given coordinate,\n\n        If necessary, extends planning to reach said coordinate.  Will\n        increment time by 1\n\n        coord - (x, y, t) position and time coordinate for the specified\n                node\n        \"\"\"\n        # Can do this by stripping time, querrying the underlying\n        # astar_policy, then appending the appropriate new time\n        t = coord[-1] + 1\n        # Check if this would exceed maximal value\n        if self.max_t is not None:\n            t = min(self.max_t, t)\n        step = self.astar_policy.get_step(coord[:2])\n        return step + (t, )\n\n    def get_cost(self, coord):\n        \"\"\"Gets cost of moving to goal from coord\n\n        coord - (x, y, t)  coordinates of node for which to get cost\n\n        Returns cost of moving from the given position to goal\n        \"\"\"\n        return self.astar_policy.get_cost(coord[:2])\n\n    def set_max_t(self, max_t):\n        \"\"\"Sets the maximum time value the graph will use.\n\n        Allows for easy changes for different constraints\n        \"\"\"\n        self.max_t = max_t\n\n    def get_neighbors(self, coord):\n        \"\"\"Gets the neighbors of the specified space-time point\n\n        coord - coordinate of configuration for which neighbors are\n                being returned\n\n        Returns neighbors of coord in config space, with a time stamp\n        one greater than that of coord\n        \"\"\"\n  
      pos_neighbors = self.astar_policy.get_neighbors((coord[0], coord[1]))\n        return map(lambda x: (x[0], x[1], min(self.max_t, coord[-1] + 1)),\n                   pos_neighbors)\n\n\nclass Back_Priority_Graph(Priority_Graph):\n\n    \"\"\"Simple wrapper for A* graph which just adds/removes a time\n    coordinate to allow for priority planning.\n\n    Implemented this way to make Indpendence_Detection happier, as it\n    makes use both of basic Astar_Policy and priority planners of various\n    forms. This way, any work done by the Astar_Policy can be leveraged\n    for the priority planner, and vice versa.\n\n    Differs from Priority Graph in that time dynamics are configured for\n    planning backwards in time.  Need to query max_t in each instance,\n    as multiple Constrained_Planners will make use of a single\n    Back_Priority Graph, and no other planner should be using one\n    \"\"\"\n\n    def __init__(self, astar_policy, max_t=None, prune_paths=True):\n        \"\"\"\n        astar_policy - the graph to wrap\n        max_t       - greatest t-value allowed\n        prune_paths - whether to prune neighbors that cannot reach the\n                      goal of astar_policy within the time specified.\n                      This is the default behavior.  
Disabling when\n                      running task swapping allows for paths to be found\n                      to multiple initial configurations\n        \"\"\"\n        Priority_Graph.__init__(self, astar_policy, max_t=max_t)\n        self.prune_paths = prune_paths\n\n    def get_neighbors(self, coord, max_t):\n        \"\"\"Gets the neighbors of the specified space-time point\"\"\"\n        self.max_t = max_t\n        if coord[-1] == 0 and self.max_t != 0:\n            return []\n        pos_neighbors = self.astar_policy.get_neighbors((coord[0], coord[1]))\n        if coord[-1] == self.max_t:\n            neighbors = []\n            for pos in pos_neighbors:\n                neighbors.append((pos[0], pos[1], self.max_t))\n                # Make sure that you can actually get form the initial\n                # position to the suggested vertex in time\n                if self.prune_paths:\n                    if (not self.max_t == 0 and\n                            self.astar_policy.get_cost(pos) <= coord[-1] - 1):\n                        neighbors.append((pos[0], pos[1], coord[-1] - 1))\n                else:\n                    # Don't check on whether there is time to reach the\n                    # intial configuration\n                    neighbors.append((pos[0], pos[1], coord[-1] - 1))\n            return neighbors\n        if self.prune_paths:\n            return [(x[0], x[1], coord[-1] - 1) for x in pos_neighbors\n                    if self.astar_policy.get_cost(x) <= coord[-1] - 1]\n        else:\n            return [(x[0], x[1], coord[-1] - 1) for x in pos_neighbors]\n\n    def get_forwards_neighbors(self, coord, max_t):\n        \"\"\"Gets the forward time dynamics neighbors of this point\"\"\"\n        self.max_t = max_t\n        return Priority_Graph.get_neighbors(self, coord)\n\n    def get_cost(self, coord, max_t):\n        \"\"\"Returns the cost of moving from given position to goal\n\n        coord - (x, y, t)  coordinates of node for which 
to get cost\n\n        \"\"\"\n        self.max_t = max_t\n        return Priority_Graph.get_cost(self, coord)\n\n    def get_step(self, coord, max_t):\n        \"\"\"Gets the policy for the given coordinate, extending planning\n        to reach said coordinate if necessary.  Will increment time by 1\n\n        coord - (x, y, t) position and time coordinate for the specified\n                node\n\n        \"\"\"\n        self.max_t = max_t\n        return Priority_Graph.get_step(self, coord)\n\n\nclass Limited_Astar_Policy(Astar_Policy):\n    \"\"\"Uses resumable A* search instead of the flood fill used in\n    workspace graph, as the optimal policy computation is dominating the\n    time required for rM* when inflated.\n\n    Also takes a networkx graph, called limit graph, which specifies the\n    legal edges\n\n    \"\"\"\n    def __init__(self, world_descriptor, goal, limit_graph, connect_8=False):\n        Astar_Policy.__init__(self, world_descriptor, goal, connect_8)\n        self.limit_graph = limit_graph\n\n    def get_neighbors(self, coord):\n        \"\"\"Returns the neighbors of the given coordinate in the limit\n        graph\n\n        \"\"\"\n        return self.limit_graph.neighbors(coord)\n\n\nclass Edge_Checker(interface.Planner_Edge_Checker):\n    \"\"\"Used to wrap edge checking so more complex graphs can be cleanly\n    handled (may require keeping track of state for non-trivial graphs\n\n    \"\"\"\n    def __init__(self):\n        \"\"\"Takes no arguments, because on grid graph, only the\n        coordinates matter\n\n        \"\"\"\n        pass\n\n    def simple_pass_through(self, c1, c2):\n        \"\"\"Simply check for collisions, avoid the additional overhead\n\n        for use with basic OD (op_decomp)\n\n        c1 - coordinate at time t\n        c2 - coordinate at time t + 1\n\n        returns:\n        True if pass through collision, else false\n\n        \"\"\"\n        for i in range(len(c1)):\n            for j in range(i + 1, 
len(c1)):\n                if c1[i] == c2[j] and c1[j] == c2[i]:\n                    return True\n        return False\n\n    def simple_col_check(self, c1):\n        \"\"\"Checks for robot-robot collisions at c1,\n\n        for use with basic OD (op_decomp)\n\n        returns:\n        True if collision exists\n\n        \"\"\"\n        for i in range(len(c1)):\n            for j in range(i + 1, len(c1)):\n                if c1[i] == c1[j]:\n                    return True\n        return False\n\n    def simple_cross_over(self, c1, c2):\n        \"\"\"Check for cross over collisions in 8-connected worlds\n\n        returns:\n        True if collision is detected\n\n        \"\"\"\n        for i in range(len(c1)):\n            for j in range(i + 1, len(c1)):\n                # compute displacement vector\n                disp = [c1[i][0] - c1[j][0], c1[i][1] - c1[j][1]]\n                if abs(disp[1]) > 1 or abs(disp[0]) > 1:\n                    continue\n                # compute previous? displacement vector.  Have a pass\n                # through or cross over collision if the displacement\n                # vector is the opposite\n                if (disp[0] == -(c2[i][0] - c2[j][0]) and\n                        disp[1] == -(c2[i][1] - c2[j][1])):\n                    return True\n        return False\n\n    def simple_incremental_cross_over(self, c1, c2):\n        \"\"\"Check for cross over collisions in 8-connected worlds.\n\n        Assumes that collision checking has been performed for everything\n        but the last robot in the coordinates.  To be used to save a bit\n        of time for partial expansion approaches\n\n        \"\"\"\n        for i in range(len(c1) - 1):\n            disp = [c1[i][0] - c1[-1][0], c1[i][1] - c1[-1][1]]\n            if abs(disp[1]) > 1 or abs(disp[0]) > 1:\n                continue\n            # compute previous? displacement vector.  
Have a pass through\n            # or cross over collision if the displacement vector is the\n            # opposite\n            if (disp[0] == -(c2[i][0] - c2[-1][0]) and\n                    disp[1] == -(c2[i][1] - c2[-1][1])):\n                return True\n        return False\n\n    def simple_incremental_col_check(self, c1):\n        \"\"\"Checks for robot-robot collisions at c1,\n\n        for use with basic OD (op_decomp)\n\n        returns:\n        True if collision exists\n\n        \"\"\"\n        for i in range(len(c1) - 1):\n            if c1[i] == c1[-1]:\n                return True\n        return False\n\n    def single_bot_outpath_check(self, cur_coord, prev_coord, cur_t, paths):\n        \"\"\"Tests for collisions when moving from prev_coord to cur_coord\n        with the robots in paths.\n\n        cur_coord - position of a single robot\n\n        Returns:\n\n        True if a collision is found,\n        False otherwise\n\n        \"\"\"\n        if paths is None:\n            return False\n        prev_t = cur_t - 1\n        check_t = min(cur_t, len(paths) - 1)\n        new_cols = 0\n        for bot in range(len(paths[0])):\n            # Check for simultaneous occupation\n            if (cur_coord[0] == paths[check_t][bot][0] and\n                    cur_coord[1] == paths[check_t][bot][1]):\n                return True\n            if cur_t >= len(paths):\n                # Can't have edge collisions when out-group robots\n                # aren't moving\n                continue\n            # Check for pass-through/cross over collisions\n            disp = [prev_coord[0] - paths[prev_t][bot][0],\n                    prev_coord[1] - paths[prev_t][bot][1]]\n            if abs(disp[1]) > 1 or abs(disp[0]) > 1:\n                continue\n            # Compute current displacement vector, and check for\n            # inversion\n            if (disp[0] == -(cur_coord[0] - paths[cur_t][bot][0]) and\n                    disp[1] == -(cur_coord[1] - 
paths[cur_t][bot][1])):\n                return True\n        return False\n\n    def simple_prio_col_check(self, coord, t, paths, pcoord=None,\n                              conn_8=False):\n        \"\"\"Returns true, if collision is detected, false otherwise\n        at the moment only used to check the obstacle collisions, but\n        didn't want to reject the other code already\n\n        coord - coord of potential new neighbor\n        t - current time step\n        paths - previously found paths\n        pcoord - previous coordinate of the path\n\n        \"\"\"\n        if not isinstance(coord, tuple):\n            coord = tuple(coord)\n        if paths is not None:\n            t = min(t, len(paths) - 1)\n            # only one path\n            if isinstance(paths[0][0], int):\n                paths = map(lambda x: [x], paths)\n            for bot in range(len(paths[t])):\n                if not isinstance(paths[t][bot], tuple):\n                    paths[t][bot] = tuple(paths[t][bot])\n                # (a) simultaneous occupation of one node\n                if coord == paths[t][bot]:\n                        return True\n                # (b) pass through and cross over collision\n                if pcoord is not None:\n                    if not isinstance(pcoord, tuple):\n                        pcoord = tuple(pcoord)\n                if not isinstance(paths[t - 1][bot], tuple):\n                    paths[t - 1][bot] = tuple(paths[t - 1][bot])\n                if paths[t - 1][bot] == coord and paths[t][bot] == pcoord:\n                    return True\n                # (c) cross over collision in case of conn_8\n                if conn_8:\n                    if self.single_bot_cross_over(paths[t][bot],\n                                                  paths[t - 1][bot], coord,\n                                                  pcoord):\n                        return True\n        # No collision\n        return False\n\n    def col_check(self, c1, 
recursive):\n        \"\"\"Checks for collisions at a single point.  Returns either a M*\n        or rM* collision set in the form of sets, depending on the\n        setting of recursive.\n\n        \"\"\"\n        col_set = []\n        # Select the function to be used for adding collision sets\n        adder = add_col_set\n        if recursive:\n            adder = add_col_set_recursive\n        for i in range(len(c1) - 1):\n            for j in range(i + 1, len(c1)):\n                if c1[i] == c1[j]:\n                    col_set = adder([frozenset([i, j])], col_set)\n        return col_set\n\n    def incremental_col_check(self, c1, recursive):\n        \"\"\"Checks for collisions at a single point.  Returns either a M*\n        or rM* collision set in the form of sets, depending on the\n        setting of recursive.  Only checks whether the last robot is\n        involved in a collision, for use with incremental methods\n\n        \"\"\"\n        col_set = []\n        # Select the function to be used for adding collision sets\n        adder = add_col_set\n        if recursive:\n            adder = add_col_set_recursive\n        j = len(c1) - 1\n        for i in range(len(c1) - 1):\n            if c1[i] == c1[j]:\n                col_set = adder([frozenset((i, j))], col_set)\n        return col_set\n\n    def cross_over(self, c1, c2, recursive=False):\n        \"\"\"Detects cross over collisions as well as pass through\n        collisions\n\n        \"\"\"\n        col_set = []\n        # Select the function to be used for adding collision sets\n        adder = add_col_set\n        if recursive:\n            adder = add_col_set_recursive\n        for i in range(len(c1) - 1):\n            for j in range(i + 1, len(c1)):\n                # compute current displacement vector\n                if c1[i] is None or c1[j] is None or c2[i] is None or c2[j] \\\n                        is None:\n                    continue\n                disp = (c1[i][0] - c1[j][0], 
c1[i][1] - c1[j][1])\n                if abs(disp[1]) > 1 or abs(disp[0]) > 1:\n                    continue\n                # Compute previous displacement vector.  Have a cross over or\n                # pass through collision if the two displacement vectors are\n                # opposites\n                # pdisp = [c2[i][0] - c2[j][0], c2[i][1] - c2[j][1]]\n                if (disp[0] == -(c2[i][0] - c2[j][0]) and\n                        disp[1] == -(c2[i][1] - c2[j][1])):\n                    col_set = adder([frozenset([i, j])], col_set)\n        return col_set\n\n    def incremental_cross_over(self, c1, c2, recursive=False):\n        \"\"\"Detects cross over collisions as well as pass through\n        collisions.\n\n        Only checks if the last robot is involved in a collision, for use\n        with partial expansion approaches.\n\n        c1 - the initial configuration.\n        c2 - the final configuration. c1 may include additional robots,\n             if necessary\n\n        \"\"\"\n        col_set = []\n        # Select the function to be used for adding collision sets\n        adder = add_col_set\n        if recursive:\n            adder = add_col_set_recursive\n        j = len(c2) - 1\n        for i in range(len(c2) - 1):\n            # compute current displacement vector\n            disp = (c1[i][0] - c1[j][0], c1[i][1] - c1[j][1])\n            if abs(disp[1]) > 1 or abs(disp[0]) > 1:\n                continue\n            # Compute previous displacement vector.  
Have a cross over or\n            # pass through collision if the two displacement vectors are\n            # opposites\n            # pdisp = [c2[i][0] - c2[j][0], c2[i][1] - c2[j][1]]\n            if (disp[0] == -(c2[i][0] - c2[j][0]) and\n                    disp[1] == -(c2[i][1] - c2[j][1])):\n                col_set = adder([frozenset([i, j])], col_set)\n        return col_set\n\n    def pass_through(self, c1, c2, recursive=False):\n        \"\"\"returns a tuple of colliding robots, or set of tuples if\n        recursive\n\n        \"\"\"\n        col_set = []\n        # Select the function to be used for adding collision sets\n        adder = add_col_set\n        if recursive:\n            adder = add_col_set_recursive\n        for i in range(len(c1) - 1):\n            for j in range(i + 1, len(c1)):\n                if c1[i] == c2[j] and c1[j] == c2[i]:\n                    col_set = adder([frozenset((i, j))], col_set)\n        return col_set\n\n    def single_bot_cross_over(self, coord1, pcoord1, coord2, pcoord2):\n        \"\"\"Checks for cross-over and collisions between robots one and 2\n        moving from pcoord to coord\n\n        \"\"\"\n        disp = (pcoord1[0] - pcoord2[0], pcoord1[1] - pcoord2[1])\n        if abs(disp[1]) > 1 or abs(disp[0]) > 1:\n            return False\n        if (disp[0] == -(coord1[0] - coord2[0]) and\n                disp[1] == -(coord1[1] - coord2[1])):\n            return True\n        return False\n\n    def prio_col_check(self, coord, pcoord, t, paths=None, conn_8=False,\n                       recursive=False):\n        \"\"\"Collision checking with paths passed as constraints\n\n        coord  - current node\n        pcoord - previous node\n        t      - timestep\n        paths  - paths that need to be avoided\n\n        \"\"\"\n        if not isinstance(coord, tuple):\n            coord = tuple(coord)\n        if not isinstance(pcoord, tuple):\n            pcoord = tuple(pcoord)\n        if paths is not None:\n  
          col_set = []\n            adder = add_col_set\n            if recursive:\n                adder = add_col_set_recursive\n            else:\n                for i in range(len(coord)):\n                    for j in range(len(paths[t])):\n                        # simultaneous occupation\n                        if coord[i] == paths[t][j]:\n                            col_set = adder([frozenset([i])], col_set)\n                            return col_set\n                        # pass-through and cross-over\n                        disp = [pcoord[i][0] - paths[t - 1][j][0],\n                                pcoord[i][1] - paths[t - 1][j][1]]\n                        if abs(disp[1]) > 1 or abs(disp[0]) > 1:\n                            continue\n                        if (disp[0] == -(coord[i][0] - paths[t][j][0]) and\n                                disp[1] == -(coord[i][0] - paths[t][j][1])):\n                            col_set = adder([frozenset([i])], col_set)\n                            return col_set\n        return None\n\n\nclass NoRotationChecker(interface.Planner_Edge_Checker):\n    \"\"\"Used to wrap edge checking so more complex graphs can be cleanly\n    handled (may require keeping track of state for non-trivial graphs\n\n    Collision checking that doesn't allow rotations (i.e. robots moving\n    into the place that was just vacated\n\n    \"\"\"\n    def __init__(self):\n        \"\"\"Takes no arguments, because on grid graph, only the\n        coordinates matter\n\n        \"\"\"\n        pass\n\n    def col_check(self, c1, recursive):\n        \"\"\"Checks for collisions at a single point.  
Returns either a M*\n        or rM* collision set in the form of sets, depending on the\n        setting of recursive.\n\n        \"\"\"\n        col_set = []\n        # Select the function to be used for adding collision sets\n        adder = add_col_set\n        if recursive:\n            adder = add_col_set_recursive\n        for i in range(len(c1) - 1):\n            for j in range(i + 1, len(c1)):\n                if c1[i] == c1[j]:\n                    col_set = adder([frozenset([i, j])], col_set)\n        return col_set\n\n    def cross_over(self, c1, c2, recursive=False):\n        \"\"\"Detects cross over collisions as well as pass through\n        collisions\n\n        \"\"\"\n        col_set = []\n        # Select the function to be used for adding collision sets\n        adder = add_col_set\n        if recursive:\n            adder = add_col_set_recursive\n        for i in range(len(c1) - 1):\n            for j in range(i + 1, len(c1)):\n                # compute current displacement vector\n                if c1[i] is None or c1[j] is None or c2[i] is None or c2[j] \\\n                        is None:\n                    continue\n                disp = (c1[i][0] - c1[j][0], c1[i][1] - c1[j][1])\n                if abs(disp[1]) > 1 or abs(disp[0]) > 1:\n                    continue\n                # Compute previous displacement vector.  
Have a cross over or\n                # pass through collision if the two displacement vectors are\n                # opposites\n                # pdisp = [c2[i][0] - c2[j][0], c2[i][1] - c2[j][1]]\n                if (disp[0] == -(c2[i][0] - c2[j][0]) and\n                        disp[1] == -(c2[i][1] - c2[j][1])):\n                    col_set = adder([frozenset([i, j])], col_set)\n                elif c1[i] == c2[j] or c1[j] == c2[i]:\n                    # There is a rotation, which is banned\n                    col_set = adder([frozenset([i, j])], col_set)\n        return col_set\n\n\nclass Lazy_Edge_Checker(interface.Planner_Edge_Checker):\n    \"\"\"Used to wrap edge checking so more complex graphs can be cleanly\n    handled (may require keeping track of state for non-trivial graphs\n\n    \"\"\"\n    def __init__(self):\n        \"\"\"Takes no arguments, because on grid graph, only the\n        coordinates matter\n\n        \"\"\"\n        pass\n\n    def col_check(self, c1, recursive):\n        \"\"\"Checks for collisions at a single point.  
Returns either a M*\n        or rM* collision set in the form of sets, depending on the\n        setting of recursive.\n\n        \"\"\"\n        col_set = []\n        # Select the function to be used for adding collision sets\n        adder = add_col_set\n        if recursive:\n            adder = add_col_set_recursive\n        for i in range(len(c1) - 1):\n            for j in range(i + 1, len(c1)):\n                if c1[i] == c1[j]:\n                    col_set = adder([frozenset([i, j])], col_set)\n                    return col_set\n        return col_set\n\n    def pass_through(self, c1, c2, recursive=False):\n        \"\"\"returns a tuple of colliding robots, or set of tuples if\n        recursive\n\n        \"\"\"\n        col_set = []\n        # Select the function to be used for adding collision sets\n        adder = add_col_set\n        if recursive:\n            adder = add_col_set_recursive\n        for i in range(len(c1) - 1):\n            for j in range(i + 1, len(c1)):\n                if c1[i] == c2[j] and c1[j] == c2[i]:\n                    col_set = adder([frozenset([i, j])], col_set)\n                    return col_set\n        return col_set\n\n    def cross_over(self, c1, c2, recursive=False):\n        \"\"\"Detects cross over collisions as well as pass through\n        collisions\n\n        \"\"\"\n        col_set = []\n        # Select the function to be used for adding collision sets\n        adder = add_col_set\n        if recursive:\n            adder = add_col_set_recursive\n        for i in range(len(c1) - 1):\n            for j in range(i + 1, len(c1)):\n                # compute current displacement vector\n                disp = [c1[i][0] - c1[j][0], c1[i][1] - c1[j][1]]\n                if abs(disp[1]) > 1 or abs(disp[0]) > 1:\n                    continue\n                # Compute previous displacement vector.  
Have a cross\n                # over or pass through collision if the two displacement\n                # vectors are opposites\n                # pdisp = [c2[i][0] - c2[j][0], c2[i][1] - c2[j][1]]\n                if (disp[0] == -(c2[i][0] - c2[j][0]) and\n                        disp[1] == -(c2[i][1] - c2[j][1])):\n                    col_set = adder([frozenset([i, j])], col_set)\n                    return col_set\n        return col_set\n"
  },
  {
    "path": "parameters.py",
    "content": "import numpy as np\n# Learning parameters\n\ngamma                   = .95  # discount rate for advantage estimation and reward discounting\nLR_Q                    = 2.e-5  # 8.e-5 / NUM_THREADS # default: 1e-5\nADAPT_LR                = True\nADAPT_COEFF             = 5.e-5  # the coefficient A in LR_Q/sqrt(A*steps+1) for calculating LR\nEXPERIENCE_BUFFER_SIZE  = 256\nmax_episode_length      = 256\nIL_MAX_EP_LENGTH        = 64\nepisode_count           = 0\n\n# observer parameters\nOBS_SIZE                = 11   # the size of the FOV grid to apply to each agent\nNUM_FUTURE_STEPS        = 3\n\n# environment parameters\nENVIRONMENT_SIZE        = (10, 60)  # the total size of the environment (length of one side) , Starting Point of Curriculum Only\nWALL_COMPONENTS         = (1, 21)    # Starting Params of Curriculum = TRUE\nOBSTACLE_DENSITY        = (0, 0.75)  # range of densities   Starting Params of Curriculum = TRUE\n\nDIAG_MVMT               = False  # Diagonal movements allowed?\na_size                  = 5 + int(DIAG_MVMT) * 4\nNUM_META_AGENTS         = 9\nNUM_IL_META_AGENTS      = 4\n\nNUM_THREADS             = 8 # int(multiprocessing.cpu_count() / (2 * NUM_META_AGENTS))\nNUM_BUFFERS             = 1  # NO EXPERIENCE REPLAY int(NUM_THREADS / 2)\n\n# training parameters\nSUMMARY_WINDOW          = 10\nload_model              = False\nRESET_TRAINER           = False\ntraining_version        = 'astar3_continuous_0.5IL_ray2'\nmodel_path              = 'model_' + training_version\ngifs_path               = 'gifs_' + training_version\ntrain_path              = 'train_' + training_version\nOUTPUT_GIFS             = False  # Only for RL gifs\nGIFS_FREQUENCY_RL       = 512\nOUTPUT_IL_GIFS          = False\nIL_GIF_PROB             = 0.\n\n\n# Imitation options\nPRIMING_LENGTH          = 0   # number of episodes at the beginning to train only on demonstrations\nMSTAR_CALL_FREQUENCY    = 1\n\n# observation variables\nNUM_CHANNEL             = 8 + 
NUM_FUTURE_STEPS\n\n# others\nEPISODE_START           = episode_count\nTRAINING                = True\nEPISODE_SAMPLES         = EXPERIENCE_BUFFER_SIZE  # 64\nGLOBAL_NET_SCOPE        = 'global'\nswarm_reward            = [0] * NUM_META_AGENTS\nswarm_targets           = [0] * NUM_META_AGENTS\n\n# Shared arrays for tensorboard\nepisode_rewards         = [[] for _ in range(NUM_META_AGENTS)] \nepisode_finishes        = [[] for _ in range(NUM_META_AGENTS)]\nepisode_lengths         = [[] for _ in range(NUM_META_AGENTS)]\nepisode_mean_values     = [[] for _ in range(NUM_META_AGENTS)]\nepisode_invalid_ops     = [[] for _ in range(NUM_META_AGENTS)]\nepisode_stop_ops        = [[] for _ in range(NUM_META_AGENTS)]\nepisode_wrong_blocking  = [[] for _ in range(NUM_META_AGENTS)]\nrollouts                = [None for _ in range(NUM_META_AGENTS)]\nGIF_frames              = []\n\n# Joint variables \njoint_actions           = [{} for _ in range(NUM_META_AGENTS)]\njoint_env               = [None for _ in range(NUM_META_AGENTS)]\njoint_observations      =[{} for _ in range(NUM_META_AGENTS)]\njoint_rewards           = [{} for _ in range(NUM_META_AGENTS)]\njoint_done              = [{} for _ in range(NUM_META_AGENTS)]\n\n\nenv_params              = [[ [WALL_COMPONENTS[0], WALL_COMPONENTS[1]] , [OBSTACLE_DENSITY[0],OBSTACLE_DENSITY[1]]]  for _ in range(NUM_META_AGENTS)]\n\n\n\n\nclass JOB_OPTIONS:\n    getExperience = 1\n    getGradient = 2\n\n\nclass COMPUTE_OPTIONS:\n    multiThreaded = 1\n    synchronous = 2\n    \n\nJOB_TYPE = JOB_OPTIONS.getGradient\nCOMPUTE_TYPE = COMPUTE_OPTIONS.multiThreaded\n"
  },
  {
    "path": "requirements.txt",
    "content": "absl-py==0.9.0\naiohttp==3.6.2\naioredis==1.3.1\nappdirs==1.4.4\nastor==0.8.1\nasync-timeout==3.0.1\nattrs==19.3.0\nbackcall==0.1.0\nbeautifulsoup4==4.9.1\nblessings==1.7\ncachetools==4.1.1\nCairoSVG==2.4.2\ncertifi==2020.4.5.2\ncffi==1.14.0\nchardet==3.0.4\nclick==7.1.2\ncloudpickle==1.2.2\ncolorama==0.4.3\ncolorful==0.5.4\ncontextvars==2.4\ncrowdai-api==0.1.22\ncssselect2==0.3.0\ncycler==0.10.0\nCython==0.29.21\ndecorator==4.4.2\ndefusedxml==0.6.0\ndill==0.3.2\ndistlib==0.3.0\nfilelock==3.0.12\nFlask==1.1.2\nFlask-Cors==3.0.8\nFlask-SocketIO==4.3.0\nfuture==0.18.2\ngast==0.2.2\ngoogle==3.0.0\ngoogle-api-core==1.22.1\ngoogle-auth==1.20.1\ngoogle-pasta==0.2.0\ngoogleapis-common-protos==1.52.0\ngpustat==0.6.0\ngrpcio==1.28.1\ngym==0.17.3\nh5py==2.10.0\nhiredis==1.1.0\nidna==2.9\nidna-ssl==1.1.0\nimagecodecs==2020.2.18\nimageio==2.8.0\nimmutables==0.14\nimportlib-metadata==1.6.1\nimportlib-resources==1.5.0\nipython==7.14.0\nipython-genutils==0.2.0\nitsdangerous==1.1.0\njedi==0.17.0\nJinja2==2.11.2\njsonschema==3.2.0\nKeras==2.0.0\nKeras-Applications==1.0.8\nKeras-Preprocessing==1.1.0\nkiwisolver==1.2.0\nlxml==4.5.1\nMarkdown==3.2.1\nMarkupSafe==1.1.1\nmatplotlib==3.2.1\nmkl-fft==1.1.0\nmkl-random==1.1.1\nmkl-service==2.3.0\nmock==4.0.2\nmore-itertools==8.3.0\nmsgpack==1.0.0\nmsgpack-numpy==0.4.6.post0\nmultidict==4.7.6\nmultiprocess==0.70.10\nnetworkx==2.4\nnumpy==1.18.2\nnvidia-ml-py3==7.352.0\nopencensus==0.7.10\nopencensus-context==0.1.1\nopt-einsum==3.2.1\npackaging==20.4\npandas==1.0.4\nparso==0.7.0\npathos==0.2.6\npexpect==4.8.0\npickleshare==0.7.5\nPillow==7.1.2\npluggy==0.13.1\npox==0.2.8\nppft==1.6.6.2\nprometheus-client==0.8.0\nprompt-toolkit==3.0.5\nprotobuf==3.11.3\npsutil==5.7.2\nptyprocess==0.6.0\npy==1.8.1\npy-spy==0.3.3\npyarrow==0.17.1\npyasn1==0.4.8\npyasn1-modules==0.2.8\npycparser==2.20\npyglet==1.5.0\nPygments==2.6.1\npyparsing==2.4.7\npyrsistent==0.16.0\npytest==5.4.3\npytest-runner==5.2\npython-dateutil==2.8.1\npython-engineio==3.13.0\npython-gitlab==2.3.1\npython-socketio==4.6.0\npytz==2020.1\nPyWavelets==1.1.1\nPyYAML==5.3.1\nray==0.8.7\nrecordtype==1.3\nredis==3.4.1\nrequests==2.23.0\nrsa==4.6\nscikit-image==0.17.2\nscipy==1.4.1\nsix==1.14.0\nsoupsieve==2.0.1\nsumolib==1.6.0\nsvgutils==0.3.1\ntensorboard==1.11.0\ntensorflow==1.11.0\ntensorflow-estimator==1.15.1\ntermcolor==1.1.0\nTheano==1.0.4\ntifffile==2020.5.11\ntimeout-decorator==0.4.1\ntinycss2==1.0.2\ntoml==0.10.1\ntox==3.15.2\ntqdm==4.50.2\ntraci==1.6.0\ntraitlets==4.3.3\ntyping-extensions==3.7.4.2\nurllib3==1.25.9\nvirtualenv==20.0.21\nwcwidth==0.1.9\nwebencodings==0.5.1\nWerkzeug==1.0.1\nwrapt==1.12.1\nyarl==1.5.1\nzipp==3.1.0\n"
  }
]