Repository: marmotlab/PRIMAL2 Branch: main Commit: 49f75876502f Files: 38 Total size: 526.1 KB Directory structure: gitextract_3v6j6_ws/ ├── .gitignore ├── Env_Builder.py ├── GroupLock.py ├── LICENSE.md ├── Map_Generator.py ├── Observer_Builder.py ├── Primal2Env.py ├── Primal2Observer.py ├── README.md ├── Ray_ACNet.py ├── Runner.py ├── Worker.py ├── driver.py ├── od_mstar3/ │ ├── SortedCollection.py │ ├── col_checker.cpp │ ├── col_checker.hpp │ ├── col_set.hpp │ ├── col_set_addition.py │ ├── cython_od_mstar.cpp │ ├── cython_od_mstar.pyx │ ├── grid_planning.cpp │ ├── grid_planning.hpp │ ├── grid_policy.cpp │ ├── grid_policy.hpp │ ├── interface.py │ ├── mstar_type_defs.hpp │ ├── mstar_utils.hpp │ ├── od_mstar.cpp │ ├── od_mstar.hpp │ ├── od_mstar.py │ ├── od_vertex.hpp │ ├── policy.cpp │ ├── policy.hpp │ ├── prune_graph.py │ ├── setup.py │ └── workspace_graph.py ├── parameters.py └── requirements.txt ================================================ FILE CONTENTS ================================================ ================================================ FILE: .gitignore ================================================ # Compiled source # ###################### *.pyc *.so *.so.* *.dll *.o *.a *.hpp.gch # Packages # ###################### # it's better to unpack these files and commit the raw source # git has its own built in compression methods *.7z *.dmg *.gz *.iso *.jar *.rar *.tar *.zip # Logs and databases # ###################### *.log *.sql *.sqlite *.mat # Micro$oft Office # ###################### *.pptx *.PPTX *.docx *.DOCX *.xlsx *.XLSX # LateX ###################### *.aux *.log *.bbl *.blg *.toc *.lof *.lot *.out *.idx *.ilg *.ind *.bag *.BAG # Others # ###################### .depend \#* *.flv *.FLV *.mov *.MOV *.avi *.AVI *.mp4 *.MP4 *.mkv *.MKV *.mp3 *.MP3 *.wmv *.WMV *.pdf *.PDF .git/ auto/ # Learning # ################### #checkpoint *.cptk.data-* *.cptk.index *.cptk.meta *.gif *.GIF # Python stuffs ################### __pycache__/ results/ .idea/ 
# (flattened-dump scaffolding: tail of .gitignore, then the file banner)
# model_v1_oIL_no_prune/checkpoint
# .vscode/settings.json
# ================================================
# FILE: Env_Builder.py
# ================================================
import copy
from operator import sub, add
import gym
import numpy as np
import math, time
import warnings
from od_mstar3.col_set_addition import OutOfTimeError, NoSolutionError
from od_mstar3 import od_mstar
from od_mstar3 import cpp_mstar
from GroupLock import Lock
from matplotlib.colors import *
from gym.envs.classic_control import rendering
import imageio
from gym import spaces


def make_gif(images, fname):
    """Render *images* (a sequence of frames) into a GIF at *fname* and return imageio's result."""
    gif = imageio.mimwrite(fname, images, subrectangles=True)
    print("wrote gif")
    return gif


def opposite_actions(action, isDiagonal=False):
    """Return the action that undoes *action*; action 0 ("stay") maps to -1.

    Only the 4-connected action set is supported.
    FIX: previously this raised the constant ``NotImplemented``, which in
    Python 3 produces ``TypeError: exceptions must derive from BaseException``;
    it now raises ``NotImplementedError`` as intended.
    """
    if isDiagonal:
        # Diagonal opposites were never implemented; the table below only
        # covers the 4-connected action set.
        checking_table = {0: -1, 1: 3, 2: 4, 3: 1, 4: 2}
        raise NotImplementedError
    else:
        checking_table = {0: -1, 1: 3, 2: 4, 3: 1, 4: 2}
    return checking_table[action]


def action2dir(action):
    """Map a discrete action id (0-4) to a (dx, dy) unit step; 0 is 'stay'."""
    checking_table = {0: (0, 0), 1: (0, 1), 2: (1, 0), 3: (0, -1), 4: (-1, 0)}
    return checking_table[action]


def dir2action(direction):
    """Inverse of :func:`action2dir`: map a (dx, dy) unit step to its action id."""
    checking_table = {(0, 0): 0, (0, 1): 1, (1, 0): 2, (0, -1): 3, (-1, 0): 4}
    return checking_table[direction]


def tuple_plus(a, b):
    """ a + b, element-wise over two tuples """
    return tuple(map(add, a, b))


def tuple_minus(a, b):
    """ a - b, element-wise over two tuples """
    return tuple(map(sub, a, b))


def _heap(ls, max_length):
    """Trim *ls* in place to at most *max_length* items by dropping the
    oldest (front) entries, then return it."""
    while True:
        if len(ls) > max_length:
            ls.pop(0)
        else:
            return ls


def get_key(dict, value):
    """Return the list of keys in *dict* whose value equals *value*."""
    return [k for k, v in dict.items() if v == value]


def getAstarDistanceMap(map: np.array, start: tuple, goal: tuple, isDiagonal: bool = False):
    """
    returns a numpy array of same dims as map with the distance to the goal from each coord
    :param map: a n by m np array, where -1 denotes obstacle
    :param start: start_position
    :param goal: goal_position
    :return: optimal distance map
    """

    def lowestF(fScore, openSet):
        # find entry in openSet with lowest fScore
        assert (len(openSet) > 0)
        minF = 2 ** 31 - 1
        minNode = None
        for (i, j) in openSet:
            if (i, j) not in fScore:
                continue
            if fScore[(i, j)] < minF:
                minF = fScore[(i, j)]
                minNode = (i, j)
        return minNode

    def getNeighbors(node):
        # return set of neighbors to the given node
        # NOTE(review): with isDiagonal=True this iterates moves 1-8, but
        # action2dir only defines moves 0-4, so diagonal search would
        # KeyError — left unchanged; only 4-connected maps are used here.
        n_moves = 9 if isDiagonal else 5
        neighbors = set()
        for move in range(1, n_moves):  # we dont want to include 0 or it will include itself
            direction = action2dir(move)
            dx = direction[0]
            dy = direction[1]
            ax = node[0]
            ay = node[1]
            if (ax + dx >= map.shape[0] or ax + dx < 0 or ay + dy >= map.shape[1] or ay + dy < 0):
                # out of bounds
                continue
            if map[ax + dx, ay + dy] == -1:
                # collide with static obstacle
                continue
            neighbors.add((ax + dx, ay + dy))
        return neighbors

    # NOTE THAT WE REVERSE THE DIRECTION OF SEARCH SO THAT THE GSCORE WILL BE DISTANCE TO GOAL
    start, goal = goal, start
    start, goal = tuple(start), tuple(goal)
    # The set of nodes already evaluated
    closedSet = set()
    # The set of currently discovered nodes that are not evaluated yet.
    # Initially, only the start node is known.
    openSet = set()
    openSet.add(start)
    # For each node, which node it can most efficiently be reached from.
    # If a node can be reached from many nodes, cameFrom will eventually contain the
    # most efficient previous step.
    cameFrom = dict()
    # For each node, the cost of getting from the start node to that node.
    gScore = dict()  # default value infinity
    # The cost of going from start to start is zero.
    gScore[start] = 0
    # For each node, the total cost of getting from the start node to the goal
    # by passing by that node. That value is partly known, partly heuristic.
    fScore = dict()  # default infinity
    # our heuristic is euclidean distance to goal
    heuristic_cost_estimate = lambda x, y: math.hypot(x[0] - y[0], x[1] - y[1])
    # For the first node, that value is completely heuristic.
    fScore[start] = heuristic_cost_estimate(start, goal)
    while len(openSet) != 0:
        # current = the node in openSet having the lowest fScore value
        current = lowestF(fScore, openSet)
        openSet.remove(current)
        closedSet.add(current)
        for neighbor in getNeighbors(current):
            if neighbor in closedSet:
                continue  # Ignore the neighbor which is already evaluated.
            if neighbor not in openSet:  # Discover a new node
                openSet.add(neighbor)
            # The distance from start to a neighbor
            # in our case the distance between is always 1
            tentative_gScore = gScore[current] + 1
            if tentative_gScore >= gScore.get(neighbor, 2 ** 31 - 1):
                continue  # This is not a better path.
            # This path is the best until now. Record it!
            cameFrom[neighbor] = current
            gScore[neighbor] = tentative_gScore
            fScore[neighbor] = gScore[neighbor] + heuristic_cost_estimate(neighbor, goal)
    # parse through the gScores
    Astar_map = map.copy()
    for (i, j) in gScore:
        Astar_map[i, j] = gScore[i, j]
    return Astar_map
########### WARNING: direction_history[i] means the action taking from i-1 step, resulting in the state of step i, such that len(direction_history) == len(position_history) ########### """ def __init__(self, isDiagonal=False): self._path_count = -1 self.IsDiagonal = isDiagonal self.freeze = 0 self.position, self.position_history, self.ID, self.direction, self.direction_history, \ self.action_history, self.goal_pos, self.distanceMap, self.dones, self.status, self.next_goal, self.next_distanceMap \ = None, [], None, None, [(None, None)], [(None, None)], None, None, 0, None, None, None def reset(self): self._path_count = -1 self.freeze = 0 self.position, self.position_history, self.ID, self.direction, self.direction_history, \ self.action_history, self.goal_pos, self.distanceMap, self.dones, self.status, self.next_goal, self.next_distanceMap \ = None, [], None, None, [(None, None)], [(None, None)], None, None, 0, None, None, None def move(self, pos, status=None): if pos is None: pos = self.position if self.position is not None: assert pos in [self.position, tuple_plus(self.position, (0, 1)), tuple_plus(self.position, (0, -1)), tuple_plus(self.position, (1, 0)), tuple_plus(self.position, (-1, 0)), ], \ "only 1 step 1 cell allowed. 
Previous pos:" + str(self.position) self.add_history(pos, status) def add_history(self, position, status): assert len(position) == 2 self.status = status self._path_count += 1 self.position = tuple(position) if self._path_count != 0: direction = tuple_minus(position, self.position_history[-1]) action = dir2action(direction) assert action in list(range(4 + 1)), \ "direction not in actionDir, something going wrong" self.direction_history.append(direction) self.action_history.append(action) self.position_history.append(tuple(position)) self.position_history = _heap(self.position_history, 30) self.direction_history = _heap(self.direction_history, 30) self.action_history = _heap(self.action_history, 30) class World: """ Include: basic world generation rules, blank map generation and collision checking. reset_world: Do not add action pruning, reward structure or any other routine for training in this class. Pls add in upper class MAPFEnv """ def __init__(self, map_generator, num_agents, isDiagonal=False): self.num_agents = num_agents self.manual_world = False self.manual_goal = False self.goal_generate_distance = 2 self.map_generator = map_generator self.isDiagonal = isDiagonal self.agents_init_pos, self.goals_init_pos = None, None self.reset_world() self.init_agents_and_goals() def reset_world(self): """ generate/re-generate a world map, and compute its corridor map """ def scan_for_agents(state_map): agents = {} for i in range(state_map.shape[0]): for j in range(state_map.shape[1]): if state_map[i, j] > 0: agentID = state_map[i, j] agents.update({agentID: (i, j)}) return agents self.state, self.goals_map = self.map_generator() # detect manual world if (self.state > 0).any(): self.manual_world = True self.agents_init_pos = scan_for_agents(self.state) if self.num_agents is not None and self.num_agents != len(self.agents_init_pos.keys()): warnings.warn("num_agent does not match the actual agent number in manual map! 
" "num_agent has been set to be consistent with manual map.") self.num_agents = len(self.agents_init_pos.keys()) self.agents = {i: copy.deepcopy(Agent()) for i in range(1, self.num_agents + 1)} else: assert self.num_agents is not None self.agents = {i: copy.deepcopy(Agent()) for i in range(1, self.num_agents + 1)} # detect manual goals_map if self.goals_map is not None: self.manual_goal = True self.goals_init_pos = scan_for_agents(self.goals_map) if self.manual_goal else None else: self.goals_map = np.zeros([self.state.shape[0], self.state.shape[1]]) self.corridor_map = {} self.restrict_init_corridor = True self.visited = [] self.corridors = {} self.get_corridors() def reset_agent(self): """ remove all the agents (with their travel history) and goals in the env, rebase the env into a blank one """ self.agents = {i: copy.deepcopy(Agent()) for i in range(1, self.num_agents + 1)} self.state[self.state > 0] = 0 # remove agents in the map def get_corridors(self): """ in corridor_map , output = list: list[0] : if In corridor, corridor id , else -1 list[1] : If Inside Corridor = 1 If Corridor Endpoint = 2 If Free Cell Outside Corridor = 0 If Obstacle = -1 """ corridor_count = 1 # Initialize corridor map for i in range(self.state.shape[0]): for j in range(self.state.shape[1]): if self.state[i, j] >= 0: self.corridor_map[(i, j)] = [-1, 0] else: self.corridor_map[(i, j)] = [-1, -1] # Compute All Corridors and End-points, store them in self.corridors , update corridor_map for i in range(self.state.shape[0]): for j in range(self.state.shape[1]): positions = self.blank_env_valid_neighbor(i, j) if (positions.count(None)) == 2 and (i, j) not in self.visited: allowed = self.check_for_singular_state(positions) if not allowed: continue self.corridors[corridor_count] = {} self.corridors[corridor_count]['Positions'] = [(i, j)] self.corridor_map[(i, j)] = [corridor_count, 1] self.corridors[corridor_count]['EndPoints'] = [] self.visited.append((i, j)) for num in range(4): if 
positions[num] is not None: self.visit(positions[num][0], positions[num][1], corridor_count) corridor_count += 1 # Get Delta X , Delta Y for the computed corridors ( Delta= Displacement to corridor exit) for k in range(1, corridor_count): if k in self.corridors: if len(self.corridors[k]['EndPoints']) == 2: self.corridors[k]['DeltaX'] = {} self.corridors[k]['DeltaY'] = {} pos_a = self.corridors[k]['EndPoints'][0] pos_b = self.corridors[k]['EndPoints'][1] self.corridors[k]['DeltaX'][pos_a] = (pos_a[0] - pos_b[0]) # / (max(1, abs(pos_a[0] - pos_b[0]))) self.corridors[k]['DeltaX'][pos_b] = -1 * self.corridors[k]['DeltaX'][pos_a] self.corridors[k]['DeltaY'][pos_a] = (pos_a[1] - pos_b[1]) # / (max(1, abs(pos_a[1] - pos_b[1]))) self.corridors[k]['DeltaY'][pos_b] = -1 * self.corridors[k]['DeltaY'][pos_a] else: print('Weird2') # Rearrange the computed corridor list such that it becomes easier to iterate over the structure # Basically, sort the self.corridors['Positions'] list in a way that the first element of the list is # adjacent to Endpoint[0] and the last element of the list is adjacent to EndPoint[1] # If there is only 1 endpoint, the sorting doesn't matter since blocking is easy to compute for t in range(1, corridor_count): positions = self.blank_env_valid_neighbor(self.corridors[t]['EndPoints'][0][0], self.corridors[t]['EndPoints'][0][1]) for position in positions: if position is not None and self.corridor_map[position][0] == t: break index = self.corridors[t]['Positions'].index(position) if index == 0: pass if index != len(self.corridors[t]['Positions']) - 1: temp_list = self.corridors[t]['Positions'][0:index + 1] temp_list.reverse() temp_end = self.corridors[t]['Positions'][index + 1:] self.corridors[t]['Positions'] = [] self.corridors[t]['Positions'].extend(temp_list) self.corridors[t]['Positions'].extend(temp_end) elif index == len(self.corridors[t]['Positions']) - 1 and len(self.corridors[t]['EndPoints']) == 2: positions2 = 
self.blank_env_valid_neighbor(self.corridors[t]['EndPoints'][1][0], self.corridors[t]['EndPoints'][1][1]) for position2 in positions2: if position2 is not None and self.corridor_map[position2][0] == t: break index2 = self.corridors[t]['Positions'].index(position2) temp_list = self.corridors[t]['Positions'][0:index2 + 1] temp_list.reverse() temp_end = self.corridors[t]['Positions'][index2 + 1:] self.corridors[t]['Positions'] = [] self.corridors[t]['Positions'].extend(temp_list) self.corridors[t]['Positions'].extend(temp_end) self.corridors[t]['Positions'].reverse() else: if len(self.corridors[t]['EndPoints']) == 2: print("Weird3") self.corridors[t]['StoppingPoints'] = [] if len(self.corridors[t]['EndPoints']) == 2: position_first = self.corridors[t]['Positions'][0] position_last = self.corridors[t]['Positions'][-1] self.corridors[t]['StoppingPoints'].append([position_first[0], position_first[1]]) self.corridors[t]['StoppingPoints'].append([position_last[0], position_last[1]]) else: position_first = self.corridors[t]['Positions'][0] self.corridors[t]['StoppingPoints'].append([position[0], position[1]]) self.corridors[t]['StoppingPoints'].append(None) return def check_for_singular_state(self, positions): counter = 0 for num in range(4): if positions[num] is not None: new_positions = self.blank_env_valid_neighbor(positions[num][0], positions[num][1]) if new_positions.count(None) in [2, 3]: counter += 1 return counter > 0 def visit(self, i, j, corridor_id): positions = self.blank_env_valid_neighbor(i, j) if positions.count(None) in [0, 1]: self.corridors[corridor_id]['EndPoints'].append((i, j)) self.corridor_map[(i, j)] = [corridor_id, 2] return elif positions.count(None) in [2, 3]: self.visited.append((i, j)) self.corridors[corridor_id]['Positions'].append((i, j)) self.corridor_map[(i, j)] = [corridor_id, 1] for num in range(4): if positions[num] is not None and positions[num] not in self.visited: self.visit(positions[num][0], positions[num][1], corridor_id) else: 
def blank_env_valid_neighbor(self, i, j):
    """Return the 4-neighbourhood of cell (i, j) as a fixed-order list
    [right, down, up, left]; an entry is None when that neighbour is out of
    bounds, an obstacle (-1), or when (i, j) itself is an obstacle."""
    possible_positions = [None, None, None, None]
    move = [[0, 1], [1, 0], [-1, 0], [0, -1]]
    if self.state[i, j] == -1:
        return possible_positions
    else:
        for num in range(4):
            x = i + move[num][0]
            y = j + move[num][1]
            if 0 <= x < self.state.shape[0] and 0 <= y < self.state.shape[1]:
                if self.state[x, y] != -1:
                    possible_positions[num] = (x, y)
                    continue
        return possible_positions

def getPos(self, agent_id):
    """Current position of *agent_id* as a tuple."""
    return tuple(self.agents[agent_id].position)

def getDone(self, agentID):
    # get the number of goals that an agent has finished
    return self.agents[agentID].dones

def get_history(self, agent_id, path_id=None):
    """
    :param: path_id: if None, get the last step
    :return: past_pos: (x,y), past_direction: int
    """
    if path_id is None:
        # FIX: Agent stores its step counter as `_path_count`; the old code
        # read the non-existent attribute `path_count`, which raised
        # AttributeError whenever path_id was omitted.
        count = self.agents[agent_id]._path_count
        path_id = count - 1 if count > 0 else 0
    try:
        return self.agents[agent_id].position_history[path_id], self.agents[agent_id].direction_history[path_id]
    except IndexError:
        # best-effort: report and fall through (returns None), as before
        print("you are giving an invalid path_id")

def getGoal(self, agent_id):
    """Current goal of *agent_id* as a tuple."""
    return tuple(self.agents[agent_id].goal_pos)
""" def corridor_restricted_init_poss(state_map, corridor_map, goal_map, id_list=None): """ generate agent init positions when corridor init population is restricted return a dict of positions {agentID:(x,y), ...} """ if id_list is None: id_list = list(range(1, self.num_agents + 1)) free_space1 = list(np.argwhere(state_map == 0)) free_space1 = [tuple(pos) for pos in free_space1] corridors_visited = [] manual_positions = {} break_completely = False for idx in id_list: if break_completely: return None pos_set = False agentID = idx while not pos_set: try: assert (len(free_space1) > 1) random_pos = np.random.choice(len(free_space1)) except AssertionError or ValueError: print('wrong agent') self.reset_world() self.init_agents_and_goals() break_completely = True if idx == id_list[-1]: return None break position = free_space1[random_pos] cell_info = corridor_map[position[0], position[1]][1] if cell_info in [0, 2]: if goal_map[position[0], position[1]] != agentID: manual_positions.update({idx: (position[0], position[1])}) free_space1.remove(position) pos_set = True elif cell_info == 1: corridor_id = corridor_map[position[0], position[1]][0] if corridor_id not in corridors_visited: if goal_map[position[0], position[1]] != agentID: manual_positions.update({idx: (position[0], position[1])}) corridors_visited.append(corridor_id) free_space1.remove(position) pos_set = True else: free_space1.remove(position) else: print("Very Weird") # print('Manual Positions' ,manual_positions) return manual_positions # no corridor population restriction if not self.restrict_init_corridor or (self.restrict_init_corridor and self.manual_world): self.put_goals(list(range(1, self.num_agents + 1)), self.goals_init_pos) self._put_agents(list(range(1, self.num_agents + 1)), self.agents_init_pos) # has corridor population restriction else: check = self.put_goals(list(range(1, self.num_agents + 1)), self.goals_init_pos) if check is not None: manual_positions = corridor_restricted_init_poss(self.state, 
def _put_agents(self, id_list, manual_pos=None):
    """
    put some agents in the blank env, saved history data in self.agents and self.state
    get distance map for the agents
    :param id_list: a list of agent_id
           manual_pos: a dict of manual positions {agentID: (x,y),...}
    :raises ValueError: if a chosen cell is occupied or coincides with the agent's own goal
    """
    if manual_pos is None:
        # randomly init agents everywhere
        # NOTE(review): a cell counts as "free" when EITHER the state map OR
        # the goals map is empty there; logical_and may have been intended —
        # left unchanged to preserve behaviour.
        free_space = np.argwhere(np.logical_or(self.state == 0, self.goals_map == 0) == 1)
        new_idx = np.random.choice(len(free_space), size=len(id_list), replace=False)
        init_poss = [free_space[idx] for idx in new_idx]
    else:
        assert len(manual_pos.keys()) == len(id_list)
        init_poss = [manual_pos[agentID] for agentID in id_list]
    assert len(init_poss) == len(id_list)
    # FIX: this dict was previously re-created INSIDE the loop below, so only
    # the last agent's entry survived; initialise it once before the loop.
    self.agents_init_pos = {}
    for idx, agentID in enumerate(id_list):
        self.agents[agentID].ID = agentID
        if self.state[init_poss[idx][0], init_poss[idx][1]] in [0, agentID] \
                and self.goals_map[init_poss[idx][0], init_poss[idx][1]] != agentID:
            self.state[init_poss[idx][0], init_poss[idx][1]] = agentID
            self.agents_init_pos.update({agentID: (init_poss[idx][0], init_poss[idx][1])})
        else:
            print(self.state)
            print(init_poss)
            raise ValueError('invalid manual_pos for agent' + str(agentID) + ' at: ' + str(init_poss[idx]))
        self.agents[agentID].move(init_poss[idx])
        self.agents[agentID].distanceMap = getAstarDistanceMap(self.state, self.agents[agentID].position,
                                                               self.agents[agentID].goal_pos)
self.agents[agentID].goal_pos for agentID in range(1, self.num_agents + 1)} if previous_goals is None: previous_goals = {agentID: None for agentID in id_list} if distance is None: distance = self.goal_generate_distance free_for_all = np.logical_and(self.state == 0, self.goals_map == 0) # print(previous_goals) if not all(previous_goals.values()): # they are new born agents free_space = np.argwhere(free_for_all == 1) init_idx = np.random.choice(len(free_space), size=len(id_list), replace=False) new_goals = {agentID: tuple(free_space[init_idx[agentID - 1]]) for agentID in id_list} return new_goals else: new_goals = {} for agentID in id_list: free_on_agents = np.logical_and(self.state > 0, self.state != agentID) free_spaces_for_previous_goal = np.logical_or(free_on_agents, free_for_all) # free_spaces_for_previous_goal = np.logical_and(free_spaces_for_previous_goal, self.goals_map==0) if distance > 0: previous_x, previous_y = previous_goals[agentID] x_lower_bound = (previous_x - distance) if (previous_x - distance) > 0 else 0 x_upper_bound = previous_x + distance + 1 y_lower_bound = (previous_y - distance) if (previous_x - distance) > 0 else 0 y_upper_bound = previous_y + distance + 1 free_spaces_for_previous_goal[x_lower_bound:x_upper_bound, y_lower_bound:y_upper_bound] = False free_spaces_for_previous_goal = list(np.argwhere(free_spaces_for_previous_goal == 1)) free_spaces_for_previous_goal = [pos.tolist() for pos in free_spaces_for_previous_goal] try: unique = False counter = 0 while unique == False and counter < 500: init_idx = np.random.choice(len(free_spaces_for_previous_goal)) init_pos = free_spaces_for_previous_goal[init_idx] unique = True if tuple(init_pos) in next_goal_buffer.values() or tuple( init_pos) in curr_goal_buffer.values() or tuple(init_pos) in new_goals.values(): unique = False if previous_goals is not None: if tuple(init_pos) in previous_goals.values(): unique = False counter += 1 if counter >= 500: print('Hard to find Non Conflicting Goal') 
new_goals.update({agentID: tuple(init_pos)}) except ValueError: print('wrong goal') self.reset_world() print(self.agents[1].position) self.init_agents_and_goals() return None return new_goals previous_goals = {agentID: self.agents[agentID].goal_pos for agentID in id_list} if manual_pos is None: new_goals = random_goal_pos(previous_goals, distance=self.goal_generate_distance) else: new_goals = manual_pos if new_goals is not None: # recursive breaker refresh_distance_map = False for agentID in id_list: if self.state[new_goals[agentID][0], new_goals[agentID][1]] >= 0: if self.agents[agentID].next_goal is None: # no next_goal to use # set goals_map self.goals_map[new_goals[agentID][0], new_goals[agentID][1]] = agentID # set agent.goal_pos self.agents[agentID].goal_pos = (new_goals[agentID][0], new_goals[agentID][1]) # set agent.next_goal new_next_goals = random_goal_pos(new_goals, distance=self.goal_generate_distance) if new_next_goals is None: return None self.agents[agentID].next_goal = (new_next_goals[agentID][0], new_next_goals[agentID][1]) # remove previous goal if previous_goals[agentID] is not None: self.goals_map[previous_goals[agentID][0], previous_goals[agentID][1]] = 0 else: # use next_goal as new goal # set goals_map self.goals_map[self.agents[agentID].next_goal[0], self.agents[agentID].next_goal[1]] = agentID # set agent.goal_pos self.agents[agentID].goal_pos = self.agents[agentID].next_goal # set agent.next_goal self.agents[agentID].next_goal = ( new_goals[agentID][0], new_goals[agentID][1]) # store new goal into next_goal # remove previous goal if previous_goals[agentID] is not None: self.goals_map[previous_goals[agentID][0], previous_goals[agentID][1]] = 0 else: print(self.state) print(self.goals_map) raise ValueError('invalid manual_pos for goal' + str(agentID) + ' at: ', str(new_goals[agentID])) if previous_goals[agentID] is not None: # it has a goal! 
if previous_goals[agentID] != self.agents[agentID].position: print(self.state) print(self.goals_map) print(previous_goals) raise RuntimeError("agent hasn't finished its goal but asking for a new goal!") refresh_distance_map = True # compute distance map self.agents[agentID].next_distanceMap = getAstarDistanceMap(self.state, self.agents[agentID].goal_pos, self.agents[agentID].next_goal) if refresh_distance_map: self.agents[agentID].distanceMap = getAstarDistanceMap(self.state, self.agents[agentID].position, self.agents[agentID].goal_pos) return 1 else: return None def CheckCollideStatus(self, movement_dict): """ WARNING: ONLY NON-DIAGONAL IS IMPLEMENTED return collision status and predicted next positions, do not move agent directly :return: 1: action executed, and agents standing on its goal. 0: action executed -1: collision with env (obstacles, out of bound) -2: collision with robot, swap -3: collision with robot, cell-wise """ if self.isDiagonal is True: raise NotImplemented Assumed_newPos_dict = {} newPos_dict = {} status_dict = {agentID: None for agentID in range(1, self.num_agents + 1)} not_checked_list = list(range(1, self.num_agents + 1)) # detect env collision for agentID in range(1, self.num_agents + 1): direction_vector = action2dir(movement_dict[agentID]) newPos = tuple_plus(self.getPos(agentID), direction_vector) Assumed_newPos_dict.update({agentID: newPos}) if newPos[0] < 0 or newPos[0] > self.state.shape[0] or newPos[1] < 0 \ or newPos[1] > self.state.shape[1] or self.state[newPos] == -1: status_dict[agentID] = -1 newPos_dict.update({agentID: self.getPos(agentID)}) Assumed_newPos_dict[agentID] = self.getPos(agentID) not_checked_list.remove(agentID) # collide, stay at the same place # detect swap collision for agentID in copy.deepcopy(not_checked_list): collided_ID = self.state[Assumed_newPos_dict[agentID]] if collided_ID != 0: # some one is standing on the assumed pos if Assumed_newPos_dict[collided_ID] == self.getPos(agentID): # he wants to swap if 
status_dict[agentID] is None: status_dict[agentID] = -2 newPos_dict.update({agentID: self.getPos(agentID)}) # stand still Assumed_newPos_dict[agentID] = self.getPos(agentID) not_checked_list.remove(agentID) if status_dict[collided_ID] is None: status_dict[collided_ID] = -2 newPos_dict.update({collided_ID: self.getPos(collided_ID)}) # stand still Assumed_newPos_dict[collided_ID] = self.getPos(collided_ID) not_checked_list.remove(collided_ID) # detect cell-wise collision for agentID in copy.deepcopy(not_checked_list): other_agents_dict = copy.deepcopy(Assumed_newPos_dict) other_agents_dict.pop(agentID) if Assumed_newPos_dict[agentID] in newPos_dict.values(): status_dict[agentID] = -3 newPos_dict.update({agentID: self.getPos(agentID)}) # stand still Assumed_newPos_dict[agentID] = self.getPos(agentID) not_checked_list.remove(agentID) elif Assumed_newPos_dict[agentID] in other_agents_dict.values(): other_coming_agents = get_key(Assumed_newPos_dict, Assumed_newPos_dict[agentID]) other_coming_agents.remove(agentID) # if the agentID is the biggest among all other coming agents, # allow it to move. 
class MAPFEnv(gym.Env):
    """Gym-style multi-agent path-finding (MAPF) environment.

    Wraps a World (built by `map_generator`) and an observation builder.
    Agent IDs are 1-based everywhere. In non-oneshot mode agents receive a
    new goal after reaching one; in oneshot mode they are removed instead.
    """
    metadata = {"render.modes": ["human", "ansi"]}

    def __init__(self, observer, map_generator, num_agents=None,
                 IsDiagonal=False, frozen_steps=0, isOneShot=False):
        """
        :param observer: ObservationBuilder instance producing per-agent observations
        :param map_generator: callable returning (state_map, goals_map)
        :param num_agents: number of agents; may be refined by set_world()
        :param IsDiagonal: if True, 9 actions (incl. diagonals), else 5
        :param frozen_steps: steps an agent stays frozen after reaching its goal
        :param isOneShot: if True, agents are removed once they reach their goal
        """
        self.observer = observer
        self.map_generator = map_generator
        self.viewer = None

        self.isOneShot = isOneShot
        self.frozen_steps = frozen_steps

        self.num_agents = num_agents
        self.IsDiagonal = IsDiagonal
        self.set_world()
        self.obs_size = self.observer.observation_size
        self.isStandingOnGoal = {i: False for i in range(1, self.num_agents + 1)}

        self.individual_rewards = {i: 0 for i in range(1, self.num_agents + 1)}
        self.mutex = Lock()
        self.GIF_frame = []
        if IsDiagonal:
            self.action_space = spaces.Tuple([spaces.Discrete(self.num_agents), spaces.Discrete(9)])
        else:
            self.action_space = spaces.Tuple([spaces.Discrete(self.num_agents), spaces.Discrete(5)])
        # per-step action penalty, reward for reaching goal, collision penalty
        self.ACTION_COST, self.GOAL_REWARD, self.COLLISION_REWARD = -0.3, 5., -2.

    def getObstacleMap(self):
        """Binary map: 1 where world.state == -1 (obstacle), 0 elsewhere."""
        return (self.world.state == -1).astype(int)

    def getGoals(self):
        """{agentID: goal_pos} for all agents (1-based IDs)."""
        return {i: self.world.agents[i].goal_pos for i in range(1, self.num_agents + 1)}

    def getStatus(self):
        """{agentID: status} for all agents."""
        return {i: self.world.agents[i].status for i in range(1, self.num_agents + 1)}

    def getPositions(self):
        """{agentID: current position} for all agents."""
        return {i: self.world.agents[i].position for i in range(1, self.num_agents + 1)}

    def getLastMovements(self):
        # NOTE(review): position_history is *called* with (-1) here, while
        # listValidActions indexes it as a list — confirm which interface the
        # Agent class actually exposes.
        return {i: self.world.agents[i].position_history(-1) for i in range(1, self.num_agents + 1)}

    def set_world(self):
        """(Re)build the World from map_generator and attach it to the observer."""
        self.world = World(self.map_generator, num_agents=self.num_agents, isDiagonal=self.IsDiagonal)
        self.num_agents = self.world.num_agents
        self.observer.set_env(self.world)

    def _reset(self, *args, **kwargs):
        raise NotImplementedError

    def isInCorridor(self, agentID):
        """
        :param agentID: start from 1 not 0!
        :return: isIn: bool, corridor_ID: int (None when not in a corridor)
        """
        agent_pos = self.world.getPos(agentID)
        # corridor_map[pos][1]: -1/2 mean "not a corridor cell"
        if self.world.corridor_map[(agent_pos[0], agent_pos[1])][1] in [-1, 2]:
            return False, None
        else:
            return True, self.world.corridor_map[(agent_pos[0], agent_pos[1])][0]

    def _observe(self, handles=None):
        """
        Returns Dict of observation {agentid: [...], ...}.

        `handles` may be None (all agents), a single agent ID, or a list of
        agent IDs (must be a subset of valid IDs).
        """
        if handles is None:
            self.obs_dict = self.observer.get_many(list(range(1, self.num_agents + 1)))
        elif handles in list(range(1, self.num_agents + 1)):
            self.obs_dict = self.observer.get_many([handles])
        elif set(handles) == set(handles) & set(list(range(1, self.num_agents + 1))):
            # i.e. handles is a subset of the valid agent IDs
            self.obs_dict = self.observer.get_many(handles)
        else:
            raise ValueError("Invalid agent_id given")
        return self.obs_dict

    def step_all(self, movement_dict):
        """
        Advance every agent one step.

        Agents are forced to freeze self.frozen_steps steps if they are
        standing on their goals. The new goal will be generated at the FIRST
        step it remains on its goal.

        :param movement_dict: {agentID_starting_from_1: action:int 0-4, ...}
                              unmentioned agent will be considered as taking
                              standing still
        :return: obs_of_all:dict, reward_of_single_step:dict
        """
        for agentID in range(1, self.num_agents + 1):
            if self.world.agents[agentID].freeze > self.frozen_steps:  # set frozen agents free
                self.world.agents[agentID].freeze = 0

            if agentID not in movement_dict.keys() or self.world.agents[agentID].freeze:
                movement_dict.update({agentID: 0})  # frozen/unmentioned agents stand still
            else:
                # NOTE(review): due to conditional-expression precedence this
                # assert evaluates to `(x in list(range(5))) if IsDiagonal else
                # list(range(9))` — the non-diagonal branch asserts a truthy
                # list (no-op), and the 5/9 ranges appear swapped. Flagged only;
                # behavior unchanged here.
                assert movement_dict[agentID] in list(range(5)) if self.IsDiagonal else list(range(9)), \
                    'action not in action space'

        status_dict, newPos_dict = self.world.CheckCollideStatus(movement_dict)
        self.world.state[self.world.state > 0] = 0  # remove agents in the map
        put_goal_list = []
        freeze_list = []
        for agentID in range(1, self.num_agents + 1):
            if self.isOneShot and self.world.getDone(agentID) > 0:
                continue  # agent already finished in one-shot mode
            newPos = newPos_dict[agentID]
            self.world.state[newPos] = agentID
            self.world.agents[agentID].move(newPos, status_dict[agentID])
            self.give_moving_reward(agentID)
            if status_dict[agentID] == 1:  # agent reached its goal this step
                if not self.isOneShot:
                    if self.world.agents[agentID].freeze == 0:
                        put_goal_list.append(agentID)
                    if self.world.agents[agentID].action_history[-1] == 0:  # standing still on goal
                        freeze_list.append(agentID)
                    self.world.agents[agentID].freeze += 1
                else:
                    # one-shot: mark done and remove agent + goal from the maps
                    self.world.agents[agentID].status = 2
                    self.world.state[newPos] = 0
                    self.world.goals_map[newPos] = 0
        free_agents = list(range(1, self.num_agents + 1))
        if put_goal_list and not self.isOneShot:
            self.world.put_goals(put_goal_list)
        # remove obs for frozen agents:
        for frozen_agent in freeze_list:
            free_agents.remove(frozen_agent)
        return self._observe(free_agents), self.individual_rewards

    def give_moving_reward(self, agentID):
        raise NotImplementedError

    def listValidActions(self, agent_ID, agent_obs):
        raise NotImplementedError

    def expert_until_first_goal(self, inflation=2.0, time_limit=60.0):
        """Run M* (C++ first, Python fallback) from current positions to goals.

        :return: the joint path, or None on timeout / no solution / crash.
        """
        world = self.getObstacleMap()
        start_positions = []
        goals = []
        start_positions_dir = self.getPositions()
        goals_dir = self.getGoals()
        for i in range(1, self.world.num_agents + 1):
            start_positions.append(start_positions_dir[i])
            goals.append(goals_dir[i])
        mstar_path = None
        start_time = time.time()
        try:
            # C++ M* gets a fifth of the budget; the rest is kept for the fallback
            mstar_path = cpp_mstar.find_path(world, start_positions, goals, inflation, time_limit / 5.0)
        except OutOfTimeError:
            # M* timed out
            print("timeout")
            print('World', world)
            print('Start Pos', start_positions)
            print('Goals', goals)
        except NoSolutionError:
            print("nosol????")
            print('World', world)
            print('Start Pos', start_positions)
            print('Goals', goals)
        except:
            # cpp_mstar crash most likely... trying python mstar instead
            c_time = time.time() - start_time
            if c_time > time_limit:
                return mstar_path  # should be None
            try:
                mstar_path = od_mstar.find_path(world, start_positions, goals,
                                                inflation=inflation, time_limit=time_limit - c_time)
            except OutOfTimeError:
                print("timeout")
                print('World', world)
                print('Start Pos', start_positions)
                print('Goals', goals)
            except NoSolutionError:
                print("nosol????")
                print('World', world)
                print('Start Pos', start_positions)
                print('Goals', goals)
            except:
                print("Unknown bug?!")
        return mstar_path

    def _add_rendering_entry(self, entry, permanent=False):
        # permanent geoms persist across frames; onetime geoms are redrawn
        if permanent:
            self.viewer.add_geom(entry)
        else:
            self.viewer.add_onetime(entry)

    def _render(self, mode='human', close=False, screen_width=800, screen_height=800):
        """Render the world with the gym classic_control viewer; returns an RGB array."""

        def painter(state_map, agents_dict, goals_dict):
            def initColors(num_agents):
                # one distinct hue per agent, keyed by 1-based agent ID
                c = {a + 1: hsv_to_rgb(np.array([a / float(num_agents), 1, 1])) for a in range(num_agents)}
                return c

            def create_rectangle(x, y, width, height, fill):
                ps = [(x, y), ((x + width), y), ((x + width), (y + height)), (x, (y + height))]
                rect = rendering.FilledPolygon(ps)
                rect.set_color(fill[0], fill[1], fill[2])
                rect.add_attr(rendering.Transform())
                return rect

            def drawStar(centerX, centerY, diameter, numPoints, color):
                entry_list = []
                outerRad = diameter // 2
                innerRad = int(outerRad * 3 / 8)
                # fill the center of the star
                angleBetween = 2 * math.pi / numPoints  # angle between star points in radians
                for i in range(numPoints):
                    # p1 and p3 are on the inner radius, and p2 is the point
                    pointAngle = math.pi / 2 + i * angleBetween
                    p1X = centerX + innerRad * math.cos(pointAngle - angleBetween / 2)
                    p1Y = centerY - innerRad * math.sin(pointAngle - angleBetween / 2)
                    p2X = centerX + outerRad * math.cos(pointAngle)
                    p2Y = centerY - outerRad * math.sin(pointAngle)
                    p3X = centerX + innerRad * math.cos(pointAngle + angleBetween / 2)
                    p3Y = centerY - innerRad * math.sin(pointAngle + angleBetween / 2)
                    # draw the triangle for each tip.
                    poly = rendering.FilledPolygon([(p1X, p1Y), (p2X, p2Y), (p3X, p3Y)])
                    poly.set_color(color[0], color[1], color[2])
                    poly.add_attr(rendering.Transform())
                    entry_list.append(poly)
                return entry_list

            def create_circle(x, y, diameter, world_size, fill, resolution=20):
                # approximate a circle with a `resolution`-gon
                c = (x + world_size / 2, y + world_size / 2)
                dr = math.pi * 2 / resolution
                ps = []
                for i in range(resolution):
                    x = c[0] + math.cos(i * dr) * diameter / 2
                    y = c[1] + math.sin(i * dr) * diameter / 2
                    ps.append((x, y))
                circ = rendering.FilledPolygon(ps)
                circ.set_color(fill[0], fill[1], fill[2])
                circ.add_attr(rendering.Transform())
                return circ

            assert len(goals_dict) == len(agents_dict)
            num_agents = len(goals_dict)
            world_shape = state_map.shape
            world_size = screen_width / max(*world_shape)
            colors = initColors(num_agents)
            if self.viewer is None:
                # first frame: draw the static background (grey canvas + white
                # free-space strips, merged row-wise into rectangles) permanently
                self.viewer = rendering.Viewer(screen_width, screen_height)
                rect = create_rectangle(0, 0, screen_width, screen_height, (.6, .6, .6))
                self._add_rendering_entry(rect, permanent=True)
                for i in range(world_shape[0]):
                    start = 0
                    end = 1
                    scanning = False
                    write = False
                    for j in range(world_shape[1]):
                        if state_map[i, j] != -1 and not scanning:  # free
                            start = j
                            scanning = True
                        if (j == world_shape[1] - 1 or state_map[i, j] == -1) and scanning:
                            end = j + 1 if j == world_shape[1] - 1 else j
                            scanning = False
                            write = True
                        if write:
                            x = i * world_size
                            y = start * world_size
                            rect = create_rectangle(x, y, world_size, world_size * (end - start), (1, 1, 1))
                            self._add_rendering_entry(rect, permanent=True)
                            write = False
            for agent in range(1, num_agents + 1):
                # agent body (square) in the agent's color
                i, j = agents_dict[agent]
                x = i * world_size
                y = j * world_size
                color = colors[state_map[i, j]]
                rect = create_rectangle(x, y, world_size, world_size, color)
                self._add_rendering_entry(rect)
                # goal marker (circle) in the agent's color
                i, j = goals_dict[agent]
                x = i * world_size
                y = j * world_size
                color = colors[agent]
                circ = create_circle(x, y, world_size, world_size, color)
                self._add_rendering_entry(circ)
                if agents_dict[agent][0] == goals_dict[agent][0] and agents_dict[agent][1] == goals_dict[agent][1]:
                    # agent standing on its goal: overlay a black circle
                    color = (0, 0, 0)
                    circ = create_circle(x, y, world_size, world_size, color)
                    self._add_rendering_entry(circ)
            # if self.action_probs is not None:
            #     n_moves = 9 if self.IsDiagonal else 5
            #     for agent in range(1, num_agents + 1):
            #         # take the a_dist from the given data and draw it on the frame
            #         a_dist = self.action_probs[agent - 1]
            #         if a_dist is not None:
            #             for m in range(n_moves):
            #                 dx, dy = action2dir(m)
            #                 x = (agents_dict(agent)[0] + dx) * world_size
            #                 y = (agents_dict(agent)[1] + dy) * world_size
            #                 circ = create_circle(x, y, world_size, world_size, (0, 0, 0))
            #                 self._add_rendering_entry(circ)
            result = self.viewer.render(return_rgb_array=1)
            return result

        frame = painter(self.world.state, self.getPositions(), self.getGoals())
        return frame
from threading import Lock, Condition


class GroupLock:
    '''Queues asynchronus threads by group.

    Args:
        groups (key list list): a list of lists of keys which represent the
            keys for each thread in each group.
            e.g. -> [['thread1','thread2'], ['thread3']]'''

    def __init__(self, groups):
        self.groups = groups
        # index of the group whose turn it currently is
        self.activeGroup = 0
        # per-group map: member key -> has it released during this cycle?
        self.hasReleased = [{member: False for member in group} for group in groups]
        self.numGroups = len(groups)
        # one Condition per member; used to park members until their group's turn
        self._groupConditions = [{member: Condition(Lock()) for member in group} for group in groups]

    def acquire(self, group, id):
        '''Acquires the lock, blocks if not the thread's turn yet.
        A thread can acquire the lock once per group cycle.
        e.g. if GroupLock was initialized with [['thread1','thread2'], ['thread3']],
        a calls would be
            GroupLock.acquire(1, 'thread3')
            GroupLock.acquire(0, 'thread2')

        Args:
            group (int): number of calling thread's group
            id (key): key given to identify the thread (given in init)'''
        self._groupConditions[group][id].acquire()
        # Block when either (a) this member already released this cycle, or
        # (b) it is not this group's turn yet. Woken by release()/releaseAll().
        if self.hasReleased[group][id] or self.activeGroup != group:
            self._groupConditions[group][id].wait()

    def release(self, group, id):
        '''Releases the group lock. All threads in a lock must release
        before the next group's turn can begin.
        e.g. if GroupLock was initialized with [['thread1','thread2'], ['thread3']],
        a calls would be
            GroupLock.acquire(1, 'thread3')
            GroupLock.acquire(0, 'thread2')

        Args:
            group (int): number of calling thread's group
            id (key): key given to identify the thread(given in init)'''
        self._groupConditions[group][id].release()
        self.hasReleased[group][id] = True
        # Once every member of this group has released, rotate the turn to the
        # next group and wake all of its waiting members.
        if all(self.hasReleased[group].values()):
            self.hasReleased[group] = {member: False for member in self.hasReleased[group]}
            self.activeGroup = (self.activeGroup + 1) % len(self.groups)

            releasedGroup = self.activeGroup
            for memberCondition in self._groupConditions[releasedGroup]:
                self._groupConditions[releasedGroup][memberCondition].acquire()
                self._groupConditions[releasedGroup][memberCondition].notify_all()
                self._groupConditions[releasedGroup][memberCondition].release()

    def releaseAll(self):
        # Wake every waiting member of the currently active group without
        # advancing the turn (same notify loop as in release()).
        releasedGroup = self.activeGroup
        for memberCondition in self._groupConditions[releasedGroup]:
            self._groupConditions[releasedGroup][memberCondition].acquire()
            self._groupConditions[releasedGroup][memberCondition].notify_all()
            self._groupConditions[releasedGroup][memberCondition].release()
def isConnected(world0):
    """Return True iff every empty cell (value 0) of world0 is reachable
    from the first empty cell via 4-connected moves.

    :param world0: 2-D numpy array; -1 marks an obstacle, 0 an empty cell,
                   positive values are treated as traversable (e.g. agents).
                   The caller's array is not modified (a copy is filled).
    :return: bool
    """
    world0 = world0.copy()

    def firstFree(world0):
        # first cell containing exactly 0 (empty and unoccupied)
        for x in range(world0.shape[0]):
            for y in range(world0.shape[1]):
                if world0[x, y] == 0:
                    return x, y
        return None  # no empty cell at all

    def floodfill(world, i, j):
        # Iterative DFS flood fill. The original recursive version could
        # exhaust the interpreter stack on large maps even with a raised
        # sys.setrecursionlimit, so an explicit stack is used instead.
        # Marks every reachable non-obstacle cell as -1.
        sx, sy = world.shape[0], world.shape[1]
        stack = [(i, j)]
        while stack:
            x, y = stack.pop()
            if x < 0 or x >= sx or y < 0 or y >= sy:
                continue  # out of bounds
            if world[x, y] == -1:
                continue  # obstacle or already visited
            world[x, y] = -1
            stack.extend(((x + 1, y), (x, y + 1), (x - 1, y), (x, y - 1)))

    start = firstFree(world0)
    if start is None:
        # No empty cell: vacuously connected (the original crashed here).
        return True
    floodfill(world0, start[0], start[1])
    # any remaining 0 cell was unreachable from the first free cell
    if np.any(world0 == 0):
        return False
    else:
        return True


def GetConnectedRegion(world, regions_dict, x, y):
    '''returns a list of tuples of connected squares to the given tile
    this is memorized with a dict'''
    # NOTE: this was already iterative; the vestigial
    # sys.setrecursionlimit(1000000) call was removed since no recursion occurs.
    if (x, y) in regions_dict:
        return regions_dict[(x, y)]
    visited = set()
    sx, sy = world.shape[0], world.shape[1]
    work_list = [(x, y)]
    while len(work_list) > 0:
        (i, j) = work_list.pop()
        if i < 0 or i >= sx or j < 0 or j >= sy:  # out of bounds, return
            continue
        if world[i, j] == -1:
            continue  # crashes
        if world[i, j] > 0:
            # memoize agent cells against the (shared, still-growing) region set
            regions_dict[(i, j)] = visited
        if (i, j) in visited:
            continue
        visited.add((i, j))
        work_list.append((i + 1, j))
        work_list.append((i, j + 1))
        work_list.append((i - 1, j))
        work_list.append((i, j - 1))
    regions_dict[(x, y)] = visited
    return visited
def maze_generator(env_size=(10, 70), wall_components=(1, 8), obstacle_density=None, go_straight=0.8):
    """Return a nullary generator() producing random maze-like obstacle maps.

    :param env_size: (min, max) half-extent range for the (square) world
    :param wall_components: (min, max) number of wall segments grown per seed
    :param obstacle_density: [low, high] range sampled uniformly per map
    :param go_straight: probability of continuing a wall in its last direction
    """
    min_size, max_size = env_size
    min_component, max_component = wall_components
    # number of wall segments grown from each random seed cell
    num_components = np.random.randint(low=min_component, high=max_component + 1)
    assert min_size > 5
    # todo: write comments
    """
    num_agents, IsDiagonal,
    min_size: min length of the 'radius' of the map,
    max_size: max length of the 'radius' of the map,
    complexity, obstacle_density, go_straight,
    """
    if obstacle_density is None:
        obstacle_density = [0, 1]

    def maze(h, w, total_density=0):
        # Only odd shapes
        assert h > 0 and w > 0, "You are giving non-positive width and height"
        shape = ((h // 2) * 2 + 3, (w // 2) * 2 + 3)
        # Adjust num_components and density relative to maze world_size
        # density = int(density * ((shape[0] // 2) * (shape[1] // 2))) // 20  # world_size of components
        # number of random wall seeds, scaled so the target fill ratio is met
        density = int(shape[0] * shape[1] * total_density // num_components) if num_components != 0 else 0
        # Build actual maze
        Z = np.zeros(shape, dtype='int')
        # Fill borders
        Z[0, :] = Z[-1, :] = 1
        Z[:, 0] = Z[:, -1] = 1
        # Make aisles: from each seed, grow num_components wall cells on the
        # even lattice, filling the cell in between (classic maze carving).
        for i in range(density):
            x, y = np.random.randint(0, shape[1] // 2) * 2, np.random.randint(0, shape[0] // 2) * 2  # pick a random position
            Z[y, x] = 1
            last_dir = 0  # 0 until the first successful step fixes a direction
            for j in range(num_components):
                # candidate lattice neighbours two cells away
                neighbours = []
                if x > 1:
                    neighbours.append((y, x - 2))
                if x < shape[1] - 2:
                    neighbours.append((y, x + 2))
                if y > 1:
                    neighbours.append((y - 2, x))
                if y < shape[0] - 2:
                    neighbours.append((y + 2, x))
                if len(neighbours):
                    if last_dir == 0:
                        # no direction yet: pick uniformly
                        y_, x_ = neighbours[np.random.randint(0, len(neighbours))]
                        if Z[y_, x_] == 0:
                            last_dir = (y_ - y, x_ - x)
                            Z[y_, x_] = 1
                            Z[y_ + (y - y_) // 2, x_ + (x - x_) // 2] = 1  # fill cell in between
                            x, y = x_, y_
                    else:
                        # index_F: neighbour continuing last_dir (forward)
                        # index_B: neighbour reversing last_dir (backward, forbidden)
                        index_F = -1
                        index_B = -1
                        diff = []
                        for k in range(len(neighbours)):
                            diff.append((neighbours[k][0] - y, neighbours[k][1] - x))
                            if diff[k] == last_dir:
                                index_F = k
                            elif diff[k][0] + last_dir[0] == 0 and diff[k][1] + last_dir[1] == 0:
                                index_B = k
                        assert (index_B >= 0)
                        if (index_F + 1):
                            # forward exists: give it go_straight mass, split the
                            # rest over the sideways moves, forbid going back
                            p = (1 - go_straight) * np.ones(len(neighbours)) / (len(neighbours) - 2)
                            p[index_B] = 0
                            p[index_F] = go_straight
                            # assert(p.sum() == 1)
                        else:
                            if len(neighbours) == 1:
                                # NOTE(review): p = 1 (scalar) is then fed to
                                # np.random.choice(p=...), which expects a 1-D
                                # array — verify this branch is ever reached.
                                p = 1
                            else:
                                p = np.ones(len(neighbours)) / (len(neighbours) - 1)
                                p[index_B] = 0
                                # NOTE(review): exact float equality; may be flaky
                                assert (p.sum() == 1)
                        I = np.random.choice(range(len(neighbours)), p=p)
                        (y_, x_) = neighbours[I]
                        if Z[y_, x_] == 0:
                            last_dir = (y_ - y, x_ - x)
                            Z[y_, x_] = 1
                            Z[y_ + (y - y_) // 2, x_ + (x - x_) // 2] = 1
                            x, y = x_, y_
        return Z

    def generator():
        # randomize the world RANDOMIZE THE STATIC OBSTACLES
        # obstacle_density = np.random.triangular(obstacle_density[0], .33 * obstacle_density[0] + .66 * obstacle_density[1],
        #                                         obstacle_density[1])
        world_size = np.random.randint(min_size, max_size + 1)
        # obstacles become -1 (maze() marks walls with 1, hence the negation)
        world = -maze(int(world_size), int(world_size),
                      total_density=np.random.uniform(obstacle_density[0], obstacle_density[1]),
                      ).astype(int)
        world = np.array(world)
        return world, None  # no goals map: caller generates goals itself

    return generator


def manual_generator(state_map, goals_map=None):
    """Return a generator() that always yields the given fixed maps.

    :param state_map: 2-D array-like; -1 obstacles, at least 5x5
    :param goals_map: optional 2-D array-like of the same shape
    """
    state_map = np.array(state_map)
    assert state_map is not None
    assert len(state_map.shape) == 2
    assert min(state_map.shape) >= 5
    if goals_map is not None:
        goals_map = np.array(goals_map)
        assert goals_map.shape[0] == state_map.shape[0] and goals_map.shape[1] == state_map.shape[1]

    def generator():
        return state_map, goals_map

    return generator


if __name__ == "__main__":
    # visual smoke test: repeatedly draw random obstacle maps
    from matplotlib import pyplot as plt

    print("testing randomized map generation")
    plt.ion()
    for _ in range(1000):
        generator = maze_generator()
        world = generator()
        plt.imshow(world[0])  # obstacle map
        plt.pause(0.1)
    plt.ioff()
    plt.show()


def _get_one_hot_for_agent_direction(agent):
    """Retuns the agent's direction to one-hot encoding."""
    # assumes agent.direction is an int in 0..3 — TODO confirm against caller
    direction = np.zeros(4)
    direction[agent.direction] = 1
    return direction
""" def __init__(self): self.world = None self.NUM_CHANNELS = None def set_env(self, env): self.world = env def reset(self): """ Called after each environment reset. """ raise NotImplementedError() def get_many(self, handles): """ Called whenever an observation has to be computed for the `env` environment, for each agent with handle in the `handles` list. Parameters ---------- handles : list of handles, optional List with the handles of the agents for which to compute the observation vector. Returns ------- function A dictionary of observation structures, specific to the corresponding environment, with handles from `handles` as keys. """ raise NotImplementedError class DummyObserver(ObservationBuilder): """ DummyObservationBuilder class which returns dummy observations This is used in the evaluation service """ def __init__(self): super().__init__() self.observation_size = 1 def reset(self): pass def get_many(self, handles) -> bool: return True def get(self, handle: int = 0) -> bool: return True ================================================ FILE: Primal2Env.py ================================================ from Env_Builder import * from od_mstar3.col_set_addition import OutOfTimeError, NoSolutionError from od_mstar3 import od_mstar from GroupLock import Lock import random from gym import spaces ''' Observation: Action space: (Tuple) agent_id: positive integer action: {0:STILL, 1:MOVE_NORTH, 2:MOVE_EAST, 3:MOVE_SOUTH, 4:MOVE_WEST, 5:NE, 6:SE, 7:SW, 8:NW, 5,6,7,8 not used in non-diagonal world} Reward: ACTION_COST for each action, GOAL_REWARD when robot arrives at target ''' class Primal2Env(MAPFEnv): metadata = {"render.modes": ["human", "ansi"]} def __init__(self, observer, map_generator, num_agents=None, IsDiagonal=False, frozen_steps=0, isOneShot=False): super(Primal2Env, self).__init__(observer=observer, map_generator=map_generator, num_agents=num_agents, IsDiagonal=IsDiagonal, frozen_steps=frozen_steps, isOneShot=isOneShot) def _reset(self, 
    def give_moving_reward(self, agentID):
        """
        WARNING: ONLY CALL THIS AFTER MOVING AGENTS!
        Only the moving agent that encounters the collision is penalized!
        Standing still agents never get punishment.

        Writes the step reward into self.individual_rewards[agentID] based on
        the agent's post-move status: 0 = moved (action cost), 1 = reached
        goal (action cost + goal reward), otherwise collided (action cost +
        collision penalty).
        """
        collision_status = self.world.agents[agentID].status
        if collision_status == 0:
            reward = self.ACTION_COST
            self.isStandingOnGoal[agentID] = False
        elif collision_status == 1:
            reward = self.ACTION_COST + self.GOAL_REWARD
            self.isStandingOnGoal[agentID] = True
            self.world.agents[agentID].dones += 1
        else:
            reward = self.ACTION_COST + self.COLLISION_REWARD
            self.isStandingOnGoal[agentID] = False
        self.individual_rewards[agentID] = reward

    def listValidActions(self, agent_ID, agent_obs):
        """
        :return: action:int, pos:(int,int)
        in non-corridor states: return all valid actions
        in corridor states:
            if standing on goal: Only going 'forward' allowed
            if not standing on goal: only going 'forward' allowed
        """

        def get_last_pos(agentID, position):
            """
            get the last different position of an agent
            """
            history_list = copy.deepcopy(self.world.agents[agentID].position_history)
            history_list.reverse()
            assert (history_list[0] == self.world.getPos(agentID))
            history_list.pop(0)
            if history_list == []:
                return None
            for pos in history_list:
                if pos != position:
                    return pos
            return None

        available_actions = []
        pos = self.world.getPos(agent_ID)
        # if the agent is inside a corridor
        if self.world.corridor_map[pos[0], pos[1]][1] == 1:
            corridor_id = self.world.corridor_map[pos[0], pos[1]][0]
            if [pos[0], pos[1]] not in self.world.corridors[corridor_id]['StoppingPoints']:
                # mid-corridor: only allow moving forward (not back to the
                # previous cell) into a free cell
                possible_moves = self.world.blank_env_valid_neighbor(*pos)
                last_position = get_last_pos(agent_ID, pos)
                for possible_position in possible_moves:
                    if possible_position is not None and possible_position != last_position \
                            and self.world.state[possible_position[0], possible_position[1]] == 0:
                        available_actions.append(dir2action(tuple_minus(possible_position, pos)))
                    elif len(self.world.corridors[corridor_id]['EndPoints']) == 1 and possible_position is not None \
                            and possible_moves.count(None) == 3:
                        # dead-end corridor with a single exit: allow turning back
                        available_actions.append(dir2action(tuple_minus(possible_position, pos)))
                if not available_actions:
                    available_actions.append(0)
            else:
                # at a stopping point of the corridor
                possible_moves = self.world.blank_env_valid_neighbor(*pos)
                last_position = get_last_pos(agent_ID, pos)
                if last_position in self.world.corridors[corridor_id]['Positions']:
                    # just came out of the corridor interior: may wait here
                    available_actions.append(0)
                    for possible_position in possible_moves:
                        if possible_position is not None and possible_position != last_position \
                                and self.world.state[possible_position[0], possible_position[1]] == 0:
                            available_actions.append(dir2action(tuple_minus(possible_position, pos)))
                else:
                    for possible_position in possible_moves:
                        if possible_position is not None \
                                and self.world.state[possible_position[0], possible_position[1]] == 0:
                            available_actions.append(dir2action(tuple_minus(possible_position, pos)))
                    if not available_actions:
                        available_actions.append(0)
        else:
            available_actions.append(0)  # standing still always allowed
            num_actions = 4 + 1 if not self.IsDiagonal else 8 + 1
            for action in range(1, num_actions):
                direction = action2dir(action)
                new_pos = tuple_plus(direction, pos)
                lastpos = None
                blocking_valid = self.get_blocking_validity(agent_obs, agent_ID, new_pos)
                if not blocking_valid:
                    continue
                # NOTE(review): bare except silently swallows any failure here,
                # not just a too-short history — consider except IndexError.
                try:
                    lastpos = self.world.agents[agent_ID].position_history[-2]
                except:
                    pass
                if new_pos == lastpos:
                    continue  # no immediate backtracking
                if self.world.corridor_map[new_pos[0], new_pos[1]][1] == 1:
                    # entering a corridor: respect the traffic convention
                    valid = self.get_convention_validity(agent_obs, agent_ID, new_pos)
                    if not valid:
                        continue
                if self.world.state[new_pos[0], new_pos[1]] == 0:
                    available_actions.append(action)
        return available_actions
get_blocking_validity(self, observation, agent_ID, pos): top_left = (self.world.getPos(agent_ID)[0] - self.obs_size // 2, self.world.getPos(agent_ID)[1] - self.obs_size // 2) blocking_map = observation[0][5] if blocking_map[pos[0] - top_left[0], pos[1] - top_left[1]] == 1: return 0 return 1 def get_convention_validity(self, observation, agent_ID, pos): top_left = (self.world.getPos(agent_ID)[0] - self.obs_size // 2, self.world.getPos(agent_ID)[1] - self.obs_size // 2) blocking_map = observation[0][5] if blocking_map[pos[0] - top_left[0], pos[1] - top_left[1]] == -1: deltay_map = observation[0][7] if deltay_map[pos[0] - top_left[0], pos[1] - top_left[1]] > 0: return 1 elif deltay_map[pos[0] - top_left[0], pos[1] - top_left[1]] == 0: deltax_map = observation[0][6] if deltax_map[pos[0] - top_left[0], pos[1] - top_left[1]] > 0: return 1 else: return 0 elif deltay_map[pos[0] - top_left[0], pos[1] - top_left[1]] < 0: return 0 else: print('Weird') else: return 1 class DummyEnv(Primal2Env): def __init__(self, observer, map_generator, num_agents=None, IsDiagonal=False): super(DummyEnv, self).__init__(observer=observer, map_generator=map_generator, num_agents=num_agents, IsDiagonal=IsDiagonal) def _render(self, mode='human', close=False, screen_width=800, screen_height=800): pass if __name__ == '__main__': from matplotlib import pyplot from Primal2Observer import Primal2Observer from Map_Generator import maze_generator from Map_Generator import manual_generator state0 = [[-1, -1, -1, -1, -1, -1, -1], [-1, 1, -1, 0, 0, 0, -1], [-1, 0, -1, -1, -1, 0, -1], [-1, 0, 0, 0, -1, 0, -1], [-1, 0, -1, 0, 0, 0, -1], [-1, 2, -1, 0, 0, 0, -1], [-1, -1, -1, -1, -1, -1, -1]] n_agents = 3 env = Primal2Env(num_agents=n_agents, observer=Primal2Observer(observation_size=5), map_generator=maze_generator(env_size=(8, 10), wall_components=(3, 8), obstacle_density=(0.3, 0.7)), IsDiagonal=False) print(env.world.state) print(env.world.goals_map) c = 0 a = c b = c for j in range(0, 50): movement = {1: 
class Primal2Observer(ObservationBuilder):
    """
    obs shape: (8 + num_future_steps) * obs_size * obs_size
    map order: poss_map, goal_map, goals_map, obs_map, pathlength_map,
               blocking_map, deltax_map, deltay_map, astar maps
    """

    def __init__(self, observation_size=11, num_future_steps=3, printTime=False):
        """
        :param observation_size: side length of the square agent-centred window
        :param num_future_steps: number of predicted A* steps per other agent
        :param printTime: if True, print per-section timing in get_many()
        """
        super(Primal2Observer, self).__init__()
        self.observation_size = observation_size
        self.num_future_steps = num_future_steps
        self.NUM_CHANNELS = 8 + self.num_future_steps
        self.printTime = printTime

    def set_world(self, world):
        super().set_env(world)

    def get_next_positions(self, agent_id):
        """Cells reachable from the agent's position within two valid moves
        (excluding the agent's own cell), as [x, y] lists."""
        agent_pos = self.world.getPos(agent_id)
        positions = []
        current_pos = [agent_pos[0], agent_pos[1]]
        next_positions = self.world.blank_env_valid_neighbor(current_pos[0], current_pos[1])
        for position in next_positions:
            if position is not None and position != agent_pos:
                positions.append([position[0], position[1]])
                next_next_positions = self.world.blank_env_valid_neighbor(position[0], position[1])
                for pos in next_next_positions:
                    if pos is not None and pos not in positions and pos != agent_pos:
                        positions.append([pos[0], pos[1]])
        return positions

    def _get(self, agent_id, all_astar_maps):
        """Build the full observation tensor for one agent.

        :param agent_id: 1-based agent ID
        :param all_astar_maps: stacked predicted A* occupancy maps for all
                               agents (0-indexed on the first axis)
        :return: (state, [dx, dy, mag], timing) where state stacks the 8 base
                 channels plus num_future_steps A* channels
        """
        start_time = time.time()
        assert (agent_id > 0)
        agent_pos = self.world.getPos(agent_id)
        # window bounds of the agent-centred observation
        top_left = (agent_pos[0] - self.observation_size // 2, agent_pos[1] - self.observation_size // 2)
        bottom_right = (top_left[0] + self.observation_size, top_left[1] + self.observation_size)
        centre = (self.observation_size - 1) / 2
        obs_shape = (self.observation_size, self.observation_size)
        goal_map = np.zeros(obs_shape)
        poss_map = np.zeros(obs_shape)
        goals_map = np.zeros(obs_shape)
        obs_map = np.zeros(obs_shape)
        astar_map = np.zeros([self.num_future_steps, self.observation_size, self.observation_size])
        astar_map_unpadded = np.zeros([self.num_future_steps, self.world.state.shape[0], self.world.state.shape[1]])
        pathlength_map = np.zeros(obs_shape)
        deltax_map = np.zeros(obs_shape)
        deltay_map = np.zeros(obs_shape)
        blocking_map = np.zeros(obs_shape)
        time1 = time.time() - start_time
        start_time = time.time()

        # concatenate all_astar maps: sum the other agents' predicted
        # occupancies over the (clipped) observation window
        other_agents = list(range(self.world.num_agents))  # needs to be 0-indexed for numpy magic below
        other_agents.remove(agent_id - 1)  # 0-indexing again
        astar_map_unpadded = np.zeros([self.num_future_steps, self.world.state.shape[0], self.world.state.shape[1]])
        astar_map_unpadded[:self.num_future_steps,
                           max(0, top_left[0]):min(bottom_right[0], self.world.state.shape[0]),
                           max(0, top_left[1]):min(bottom_right[1], self.world.state.shape[1])] = \
            np.sum(all_astar_maps[other_agents, :self.num_future_steps,
                                  max(0, top_left[0]):min(bottom_right[0], self.world.state.shape[0]),
                                  max(0, top_left[1]):min(bottom_right[1], self.world.state.shape[1])], axis=0)
        time2 = time.time() - start_time
        start_time = time.time()

        # original layers from PRIMAL1
        visible_agents = []
        for i in range(top_left[0], top_left[0] + self.observation_size):
            for j in range(top_left[1], top_left[1] + self.observation_size):
                if i >= self.world.state.shape[0] or i < 0 or j >= self.world.state.shape[1] or j < 0:
                    # out of bounds, just treat as an obstacle
                    obs_map[i - top_left[0], j - top_left[1]] = 1
                    pathlength_map[i - top_left[0], j - top_left[1]] = -1
                    continue
                astar_map[:self.num_future_steps, i - top_left[0], j - top_left[1]] = \
                    astar_map_unpadded[:self.num_future_steps, i, j]
                if self.world.state[i, j] == -1:
                    # obstacles
                    obs_map[i - top_left[0], j - top_left[1]] = 1
                if self.world.state[i, j] == agent_id:
                    # agent's position
                    poss_map[i - top_left[0], j - top_left[1]] = 1
                if self.world.goals_map[i, j] == agent_id:
                    # agent's goal
                    goal_map[i - top_left[0], j - top_left[1]] = 1
                if self.world.state[i, j] > 0 and self.world.state[i, j] != agent_id:
                    # other agents' positions
                    visible_agents.append(self.world.state[i, j])
                    poss_map[i - top_left[0], j - top_left[1]] = 1
                # we can keep this map even if on goal,
                # since observation is computed after the refresh of new distance map
                pathlength_map[i - top_left[0], j - top_left[1]] = self.world.agents[agent_id].distanceMap[i, j]
        time3 = time.time() - start_time
        start_time = time.time()

        # project each visible agent's goal onto the window border if outside
        for agent in visible_agents:
            x, y = self.world.getGoal(agent)
            min_node = (max(top_left[0], min(top_left[0] + self.observation_size - 1, x)),
                        max(top_left[1], min(top_left[1] + self.observation_size - 1, y)))
            goals_map[min_node[0] - top_left[0], min_node[1] - top_left[1]] = 1
        # unit vector to own goal + clipped distance magnitude
        dx = self.world.getGoal(agent_id)[0] - agent_pos[0]
        dy = self.world.getGoal(agent_id)[1] - agent_pos[1]
        mag = (dx ** 2 + dy ** 2) ** .5
        if mag != 0:
            dx = dx / mag
            dy = dy / mag
        if mag > 60:
            mag = 60
        time4 = time.time() - start_time
        start_time = time.time()

        # corridor channels: for corridors reachable within two steps (and not
        # the one the agent is already in), fill blocking/delta maps at the
        # corridor stopping points
        current_corridor_id = -1
        current_corridor = self.world.corridor_map[self.world.getPos(agent_id)[0], self.world.getPos(agent_id)[1]][1]
        if current_corridor == 1:
            current_corridor_id = \
                self.world.corridor_map[self.world.getPos(agent_id)[0], self.world.getPos(agent_id)[1]][0]
        positions = self.get_next_positions(agent_id)
        for position in positions:
            cell_info = self.world.corridor_map[position[0], position[1]]
            if cell_info[1] == 1:
                corridor_id = cell_info[0]
                if corridor_id != current_corridor_id:
                    if len(self.world.corridors[corridor_id]['EndPoints']) == 1:
                        # dead-end corridor: only a blocking flag, no deltas
                        if [position[0], position[1]] == self.world.corridors[corridor_id]['StoppingPoints'][0]:
                            blocking_map[position[0] - top_left[0], position[1] - top_left[1]] = self.get_blocking(
                                corridor_id, 0, agent_id, 1)
                    elif [position[0], position[1]] == self.world.corridors[corridor_id]['StoppingPoints'][0]:
                        end_point_pos = self.world.corridors[corridor_id]['EndPoints'][0]
                        deltax_map[position[0] - top_left[0], position[1] - top_left[1]] = (self.world.corridors[
                            corridor_id]['DeltaX'][(end_point_pos[0], end_point_pos[1])])  # / max(mag, 1)
                        deltay_map[position[0] - top_left[0], position[1] - top_left[1]] = (self.world.corridors[
                            corridor_id]['DeltaY'][(end_point_pos[0], end_point_pos[1])])  # / max(mag, 1)
                        blocking_map[position[0] - top_left[0], position[1] - top_left[1]] = self.get_blocking(
                            corridor_id, 0, agent_id, 2)
                    elif [position[0], position[1]] == self.world.corridors[corridor_id]['StoppingPoints'][1]:
                        end_point_pos = self.world.corridors[corridor_id]['EndPoints'][1]
                        deltax_map[position[0] - top_left[0], position[1] - top_left[1]] = (self.world.corridors[
                            corridor_id]['DeltaX'][(end_point_pos[0], end_point_pos[1])])  # / max(mag, 1)
                        deltay_map[position[0] - top_left[0], position[1] - top_left[1]] = (self.world.corridors[
                            corridor_id]['DeltaY'][(end_point_pos[0], end_point_pos[1])])  # / max(mag, 1)
                        blocking_map[position[0] - top_left[0], position[1] - top_left[1]] = self.get_blocking(
                            corridor_id, 1, agent_id, 2)
                    else:
                        pass
        time5 = time.time() - start_time
        start_time = time.time()

        # rank-normalize path lengths into (0, 1]: equal distances share a rank
        free_spaces = list(np.argwhere(pathlength_map > 0))
        distance_list = []
        for arg in free_spaces:
            dist = pathlength_map[arg[0], arg[1]]
            if dist not in distance_list:
                distance_list.append(dist)
        distance_list.sort()
        # NOTE(review): raises ZeroDivisionError if no cell has a positive
        # path length — presumably impossible for a live agent; confirm.
        step_size = (1 / len(distance_list))
        for i in range(self.observation_size):
            for j in range(self.observation_size):
                dist_mag = pathlength_map[i, j]
                if dist_mag > 0:
                    index = distance_list.index(dist_mag)
                    pathlength_map[i, j] = (index + 1) * step_size
        state = np.array([poss_map, goal_map, goals_map, obs_map, pathlength_map,
                          blocking_map, deltax_map, deltay_map])
        state = np.concatenate((state, astar_map), axis=0)
        time6 = time.time() - start_time
        start_time = time.time()
        return state, [dx, dy, mag], np.array([time1, time2, time3, time4, time5, time6])

    def get_many(self, handles=None):
        """Build observations for the given agent handles (all agents if None).

        :return: {handle: [state, vector]}
        """
        observations = {}
        all_astar_maps = self.get_astar_map()
        if handles is None:
            handles = list(range(1, self.world.num_agents + 1))
        times = np.zeros((1, 6))
        for h in handles:
            # NOTE(review): the local name `time` shadows the time module
            # inside this function body (harmless here, but worth renaming)
            state, vector, time = self._get(h, all_astar_maps)
            observations[h] = [state, vector]
            times += time
        if self.printTime:
            print(times)
        return observations
# --- continuation of get_astar_map(): fill each agent's per-step path layers ---
            distance_map0, start_pos0 = self.world.agents[agentID].distanceMap, self.world.agents[agentID].position
            astar_path = get_single_astar_path(distance_map0, start_pos0, self.num_future_steps)
            if not len(astar_path) == self.num_future_steps:
                # this agent reaches its goal during future steps: continue the
                # rollout from the goal using the next task's distance map
                distance_map1, start_pos1 = self.world.agents[agentID].next_distanceMap, \
                                            self.world.agents[agentID].goal_pos
                astar_path.extend(
                    get_single_astar_path(distance_map1, start_pos1, self.num_future_steps - len(astar_path)))
            for i in range(self.num_future_steps - len(astar_path)):
                # only happen when min_distance not sufficient
                astar_path.extend([[astar_path[-1][-1]]])  # stay at the last pos
            assert len(astar_path) == self.num_future_steps
            for step in range(self.num_future_steps):
                for cell in astar_path[step]:
                    astar_maps[agentID][step, cell[0], cell[1]] = 1
        # stack agent maps into shape (num_agents, num_future_steps, h, w)
        return np.asarray([astar_maps[i] for i in range(1, self.world.num_agents + 1)])

    def get_blocking(self, corridor_id, reverse, agent_id, dead_end):
        """Return a blocking code for the given corridor as seen by agent_id.

        :param corridor_id: index into self.world.corridors
        :param reverse: walk the corridor's position list back-to-front
        :param dead_end: 1 for a single-endpoint (dead-end) corridor, 2 for a
            two-endpoint corridor
        :return: 1 if another agent inside the corridor blocks entry, -1 if the
            far endpoint of a two-ended corridor is occupied, else 0.
        """

        def get_last_pos(agentID, position):
            # Most recent historical position of agentID that differs from
            # `position` (None if there is none) — used to infer its heading.
            history_list = copy.deepcopy(self.world.agents[agentID].position_history)
            history_list.reverse()
            assert (history_list[0] == self.world.getPos(agentID))
            history_list.pop(0)
            if history_list == []:
                return None
            for pos in history_list:
                if pos != position:
                    return pos
            return None

        positions_to_check = copy.deepcopy(self.world.corridors[corridor_id]['Positions'])
        if reverse:
            positions_to_check.reverse()
        idx = -1
        for position in positions_to_check:
            idx += 1
            state = self.world.state[position[0], position[1]]
            if state > 0 and state != agent_id:  # another agent occupies this cell
                if dead_end == 1:
                    # any occupant of a dead-end corridor blocks it
                    return 1
                if idx == 0:
                    return 1
                last_pos = get_last_pos(state, position)
                if last_pos == None:
                    return 1
                if idx == len(positions_to_check) - 1:
                    # occupant at the far end: blocking only if it is not
                    # already moving away (came from the previous cell)
                    if last_pos != positions_to_check[idx - 1]:
                        return 1
                    break
                if last_pos == positions_to_check[idx + 1]:
                    # occupant is heading towards us
                    return 1
        if dead_end == 2:
            if not reverse:
                other_endpoint = self.world.corridors[corridor_id]['EndPoints'][1]
            else:
                other_endpoint =
self.world.corridors[corridor_id]['EndPoints'][0]
            state_endpoint = self.world.state[other_endpoint[0], other_endpoint[1]]
            if state_endpoint > 0 and state_endpoint != agent_id:
                return -1
        return 0


if __name__ == "__main__":
    pass

================================================
FILE: README.md
================================================
# PRIMAL_2: Pathfinding via Reinforcement and Imitation Multi_agent Learning - Lifelong

## Setting up Code
- cd into the od_mstar3 folder.
- python3 setup.py build_ext --inplace
- Check by going back to the root of the git folder, running python3 and "import cpp_mstar"

## Running Code
- Pick an appropriate number of meta agents via the variables `NUM_META_AGENTS` and `NUM_IL_META_AGENTS` in `parameters.py`
  - The number of RL meta-agents is implicitly defined by the difference between total meta-agents and IL meta-agents (`NUM_RL_META_AGENTS` = `NUM_META_AGENTS` - `NUM_IL_META_AGENTS`)
- Name the training run via `training_version` in `parameters.py`
- call `python driver.py`

## Frequently asked questions
1. I got `pyglet.canvas.xlib.NoSuchDisplayException: Cannot connect to "None"` when running on a server. Starting your code with `xvfb-run` will solve the problem. You may refer to https://stackoverflow.com/questions/60922076/pyglet-canvas-xlib-nosuchdisplayexception-cannot-connect-to-none-only-happens and relevant issues on Stack Overflow for help.
2. In the one-shot environment, why does an agent turn black after reaching a goal? In the one-shot scenario, the agent will 'disappear' (i.e., it is removed from the env). For visualization we keep it as black. Removing an agent that has achieved its goal is necessary, since the many narrow corridors in the map could otherwise cause unsolvable blocking and collisions. The one-shot scenario per se is just a way to test the optimality of the planner. By contrast, we do not remove any agents for any reason in the continuous env.

## Key Files
- `parameters.py` - Training parameters.
- `driver.py` - Driver of program.
Holds global network for A3C.
- `Runner.py` - Compute node for training. Maintains a single meta agent.
- `Worker.py` - A single agent in a simulation environment. Majority of episode computation, including gradient calculation, occurs here.
- `Ray_ACNet.py` - Defines network architecture.
- `Env_Builder.py` - Defines the lower level structure of the Lifelong MAPF environment for PRIMAL2, including the world and agents class.
- `Primal2Env.py` - Defines the high level environment class.
- `Map_Generator.py` - Algorithm used to generate worlds, parameterized by world size, obstacle density and wall components.
- `Primal2Observer.py` - Defines the decentralized observation of each PRIMAL2 agent.
- `Observer_Builder.py` - The high level observation class

## Other Links
- fully trained PRIMAL2 model in one-shot environment - https://www.dropbox.com/s/3nppkpy7psg0j5v/model_PRIMAL2_oneshot_3astarMaps.7z?dl=0
- fully trained PRIMAL2 model in LMAPF environment - https://www.dropbox.com/s/6wjq2bje4mcjywj/model_PRIMAL2_continuous_3astarMaps.7z?dl=0

## Authors

[Mehul Damani](damanimehul24@gmail.com)

[Zhiyao Luo](luozhiyao933@126.com)

[Emerson Wenzel](emersonwenzel@gmail.com)

[Guillaume Sartoretti](guillaume.sartoretti@gmail.com)

================================================
FILE: Ray_ACNet.py
================================================
import tensorflow as tf
import tensorflow.contrib.layers as layers
import numpy as np

# parameters for training
GRAD_CLIP = 10.0
KEEP_PROB1 = 1  # was 0.5
KEEP_PROB2 = 1  # was 0.7
RNN_SIZE = 512
GOAL_REPR_SIZE = 12


# Used to initialize weights for policy and value output layers (Do we need to use that?
Maybe not now)


def normalized_columns_initializer(std=1.0):
    """Initializer that rescales each column of a random normal matrix to
    L2-norm `std`; used for the policy and value output layers."""

    def _initializer(shape, dtype=None, partition_info=None):
        out = np.random.randn(*shape).astype(np.float32)
        out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
        return tf.constant(out)

    return _initializer


class ACNet:
    """A3C actor-critic network: conv/VGG feature stack + LSTM, with policy,
    value, and `valids` (per-action validity) heads, plus RL and imitation
    losses and gradient ops when TRAINING is set."""

    def __init__(self, scope, a_size, trainer, TRAINING, NUM_CHANNEL, OBS_SIZE, GLOBAL_NET_SCOPE, GLOBAL_NETWORK=False):
        with tf.variable_scope(str(scope) + '/qvalues'):
            self.trainer = trainer
            # The input size may require more work to fit the interface.
            self.inputs = tf.placeholder(shape=[None, NUM_CHANNEL, OBS_SIZE, OBS_SIZE], dtype=tf.float32)
            self.goal_pos = tf.placeholder(shape=[None, 3], dtype=tf.float32)
            # transpose NCHW -> NHWC for the conv layers below
            self.myinput = tf.transpose(self.inputs, perm=[0, 2, 3, 1])
            self.policy, self.value, self.state_out, self.state_in, self.state_init, self.valids = self._build_net(
                self.myinput, self.goal_pos, RNN_SIZE, TRAINING, a_size)
        if TRAINING:
            self.actions = tf.placeholder(shape=[None], dtype=tf.int32)
            self.actions_onehot = tf.one_hot(self.actions, a_size, dtype=tf.float32)
            self.train_valid = tf.placeholder(shape=[None, a_size], dtype=tf.float32)
            self.target_v = tf.placeholder(tf.float32, [None], 'Vtarget')
            self.advantages = tf.placeholder(shape=[None], dtype=tf.float32)
            # probability assigned to the action actually taken
            self.responsible_outputs = tf.reduce_sum(self.policy * self.actions_onehot, [1])
            self.train_value = tf.placeholder(tf.float32, [None])
            self.train_policy = tf.placeholder(tf.float32, [None])
            self.train_imitation = tf.placeholder(tf.float32, [None])  # NEED THIS
            self.optimal_actions = tf.placeholder(tf.int32, [None])  # NEED THIS
            self.optimal_actions_onehot = tf.one_hot(self.optimal_actions, a_size, dtype=tf.float32)  # NEED THIS
            self.train_valids = tf.placeholder(tf.float32, [None, 1])

            # Loss Functions
            self.value_loss = 0.1 * tf.reduce_mean(
                self.train_value * tf.square(self.target_v - tf.reshape(self.value, shape=[-1])))
            self.entropy = - tf.reduce_mean(self.policy * tf.log(tf.clip_by_value(self.policy, 1e-10, 1.0)))
            self.policy_loss = - 0.5 * tf.reduce_mean(
                self.train_policy * tf.log(tf.clip_by_value(self.responsible_outputs, 1e-15, 1.0)) * self.advantages)
            # NOTE(review): `train_valids` multiplies only the first
            # (positive) term of this cross-entropy — confirm whether it was
            # meant to gate both terms before changing anything here.
            self.valid_loss = - 16 * tf.reduce_mean(self.train_valids * tf.log(tf.clip_by_value(self.valids, 1e-10, 1.0)) * \
                                                    self.train_valid + tf.log(
                tf.clip_by_value(1 - self.valids, 1e-10, 1.0)) * (1 - self.train_valid))
            self.loss = self.value_loss + self.policy_loss + self.valid_loss - self.entropy * 0.01
            # IMPORTANT: 0 * self.value_loss is important so we can
            # fetch the gradients properly
            self.imitation_loss = 0 * self.value_loss + tf.reduce_mean(
                self.train_imitation * tf.keras.backend.categorical_crossentropy(self.optimal_actions_onehot,
                                                                                 self.policy))

            # Get gradients from local network using local losses and
            # normalize the gradients using clipping
            local_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope + '/qvalues')
            self.gradients = tf.gradients(self.loss, local_vars)
            self.var_norms = tf.global_norm(local_vars)
            self.grads, self.grad_norms = tf.clip_by_global_norm(self.gradients, GRAD_CLIP)
            # Apply local gradients to global network
            global_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, GLOBAL_NET_SCOPE + '/qvalues')
            if self.trainer:
                self.apply_grads = self.trainer.apply_gradients(zip(self.grads, global_vars))
            self.local_vars = local_vars

            # now the gradients for imitation loss
            self.i_gradients = tf.gradients(self.imitation_loss, local_vars)
            self.i_var_norms = tf.global_norm(local_vars)
            self.i_grads, self.i_grad_norms = tf.clip_by_global_norm(self.i_gradients, GRAD_CLIP)
            # Apply local gradients to global network
            if self.trainer:
                self.apply_imitation_grads = self.trainer.apply_gradients(zip(self.i_grads, global_vars))

        if GLOBAL_NETWORK:
            # the master network applies externally-computed gradients fed
            # through placeholders rather than computing its own
            print("\n\n\n\n is a global network\n\n\n\n")
            weightVars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
            self.tempGradients = [tf.placeholder(shape=w.get_shape(), dtype=tf.float32) for w in weightVars]
            self.apply_grads =
# --- continuation of ACNet.__init__ (global-network gradient application) ---
                self.trainer.apply_gradients(zip(self.tempGradients, weightVars))
            #self.clippedGrads, norms = tf.clip_by_global_norm(self.tempGradients, GRAD_CLIP)
            #self.apply_grads = self.trainer.apply_gradients(zip(self.clippedGrads, weightVars))

        print("Hello World... From " + str(scope))  # :)

    def _build_net(self, inputs, goal_pos, RNN_SIZE, TRAINING, a_size):
        """Build the network graph.

        :param inputs: NHWC observation tensor
        :param goal_pos: (dx, dy, magnitude) goal vector, shape [None, 3]
        :return: (policy, value, state_out, state_in, state_init, policy_sig)
        """

        def conv_mlp(inputs, kernal_size, output_size):
            # NOTE(review): defined but not referenced below — appears unused;
            # confirm before removing.
            inputs = tf.reshape(inputs, [-1, 1, kernal_size, 1])
            conv = layers.conv2d(inputs=inputs, padding="VALID", num_outputs=output_size,
                                 kernel_size=[1, kernal_size], stride=1,
                                 data_format="NHWC", weights_initializer=w_init, activation_fn=tf.nn.relu)
            return conv

        def VGG_Block(inputs):
            # three 3x3 convs followed by 2x2 max-pool
            def conv_2d(inputs, kernal_size, output_size):
                conv = layers.conv2d(inputs=inputs, padding="SAME", num_outputs=output_size,
                                     kernel_size=[kernal_size[0], kernal_size[1]], stride=1,
                                     data_format="NHWC", weights_initializer=w_init, activation_fn=tf.nn.relu)
                return conv

            conv1 = conv_2d(inputs, [3, 3], RNN_SIZE // 4)
            conv1a = conv_2d(conv1, [3, 3], RNN_SIZE // 4)
            conv1b = conv_2d(conv1a, [3, 3], RNN_SIZE // 4)
            pool1 = layers.max_pool2d(inputs=conv1b, kernel_size=[2, 2])
            return pool1

        w_init = layers.variance_scaling_initializer()
        vgg1 = VGG_Block(inputs)
        vgg2 = VGG_Block(vgg1)
        conv3 = layers.conv2d(inputs=vgg2, padding="VALID", num_outputs=RNN_SIZE - GOAL_REPR_SIZE,
                              kernel_size=[2, 2], stride=1, data_format="NHWC",
                              weights_initializer=w_init, activation_fn=None)
        flat = tf.nn.relu(layers.flatten(conv3))
        # goal vector gets its own small embedding, concatenated to the
        # conv features to fill out RNN_SIZE
        goal_layer = layers.fully_connected(inputs=goal_pos, num_outputs=GOAL_REPR_SIZE)
        hidden_input = tf.concat([flat, goal_layer], 1)
        h1 = layers.fully_connected(inputs=hidden_input, num_outputs=RNN_SIZE)
        d1 = layers.dropout(h1, keep_prob=KEEP_PROB1, is_training=TRAINING)
        h2 = layers.fully_connected(inputs=d1, num_outputs=RNN_SIZE, activation_fn=None)
        d2 = layers.dropout(h2, keep_prob=KEEP_PROB2, is_training=TRAINING)
        self.h3 = tf.nn.relu(d2 + hidden_input)  # residual connection around the MLP

        # Recurrent network for temporal dependencies
        lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(RNN_SIZE, state_is_tuple=True)
        c_init = np.zeros((1, lstm_cell.state_size.c), np.float32)
        h_init = np.zeros((1, lstm_cell.state_size.h), np.float32)
        state_init = [c_init, h_init]
        c_in = tf.placeholder(tf.float32, [1, lstm_cell.state_size.c])
        h_in = tf.placeholder(tf.float32, [1, lstm_cell.state_size.h])
        state_in = (c_in, h_in)  # NOTE(review): immediately overwritten by the LSTMStateTuple below
        rnn_in = tf.expand_dims(self.h3, [0])
        step_size = tf.shape(inputs)[:1]
        state_in = tf.nn.rnn_cell.LSTMStateTuple(c_in, h_in)
        lstm_outputs, lstm_state = tf.nn.dynamic_rnn(
            lstm_cell, rnn_in, initial_state=state_in, sequence_length=step_size, time_major=False)
        lstm_c, lstm_h = lstm_state
        state_out = (lstm_c[:1, :], lstm_h[:1, :])
        self.rnn_out = tf.reshape(lstm_outputs, [-1, RNN_SIZE])

        # output heads: softmax policy, sigmoid action-validity, scalar value
        policy_layer = layers.fully_connected(inputs=self.rnn_out, num_outputs=a_size,
                                              weights_initializer=normalized_columns_initializer(1. / float(a_size)),
                                              biases_initializer=None, activation_fn=None)
        policy = tf.nn.softmax(policy_layer)
        policy_sig = tf.sigmoid(policy_layer)
        value = layers.fully_connected(inputs=self.rnn_out, num_outputs=1,
                                       weights_initializer=normalized_columns_initializer(1.0),
                                       biases_initializer=None, activation_fn=None)

        return policy, value, state_out, state_in, state_init, policy_sig

================================================
FILE: Runner.py
================================================
import tensorflow as tf
import threading
import numpy as np
import ray
import os
from Ray_ACNet import ACNet
import GroupLock
from Primal2Env import Primal2Env
from Primal2Observer import Primal2Observer
from Map_Generator import maze_generator
from Worker import Worker
import scipy.signal as signal
from parameters import *


class Runner(object):
    """Actor object to start running simulation on workers.
Gradient computation is also executed on this object.""" def __init__(self, metaAgentID): # tensorflow must be imported within the constructor # because this class will be instantiated on a remote ray node import tensorflow as tf num_agents = NUM_THREADS self.env = Primal2Env(num_agents=num_agents, observer=Primal2Observer(observation_size=OBS_SIZE, num_future_steps=NUM_FUTURE_STEPS), map_generator=maze_generator( env_size=ENVIRONMENT_SIZE, wall_components=WALL_COMPONENTS, obstacle_density=OBSTACLE_DENSITY), IsDiagonal=DIAG_MVMT, isOneShot=False) self.metaAgentID = metaAgentID trainer = None self.localNetwork = ACNet(GLOBAL_NET_SCOPE,a_size,trainer,True,NUM_CHANNEL,OBS_SIZE,GLOBAL_NET_SCOPE=GLOBAL_NET_SCOPE, GLOBAL_NETWORK=False) self.currEpisode = int(metaAgentID) self.global_step = tf.placeholder(tf.float32) # first `NUM_IL_META_AGENTS` only use IL and don't need gpu/tensorflow if self.metaAgentID < NUM_IL_META_AGENTS: config = tf.ConfigProto(allow_soft_placement=True, device_count={"GPU": 0}) self.coord = None self.saver = None else: # set up tf session config = tf.ConfigProto(allow_soft_placement = True) config.gpu_options.per_process_gpu_memory_fraction = 1.0 / (NUM_META_AGENTS - NUM_IL_META_AGENTS + 1) config.gpu_options.allow_growth=True self.saver = tf.train.Saver(max_to_keep=1) self.coord = tf.train.Coordinator() self.sess = tf.Session(config=config) self.sess.run(tf.global_variables_initializer()) self.weightVars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) weights = self.sess.run(self.weightVars) self.weightSetters = [tf.placeholder(shape=w.shape, dtype=tf.float32) for w in weights] self.set_weights_ops = [var.assign(w) for var, w in zip(self.weightVars, self.weightSetters)] def set_weights(self, weights): feed_dict = { self.weightSetters[i]: w for i, w in enumerate(weights) } self.sess.run([self.set_weights_ops], feed_dict=feed_dict) def multiThreadedJob(self, episodeNumber): workers = [] worker_threads = [] workerNames = ["worker_" + str(i+1) 
for i in range(NUM_THREADS)] groupLock = GroupLock.GroupLock([workerNames, workerNames]) # TODO workersPerMetaAgent = NUM_THREADS for a in range(NUM_THREADS): agentID = a + 1 workers.append(Worker(self.metaAgentID, agentID, workersPerMetaAgent, self.env, self.localNetwork, self.sess, groupLock, learningAgent=True, global_step=self.global_step)) for w in workers: groupLock.acquire(0, w.name) worker_work = lambda: w.work(episodeNumber, self.coord, self.saver, self.weightVars) t = threading.Thread(target=(worker_work)) t.start() worker_threads.append(t) self.coord.join(worker_threads) jobResults = [] loss_metrics = [] perf_metrics = [] is_imitation = None for w in workers: if w.learningAgent: if JOB_TYPE == JOB_OPTIONS.getGradient: jobResults = jobResults + w.allGradients elif JOB_TYPE == JOB_OPTIONS.getExperience: jobResults.append(w.experienceBuffer) is_imitation = False # w.is_imitation loss_metrics.append(w.loss_metrics) perf_metrics.append(w.perf_metrics) avg_loss_metrics = list(np.mean(np.array(loss_metrics), axis=0)) if not is_imitation: # perf_metrics structure: # # w.perf_metrics = [ # episode_step_count, # episode_values, # episode_inv_count, # episode_stop_count, # episode_reward, # targets_done # ] perf_metrics = np.array(perf_metrics) avg_perf_metrics = np.mean(perf_metrics[:, :4], axis=0) episode_reward = np.sum(perf_metrics[:,4]) targets_done = np.sum(perf_metrics[:, 5]) avg_perf_metrics = list(avg_perf_metrics) + [episode_reward, targets_done] all_metrics = avg_loss_metrics + avg_perf_metrics else: all_metrics = avg_loss_metrics return jobResults, all_metrics, is_imitation def imitationLearningJob(self, episodeNumber): workersPerMetaAgent = NUM_THREADS agentID=None groupLock = None worker = Worker(self.metaAgentID, agentID, workersPerMetaAgent, self.env, self.localNetwork, self.sess, None, learningAgent=True, global_step=self.global_step) gradients, losses = worker.imitation_learning_only(episodeNumber) mean_imitation_loss = [np.mean(losses)] 
# --- continuation of Runner.imitationLearningJob ---
        is_imitation = True
        return gradients, mean_imitation_loss, is_imitation

    def job(self, global_weights, episodeNumber):
        """Remote entry point: sync weights from the master, run one IL or RL
        episode, and return (jobResults, metrics, info)."""
        print("starting episode {} on metaAgent {}".format(episodeNumber, self.metaAgentID))
        # set the local weights to the global weight values from the master network
        self.set_weights(global_weights)

        # set first `NUM_IL_META_AGENTS` to perform imitation learning
        if self.metaAgentID < NUM_IL_META_AGENTS:
            print("running imitation job")
            jobResults, metrics, is_imitation = self.imitationLearningJob(episodeNumber)
        elif COMPUTE_TYPE == COMPUTE_OPTIONS.multiThreaded:
            jobResults, metrics, is_imitation = self.multiThreadedJob(episodeNumber)
        elif COMPUTE_TYPE == COMPUTE_OPTIONS.synchronous:
            # deliberately hard-fails: synchronous mode is not implemented
            print("not implemented")
            assert (1 == 0)

        # Get the job results from the learning agents
        # and send them back to the master network
        info = {
            "id": self.metaAgentID,
            "episode_number": episodeNumber,
            "is_imitation": is_imitation
        }
        return jobResults, metrics, info


# RL runners share the GPUs; IL runners are CPU-only
@ray.remote(num_cpus=3, num_gpus=1.0 / (NUM_META_AGENTS - NUM_IL_META_AGENTS + 1))
class RLRunner(Runner):
    def __init__(self, metaAgentID):
        super().__init__(metaAgentID)


@ray.remote(num_cpus=1, num_gpus=0)
class imitationRunner(Runner):
    def __init__(self, metaAgentID):
        super().__init__(metaAgentID)

================================================
FILE: Worker.py
================================================
import scipy.signal as signal
import copy
import numpy as np
import ray
import os
import imageio
from Env_Builder import *
from Map_Generator import maze_generator
from parameters import *


# helper functions
def discount(x, gamma):
    # discounted cumulative sum: y[t] = x[t] + gamma * y[t+1]
    return signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]


class Worker():
    def __init__(self, metaAgentID, workerID, workers_per_metaAgent, env, localNetwork, sess, groupLock,
                 learningAgent, global_step):
        self.metaAgentID = metaAgentID
        self.agentID = workerID  # 1-indexed; None for a pure-IL worker
        self.name = "worker_" + str(workerID)
        self.num_workers = workers_per_metaAgent
        self.global_step = global_step
        self.nextGIF = 0  # next episode number at which to record a GIF
# --- continuation of Worker.__init__ ---
        self.env = env
        self.local_AC = localNetwork
        self.groupLock = groupLock
        self.learningAgent = learningAgent
        self.sess = sess
        self.allGradients = []

    def calculateImitationGradient(self, rollout, episode_count):
        """Compute imitation-loss gradients for one agent's rollout.

        :return: ([imitation_loss], gradients)
        """
        rollout = np.array(rollout, dtype=object)
        # we calculate the loss differently for imitation
        # if imitation=True the rollout is assumed to have different dimensions:
        # [o[0],o[1],optimal_actions]
        temp_actions = np.stack(rollout[:, 2])
        rnn_state = self.local_AC.state_init
        # NOTE(review): the optimal actions are fed into target_v/train_value;
        # imitation_loss multiplies value_loss by 0, so these feeds appear to
        # exist only to keep the fetched graph well-defined — confirm before
        # changing.
        feed_dict = {self.global_step : episode_count,
                     self.local_AC.inputs : np.stack(rollout[:, 0]),
                     self.local_AC.goal_pos : np.stack(rollout[:, 1]),
                     self.local_AC.optimal_actions: np.stack(rollout[:, 2]),
                     self.local_AC.state_in[0] : rnn_state[0],
                     self.local_AC.state_in[1] : rnn_state[1],
                     self.local_AC.train_imitation: (rollout[:, 3]),
                     self.local_AC.target_v : np.stack(temp_actions),
                     self.local_AC.train_value : temp_actions,
                     }
        v_l, i_l, i_grads = self.sess.run([self.local_AC.value_loss,
                                           self.local_AC.imitation_loss,
                                           self.local_AC.i_grads],
                                          feed_dict=feed_dict)
        return [i_l], i_grads

    def calculateGradient(self, rollout, bootstrap_value, episode_count, rnn_state0):
        # rollout rows: ([s,a,r,s1,v[0,0]] plus valids, goal, train flags)
        rollout = np.array(rollout, dtype=object)
        observations = rollout[:, 0]
        goals = rollout[:, -3]
        actions = rollout[:, 1]
        rewards = rollout[:, 2]
        values = rollout[:, 4]
        valids = rollout[:, 5]
        train_value = rollout[:, -2]
        train_policy = rollout[:, -1]

        # Here we take the rewards and values from the rollout, and use them to
        # generate the advantage and discounted returns.
        # (With bootstrapping)
        # The advantage function uses "Generalized Advantage Estimation"
        self.rewards_plus = np.asarray(rewards.tolist() + [bootstrap_value])
        discounted_rewards = discount(self.rewards_plus, gamma)[:-1]
        self.value_plus = np.asarray(values.tolist() + [bootstrap_value])
        advantages = rewards + gamma * self.value_plus[1:] - self.value_plus[:-1]
        advantages = discount(advantages, gamma)

        # NOTE(review): sampleInd is computed but never used below — confirm
        # whether experience subsampling was intended.
        num_samples = min(EPISODE_SAMPLES, len(advantages))
        sampleInd = np.sort(np.random.choice(advantages.shape[0], size=(num_samples,), replace=False))

        feed_dict = {
            self.global_step : episode_count,
            self.local_AC.target_v : np.stack(discounted_rewards),
            self.local_AC.inputs : np.stack(observations),
            self.local_AC.goal_pos : np.stack(goals),
            self.local_AC.actions : actions,
            self.local_AC.train_valid : np.stack(valids),
            self.local_AC.advantages : advantages,
            self.local_AC.train_value : train_value,
            self.local_AC.state_in[0] : rnn_state0[0],
            self.local_AC.state_in[1] : rnn_state0[1],
            self.local_AC.train_policy: train_policy,
            self.local_AC.train_valids: np.vstack(train_policy)
        }

        v_l, p_l, valid_l, e_l, g_n, v_n, grads = self.sess.run([self.local_AC.value_loss,
                                                                 self.local_AC.policy_loss,
                                                                 self.local_AC.valid_loss,
                                                                 self.local_AC.entropy,
                                                                 self.local_AC.grad_norms,
                                                                 self.local_AC.var_norms,
                                                                 self.local_AC.grads],
                                                                feed_dict=feed_dict)

        return [v_l, p_l, valid_l, e_l, g_n, v_n], grads

    def imitation_learning_only(self, episode_count):
        """Run one full IL episode and return per-agent (gradients, losses)."""
        self.env._reset()
        rollouts, targets_done = self.parse_path(episode_count)
        if rollouts is None:
            # no expert solution could be found at all
            return None, 0

        gradients = []
        losses = []
        for i in range(self.num_workers):
            train_buffer = rollouts[i]
            imitation_loss, grads = self.calculateImitationGradient(train_buffer, episode_count)
            gradients.append(grads)
            losses.append(imitation_loss)
        return gradients, losses

    def run_episode_multithreaded(self, episode_count, coord):
        # RL-only entry point; IL meta-agents must never reach this code.
        if self.metaAgentID < NUM_IL_META_AGENTS:
            assert (1 == 0)
            # print("THIS CODE SHOULD NOT TRIGGER")
            self.is_imitation = True
# --- Worker.run_episode_multithreaded (continued): main RL rollout loop ---
            self.imitation_learning_only()

        # shared episode statistics, keyed by metaAgentID — presumably defined
        # at module level by the driver; TODO confirm
        global episode_lengths, episode_mean_values, episode_invalid_ops, episode_stop_ops, episode_rewards, episode_finishes

        num_agents = self.num_workers

        with self.sess.as_default(), self.sess.graph.as_default():
            while self.shouldRun(coord, episode_count):
                episode_buffer, episode_values = [], []
                episode_reward = episode_step_count = episode_inv_count = targets_done = episode_stop_count = 0

                # Initial state from the environment
                if self.agentID == 1:
                    # only agent 1 touches the env; others read the shared dicts
                    self.env._reset()
                    joint_observations[self.metaAgentID] = self.env._observe()

                self.synchronize()  # synchronize starting time of the threads

                # Get Information For Each Agent
                validActions = self.env.listValidActions(self.agentID,
                                                         joint_observations[self.metaAgentID][self.agentID])
                s = joint_observations[self.metaAgentID][self.agentID]

                rnn_state = self.local_AC.state_init
                rnn_state0 = rnn_state

                self.synchronize()  # synchronize starting time of the threads
                swarm_reward[self.metaAgentID] = 0
                swarm_targets[self.metaAgentID] = 0

                episode_rewards[self.metaAgentID] = []
                episode_finishes[self.metaAgentID] = []
                episode_lengths[self.metaAgentID] = []
                episode_mean_values[self.metaAgentID] = []
                episode_invalid_ops[self.metaAgentID] = []
                episode_stop_ops[self.metaAgentID] = []

                # ===================== start training =====================
                # RL
                if True:
                    # prepare to save GIF
                    saveGIF = False
                    global GIFS_FREQUENCY_RL
                    if OUTPUT_GIFS and self.agentID == 1 and ((not TRAINING) or (episode_count >= self.nextGIF)):
                        saveGIF = True
                        self.nextGIF = episode_count + GIFS_FREQUENCY_RL
                        GIF_episode = int(episode_count)
                        GIF_frames = [self.env._render()]

                    # start RL
                    self.env.finished = False
                    while not self.env.finished:
                        # forward pass: policy distribution, value, new LSTM state
                        a_dist, v, rnn_state = self.sess.run([self.local_AC.policy,
                                                              self.local_AC.value,
                                                              self.local_AC.state_out],
                                                             feed_dict={self.local_AC.inputs : [s[0]],  # state
                                                                        self.local_AC.goal_pos : [s[1]],  # goal vector
                                                                        self.local_AC.state_in[0]: rnn_state[0],
                                                                        self.local_AC.state_in[1]: rnn_state[1]})

                        skipping_state = False
                        train_policy = train_val = 1

                        if not skipping_state:
                            # penalize (via train_val=0) picking an invalid argmax action
                            if not (np.argmax(a_dist.flatten()) in validActions):
                                episode_inv_count += 1
                                train_val = 0
                            train_valid = np.zeros(a_size)
                            train_valid[validActions] = 1

                            # sample the executed action from the renormalized
                            # distribution over valid actions only
                            valid_dist = np.array([a_dist[0, validActions]])
                            valid_dist /= np.sum(valid_dist)

                            a = validActions[np.random.choice(range(valid_dist.shape[1]), p=valid_dist.ravel())]
                            joint_actions[self.metaAgentID][self.agentID] = a
                            if a == 0:  # action 0 = stay in place
                                episode_stop_count += 1

                        # Make A Single Agent Gather All Information
                        self.synchronize()

                        if self.agentID == 1:
                            # agent 1 steps the whole env and fans results out
                            all_obs, all_rewards = self.env.step_all(joint_actions[self.metaAgentID])
                            for i in range(1, self.num_workers + 1):
                                joint_observations[self.metaAgentID][i] = all_obs[i]
                                joint_rewards[self.metaAgentID][i] = all_rewards[i]
                                joint_done[self.metaAgentID][i] = (self.env.world.agents[i].status == 1)
                            if saveGIF and self.agentID == 1:
                                GIF_frames.append(self.env._render())

                        self.synchronize()  # synchronize threads

                        # Get observation,reward, valid actions for each agent
                        s1 = joint_observations[self.metaAgentID][self.agentID]
                        r = copy.deepcopy(joint_rewards[self.metaAgentID][self.agentID])
                        validActions = self.env.listValidActions(self.agentID, s1)

                        self.synchronize()

                        # Append to Appropriate buffers
                        if not skipping_state:
                            episode_buffer.append(
                                [s[0], a, joint_rewards[self.metaAgentID][self.agentID], s1, v[0, 0], train_valid,
                                 s[1], train_val, train_policy])
                            episode_values.append(v[0, 0])
                        episode_reward += r
                        episode_step_count += 1

                        # Update State
                        s = s1

                        # If the episode hasn't ended, but the experience buffer is full, then we
                        # make an update step using that experience rollout.
                        if (len(episode_buffer) > 1) and (
                                (len(episode_buffer) % EXPERIENCE_BUFFER_SIZE == 0) or joint_done[self.metaAgentID][
                            self.agentID] or episode_step_count == max_episode_length):
                            # Since we don't know what the true final return is,
                            # we "bootstrap" from our current value estimation.
                            if len(episode_buffer) >= EXPERIENCE_BUFFER_SIZE:
                                train_buffer = episode_buffer[-EXPERIENCE_BUFFER_SIZE:]
                            else:
                                train_buffer = episode_buffer[:]

                            if joint_done[self.metaAgentID][self.agentID]:
                                s1Value = 0  # Terminal state
                                episode_buffer = []
                                joint_done[self.metaAgentID][self.agentID] = False
                                targets_done += 1
                            else:
                                s1Value = self.sess.run(self.local_AC.value,
                                                        feed_dict={self.local_AC.inputs : np.array([s[0]]),
                                                                   self.local_AC.goal_pos : [s[1]],
                                                                   self.local_AC.state_in[0]: rnn_state[0],
                                                                   self.local_AC.state_in[1]: rnn_state[1]})[0, 0]

                            self.loss_metrics, grads = self.calculateGradient(train_buffer, s1Value,
                                                                              episode_count, rnn_state0)

                            self.allGradients.append(grads)

                            rnn_state0 = rnn_state

                        self.synchronize()
                        # finish condition: reach max-len or all agents are done under one-shot mode
                        if episode_step_count >= max_episode_length:
                            break

                    episode_lengths[self.metaAgentID].append(episode_step_count)
                    episode_mean_values[self.metaAgentID].append(np.nanmean(episode_values))
                    episode_invalid_ops[self.metaAgentID].append(episode_inv_count)
                    episode_stop_ops[self.metaAgentID].append(episode_stop_count)
                    swarm_reward[self.metaAgentID] += episode_reward
                    swarm_targets[self.metaAgentID] += targets_done

                    self.synchronize()
                    if self.agentID == 1:
                        episode_rewards[self.metaAgentID].append(swarm_reward[self.metaAgentID])
                        episode_finishes[self.metaAgentID].append(swarm_targets[self.metaAgentID])

                        if saveGIF:
                            make_gif(np.array(GIF_frames),
                                     '{}/episode_{:d}_{:d}_{:.1f}.gif'.format(gifs_path, GIF_episode,
                                                                              episode_step_count,
                                                                              swarm_reward[self.metaAgentID]))

                    self.synchronize()

                    perf_metrics = np.array([
                        episode_step_count,
                        np.nanmean(episode_values),
                        episode_inv_count,
                        episode_stop_count,
                        episode_reward,
                        targets_done
                    ])

                    assert len(self.allGradients) > 0, 'Empty gradients at end of RL episode?!'
        return perf_metrics

    def synchronize(self):
        """Thread barrier for all worker threads of one meta-agent.

        Implemented with two alternating GroupLock groups; ``self.lock_bool``
        remembers which of the two groups this thread currently holds, so each
        call releases the held group and blocks on the other one.
        """
        # handy thing for keeping track of which to release and acquire
        if not hasattr(self, "lock_bool"):
            self.lock_bool = False
        self.groupLock.release(int(self.lock_bool), self.name)
        self.groupLock.acquire(int(not self.lock_bool), self.name)
        self.lock_bool = not self.lock_bool

    def work(self, currEpisode, coord, saver, allVariables):
        '''
        Interacts with the environment. The agent gets either gradients or experience buffer

        Results are left on the instance: episode statistics in
        self.perf_metrics, computed gradients in self.allGradients.
        '''
        self.currEpisode = currEpisode

        if COMPUTE_TYPE == COMPUTE_OPTIONS.multiThreaded:
            self.perf_metrics = self.run_episode_multithreaded(currEpisode, coord)
        else:
            # only the multi-threaded compute path is implemented
            print("not implemented")
            assert (1 == 0)

        # gradients are accessed by the runner in self.allGradients
        return

    # Used for imitation learning
    def parse_path(self, episode_count):
        """needed function to take the path generated from M* and create the
        observations and actions for the agent

        path: the exact path output by M*, assuming the correct number of agents
        returns: the list of rollouts for the "episode":
                 list of length num_agents with each sublist a list of tuples
                 (observation[0], observation[1], optimal_action, train_imitation)
        """
        result = [[] for i in range(self.num_workers)]
        actions = {}
        o = {}
        train_imitation = {}
        targets_done = 0
        saveGIF = False
        if np.random.rand() < IL_GIF_PROB:
            saveGIF = True
        if saveGIF and OUTPUT_IL_GIFS:
            GIF_frames = [self.env._render()]
        single_done = False
        new_call = False
        new_MSTAR_call = False
        all_obs = self.env._observe()
        # agents are 1-indexed in the environment
        for agentID in range(1, self.num_workers + 1):
            o[agentID] = all_obs[agentID]
            train_imitation[agentID] = 1
        step_count = 0
        while step_count <= IL_MAX_EP_LENGTH:
            path = self.env.expert_until_first_goal()
            if path is None:  # solution not exists
                if step_count != 0:
                    return result, targets_done
                # print('Failed initially')
                return None, 0
            none_on_goal = True
            path_step = 1
            # NOTE(review): none_on_goal is never set to False in this loop,
            # so the loop exits only via the step-count bound or an early
            # return -- confirm this is intended.
            while none_on_goal and step_count <= IL_MAX_EP_LENGTH:
                completed_agents = []
                start_positions = []
                goals = []
                # convert the next joint state of the expert path into
                # per-agent discrete actions
                for i in range(self.num_workers):
                    agent_id = i + 1
                    next_pos = path[path_step][i]
                    diff = tuple_minus(next_pos, self.env.world.getPos(agent_id))
                    actions[agent_id] = dir2action(diff)
                all_obs, _ = self.env.step_all(actions)
                for i in range(self.num_workers):
                    agent_id = i + 1
                    result[i].append(
                        [o[agent_id][0], o[agent_id][1], actions[agent_id], train_imitation[agent_id]])
                    if self.env.world.agents[agent_id].status == 1:
                        completed_agents.append(i)
                        targets_done += 1
                        single_done = True
                        # full M* replan only every MSTAR_CALL_FREQUENCY goals;
                        # otherwise patch the old plan with the priority planner
                        if targets_done % MSTAR_CALL_FREQUENCY == 0:
                            new_MSTAR_call = True
                        else:
                            new_call = True
                if saveGIF and OUTPUT_IL_GIFS:
                    GIF_frames.append(self.env._render())
                if single_done and new_MSTAR_call:
                    path = self.env.expert_until_first_goal()
                    if path is None:
                        return result, targets_done
                    path_step = 0
                elif single_done and new_call:
                    path = path[path_step:]
                    path = [list(state) for state in path]
                    for finished_agent in completed_agents:
                        path = merge_plans(path, [None] * len(path), finished_agent)
                    try:
                        # drop duplicated trailing states left by finished agents
                        while path[-1] == path[-2]:
                            path = path[:-1]
                    except:
                        # NOTE(review): bare except -- presumably IndexError
                        # when fewer than two states remain; consider narrowing.
                        assert (len(path) <= 2)
                    start_positions_dir = self.env.getPositions()
                    goals_dir = self.env.getGoals()
                    for i in range(1, self.env.world.num_agents + 1):
                        start_positions.append(start_positions_dir[i])
                        goals.append(goals_dir[i])
                    world = self.env.getObstacleMap()
                    # print('OLD PATH', path)
                    # print('CURRENT POSITIONS', start_positions)
                    # print('CURRENT GOALS',goals)
                    # print('WORLD',world)
                    try:
                        path = priority_planner(world, tuple(start_positions), tuple(goals), path)
                    except:
                        # NOTE(review): bare except -- priority planner failed;
                        # fall back to a fresh M* call.
                        path = self.env.expert_until_first_goal()
                        if path is None:
                            return result, targets_done
                        path_step = 0
                o = all_obs
                step_count += 1
                path_step += 1
                new_call = False
                new_MSTAR_call = False
        if saveGIF and OUTPUT_IL_GIFS:
            make_gif(np.array(GIF_frames),
                     '{}/episodeIL_{}.gif'.format(gifs_path, episode_count))
        return result, targets_done

    def shouldRun(self, coord, episode_count=None):
        # NOTE(review): no branch for TRAINING == False is visible here; in
        # that case the method falls through and returns None (falsy).
        if TRAINING:
            return not coord.should_stop()
================================================
FILE: driver.py
================================================
import numpy
as np
import tensorflow as tf
import os
import ray
from Ray_ACNet import ACNet
from Runner import imitationRunner, RLRunner
from parameters import *
import random

# single GPU shared between this driver (global network) and the RL runners
ray.init(num_gpus=1)
tf.reset_default_graph()
print("Hello World")
config = tf.ConfigProto(allow_soft_placement = True)
# split GPU memory between the driver and the RL meta-agents
config.gpu_options.per_process_gpu_memory_fraction = 1.0 / (NUM_META_AGENTS - NUM_IL_META_AGENTS + 1)
config.gpu_options.allow_growth=True

# Create directories
if not os.path.exists(model_path):
    os.makedirs(model_path)
if not os.path.exists(gifs_path):
    os.makedirs(gifs_path)

# episode counter fed into the (optionally decaying) learning-rate schedule
global_step = tf.placeholder(tf.float32)
if ADAPT_LR:
    # computes LR_Q/sqrt(ADAPT_COEFF*steps+1)
    # we need the +1 so that lr at step 0 is defined
    lr = tf.divide(tf.constant(LR_Q),
                   tf.sqrt(tf.add(1., tf.multiply(tf.constant(ADAPT_COEFF), global_step))))
else:
    lr = tf.constant(LR_Q)


def apply_gradients(global_network, gradients, sess, curr_episode):
    """Apply one worker's gradient list to the global network.

    curr_episode is fed as global_step so the learning-rate schedule
    decays with training progress.
    """
    feed_dict = {
        global_network.tempGradients[i]: g for i, g in enumerate(gradients)
    }
    feed_dict[global_step] = curr_episode
    sess.run([global_network.apply_grads], feed_dict=feed_dict)


def writeImitationDataToTensorboard(global_summary, metrics, curr_episode):
    """Log the imitation (behavior-cloning) loss of one IL episode."""
    summary = tf.Summary()
    summary.value.add(tag='Losses/Imitation loss', simple_value=metrics[0])
    global_summary.add_summary(summary, curr_episode)
    global_summary.flush()


def writeEpisodeRatio(global_summary, numIL, numRL, sess, curr_episode):
    """Log running IL/RL episode counts, their ratio, and the current LR.

    NOTE(review): if the first completed job carried no results, both
    counters are still 0 here and the ratio divides by zero -- consider
    guarding the denominator.
    """
    summary = tf.Summary()
    current_learning_rate = sess.run(lr, feed_dict={global_step: curr_episode})
    RL_IL_Ratio = numRL / (numRL + numIL)
    summary.value.add(tag='Perf/Num IL Ep.', simple_value=numIL)
    summary.value.add(tag='Perf/Num RL Ep.', simple_value=numRL)
    summary.value.add(tag='Perf/ RL IL ratio Ep.', simple_value=RL_IL_Ratio)
    summary.value.add(tag='Perf/Learning Rate', simple_value=current_learning_rate)
    global_summary.add_summary(summary, curr_episode)
    global_summary.flush()


def writeToTensorBoard(global_summary, tensorboardData, curr_episode, plotMeans=True):
    """Write one window of RL episode metrics to tensorboard.

    Each row in tensorboardData represents an episode; each column is a
    specific metric.  With plotMeans the window is averaged column-wise,
    otherwise only the first episode of the window is written.
    """
    if plotMeans == True:
        tensorboardData = np.array(tensorboardData)
        tensorboardData = list(np.mean(tensorboardData, axis=0))
        valueLoss, policyLoss, validLoss, entropyLoss, gradNorm, varNorm, \
            mean_length, mean_value, mean_invalid, \
            mean_stop, mean_reward, mean_finishes = tensorboardData
    else:
        firstEpisode = tensorboardData[0]
        valueLoss, policyLoss, validLoss, entropyLoss, gradNorm, varNorm, \
            mean_length, mean_value, mean_invalid, \
            mean_stop, mean_reward, mean_finishes = firstEpisode

    summary = tf.Summary()
    summary.value.add(tag='Perf/Reward', simple_value=mean_reward)
    summary.value.add(tag='Perf/Targets Done', simple_value=mean_finishes)
    summary.value.add(tag='Perf/Length', simple_value=mean_length)
    summary.value.add(tag='Perf/Valid Rate',
                      simple_value=(mean_length - mean_invalid) / mean_length)
    summary.value.add(tag='Perf/Stop Rate', simple_value=(mean_stop) / mean_length)
    summary.value.add(tag='Losses/Value Loss', simple_value=valueLoss)
    summary.value.add(tag='Losses/Policy Loss', simple_value=policyLoss)
    summary.value.add(tag='Losses/Valid Loss', simple_value=validLoss)
    summary.value.add(tag='Losses/Entropy Loss', simple_value=entropyLoss)
    summary.value.add(tag='Losses/Grad Norm', simple_value=gradNorm)
    summary.value.add(tag='Losses/Var Norm', simple_value=varNorm)
    # timestamp the summary at the start of the averaged window
    global_summary.add_summary(summary, int(curr_episode - len(tensorboardData)))
    global_summary.flush()


def main():
    """Driver loop: owns the global network and TF session, farms episodes
    out to Ray meta-agents, applies their gradients and logs progress."""
    with tf.device("/gpu:0"):
        trainer = tf.contrib.opt.NadamOptimizer(learning_rate=lr, use_locking=True)
        global_network = ACNet(GLOBAL_NET_SCOPE,a_size,trainer,False,NUM_CHANNEL,
                               OBS_SIZE,GLOBAL_NET_SCOPE, GLOBAL_NETWORK=True)

        global_summary = tf.summary.FileWriter(train_path)
        saver = tf.train.Saver(max_to_keep=1)

        with tf.Session(config=config) as sess:
            sess.run(tf.global_variables_initializer())
            if load_model == True:
                print ('Loading Model...')
                ckpt = tf.train.get_checkpoint_state(model_path)
                # recover the episode number from the 'model-<episode>.cptk'
                # checkpoint file name
                p=ckpt.model_checkpoint_path
                p=p[p.find('-')+1:]
                p=p[:p.find('.')]
                curr_episode=int(p)
                saver.restore(sess,ckpt.model_checkpoint_path)
                print("curr_episode set to ",curr_episode)
            else:
                curr_episode = 0

            # launch all of the threads:
            il_agents = [imitationRunner.remote(i) for i in range(NUM_IL_META_AGENTS)]
            rl_agents = [RLRunner.remote(i) for i in range(NUM_IL_META_AGENTS, NUM_META_AGENTS)]
            meta_agents = il_agents + rl_agents

            # get the initial weights from the global network
            weight_names = tf.trainable_variables()
            weights = sess.run(weight_names)  # Gets weights in numpy arrays CHECK
            # NOTE(review): weightVars appears unused below
            weightVars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)

            # launch the first job (e.g. getGradient) on each runner
            jobList = []  # Ray ObjectIDs
            for i, meta_agent in enumerate(meta_agents):
                jobList.append(meta_agent.job.remote(weights, curr_episode))
                curr_episode += 1

            tensorboardData = []
            # NOTE(review): IDs appears unused below
            IDs = [None] * NUM_META_AGENTS

            numImitationEpisodes = 0
            numRLEpisodes = 0
            try:
                while True:
                    # wait for any job to be completed - unblock as soon as
                    # the earliest arrives
                    done_id, jobList = ray.wait(jobList)

                    # get the results of the task from the object store
                    jobResults, metrics, info = ray.get(done_id)[0]

                    # imitation episodes write different data to tensorboard
                    if info['is_imitation']:
                        if jobResults:
                            writeImitationDataToTensorboard(global_summary, metrics, curr_episode)
                            numImitationEpisodes += 1
                    else:
                        if jobResults:
                            tensorboardData.append(metrics)
                            numRLEpisodes += 1

                    # Write ratio of RL to IL episodes to tensorboard
                    writeEpisodeRatio(global_summary, numImitationEpisodes,
                                      numRLEpisodes, sess, curr_episode)

                    if JOB_TYPE == JOB_OPTIONS.getGradient:
                        if jobResults:
                            for gradient in jobResults:
                                apply_gradients(global_network, gradient, sess, curr_episode)
                    elif JOB_TYPE == JOB_OPTIONS.getExperience:
                        print("not implemented")
                        assert(1==0)
                    else:
                        print("not implemented")
                        assert(1==0)

                    # Every `SUMMARY_WINDOW` RL episodes, write RL episodes
                    # to tensorboard
                    if len(tensorboardData) >= SUMMARY_WINDOW:
                        writeToTensorBoard(global_summary, tensorboardData, curr_episode)
                        tensorboardData = []

                    # get the updated weights from the global network
                    weight_names = tf.trainable_variables()
                    weights = sess.run(weight_names)
                    curr_episode += 1

                    # start a new job on the recently completed agent with
                    # the updated weights
                    jobList.extend([meta_agents[info['id']].job.remote(weights, curr_episode)])

                    if curr_episode % 100 == 0:
                        print ('Saving Model', end='\n')
                        saver.save(sess, model_path+'/model-'+str(int(curr_episode))+'.cptk')
                        print ('Saved Model', end='\n')

            except KeyboardInterrupt:
                print("CTRL-C pressed. killing remote workers")
                for a in meta_agents:
                    ray.kill(a)


if __name__ == "__main__":
    main()
================================================
FILE: od_mstar3/SortedCollection.py
================================================
from bisect import bisect_left, bisect_right


class SortedCollection(object):
    """Sequence sorted by a key function.

    SortedCollection() is much easier to work with than using bisect()
    directly.  It supports key functions like those used in sorted(),
    min(), and max().  The result of the key function call is saved so
    that keys can be searched efficiently.

    Instead of returning an insertion-point which can be hard to
    interpret, the five find-methods return a specific item in the
    sequence.  They can scan for exact matches, the last item
    less-than-or-equal to a key, or the first item
    greater-than-or-equal to a key.

    Once found, an item's ordinal position can be located with the
    index() method.  New items can be added with the insert() and
    insert_right() methods.  Old items can be deleted with the
    remove() method.

    The usual sequence methods are provided to support indexing,
    slicing, length lookup, clearing, copying, forward and reverse
    iteration, contains checking, item counts, item removal, and a
    nice looking repr.

    Finding and indexing are O(log n) operations while iteration and
    insertion are O(n).  The initial sort is O(n log n).
    The key function is stored in the 'key' attribute for easy
    introspection or so that you can assign a new key function
    (triggering an automatic re-sort).

    In short, the class was designed to handle all of the common use
    cases for bisect but with a simpler API and support for key
    functions.

    >>> from pprint import pprint
    >>> from operator import itemgetter

    >>> s = SortedCollection(key=itemgetter(2))
    >>> for record in [
    ...         ('roger', 'young', 30),
    ...         ('angela', 'jones', 28),
    ...         ('bill', 'smith', 22),
    ...         ('david', 'thomas', 32)]:
    ...     s.insert(record)

    >>> pprint(list(s))         # show records sorted by age
    [('bill', 'smith', 22),
     ('angela', 'jones', 28),
     ('roger', 'young', 30),
     ('david', 'thomas', 32)]

    >>> s.find_le(29)           # find oldest person aged 29 or younger
    ('angela', 'jones', 28)
    >>> s.find_lt(28)           # find oldest person under 28
    ('bill', 'smith', 22)
    >>> s.find_gt(28)           # find youngest person over 28
    ('roger', 'young', 30)

    >>> r = s.find_ge(32)       # find youngest person aged 32 or older
    >>> s.index(r)              # get the index of their record
    3
    >>> s[3]                    # fetch the record at that index
    ('david', 'thomas', 32)

    >>> s.key = itemgetter(0)   # now sort by first name
    >>> pprint(list(s))
    [('angela', 'jones', 28),
     ('bill', 'smith', 22),
     ('david', 'thomas', 32),
     ('roger', 'young', 30)]

    """

    def __init__(self, iterable=(), key=None):
        self._given_key = key
        key = (lambda x: x) if key is None else key
        # decorate-sort: cache each item's key alongside the item
        decorated = sorted((key(item), item) for item in iterable)
        self._keys = [k for k, item in decorated]
        self._items = [item for k, item in decorated]
        self._key = key

    def _getkey(self):
        return self._key

    def _setkey(self, key):
        # assigning a new key function re-sorts the whole collection
        if key is not self._key:
            self.__init__(self._items, key=key)

    def _delkey(self):
        # deleting the key resets to the identity key function
        self._setkey(None)

    key = property(_getkey, _setkey, _delkey, 'key function')

    def clear(self):
        self.__init__([], self._key)

    def copy(self):
        return self.__class__(self, self._key)

    def __len__(self):
        return len(self._items)

    def __getitem__(self, i):
        return self._items[i]

    def __iter__(self):
        return iter(self._items)

    def __reversed__(self):
        return reversed(self._items)

    def __repr__(self):
        return '%s(%r, key=%s)' % (
            self.__class__.__name__,
            self._items,
            getattr(self._given_key, '__name__', repr(self._given_key))
        )

    def __reduce__(self):
        # pickle support: rebuild from items and the originally given key
        return self.__class__, (self._items, self._given_key)

    def __contains__(self, item):
        """So if an item has its key value changed, you are not going to
        be able to recover its value
        """
        k = self._key(item)
        i = bisect_left(self._keys, k)
        j = bisect_right(self._keys, k)
        return item in self._items[i:j]

    def resort(self):
        """If all the key values are expected to have changed
        dramatically, resort the items list, and regenerate the
        internal representation

        Note that this operation is not guaranteed to be stable, as it
        depends on the ordering of a key, item pair, and the ordering
        of the items is effectively arbitrary
        """
        decorated = sorted((self.key(item), item) for item in self._items)
        self._keys = [k for k, item in decorated]
        self._items = [item for k, item in decorated]

    def index(self, item):
        """Find the position of an item.  Raise ValueError if not found."""
        k = self._key(item)
        i = bisect_left(self._keys, k)
        j = bisect_right(self._keys, k)
        # search only the slice of equal keys
        return self._items[i:j].index(item) + i

    def count(self, item):
        """Return number of occurrences of item"""
        k = self._key(item)
        i = bisect_left(self._keys, k)
        j = bisect_right(self._keys, k)
        return self._items[i:j].count(item)

    def insert(self, item):
        """Insert a new item.  If equal keys are found, add to the left"""
        k = self._key(item)
        i = bisect_left(self._keys, k)
        self._keys.insert(i, k)
        self._items.insert(i, item)

    def insert_right(self, item):
        """Insert a new item.  If equal keys are found, add to the right"""
        k = self._key(item)
        i = bisect_right(self._keys, k)
        self._keys.insert(i, k)
        self._items.insert(i, item)

    def remove(self, item):
        """Remove first occurrence of item.
        Raise ValueError if not found
        """
        i = self.index(item)
        del self._keys[i]
        del self._items[i]

    def pop(self):
        """returns the rightmost value (greatest key value)"""
        del self._keys[-1]
        return self._items.pop()

    def consistent_pop(self):
        """returns the rightmost value (greatest key value) and checks
        whether its cached key value is consistent with its current
        cost.

        returns:
        value with greatest cached key
        boolean: True if cached key is same as current key
        """
        cached_key = self._keys.pop()
        val = self._items.pop()
        return val, self._key(val) == cached_key

    def find(self, k):
        """Return first item with a key == k.

        Will fail if the key value of k was changed since it was
        inserted

        Raise ValueError if not found.
        """
        i = bisect_left(self._keys, k)
        if i != len(self) and self._keys[i] == k:
            return self._items[i]
        raise ValueError('No item found with key equal to: %r' % (k, ))

    def find_le(self, k):
        """Return last item with a key <= k.

        Raise ValueError if not found.
        """
        i = bisect_right(self._keys, k)
        if i:
            return self._items[i - 1]
        raise ValueError('No item found with key at or below: %r' % (k, ))

    def find_lt(self, k):
        """Return last item with a key < k.

        Raise ValueError if not found.
        """
        i = bisect_left(self._keys, k)
        if i:
            return self._items[i - 1]
        raise ValueError('No item found with key below: %r' % (k, ))

    def find_ge(self, k):
        """Return first item with a key >= k.

        Raise ValueError if not found
        """
        i = bisect_left(self._keys, k)
        if i != len(self):
            return self._items[i]
        raise ValueError('No item found with key at or above: %r' % (k, ))

    def find_gt(self, k):
        """Return first item with a key > k.
        Raise ValueError if not found
        """
        i = bisect_right(self._keys, k)
        if i != len(self):
            return self._items[i]
        raise ValueError('No item found with key above: %r' % (k, ))
================================================
FILE: od_mstar3/col_checker.cpp
================================================
#include "col_checker.hpp"
#include "col_set.hpp"

using namespace mstar;

// /**
//  * Performs simple pebble motion on the graph collision checking
//  *
//  * @param c1 source
//  * @param c2 target
//  *
//  * @return collision set of the edge
//  */
// template
// ColSet simple_edge_check(const T &c1,
//                          const T&c2){
//   ColSet col;
//   for (uint i = 0; i < c1.size(); i++){
//     for (uint j = i; j < c1.size(); j++){
//       if (c2[i] == c2[j] || (c1[i] == c2[j] && c1[j] == c2[i])){
//         add_col_set_in_place({{i, j}}, col);
//       }
//     }
//   }
//   return col;
// }

/**
 * Iterator version
 *
 * NOTE(review): the extraction that produced this dump stripped all
 * angle-bracket text in the C++ files: "template" below has lost its
 * parameter list (presumably "template <class T>"), "std::vector" its
 * element type, and "#include" directives their <header> targets.
 * Restore these from the upstream repository before compiling.
 */
template
ColSet simple_edge_check(T source_start, T source_end,
                         T target_start, T target_end){
  int size = source_end - source_start;
  ColSet col;
  // pairwise check: vertex collision (same target) or edge swap
  for (uint i = 0; i < size; i++){
    for (uint j = i + 1; j < size; j++){
      if (*(target_start + i) == *(target_start + j) ||
          (*(source_start + i) == *(target_start + j) &&
           *(source_start + j) == *(target_start + i))){
        add_col_set_in_place({{i, j}}, col);
      }
    }
  }
  return col;
}

ColSet SimpleGraphColCheck::check_edge(const OdCoord &c1, const OdCoord &c2,
                                       const std::vector ids) const{
  if (c2.is_standard()){
    return simple_edge_check(c1.coord.cbegin(), c1.coord.cend(),
                             c2.coord.cbegin(), c2.coord.cend());
  }
  // c2 is an intermediate vertex, so only check for collisions between
  // robots with an assigned move in c2
  int size = c2.move_tuple.size();
  return simple_edge_check(c1.coord.cbegin(), c1.coord.cbegin() + size,
                           c2.move_tuple.cbegin(), c2.move_tuple.cend());
}
================================================
FILE: od_mstar3/col_checker.hpp
================================================
#ifndef MSTAR_COL_CHECKER_H
#define MSTAR_COL_CHECKER_H

#include "mstar_type_defs.hpp"

namespace mstar{

  // Abstract interface for edge collision checking
  class ColChecker{
  public:
    virtual ~ColChecker(){};

    virtual ColSet check_edge(const OdCoord &c1, const OdCoord &c2,
                              const std::vector ids) const = 0;
  };

  /**
   * Collision checker for simple bidirected graphs, where no edges overlap
   *
   * I.e. for pebble motion on the graph where you only have to worry about
   * robots swapping positions, and not about diagonals crossing.  Allows
   * for rotations
   */
  class SimpleGraphColCheck: public ColChecker{
  public:
    /**
     * Checks for collision while traversing the edge from c1 to c2
     *
     * Finds collisions both while traversing the edge and when at the
     * goal configuration.
     *
     * @param c1 the source coordinate of the edge
     * @param c2 the target coordinate of the edge
     * @param ids list of global robot ids.  Necessary for heterogeneous
     *            robots
     *
     * @return the collision set containing the colliding robots
     */
    ColSet check_edge(const OdCoord &c1, const OdCoord &c2,
                      const std::vector ids) const;
  };
};

#endif
================================================
FILE: od_mstar3/col_set.hpp
================================================
#ifndef MSTAR_COL_SET_H
#define MSTAR_COL_SET_H

// NOTE(review): the #include target was stripped by extraction (angle
// brackets lost); likely <set> and/or <algorithm> for std::set,
// std::includes and std::set_union used below.
#include

/***********************************************************************
 * Provides logic for combining collision sets
 *
 * Assumes that a collision set is of form T> where T are
 * collections and the inner collection is sorted
 * (NOTE(review): nested template notation garbled by extraction)
 **********************************************************************/

namespace mstar{

  /**
   * tests if two sets are disjoint
   *
   * Currently doesnt try to leverage sorted.  Empty sets will always be
   * treated as disjoint
   *
   * @param s1, s2 The sets to check
   *
   * @return True if disjoint, else false
   */
  template
  bool is_disjoint(const T &s1, const T &s2){
    for (auto i = s1.cbegin(); i != s1.cend(); ++i){
      for (auto j = s2.cbegin(); j != s2.cend(); ++j){
        if (*i == *j){
          return false;
        }
      }
    }
    return true;
  };

  /**
   * Tests if s1 is a superset of s2
   *
   * Uses == to compare elements.  Does not leverage sorted values
   *
   * @param s1 potential superset
   * @param s2 potential subset
   *
   * @return True if s1 is a superset of s2, otherwise false
   */
  template
  bool is_superset(const T &s1, const T &s2){
    for (auto j = s2.cbegin(); j != s2.cend(); ++j){
      bool included = false;
      for (auto i = s1.cbegin(); i != s1.cend(); ++i){
        if (*i == *j){
          included = true;
          break;
        }
      }
      if (!included){
        return false;
      }
    }
    return true;
  };

  /**
   * specialization of is_superset that exploits sorted values
   */
  template
  bool is_superset(const std::set &s1, const std::set &s2){
    return std::includes(s1.cbegin(), s1.cend(), s2.cbegin(), s2.cend());
  }

  /**
   * Merges two sorted sets
   *
   * Elements of the set must be sorted.  Container of the sets must be
   * resizeable for output
   *
   */
  template
  T merge(const T &s1, const T &s2){
    T out(s1.size() + s2.size());
    auto it = std::set_union(s1.begin(), s1.end(), s2.begin(), s2.end(),
                             out.begin());
    out.resize(it - out.begin());
    return out;
  }

  template
  std::set merge(std::set s1, const std::set &s2){
    s1.insert(s2.cbegin(), s2.cend());
    return s1;
  }

  /**
   * Adds c1 to c2
   *
   * Mutates c2
   *
   * @param c1 collision set 1
   * @param c2 collision set 2
   *
   * @return true if c2 is changed, else false
   */
  template class TT, class... args>
  bool add_col_set_in_place(TT c1, TT &c2){
    bool changed = false;
    // TODO: This could be more efficient
    while (c1.size() > 0){
      int i = 0;
      // whether c1[-1] overlaps any element of c2
      bool found_overlap = false;
      while (i < c2.size()){
        if (!is_disjoint(c2[i], c1.back())) {
          // found overlap
          if (is_superset(c2[i], c1.back())){
            // current element in c1 contained by the element in c2, so
            // the c1 element can be dropped
            c1.pop_back();
            found_overlap = true;
            break;
          }
          // Non-trivial overlap.  Need to add the union of the current
          // elements back to c1 to check if there is any further overlap
          // with elements of c2
          // Could just merge in place, but doubt it really matters
          c1.back().insert(c2[i].cbegin(), c2[i].cend());
          c2.erase(c2.begin() + i);
          found_overlap = true;
          changed = true;
          break;
        } else{
          // no overlap between c1[-1] and c2[i], so check next element
          // of c2
          ++i;
        }
      }
      if (!found_overlap){
        // no overlap between c1[-1] and all elements of c2, so can
        // be added to c2 (although this will force checks against
        c2.push_back(c1.back());
        c1.pop_back();
        changed = true;
      }
    }
    return changed;
  }

  /**
   * Adds two collision sets, c1, c2
   *
   * The template monstrosity is necessary because std::vectors require two
   * parameters of which we care about one (the type), and the other is the
   * allocator.  Other containers may require more
   *
   * @param c1 collision set 1
   * @param c2 collision set 2
   *
   * @return A new collision set formed by adding c1 and c2
   */
  template class TT, class... args>
  TT add_col_set(TT c1, TT c2){
    add_col_set_in_place(c1, c2);
    return c2;
  }

  /**
   * Computes the collision set used for expansion
   *
   * Based the generating collision set of a vertex, which is the collision
   * set of the vertex's predecessor when the predecessor was expanded.  It
   * is useful as it specifies which partial solutions have been cached.
   * For example, if the generating collision set is {{1, 2}}, then a
   * subplanner already knows how to get robots 1 and 2 to the goal, and it
   * is more efficient to directly query that subplanner, rather than set the
   * collision set to be empty.
   *
   * However, you have to account for new collisions, as stored in the
   * vertex's collision set.  If a collision set element is a subset of an
   * element of the generating collision set, use the element form the
   * generating collision set.  If a generating collision set element has
   * a non-empty intersection with a element of the collision set that is
   * not a subset, don't use that generating collision set element
   *
   * @param col_set the collision set of the vertex
   * @param gen_set the generating collision set of the vertex
   *
   * @return A new collision set to use when expanding the vertex
   */
  template class TT, class... args>
  TT col_set_to_expand(TT col_set, TT gen_set){
    TT ret;
    while(gen_set.size() > 0){
      // Check the last element of the generating collision set.  Either it
      // can be used, or there is a non-superset intersection, and it must
      // be removed
      // Need to keep any elements of the collision set that are subsets
      // of the generating collision set element, as a later element of the
      // collision set may invalidate the generating collision set element
      TT elements_to_remove;
      uint i = 0;
      bool gen_set_elem_valid = true;
      while (i < col_set.size()){
        if (is_superset(gen_set.back(), col_set[i])){
          elements_to_remove.push_back(col_set[i]);
          col_set.erase(col_set.begin() + i);
        } else if (!is_disjoint(gen_set.back(), col_set[i])){
          // generating collision set element has a non-empty intersection
          // with a collision set element that is not a sub-set, so is
          // invalid
          gen_set.pop_back();
          // Need to return any collision set elements that were removed as
          // being subsets of gen_set.back
          col_set.insert(col_set.end(), elements_to_remove.begin(),
                         elements_to_remove.end());
          gen_set_elem_valid = false;
          break;
        } else{
          i += 1;
        }
      }
      if (gen_set_elem_valid){
        ret.push_back(gen_set.back());
        gen_set.pop_back();
      }
    }
    // Any remaining collision set elements were not contained by any element
    // of the generating collision set, so should be used directly
    ret.insert(ret.end(), col_set.begin(), col_set.end());
    return ret;
  };
}

#endif
================================================
FILE: od_mstar3/col_set_addition.py
================================================
"""Encapsulates the basic collision set addition functions, so they can
be accessible to any code that uses it

Also provides exceptions for indicating no solution or out of time
"""


def add_col_set_recursive(c1, c2):
    """Returns a new collision set resulting from adding c1 to c2.  No
    side effecting

    collision set is done for the recursive case, where
    ({1, 2}, ) + ({3, 4}, ) = ({1, 2}, {3, 4})

    c1, c2 - tuples of (immutable) sets

    returns:
    recursive collision set containing c1 and c2
    """
    # Make shallow copies
    c1 = list(c1)
    c2 = list(c2)
    while len(c1) > 0:
        i = 0
        # Whether c1[-1] overlaps with any element of c2
        found_overlap = False
        while i < len(c2):
            if not c2[i].isdisjoint(c1[-1]):
                # Found overlap
                if c2[i].issuperset(c1[-1]):
                    # No change in c2
                    c1.pop()
                    found_overlap = True
                    break
                # Have found a non-trivial overlap.  Need to add the
                # union to c1 so that we can check if the union has any
                # further overlap with elements of c2
                temp = c2.pop(i)
                # replace c2[i] with the union of c2[i] and c1[-1]
                c1.append(temp.union(c1.pop()))
                found_overlap = True
                break
            else:
                # No overlap between c1[-1] and c2[i], so check next
                # element of c2
                i += 1
        if not found_overlap:
            # c1[-1] has no overlap with any element of c2, so it can be
            # added as is to c2
            c2.append(c1.pop())
    return tuple(c2)


def add_col_set(c1, c2):
    """Adds the collision sets c1 to c2.

    c2 is assumed to contain a single, possibly empty, set

    c1, c2 - input collision sets

    returns:
    combined collision set containing c1 and c2
    """
    temp = frozenset([])
    if len(c2) >= 1:
        temp = c2[0]
        assert len(c2) == 1
    # non-recursive addition: collapse everything into one set
    for i in c1:
        temp = temp.union(i)
    if len(temp) == 0:
        return ()
    return (temp, )


def col_set_add(c1, c2, recursive):
    """Adds two collision sets

    c1, c2 - input collision sets
    recursive - boolean, whether to perform recursive M* style addition

    returns:
    collision set containing c1 and c2
    """
    if recursive:
        return add_col_set_recursive(c1, c2)
    else:
        return add_col_set(c1, c2)


def effective_col_set(col_set, prev_col_set):
    """Computes the effective collision set to use given the current
    collision set and the collision set used to get to the current node

    Only makes sense when used with recursive M*

    The purpose of this code is that in recursive M*, you invoke a
    subplanner to figure out how to get to the goal, which caches the
    entire path to the goal.  The next step, you have an empty
    collision set, so you don't query the subplanner with the cached
    path, and have to find a bunch of collisions before using the
    cached solution.

    This is intended for use with a memory of what the collision set
    was when you reached a given node.  Computes the "effective
    collision set".  Elements of the memorized collision set are used
    if they have no non-empty intersections with elements of the
    current collision set that are not subsets of the memorized
    component.  elements of col_set are NOT used if they are contained
    within some element of prev_col_set that is used.  Elements of
    prev_col_set are used if they completely contain all elements of
    col_set with which they intersect

    col_set      - current collision set
    prev_col_set - "memorized" collision set, i.e. the collision set of
                   the optimal predecessor at the time the path from
                   the optimal predecessor was first found

    returns:
    effective collision set.  Consists of the elements of the previous
    collision set, which should index subplanners which have cached
    paths available, and elements of the current collision set which
    are not contained within prev_col_set
    """
    effective_set = []
    prev_col_set = list(prev_col_set)
    col_set = list(col_set)
    while(len(prev_col_set) > 0):
        # Need to keep around the elements of col_set that won't be
        # used, because the containing element of prev_col_set may be
        # invalidated by a later element of col_set
        col_set_to_remove = []
        j = 0
        while (j < len(col_set)):
            if col_set[j].issubset(prev_col_set[-1]):
                # this element is contained in prev_col_set[-1], so can
                # be skipped unless prev_col_set[-1] is invalidated by
                # some later element of col_set
                col_set_to_remove.append(col_set.pop(j))
            elif not col_set[j].isdisjoint(prev_col_set[-1]):
                # this element partially overlaps prev_col_set,
                # invalidating it, so cannot use this element of
                # prev_col_set
                prev_col_set.pop()
                # return the elements of col_set we were going to remove
                col_set.extend(col_set_to_remove)
                break
            else:
                j += 1
        else:
            # while-else: runs only when the inner loop was NOT broken.
            # Never broke, so prev_col_set can be used as part of the
            # effective collision set
            effective_set.append(prev_col_set.pop())
    # Just copy over any elements of col_set that survived
    effective_set.extend(col_set)
    return tuple(effective_set)


class OutOfTimeError(Exception):
    """Raised when a planner exceeds its allotted planning time."""

    def __init__(self, value=None):
        self.value = value

    def __str__(self):
        return repr(self.value)


class NoSolutionError(Exception):
    """Raised when no path to the goal exists."""

    def __init__(self, value=None):
        self.value = value

    def __str__(self):
        return repr(self.value)


class OutOfScopeError(NoSolutionError):
    """NoSolutionError variant that also carries the offending col_set."""

    def __init__(self, value=None, col_set=()):
        self.value = value
        self.col_set = col_set

    def __str__(self):
        return repr(self.value)
================================================
FILE: od_mstar3/cython_od_mstar.cpp
================================================
/* Generated by Cython 0.29.21 */

/* BEGIN: Cython Metadata
{
    "distutils": {
        "depends": [
            "grid_planning.hpp"
        ],
        "extra_compile_args": [
"-std=c++11" ], "language": "c++", "name": "cpp_mstar", "sources": [ "cython_od_mstar.pyx", "policy.cpp", "col_checker.cpp", "od_mstar.cpp", "grid_policy.cpp", "grid_planning.cpp" ] }, "module_name": "cpp_mstar" } END: Cython Metadata */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) #error Cython requires Python 2.6+ or Python 3.3+. #else #define CYTHON_ABI "0_29_21" #define CYTHON_HEX_VERSION 0x001D15F0 #define CYTHON_FUTURE_DIVISION 0 #include #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #define __PYX_COMMA , #ifndef HAVE_LONG_LONG #if PY_VERSION_HEX >= 0x02070000 #define HAVE_LONG_LONG #endif #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 0 #undef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 0 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #if PY_VERSION_HEX < 0x03050000 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #undef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #undef 
CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 1 #undef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 0 #undef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 0 #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #undef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 0 #undef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #elif defined(PYSTON_VERSION) #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #undef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 0 #undef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #ifndef 
CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) #define CYTHON_USE_PYTYPE_LOOKUP 1 #endif #if PY_MAJOR_VERSION < 3 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #elif !defined(CYTHON_USE_PYLONG_INTERNALS) #define CYTHON_USE_PYLONG_INTERNALS 1 #endif #ifndef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 1 #endif #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #if PY_VERSION_HEX < 0x030300F0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #elif !defined(CYTHON_USE_UNICODE_WRITER) #define CYTHON_USE_UNICODE_WRITER 1 #endif #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #ifndef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 1 #endif #ifndef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 1 #endif #ifndef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000) #endif #ifndef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) #endif #ifndef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1) #endif #ifndef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3) #endif #endif #if !defined(CYTHON_FAST_PYCCALL) #define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) #endif #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #undef SHIFT #undef BASE #undef MASK #ifdef SIZEOF_VOID_P enum { 
__pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; #endif #endif #ifndef __has_attribute #define __has_attribute(x) 0 #endif #ifndef __has_cpp_attribute #define __has_cpp_attribute(x) 0 #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif #ifndef CYTHON_MAYBE_UNUSED_VAR # if defined(__cplusplus) template void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } # else # define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) # endif #endif #ifndef CYTHON_NCP_UNUSED # if CYTHON_COMPILING_IN_CPYTHON # define CYTHON_NCP_UNUSED # else # define CYTHON_NCP_UNUSED CYTHON_UNUSED # endif #endif #define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) #ifdef _MSC_VER #ifndef _MSC_STDINT_H_ #if _MSC_VER < 1300 typedef unsigned char uint8_t; typedef unsigned int uint32_t; #else typedef unsigned __int8 uint8_t; typedef unsigned __int32 uint32_t; #endif #endif #else #include #endif #ifndef CYTHON_FALLTHROUGH #if defined(__cplusplus) && __cplusplus >= 201103L #if __has_cpp_attribute(fallthrough) #define CYTHON_FALLTHROUGH [[fallthrough]] #elif __has_cpp_attribute(clang::fallthrough) #define CYTHON_FALLTHROUGH [[clang::fallthrough]] #elif __has_cpp_attribute(gnu::fallthrough) #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] #endif #endif #ifndef CYTHON_FALLTHROUGH #if __has_attribute(fallthrough) #define 
CYTHON_FALLTHROUGH __attribute__((fallthrough)) #else #define CYTHON_FALLTHROUGH #endif #endif #if defined(__clang__ ) && defined(__apple_build_version__) #if __apple_build_version__ < 7000000 #undef CYTHON_FALLTHROUGH #define CYTHON_FALLTHROUGH #endif #endif #endif #ifndef __cplusplus #error "Cython files generated with the C++ option must be compiled with a C++ compiler." #endif #ifndef CYTHON_INLINE #if defined(__clang__) #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) #else #define CYTHON_INLINE inline #endif #endif template void __Pyx_call_destructor(T& x) { x.~T(); } template class __Pyx_FakeReference { public: __Pyx_FakeReference() : ptr(NULL) { } __Pyx_FakeReference(const T& ref) : ptr(const_cast(&ref)) { } T *operator->() { return ptr; } T *operator&() { return ptr; } operator T&() { return *ptr; } template bool operator ==(U other) { return *ptr == other; } template bool operator !=(U other) { return *ptr != other; } private: T *ptr; }; #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) #define Py_OptimizeFlag 0 #endif #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2 #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #else #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #endif #define __Pyx_DefaultClassType PyType_Type #endif #ifndef Py_TPFLAGS_CHECKTYPES #define 
Py_TPFLAGS_CHECKTYPES 0 #endif #ifndef Py_TPFLAGS_HAVE_INDEX #define Py_TPFLAGS_HAVE_INDEX 0 #endif #ifndef Py_TPFLAGS_HAVE_NEWBUFFER #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #ifndef Py_TPFLAGS_HAVE_FINALIZE #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #ifndef METH_STACKLESS #define METH_STACKLESS 0 #endif #if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) #ifndef METH_FASTCALL #define METH_FASTCALL 0x80 #endif typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames); #else #define __Pyx_PyCFunctionFast _PyCFunctionFast #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords #endif #if CYTHON_FAST_PYCCALL #define __Pyx_PyFastCFunction_Check(func)\ ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))))) #else #define __Pyx_PyFastCFunction_Check(func) 0 #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) #define PyObject_Malloc(s) PyMem_Malloc(s) #define PyObject_Free(p) PyMem_Free(p) #define PyObject_Realloc(p) PyMem_Realloc(p) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1 #define PyMem_RawMalloc(n) PyMem_Malloc(n) #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n) #define PyMem_RawFree(p) PyMem_Free(p) #endif #if CYTHON_COMPILING_IN_PYSTON #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) #else #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) #endif #if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #elif PY_VERSION_HEX >= 0x03060000 #define __Pyx_PyThreadState_Current 
_PyThreadState_UncheckedGet() #elif PY_VERSION_HEX >= 0x03000000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #else #define __Pyx_PyThreadState_Current _PyThreadState_Current #endif #if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) #include "pythread.h" #define Py_tss_NEEDS_INIT 0 typedef int Py_tss_t; static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { *key = PyThread_create_key(); return 0; } static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); *key = Py_tss_NEEDS_INIT; return key; } static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { PyObject_Free(key); } static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { return *key != Py_tss_NEEDS_INIT; } static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { PyThread_delete_key(*key); *key = Py_tss_NEEDS_INIT; } static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { return PyThread_set_key_value(*key, value); } static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { return PyThread_get_key_value(*key); } #endif #if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) #define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? 
PyDict_New() : _PyDict_NewPresized(n)) #else #define __Pyx_PyDict_NewPresized(n) PyDict_New() #endif #if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS #define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) #else #define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name) #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) #if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) #else #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u)) #endif #else #define CYTHON_PEP393_ENABLED 0 #define PyUnicode_1BYTE_KIND 1 #define PyUnicode_2BYTE_KIND 2 #define PyUnicode_4BYTE_KIND 4 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 
65535 : 1114111) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) #define PyObject_ASCII(o) PyObject_Repr(o) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #ifndef PyObject_Unicode #define PyObject_Unicode PyObject_Str #endif #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #if PY_VERSION_HEX >= 0x030900A4 #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt) #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size) #else #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt) #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size) #endif #if CYTHON_ASSUME_SAFE_MACROS #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) #else #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) #endif #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t 
PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY #ifndef PyUnicode_InternFromString #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) #endif #endif #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyMethod_New(func, self, klass) ((self) ? ((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func)) #else #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) #endif #if CYTHON_USE_ASYNC_SLOTS #if PY_VERSION_HEX >= 0x030500B1 #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) #else #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) #endif #else #define __Pyx_PyType_AsAsync(obj) NULL #endif #ifndef __Pyx_PyAsyncMethodsStruct typedef struct { unaryfunc am_await; unaryfunc am_aiter; unaryfunc am_anext; } __Pyx_PyAsyncMethodsStruct; #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) #define __Pyx_truncl trunc #else #define __Pyx_truncl truncl #endif #define __PYX_MARK_ERR_POS(f_index, lineno) \ { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; } #define __PYX_ERR(f_index, lineno, Ln_error) \ { 
__PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; } #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #define __PYX_HAVE__cpp_mstar #define __PYX_HAVE_API__cpp_mstar /* Early includes */ #include "ios" #include "new" #include "stdexcept" #include "typeinfo" #include #include #if __cplusplus > 199711L #include namespace cython_std { template typename std::remove_reference::type&& move(T& t) noexcept { return std::move(t); } template typename std::remove_reference::type&& move(T&& t) noexcept { return std::move(t); } } #endif #include "grid_planning.hpp" #ifdef _OPENMP #include #endif /* _OPENMP */ #if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) #define CYTHON_WITHOUT_ASSERTIONS #endif typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8) #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_uchar_cast(c) ((unsigned char)c) #define __Pyx_long_cast(x) ((long)x) #define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ (sizeof(type) < sizeof(Py_ssize_t)) ||\ (sizeof(type) > sizeof(Py_ssize_t) &&\ likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX) &&\ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ v == (type)PY_SSIZE_T_MIN))) ||\ (sizeof(type) == sizeof(Py_ssize_t) &&\ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX))) ) static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { return (size_t) i < (size_t) limit; } #if defined (__cplusplus) && 
__cplusplus >= 201103L #include #define __Pyx_sst_abs(value) std::abs(value) #elif SIZEOF_INT >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) abs(value) #elif SIZEOF_LONG >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) labs(value) #elif defined (_MSC_VER) #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define __Pyx_sst_abs(value) llabs(value) #elif defined (__GNUC__) #define __Pyx_sst_abs(value) __builtin_llabs(value) #else #define __Pyx_sst_abs(value) ((value<0) ? -value : value) #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) #define 
__Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) #define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) #define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) #define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) #define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return (size_t)(u_end - u - 1); } #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) #define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); #define __Pyx_PySequence_Tuple(obj)\ (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_ASSUME_SAFE_MACROS #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? 
PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) #else #define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) #endif #define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII static int __Pyx_sys_getdefaultencoding_not_ascii; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; PyObject* ascii_chars_u = NULL; PyObject* ascii_chars_b = NULL; const char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; if (strcmp(default_encoding_c, "ascii") == 0) { __Pyx_sys_getdefaultencoding_not_ascii = 0; } else { char ascii_chars[128]; int c; for (c = 0; c < 128; c++) { ascii_chars[c] = c; } __Pyx_sys_getdefaultencoding_not_ascii = 1; ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); if (!ascii_chars_u) goto bad; ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { PyErr_Format( PyExc_ValueError, "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", default_encoding_c); goto bad; } Py_DECREF(ascii_chars_u); Py_DECREF(ascii_chars_b); } Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return -1; } #endif #if 
__PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) #else #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT static char* __PYX_DEFAULT_STRING_ENCODING; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); return -1; } #endif #endif /* Test for GCC > 2.95 */ #if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* !__GNUC__ or GCC < 2.95 */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } static PyObject *__pyx_m = NULL; static PyObject *__pyx_d; static PyObject *__pyx_b; static PyObject *__pyx_cython_runtime = NULL; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static PyObject *__pyx_empty_unicode; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static const char *__pyx_filename; static const char *__pyx_f[] = { "cython_od_mstar.pyx", "stringsource", }; /*--- Type declarations ---*/ /* --- Runtime support code (head) --- 
*/ /* Refnanny.proto */ #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil)\ if (acquire_gil) {\ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ PyGILState_Release(__pyx_gilstate_save);\ } else {\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil)\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext()\ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) 
Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif #define __Pyx_XDECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_XDECREF(tmp);\ } while (0) #define __Pyx_DECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_DECREF(tmp);\ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) /* RaiseArgTupleInvalid.proto */ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /* RaiseDoubleKeywords.proto */ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /* ParseKeywords.proto */ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ const char* function_name); /* PyObjectGetAttrStr.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif /* Import.proto */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /* PyFunctionFastCall.proto */ #if CYTHON_FAST_PYCALL #define __Pyx_PyFunction_FastCall(func, args, nargs)\ __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs); #else #define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) #endif #define __Pyx_BUILD_ASSERT_EXPR(cond)\ (sizeof(char [1 - 
2*!(cond)]) - 1) #ifndef Py_MEMBER_SIZE #define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) #endif static size_t __pyx_pyframe_localsplus_offset = 0; #include "frameobject.h" #define __Pxy_PyFrame_Initialize_Offsets()\ ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\ (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus))) #define __Pyx_PyFrame_GetLocalsplus(frame)\ (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) #endif /* PyCFunctionFastCall.proto */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); #else #define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) #endif /* PyObjectCall.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif /* PyIntCompare.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_EqObjC(PyObject *op1, PyObject *op2, long intval, long inplace); /* GetTopmostException.proto */ #if CYTHON_USE_EXC_INFO_STACK static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate); #endif /* PyThreadStateGet.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; #define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; #define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type #else #define __Pyx_PyThreadState_declare #define __Pyx_PyThreadState_assign #define __Pyx_PyErr_Occurred() PyErr_Occurred() #endif /* SaveResetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) static 
CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); #else #define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) #define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) #endif /* PyErrExceptionMatches.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); #else #define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) #endif /* GetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); #endif /* PyObjectCallMethO.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); #endif /* PyObjectCallOneArg.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); /* IncludeStringH.proto */ #include /* BytesEquals.proto */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); /* UnicodeEquals.proto */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals); /* StrEquals.proto */ #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals #else #define __Pyx_PyString_Equals __Pyx_PyBytes_Equals #endif /* GetBuiltinName.proto */ static PyObject *__Pyx_GetBuiltinName(PyObject *name); /* 
PyDictVersioning.proto */ #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS #define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) #define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) #define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ (version_var) = __PYX_GET_DICT_VERSION(dict);\ (cache_var) = (value); #define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ static PY_UINT64_T __pyx_dict_version = 0;\ static PyObject *__pyx_dict_cached_value = NULL;\ if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ (VAR) = __pyx_dict_cached_value;\ } else {\ (VAR) = __pyx_dict_cached_value = (LOOKUP);\ __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ }\ } static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj); static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); #else #define __PYX_GET_DICT_VERSION(dict) (0) #define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) #define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); #endif /* GetModuleGlobalName.proto */ #if CYTHON_USE_DICT_VERSIONS #define __Pyx_GetModuleGlobalName(var, name) {\ static PY_UINT64_T __pyx_dict_version = 0;\ static PyObject *__pyx_dict_cached_value = NULL;\ (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\ (likely(__pyx_dict_cached_value) ? 
__Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\ __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ } #define __Pyx_GetModuleGlobalNameUncached(var, name) {\ PY_UINT64_T __pyx_dict_version;\ PyObject *__pyx_dict_cached_value;\ (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ } static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); #else #define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) #define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); #endif /* PyObjectCallNoArg.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func); #else #define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL) #endif /* PyErrFetchRestore.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) #define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) #define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) #else #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #endif #else #define __Pyx_PyErr_Clear() 
PyErr_Clear() #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) #endif /* RaiseException.proto */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /* RaiseTooManyValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); /* RaiseNeedMoreValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); /* IterFinish.proto */ static CYTHON_INLINE int __Pyx_IterFinish(void); /* UnpackItemEndCheck.proto */ static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected); /* ListCompAppend.proto */ #if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len)) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); __Pyx_SET_SIZE(list, len + 1); return 0; } return PyList_Append(list, x); } #else #define __Pyx_ListComp_Append(L,x) PyList_Append(L,x) #endif /* ImportFrom.proto */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); /* CLineInTraceback.proto */ #ifdef CYTHON_CLINE_IN_TRACEBACK #define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? 
c_line : 0) #else static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); #endif /* CodeObjectCache.proto */ typedef struct { PyCodeObject* code_object; int code_line; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); /* AddTraceback.proto */ static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); /* CppExceptionConversion.proto */ #ifndef __Pyx_CppExn2PyErr #include #include #include #include static void __Pyx_CppExn2PyErr() { try { if (PyErr_Occurred()) ; // let the latest Python exn pass through and ignore the current one else throw; } catch (const std::bad_alloc& exn) { PyErr_SetString(PyExc_MemoryError, exn.what()); } catch (const std::bad_cast& exn) { PyErr_SetString(PyExc_TypeError, exn.what()); } catch (const std::bad_typeid& exn) { PyErr_SetString(PyExc_TypeError, exn.what()); } catch (const std::domain_error& exn) { PyErr_SetString(PyExc_ValueError, exn.what()); } catch (const std::invalid_argument& exn) { PyErr_SetString(PyExc_ValueError, exn.what()); } catch (const std::ios_base::failure& exn) { PyErr_SetString(PyExc_IOError, exn.what()); } catch (const std::out_of_range& exn) { PyErr_SetString(PyExc_IndexError, exn.what()); } catch (const std::overflow_error& exn) { PyErr_SetString(PyExc_OverflowError, exn.what()); } catch (const std::range_error& exn) { PyErr_SetString(PyExc_ArithmeticError, exn.what()); } catch (const std::underflow_error& exn) { PyErr_SetString(PyExc_ArithmeticError, exn.what()); } catch (const std::exception& exn) { PyErr_SetString(PyExc_RuntimeError, exn.what()); } catch (...) 
{ PyErr_SetString(PyExc_RuntimeError, "Unknown exception"); } } #endif /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); /* CIntFromPy.proto */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE size_t __Pyx_PyInt_As_size_t(PyObject *); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); /* CIntFromPy.proto */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); /* FastTypeChecks.proto */ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); #else #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) #define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) #endif #define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) /* CheckBinaryVersion.proto */ static int __Pyx_check_binary_version(void); /* InitStrings.proto */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /* Module declarations from 'libcpp' */ /* Module declarations from 'libcpp.vector' */ /* Module declarations from 'libcpp.utility' */ /* Module declarations from 'libcpp.pair' */ /* Module declarations from 'cpp_mstar' */ static std::pair __pyx_convert_pair_from_py_int__and_int(PyObject *); /*proto*/ static std::vector > __pyx_convert_vector_from_py_std_3a__3a_pair_3c_int_2c_int_3e___(PyObject *); /*proto*/ static PyObject *__pyx_convert_pair_to_py_int____int(std::pair const &); /*proto*/ static PyObject 
*__pyx_convert_vector_to_py_std_3a__3a_pair_3c_int_2c_int_3e___(const std::vector > &); /*proto*/ static PyObject *__pyx_convert_vector_to_py_std_3a__3a_vector_3c_std_3a__3a_pair_3c_int_2c_int_3e____3e___(const std::vector > > &); /*proto*/ #define __Pyx_MODULE_NAME "cpp_mstar" extern int __pyx_module_is_main_cpp_mstar; int __pyx_module_is_main_cpp_mstar = 0; /* Implementation of 'cpp_mstar' */ static PyObject *__pyx_builtin_range; static const char __pyx_k_e[] = "e"; static const char __pyx_k_i[] = "i"; static const char __pyx_k_obs[] = "obs"; static const char __pyx_k_row[] = "row"; static const char __pyx_k_main[] = "__main__"; static const char __pyx_k_name[] = "__name__"; static const char __pyx_k_temp[] = "temp"; static const char __pyx_k_test[] = "__test__"; static const char __pyx_k_goals[] = "goals"; static const char __pyx_k_range[] = "range"; static const char __pyx_k_world[] = "world"; static const char __pyx_k_import[] = "__import__"; static const char __pyx_k_init_pos[] = "init_pos"; static const char __pyx_k_resource[] = "resource"; static const char __pyx_k_RLIMIT_AS[] = "RLIMIT_AS"; static const char __pyx_k_cpp_mstar[] = "cpp_mstar"; static const char __pyx_k_find_path[] = "find_path"; static const char __pyx_k_inflation[] = "inflation"; static const char __pyx_k_setrlimit[] = "setrlimit"; static const char __pyx_k_time_limit[] = "time_limit"; static const char __pyx_k_No_Solution[] = "No Solution"; static const char __pyx_k_Out_of_Time[] = "Out of Time"; static const char __pyx_k_OutOfTimeError[] = "OutOfTimeError"; static const char __pyx_k_NoSolutionError[] = "NoSolutionError"; static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; static const char __pyx_k_cython_od_mstar_pyx[] = "cython_od_mstar.pyx"; static const char __pyx_k_od_mstar3_col_set_addition[] = "od_mstar3.col_set_addition"; static PyObject *__pyx_n_s_NoSolutionError; static PyObject *__pyx_kp_s_No_Solution; static PyObject *__pyx_n_s_OutOfTimeError; static 
PyObject *__pyx_kp_s_Out_of_Time; static PyObject *__pyx_n_s_RLIMIT_AS; static PyObject *__pyx_n_s_cline_in_traceback; static PyObject *__pyx_n_s_cpp_mstar; static PyObject *__pyx_kp_s_cython_od_mstar_pyx; static PyObject *__pyx_n_s_e; static PyObject *__pyx_n_s_find_path; static PyObject *__pyx_n_s_goals; static PyObject *__pyx_n_s_i; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_inflation; static PyObject *__pyx_n_s_init_pos; static PyObject *__pyx_n_s_main; static PyObject *__pyx_n_s_name; static PyObject *__pyx_n_s_obs; static PyObject *__pyx_n_s_od_mstar3_col_set_addition; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_resource; static PyObject *__pyx_n_s_row; static PyObject *__pyx_n_s_setrlimit; static PyObject *__pyx_n_s_temp; static PyObject *__pyx_n_s_test; static PyObject *__pyx_n_s_time_limit; static PyObject *__pyx_n_s_world; static PyObject *__pyx_pf_9cpp_mstar_find_path(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_world, PyObject *__pyx_v_init_pos, PyObject *__pyx_v_goals, PyObject *__pyx_v_inflation, PyObject *__pyx_v_time_limit); /* proto */ static PyObject *__pyx_int_1; static PyObject *__pyx_int_8589934592; static PyObject *__pyx_tuple_; static PyObject *__pyx_tuple__2; static PyObject *__pyx_codeobj__3; /* Late includes */ /* "cython_od_mstar.pyx":16 * double inflation, int time_limit) except + * * def find_path(world, init_pos, goals, inflation, time_limit): # <<<<<<<<<<<<<< * """Finds a path invoking C++ implementation * */ /* Python wrapper */ static PyObject *__pyx_pw_9cpp_mstar_1find_path(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_9cpp_mstar_find_path[] = "Finds a path invoking C++ implementation\n\n Uses recursive ODrM* to explore a 4 connected grid\n\n world - matrix specifying obstacles, 1 for obstacle, 0 for free\n init_pos - [[x, y], ...] specifying start position for each robot\n goals - [[x, y], ...] 
specifying goal position for each robot\n inflation - inflation factor for heuristic\n time_limit - time until failure in seconds\n\n returns:\n [[[x1, y1], ...], [[x2, y2], ...], ...] path in the joint\n configuration space\n\n raises:\n NoSolutionError if problem has no solution\n OutOfTimeError if the planner ran out of time\n "; static PyMethodDef __pyx_mdef_9cpp_mstar_1find_path = {"find_path", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_9cpp_mstar_1find_path, METH_VARARGS|METH_KEYWORDS, __pyx_doc_9cpp_mstar_find_path}; static PyObject *__pyx_pw_9cpp_mstar_1find_path(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_world = 0; PyObject *__pyx_v_init_pos = 0; PyObject *__pyx_v_goals = 0; PyObject *__pyx_v_inflation = 0; PyObject *__pyx_v_time_limit = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("find_path (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_world,&__pyx_n_s_init_pos,&__pyx_n_s_goals,&__pyx_n_s_inflation,&__pyx_n_s_time_limit,0}; PyObject* values[5] = {0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_world)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = 
__Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_init_pos)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("find_path", 1, 5, 5, 1); __PYX_ERR(0, 16, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_goals)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("find_path", 1, 5, 5, 2); __PYX_ERR(0, 16, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_inflation)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("find_path", 1, 5, 5, 3); __PYX_ERR(0, 16, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 4: if (likely((values[4] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_time_limit)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("find_path", 1, 5, 5, 4); __PYX_ERR(0, 16, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "find_path") < 0)) __PYX_ERR(0, 16, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 5) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); } __pyx_v_world = values[0]; __pyx_v_init_pos = values[1]; __pyx_v_goals = values[2]; __pyx_v_inflation = values[3]; __pyx_v_time_limit = values[4]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("find_path", 1, 5, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 16, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("cpp_mstar.find_path", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_9cpp_mstar_find_path(__pyx_self, __pyx_v_world, __pyx_v_init_pos, __pyx_v_goals, __pyx_v_inflation, __pyx_v_time_limit); /* function exit code 
*/ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_9cpp_mstar_find_path(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_world, PyObject *__pyx_v_init_pos, PyObject *__pyx_v_goals, PyObject *__pyx_v_inflation, PyObject *__pyx_v_time_limit) { PyObject *__pyx_v_resource = NULL; std::vector > __pyx_v_obs; std::vector __pyx_v_temp; PyObject *__pyx_v_row = NULL; PyObject *__pyx_v_i = NULL; PyObject *__pyx_v_e = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; PyObject *__pyx_t_6 = NULL; Py_ssize_t __pyx_t_7; PyObject *(*__pyx_t_8)(PyObject *); std::vector __pyx_t_9; Py_ssize_t __pyx_t_10; PyObject *(*__pyx_t_11)(PyObject *); bool __pyx_t_12; PyObject *__pyx_t_13 = NULL; PyObject *__pyx_t_14 = NULL; PyObject *__pyx_t_15 = NULL; std::vector > __pyx_t_16; std::vector > __pyx_t_17; double __pyx_t_18; std::vector > > __pyx_t_19; int __pyx_t_20; PyObject *__pyx_t_21 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("find_path", 0); /* "cython_od_mstar.pyx":36 * """ * * import resource # <<<<<<<<<<<<<< * resource.setrlimit(resource.RLIMIT_AS, (2**33,2**33)) # 8Gb * */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_resource, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 36, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_resource = __pyx_t_1; __pyx_t_1 = 0; /* "cython_od_mstar.pyx":37 * * import resource * resource.setrlimit(resource.RLIMIT_AS, (2**33,2**33)) # 8Gb # <<<<<<<<<<<<<< * * # convert to boolean. 
For some reason coercion doesn't seem to */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_resource, __pyx_n_s_setrlimit); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 37, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_resource, __pyx_n_s_RLIMIT_AS); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 37, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = NULL; __pyx_t_5 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); __pyx_t_5 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_t_3, __pyx_tuple_}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_5, 2+__pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 37, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_t_3, __pyx_tuple_}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_5, 2+__pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 37, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else #endif { __pyx_t_6 = PyTuple_New(2+__pyx_t_5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 37, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (__pyx_t_4) { __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_4); __pyx_t_4 = NULL; } __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_6, 0+__pyx_t_5, __pyx_t_3); __Pyx_INCREF(__pyx_tuple_); __Pyx_GIVEREF(__pyx_tuple_); PyTuple_SET_ITEM(__pyx_t_6, 1+__pyx_t_5, __pyx_tuple_); __pyx_t_3 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_6, NULL); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(0, 37, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "cython_od_mstar.pyx":43 * cdef vector[vector[bool]] obs * cdef vector[bool] temp * for row in world: # <<<<<<<<<<<<<< * temp = vector[bool]() * for i in row: */ if (likely(PyList_CheckExact(__pyx_v_world)) || PyTuple_CheckExact(__pyx_v_world)) { __pyx_t_1 = __pyx_v_world; __Pyx_INCREF(__pyx_t_1); __pyx_t_7 = 0; __pyx_t_8 = NULL; } else { __pyx_t_7 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_v_world); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 43, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_8 = Py_TYPE(__pyx_t_1)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 43, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_8)) { if (likely(PyList_CheckExact(__pyx_t_1))) { if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_1)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_2 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_7); __Pyx_INCREF(__pyx_t_2); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(0, 43, __pyx_L1_error) #else __pyx_t_2 = PySequence_ITEM(__pyx_t_1, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 43, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); #endif } else { if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_2 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_7); __Pyx_INCREF(__pyx_t_2); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(0, 43, __pyx_L1_error) #else __pyx_t_2 = PySequence_ITEM(__pyx_t_1, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 43, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); #endif } } else { __pyx_t_2 = __pyx_t_8(__pyx_t_1); if (unlikely(!__pyx_t_2)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(0, 43, __pyx_L1_error) } break; } 
__Pyx_GOTREF(__pyx_t_2); } __Pyx_XDECREF_SET(__pyx_v_row, __pyx_t_2); __pyx_t_2 = 0; /* "cython_od_mstar.pyx":44 * cdef vector[bool] temp * for row in world: * temp = vector[bool]() # <<<<<<<<<<<<<< * for i in row: * temp.push_back(i == 1) */ try { __pyx_t_9 = std::vector (); } catch(...) { __Pyx_CppExn2PyErr(); __PYX_ERR(0, 44, __pyx_L1_error) } __pyx_v_temp = __pyx_t_9; /* "cython_od_mstar.pyx":45 * for row in world: * temp = vector[bool]() * for i in row: # <<<<<<<<<<<<<< * temp.push_back(i == 1) * obs.push_back(temp) */ if (likely(PyList_CheckExact(__pyx_v_row)) || PyTuple_CheckExact(__pyx_v_row)) { __pyx_t_2 = __pyx_v_row; __Pyx_INCREF(__pyx_t_2); __pyx_t_10 = 0; __pyx_t_11 = NULL; } else { __pyx_t_10 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_row); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 45, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_11 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 45, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_11)) { if (likely(PyList_CheckExact(__pyx_t_2))) { if (__pyx_t_10 >= PyList_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_6 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_10); __Pyx_INCREF(__pyx_t_6); __pyx_t_10++; if (unlikely(0 < 0)) __PYX_ERR(0, 45, __pyx_L1_error) #else __pyx_t_6 = PySequence_ITEM(__pyx_t_2, __pyx_t_10); __pyx_t_10++; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 45, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); #endif } else { if (__pyx_t_10 >= PyTuple_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_6 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_10); __Pyx_INCREF(__pyx_t_6); __pyx_t_10++; if (unlikely(0 < 0)) __PYX_ERR(0, 45, __pyx_L1_error) #else __pyx_t_6 = PySequence_ITEM(__pyx_t_2, __pyx_t_10); __pyx_t_10++; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 45, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); #endif } } else { __pyx_t_6 = __pyx_t_11(__pyx_t_2); if (unlikely(!__pyx_t_6)) { PyObject* exc_type = 
PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(0, 45, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_6); } __Pyx_XDECREF_SET(__pyx_v_i, __pyx_t_6); __pyx_t_6 = 0; /* "cython_od_mstar.pyx":46 * temp = vector[bool]() * for i in row: * temp.push_back(i == 1) # <<<<<<<<<<<<<< * obs.push_back(temp) * try: */ __pyx_t_6 = __Pyx_PyInt_EqObjC(__pyx_v_i, __pyx_int_1, 1, 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 46, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_12 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_12 == ((bool)-1)) && PyErr_Occurred())) __PYX_ERR(0, 46, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; try { __pyx_v_temp.push_back(__pyx_t_12); } catch(...) { __Pyx_CppExn2PyErr(); __PYX_ERR(0, 46, __pyx_L1_error) } /* "cython_od_mstar.pyx":45 * for row in world: * temp = vector[bool]() * for i in row: # <<<<<<<<<<<<<< * temp.push_back(i == 1) * obs.push_back(temp) */ } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "cython_od_mstar.pyx":47 * for i in row: * temp.push_back(i == 1) * obs.push_back(temp) # <<<<<<<<<<<<<< * try: * return find_grid_path(obs, init_pos, goals, inflation, time_limit) */ try { __pyx_v_obs.push_back(__pyx_v_temp); } catch(...) 
{ __Pyx_CppExn2PyErr(); __PYX_ERR(0, 47, __pyx_L1_error) } /* "cython_od_mstar.pyx":43 * cdef vector[vector[bool]] obs * cdef vector[bool] temp * for row in world: # <<<<<<<<<<<<<< * temp = vector[bool]() * for i in row: */ } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "cython_od_mstar.pyx":48 * temp.push_back(i == 1) * obs.push_back(temp) * try: # <<<<<<<<<<<<<< * return find_grid_path(obs, init_pos, goals, inflation, time_limit) * except Exception as e: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_13, &__pyx_t_14, &__pyx_t_15); __Pyx_XGOTREF(__pyx_t_13); __Pyx_XGOTREF(__pyx_t_14); __Pyx_XGOTREF(__pyx_t_15); /*try:*/ { /* "cython_od_mstar.pyx":49 * obs.push_back(temp) * try: * return find_grid_path(obs, init_pos, goals, inflation, time_limit) # <<<<<<<<<<<<<< * except Exception as e: * if str(e) == "Out of Time": */ __Pyx_XDECREF(__pyx_r); __pyx_t_16 = __pyx_convert_vector_from_py_std_3a__3a_pair_3c_int_2c_int_3e___(__pyx_v_init_pos); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 49, __pyx_L7_error) __pyx_t_17 = __pyx_convert_vector_from_py_std_3a__3a_pair_3c_int_2c_int_3e___(__pyx_v_goals); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 49, __pyx_L7_error) __pyx_t_18 = __pyx_PyFloat_AsDouble(__pyx_v_inflation); if (unlikely((__pyx_t_18 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 49, __pyx_L7_error) __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_v_time_limit); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 49, __pyx_L7_error) try { __pyx_t_19 = mstar::find_grid_path(__pyx_v_obs, __pyx_t_16, __pyx_t_17, __pyx_t_18, __pyx_t_5); } catch(...) 
{ __Pyx_CppExn2PyErr(); __PYX_ERR(0, 49, __pyx_L7_error) } __pyx_t_1 = __pyx_convert_vector_to_py_std_3a__3a_vector_3c_std_3a__3a_pair_3c_int_2c_int_3e____3e___(__pyx_t_19); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 49, __pyx_L7_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L11_try_return; /* "cython_od_mstar.pyx":48 * temp.push_back(i == 1) * obs.push_back(temp) * try: # <<<<<<<<<<<<<< * return find_grid_path(obs, init_pos, goals, inflation, time_limit) * except Exception as e: */ } __pyx_L7_error:; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; /* "cython_od_mstar.pyx":50 * try: * return find_grid_path(obs, init_pos, goals, inflation, time_limit) * except Exception as e: # <<<<<<<<<<<<<< * if str(e) == "Out of Time": * raise OutOfTimeError() */ __pyx_t_5 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); if (__pyx_t_5) { __Pyx_AddTraceback("cpp_mstar.find_path", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_2, &__pyx_t_6) < 0) __PYX_ERR(0, 50, __pyx_L9_except_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GOTREF(__pyx_t_2); __Pyx_GOTREF(__pyx_t_6); __Pyx_INCREF(__pyx_t_2); __pyx_v_e = __pyx_t_2; /* "cython_od_mstar.pyx":51 * return find_grid_path(obs, init_pos, goals, inflation, time_limit) * except Exception as e: * if str(e) == "Out of Time": # <<<<<<<<<<<<<< * raise OutOfTimeError() * elif str(e) == "No Solution": */ __pyx_t_3 = __Pyx_PyObject_CallOneArg(((PyObject *)(&PyString_Type)), __pyx_v_e); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 51, __pyx_L9_except_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_20 = (__Pyx_PyString_Equals(__pyx_t_3, __pyx_kp_s_Out_of_Time, Py_EQ)); if (unlikely(__pyx_t_20 < 0)) __PYX_ERR(0, 51, __pyx_L9_except_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(__pyx_t_20)) { /* 
"cython_od_mstar.pyx":52 * except Exception as e: * if str(e) == "Out of Time": * raise OutOfTimeError() # <<<<<<<<<<<<<< * elif str(e) == "No Solution": * raise NoSolutionError() */ __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_OutOfTimeError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 52, __pyx_L9_except_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_21 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_21 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_21)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_21); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } __pyx_t_3 = (__pyx_t_21) ? __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_21) : __Pyx_PyObject_CallNoArg(__pyx_t_4); __Pyx_XDECREF(__pyx_t_21); __pyx_t_21 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 52, __pyx_L9_except_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(0, 52, __pyx_L9_except_error) /* "cython_od_mstar.pyx":51 * return find_grid_path(obs, init_pos, goals, inflation, time_limit) * except Exception as e: * if str(e) == "Out of Time": # <<<<<<<<<<<<<< * raise OutOfTimeError() * elif str(e) == "No Solution": */ } /* "cython_od_mstar.pyx":53 * if str(e) == "Out of Time": * raise OutOfTimeError() * elif str(e) == "No Solution": # <<<<<<<<<<<<<< * raise NoSolutionError() * else: */ __pyx_t_3 = __Pyx_PyObject_CallOneArg(((PyObject *)(&PyString_Type)), __pyx_v_e); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 53, __pyx_L9_except_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_20 = (__Pyx_PyString_Equals(__pyx_t_3, __pyx_kp_s_No_Solution, Py_EQ)); if (unlikely(__pyx_t_20 < 0)) __PYX_ERR(0, 53, __pyx_L9_except_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(__pyx_t_20)) { /* "cython_od_mstar.pyx":54 * raise OutOfTimeError() * elif str(e) == "No Solution": * raise NoSolutionError() # <<<<<<<<<<<<<< * else: * raise e */ 
__Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_NoSolutionError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 54, __pyx_L9_except_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_21 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_21 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_21)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_21); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } __pyx_t_3 = (__pyx_t_21) ? __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_21) : __Pyx_PyObject_CallNoArg(__pyx_t_4); __Pyx_XDECREF(__pyx_t_21); __pyx_t_21 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 54, __pyx_L9_except_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(0, 54, __pyx_L9_except_error) /* "cython_od_mstar.pyx":53 * if str(e) == "Out of Time": * raise OutOfTimeError() * elif str(e) == "No Solution": # <<<<<<<<<<<<<< * raise NoSolutionError() * else: */ } /* "cython_od_mstar.pyx":56 * raise NoSolutionError() * else: * raise e # <<<<<<<<<<<<<< */ /*else*/ { __Pyx_Raise(__pyx_v_e, 0, 0, 0); __PYX_ERR(0, 56, __pyx_L9_except_error) } } goto __pyx_L9_except_error; __pyx_L9_except_error:; /* "cython_od_mstar.pyx":48 * temp.push_back(i == 1) * obs.push_back(temp) * try: # <<<<<<<<<<<<<< * return find_grid_path(obs, init_pos, goals, inflation, time_limit) * except Exception as e: */ __Pyx_XGIVEREF(__pyx_t_13); __Pyx_XGIVEREF(__pyx_t_14); __Pyx_XGIVEREF(__pyx_t_15); __Pyx_ExceptionReset(__pyx_t_13, __pyx_t_14, __pyx_t_15); goto __pyx_L1_error; __pyx_L11_try_return:; __Pyx_XGIVEREF(__pyx_t_13); __Pyx_XGIVEREF(__pyx_t_14); __Pyx_XGIVEREF(__pyx_t_15); __Pyx_ExceptionReset(__pyx_t_13, __pyx_t_14, __pyx_t_15); goto __pyx_L0; } /* "cython_od_mstar.pyx":16 * double inflation, int time_limit) except + * * def find_path(world, init_pos, goals, inflation, time_limit): # <<<<<<<<<<<<<< * """Finds a path invoking C++ 
implementation * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_21); __Pyx_AddTraceback("cpp_mstar.find_path", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_resource); __Pyx_XDECREF(__pyx_v_row); __Pyx_XDECREF(__pyx_v_i); __Pyx_XDECREF(__pyx_v_e); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "pair.from_py":145 * * @cname("__pyx_convert_pair_from_py_int__and_int") * cdef pair[X,Y] __pyx_convert_pair_from_py_int__and_int(object o) except *: # <<<<<<<<<<<<<< * x, y = o * return pair[X,Y](x, y) */ static std::pair __pyx_convert_pair_from_py_int__and_int(PyObject *__pyx_v_o) { PyObject *__pyx_v_x = NULL; PyObject *__pyx_v_y = NULL; std::pair __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *(*__pyx_t_4)(PyObject *); int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__pyx_convert_pair_from_py_int__and_int", 0); /* "pair.from_py":146 * @cname("__pyx_convert_pair_from_py_int__and_int") * cdef pair[X,Y] __pyx_convert_pair_from_py_int__and_int(object o) except *: * x, y = o # <<<<<<<<<<<<<< * return pair[X,Y](x, y) * */ if ((likely(PyTuple_CheckExact(__pyx_v_o))) || (PyList_CheckExact(__pyx_v_o))) { PyObject* sequence = __pyx_v_o; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 146, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS if (likely(PyTuple_CheckExact(sequence))) { __pyx_t_1 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_2 = PyTuple_GET_ITEM(sequence, 1); } else { __pyx_t_1 = PyList_GET_ITEM(sequence, 0); 
__pyx_t_2 = PyList_GET_ITEM(sequence, 1); } __Pyx_INCREF(__pyx_t_1); __Pyx_INCREF(__pyx_t_2); #else __pyx_t_1 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 146, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 146, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); #endif } else { Py_ssize_t index = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_o); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 146, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = Py_TYPE(__pyx_t_3)->tp_iternext; index = 0; __pyx_t_1 = __pyx_t_4(__pyx_t_3); if (unlikely(!__pyx_t_1)) goto __pyx_L3_unpacking_failed; __Pyx_GOTREF(__pyx_t_1); index = 1; __pyx_t_2 = __pyx_t_4(__pyx_t_3); if (unlikely(!__pyx_t_2)) goto __pyx_L3_unpacking_failed; __Pyx_GOTREF(__pyx_t_2); if (__Pyx_IternextUnpackEndCheck(__pyx_t_4(__pyx_t_3), 2) < 0) __PYX_ERR(1, 146, __pyx_L1_error) __pyx_t_4 = NULL; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L4_unpacking_done; __pyx_L3_unpacking_failed:; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_4 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); __PYX_ERR(1, 146, __pyx_L1_error) __pyx_L4_unpacking_done:; } __pyx_v_x = __pyx_t_1; __pyx_t_1 = 0; __pyx_v_y = __pyx_t_2; __pyx_t_2 = 0; /* "pair.from_py":147 * cdef pair[X,Y] __pyx_convert_pair_from_py_int__and_int(object o) except *: * x, y = o * return pair[X,Y](x, y) # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_v_x); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 147, __pyx_L1_error) __pyx_t_6 = __Pyx_PyInt_As_int(__pyx_v_y); if (unlikely((__pyx_t_6 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 147, __pyx_L1_error) __pyx_r = std::pair (((int)__pyx_t_5), ((int)__pyx_t_6)); goto __pyx_L0; /* "pair.from_py":145 * * @cname("__pyx_convert_pair_from_py_int__and_int") * cdef pair[X,Y] __pyx_convert_pair_from_py_int__and_int(object o) except *: # <<<<<<<<<<<<<< * x, y = o * return 
pair[X,Y](x, y) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("pair.from_py.__pyx_convert_pair_from_py_int__and_int", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_pretend_to_initialize(&__pyx_r); __pyx_L0:; __Pyx_XDECREF(__pyx_v_x); __Pyx_XDECREF(__pyx_v_y); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "vector.from_py":45 * * @cname("__pyx_convert_vector_from_py_std_3a__3a_pair_3c_int_2c_int_3e___") * cdef vector[X] __pyx_convert_vector_from_py_std_3a__3a_pair_3c_int_2c_int_3e___(object o) except *: # <<<<<<<<<<<<<< * cdef vector[X] v * for item in o: */ static std::vector > __pyx_convert_vector_from_py_std_3a__3a_pair_3c_int_2c_int_3e___(PyObject *__pyx_v_o) { std::vector > __pyx_v_v; PyObject *__pyx_v_item = NULL; std::vector > __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *(*__pyx_t_3)(PyObject *); PyObject *__pyx_t_4 = NULL; std::pair __pyx_t_5; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__pyx_convert_vector_from_py_std_3a__3a_pair_3c_int_2c_int_3e___", 0); /* "vector.from_py":47 * cdef vector[X] __pyx_convert_vector_from_py_std_3a__3a_pair_3c_int_2c_int_3e___(object o) except *: * cdef vector[X] v * for item in o: # <<<<<<<<<<<<<< * v.push_back(item) * return v */ if (likely(PyList_CheckExact(__pyx_v_o)) || PyTuple_CheckExact(__pyx_v_o)) { __pyx_t_1 = __pyx_v_o; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; __pyx_t_3 = NULL; } else { __pyx_t_2 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_v_o); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 47, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = Py_TYPE(__pyx_t_1)->tp_iternext; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 47, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_3)) { if (likely(PyList_CheckExact(__pyx_t_1))) { if (__pyx_t_2 >= PyList_GET_SIZE(__pyx_t_1)) break; #if CYTHON_ASSUME_SAFE_MACROS && 
!CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_4); __pyx_t_2++; if (unlikely(0 < 0)) __PYX_ERR(1, 47, __pyx_L1_error) #else __pyx_t_4 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 47, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif } else { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_4); __pyx_t_2++; if (unlikely(0 < 0)) __PYX_ERR(1, 47, __pyx_L1_error) #else __pyx_t_4 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 47, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif } } else { __pyx_t_4 = __pyx_t_3(__pyx_t_1); if (unlikely(!__pyx_t_4)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 47, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_4); } __Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_4); __pyx_t_4 = 0; /* "vector.from_py":48 * cdef vector[X] v * for item in o: * v.push_back(item) # <<<<<<<<<<<<<< * return v * */ __pyx_t_5 = __pyx_convert_pair_from_py_int__and_int(__pyx_v_item); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 48, __pyx_L1_error) __pyx_v_v.push_back(((std::pair )__pyx_t_5)); /* "vector.from_py":47 * cdef vector[X] __pyx_convert_vector_from_py_std_3a__3a_pair_3c_int_2c_int_3e___(object o) except *: * cdef vector[X] v * for item in o: # <<<<<<<<<<<<<< * v.push_back(item) * return v */ } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "vector.from_py":49 * for item in o: * v.push_back(item) * return v # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_v; goto __pyx_L0; /* "vector.from_py":45 * * @cname("__pyx_convert_vector_from_py_std_3a__3a_pair_3c_int_2c_int_3e___") * cdef vector[X] __pyx_convert_vector_from_py_std_3a__3a_pair_3c_int_2c_int_3e___(object o) except *: # 
<<<<<<<<<<<<<< * cdef vector[X] v * for item in o: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("vector.from_py.__pyx_convert_vector_from_py_std_3a__3a_pair_3c_int_2c_int_3e___", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_pretend_to_initialize(&__pyx_r); __pyx_L0:; __Pyx_XDECREF(__pyx_v_item); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "pair.to_py":158 * * @cname("__pyx_convert_pair_to_py_int____int") * cdef object __pyx_convert_pair_to_py_int____int(const pair[X,Y]& p): # <<<<<<<<<<<<<< * return p.first, p.second * */ static PyObject *__pyx_convert_pair_to_py_int____int(std::pair const &__pyx_v_p) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__pyx_convert_pair_to_py_int____int", 0); /* "pair.to_py":159 * @cname("__pyx_convert_pair_to_py_int____int") * cdef object __pyx_convert_pair_to_py_int____int(const pair[X,Y]& p): * return p.first, p.second # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_p.first); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 159, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_p.second); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 159, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 159, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "pair.to_py":158 * * @cname("__pyx_convert_pair_to_py_int____int") * cdef object __pyx_convert_pair_to_py_int____int(const pair[X,Y]& p): # <<<<<<<<<<<<<< * return p.first, p.second * */ /* 
function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("pair.to_py.__pyx_convert_pair_to_py_int____int", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "vector.to_py":60 * * @cname("__pyx_convert_vector_to_py_std_3a__3a_pair_3c_int_2c_int_3e___") * cdef object __pyx_convert_vector_to_py_std_3a__3a_pair_3c_int_2c_int_3e___(vector[X]& v): # <<<<<<<<<<<<<< * return [v[i] for i in range(v.size())] * */ static PyObject *__pyx_convert_vector_to_py_std_3a__3a_pair_3c_int_2c_int_3e___(const std::vector > &__pyx_v_v) { size_t __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; size_t __pyx_t_2; size_t __pyx_t_3; size_t __pyx_t_4; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__pyx_convert_vector_to_py_std_3a__3a_pair_3c_int_2c_int_3e___", 0); /* "vector.to_py":61 * @cname("__pyx_convert_vector_to_py_std_3a__3a_pair_3c_int_2c_int_3e___") * cdef object __pyx_convert_vector_to_py_std_3a__3a_pair_3c_int_2c_int_3e___(vector[X]& v): * return [v[i] for i in range(v.size())] # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 61, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __pyx_v_v.size(); __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; __pyx_t_5 = __pyx_convert_pair_to_py_int____int((__pyx_v_v[__pyx_v_i])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 61, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(1, 61, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "vector.to_py":60 * * 
@cname("__pyx_convert_vector_to_py_std_3a__3a_pair_3c_int_2c_int_3e___") * cdef object __pyx_convert_vector_to_py_std_3a__3a_pair_3c_int_2c_int_3e___(vector[X]& v): # <<<<<<<<<<<<<< * return [v[i] for i in range(v.size())] * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("vector.to_py.__pyx_convert_vector_to_py_std_3a__3a_pair_3c_int_2c_int_3e___", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_convert_vector_to_py_std_3a__3a_vector_3c_std_3a__3a_pair_3c_int_2c_int_3e____3e___(const std::vector > > &__pyx_v_v) { size_t __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; size_t __pyx_t_2; size_t __pyx_t_3; size_t __pyx_t_4; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__pyx_convert_vector_to_py_std_3a__3a_vector_3c_std_3a__3a_pair_3c_int_2c_int_3e____3e___", 0); /* "vector.to_py":61 * @cname("__pyx_convert_vector_to_py_std_3a__3a_vector_3c_std_3a__3a_pair_3c_int_2c_int_3e____3e___") * cdef object __pyx_convert_vector_to_py_std_3a__3a_vector_3c_std_3a__3a_pair_3c_int_2c_int_3e____3e___(vector[X]& v): * return [v[i] for i in range(v.size())] # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 61, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __pyx_v_v.size(); __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; __pyx_t_5 = __pyx_convert_vector_to_py_std_3a__3a_pair_3c_int_2c_int_3e___((__pyx_v_v[__pyx_v_i])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 61, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(1, 61, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } 
__pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "vector.to_py":60 * * @cname("__pyx_convert_vector_to_py_std_3a__3a_vector_3c_std_3a__3a_pair_3c_int_2c_int_3e____3e___") * cdef object __pyx_convert_vector_to_py_std_3a__3a_vector_3c_std_3a__3a_pair_3c_int_2c_int_3e____3e___(vector[X]& v): # <<<<<<<<<<<<<< * return [v[i] for i in range(v.size())] * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("vector.to_py.__pyx_convert_vector_to_py_std_3a__3a_vector_3c_std_3a__3a_pair_3c_int_2c_int_3e____3e___", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 #if CYTHON_PEP489_MULTI_PHASE_INIT static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ static int __pyx_pymod_exec_cpp_mstar(PyObject* module); /*proto*/ static PyModuleDef_Slot __pyx_moduledef_slots[] = { {Py_mod_create, (void*)__pyx_pymod_create}, {Py_mod_exec, (void*)__pyx_pymod_exec_cpp_mstar}, {0, NULL} }; #endif static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, "cpp_mstar", 0, /* m_doc */ #if CYTHON_PEP489_MULTI_PHASE_INIT 0, /* m_size */ #else -1, /* m_size */ #endif __pyx_methods /* m_methods */, #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_moduledef_slots, /* m_slots */ #else NULL, /* m_reload */ #endif NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif #ifndef CYTHON_SMALL_CODE #if defined(__clang__) #define CYTHON_SMALL_CODE #elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) #define CYTHON_SMALL_CODE __attribute__((cold)) #else #define CYTHON_SMALL_CODE #endif #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_n_s_NoSolutionError, __pyx_k_NoSolutionError, sizeof(__pyx_k_NoSolutionError), 0, 0, 1, 1}, {&__pyx_kp_s_No_Solution, __pyx_k_No_Solution, 
sizeof(__pyx_k_No_Solution), 0, 0, 1, 0}, {&__pyx_n_s_OutOfTimeError, __pyx_k_OutOfTimeError, sizeof(__pyx_k_OutOfTimeError), 0, 0, 1, 1}, {&__pyx_kp_s_Out_of_Time, __pyx_k_Out_of_Time, sizeof(__pyx_k_Out_of_Time), 0, 0, 1, 0}, {&__pyx_n_s_RLIMIT_AS, __pyx_k_RLIMIT_AS, sizeof(__pyx_k_RLIMIT_AS), 0, 0, 1, 1}, {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, {&__pyx_n_s_cpp_mstar, __pyx_k_cpp_mstar, sizeof(__pyx_k_cpp_mstar), 0, 0, 1, 1}, {&__pyx_kp_s_cython_od_mstar_pyx, __pyx_k_cython_od_mstar_pyx, sizeof(__pyx_k_cython_od_mstar_pyx), 0, 0, 1, 0}, {&__pyx_n_s_e, __pyx_k_e, sizeof(__pyx_k_e), 0, 0, 1, 1}, {&__pyx_n_s_find_path, __pyx_k_find_path, sizeof(__pyx_k_find_path), 0, 0, 1, 1}, {&__pyx_n_s_goals, __pyx_k_goals, sizeof(__pyx_k_goals), 0, 0, 1, 1}, {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_n_s_inflation, __pyx_k_inflation, sizeof(__pyx_k_inflation), 0, 0, 1, 1}, {&__pyx_n_s_init_pos, __pyx_k_init_pos, sizeof(__pyx_k_init_pos), 0, 0, 1, 1}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, {&__pyx_n_s_obs, __pyx_k_obs, sizeof(__pyx_k_obs), 0, 0, 1, 1}, {&__pyx_n_s_od_mstar3_col_set_addition, __pyx_k_od_mstar3_col_set_addition, sizeof(__pyx_k_od_mstar3_col_set_addition), 0, 0, 1, 1}, {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, {&__pyx_n_s_resource, __pyx_k_resource, sizeof(__pyx_k_resource), 0, 0, 1, 1}, {&__pyx_n_s_row, __pyx_k_row, sizeof(__pyx_k_row), 0, 0, 1, 1}, {&__pyx_n_s_setrlimit, __pyx_k_setrlimit, sizeof(__pyx_k_setrlimit), 0, 0, 1, 1}, {&__pyx_n_s_temp, __pyx_k_temp, sizeof(__pyx_k_temp), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_n_s_time_limit, __pyx_k_time_limit, sizeof(__pyx_k_time_limit), 0, 0, 1, 1}, {&__pyx_n_s_world, __pyx_k_world, 
sizeof(__pyx_k_world), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(1, 61, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "cython_od_mstar.pyx":37 * * import resource * resource.setrlimit(resource.RLIMIT_AS, (2**33,2**33)) # 8Gb # <<<<<<<<<<<<<< * * # convert to boolean. For some reason coercion doesn't seem to */ __pyx_tuple_ = PyTuple_Pack(2, __pyx_int_8589934592, __pyx_int_8589934592); if (unlikely(!__pyx_tuple_)) __PYX_ERR(0, 37, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple_); __Pyx_GIVEREF(__pyx_tuple_); /* "cython_od_mstar.pyx":16 * double inflation, int time_limit) except + * * def find_path(world, init_pos, goals, inflation, time_limit): # <<<<<<<<<<<<<< * """Finds a path invoking C++ implementation * */ __pyx_tuple__2 = PyTuple_Pack(11, __pyx_n_s_world, __pyx_n_s_init_pos, __pyx_n_s_goals, __pyx_n_s_inflation, __pyx_n_s_time_limit, __pyx_n_s_resource, __pyx_n_s_obs, __pyx_n_s_temp, __pyx_n_s_row, __pyx_n_s_i, __pyx_n_s_e); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(0, 16, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); __pyx_codeobj__3 = (PyObject*)__Pyx_PyCode_New(5, 0, 11, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__2, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_cython_od_mstar_pyx, __pyx_n_s_find_path, 16, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__3)) __PYX_ERR(0, 16, __pyx_L1_error) __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); __pyx_int_1 = PyInt_FromLong(1); if 
(unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_8589934592 = PyInt_FromString((char *)"8589934592", 0, 0); if (unlikely(!__pyx_int_8589934592)) __PYX_ERR(0, 1, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ static int __Pyx_modinit_global_init_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); /*--- Global init code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_variable_export_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); /*--- Variable export code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_function_export_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); /*--- Function export code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_type_init_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); /*--- Type init code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_type_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); /*--- Type import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_variable_import_code(void) { __Pyx_RefNannyDeclarations 
__Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); /*--- Variable import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_function_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); /*--- Function import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } #ifndef CYTHON_NO_PYINIT_EXPORT #define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC #elif PY_MAJOR_VERSION < 3 #ifdef __cplusplus #define __Pyx_PyMODINIT_FUNC extern "C" void #else #define __Pyx_PyMODINIT_FUNC void #endif #else #ifdef __cplusplus #define __Pyx_PyMODINIT_FUNC extern "C" PyObject * #else #define __Pyx_PyMODINIT_FUNC PyObject * #endif #endif #if PY_MAJOR_VERSION < 3 __Pyx_PyMODINIT_FUNC initcpp_mstar(void) CYTHON_SMALL_CODE; /*proto*/ __Pyx_PyMODINIT_FUNC initcpp_mstar(void) #else __Pyx_PyMODINIT_FUNC PyInit_cpp_mstar(void) CYTHON_SMALL_CODE; /*proto*/ __Pyx_PyMODINIT_FUNC PyInit_cpp_mstar(void) #if CYTHON_PEP489_MULTI_PHASE_INIT { return PyModuleDef_Init(&__pyx_moduledef); } static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { #if PY_VERSION_HEX >= 0x030700A1 static PY_INT64_T main_interpreter_id = -1; PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); if (main_interpreter_id == -1) { main_interpreter_id = current_id; return (unlikely(current_id == -1)) ? 
-1 : 0; } else if (unlikely(main_interpreter_id != current_id)) #else static PyInterpreterState *main_interpreter = NULL; PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; if (!main_interpreter) { main_interpreter = current_interpreter; } else if (unlikely(main_interpreter != current_interpreter)) #endif { PyErr_SetString( PyExc_ImportError, "Interpreter change detected - this module can only be loaded into one interpreter per process."); return -1; } return 0; } static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) { PyObject *value = PyObject_GetAttrString(spec, from_name); int result = 0; if (likely(value)) { if (allow_none || value != Py_None) { result = PyDict_SetItemString(moddict, to_name, value); } Py_DECREF(value); } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); } else { result = -1; } return result; } static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { PyObject *module = NULL, *moddict, *modname; if (__Pyx_check_single_interpreter()) return NULL; if (__pyx_m) return __Pyx_NewRef(__pyx_m); modname = PyObject_GetAttrString(spec, "name"); if (unlikely(!modname)) goto bad; module = PyModule_NewObject(modname); Py_DECREF(modname); if (unlikely(!module)) goto bad; moddict = PyModule_GetDict(module); if (unlikely(!moddict)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; return module; bad: Py_XDECREF(module); return NULL; } static CYTHON_SMALL_CODE int __pyx_pymod_exec_cpp_mstar(PyObject *__pyx_pyinit_module) 
#endif #endif { PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations #if CYTHON_PEP489_MULTI_PHASE_INIT if (__pyx_m) { if (__pyx_m == __pyx_pyinit_module) return 0; PyErr_SetString(PyExc_RuntimeError, "Module 'cpp_mstar' has already been imported. Re-initialisation is not supported."); return -1; } #elif PY_MAJOR_VERSION >= 3 if (__pyx_m) return __Pyx_NewRef(__pyx_m); #endif #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_cpp_mstar(void)", 0); if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pxy_PyFrame_Initialize_Offsets __Pxy_PyFrame_Initialize_Offsets(); #endif __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pyx_CyFunction_USED if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Coroutine_USED if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_AsyncGen_USED if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_StopAsyncIteration_USED if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /*--- Library function declarations ---*/ /*--- 
Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_m = __pyx_pyinit_module; Py_INCREF(__pyx_m); #else #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4("cpp_mstar", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) #endif __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_b); __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_cython_runtime); if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); /*--- Initialize various global constants etc. 
---*/ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif if (__pyx_module_is_main_cpp_mstar) { if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) if (!PyDict_GetItemString(modules, "cpp_mstar")) { if (unlikely(PyDict_SetItemString(modules, "cpp_mstar", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) } } #endif /*--- Builtin init code ---*/ if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Constants init code ---*/ if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Global type/function init code ---*/ (void)__Pyx_modinit_global_init_code(); (void)__Pyx_modinit_variable_export_code(); (void)__Pyx_modinit_function_export_code(); (void)__Pyx_modinit_type_init_code(); (void)__Pyx_modinit_type_import_code(); (void)__Pyx_modinit_variable_import_code(); (void)__Pyx_modinit_function_import_code(); /*--- Execution code ---*/ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /* "cython_od_mstar.pyx":7 * from libcpp.pair cimport pair * * from od_mstar3.col_set_addition import OutOfTimeError, NoSolutionError # <<<<<<<<<<<<<< * * cdef extern from "grid_planning.hpp" namespace "mstar": */ __pyx_t_1 = PyList_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_n_s_OutOfTimeError); __Pyx_GIVEREF(__pyx_n_s_OutOfTimeError); PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_OutOfTimeError); __Pyx_INCREF(__pyx_n_s_NoSolutionError); __Pyx_GIVEREF(__pyx_n_s_NoSolutionError); PyList_SET_ITEM(__pyx_t_1, 1, 
__pyx_n_s_NoSolutionError); __pyx_t_2 = __Pyx_Import(__pyx_n_s_od_mstar3_col_set_addition, __pyx_t_1, -1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_OutOfTimeError); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_OutOfTimeError, __pyx_t_1) < 0) __PYX_ERR(0, 7, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_NoSolutionError); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_NoSolutionError, __pyx_t_1) < 0) __PYX_ERR(0, 7, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "cython_od_mstar.pyx":16 * double inflation, int time_limit) except + * * def find_path(world, init_pos, goals, inflation, time_limit): # <<<<<<<<<<<<<< * """Finds a path invoking C++ implementation * */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_9cpp_mstar_1find_path, NULL, __pyx_n_s_cpp_mstar); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_find_path, __pyx_t_2) < 0) __PYX_ERR(0, 16, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "cython_od_mstar.pyx":1 * # distutils: language = c++ # <<<<<<<<<<<<<< * # distutils: sources = policy.cpp col_checker.cpp od_mstar.cpp grid_policy.cpp grid_planning.cpp * from libcpp cimport bool */ __pyx_t_2 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_2) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "vector.to_py":60 * * @cname("__pyx_convert_vector_to_py_std_3a__3a_vector_3c_std_3a__3a_pair_3c_int_2c_int_3e____3e___") * cdef object 
__pyx_convert_vector_to_py_std_3a__3a_vector_3c_std_3a__3a_pair_3c_int_2c_int_3e____3e___(vector[X]& v): # <<<<<<<<<<<<<< * return [v[i] for i in range(v.size())] * */ /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); if (__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init cpp_mstar", __pyx_clineno, __pyx_lineno, __pyx_filename); } Py_CLEAR(__pyx_m); } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init cpp_mstar"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if CYTHON_PEP489_MULTI_PHASE_INIT return (__pyx_m != NULL) ? 0 : -1; #elif PY_MAJOR_VERSION >= 3 return __pyx_m; #else return; #endif } /* --- Runtime support code --- */ /* Refnanny */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule(modname); if (!m) goto end; p = PyObject_GetAttrString(m, "RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* RaiseArgTupleInvalid */ static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? 
"" : "s", num_found); } /* RaiseDoubleKeywords */ static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } /* ParseKeywords */ static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 
0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } /* PyObjectGetAttrStr */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #endif /* Import */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_MAJOR_VERSION < 3 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if ((1) && (strchr(__Pyx_MODULE_NAME, 
'.'))) { module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_MAJOR_VERSION < 3 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, (PyObject *)NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_MAJOR_VERSION < 3 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } /* PyFunctionFastCall */ #if CYTHON_FAST_PYCALL static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, PyObject *globals) { PyFrameObject *f; PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject **fastlocals; Py_ssize_t i; PyObject *result; assert(globals != NULL); /* XXX Perhaps we should create a specialized PyFrame_New() that doesn't take locals, but does take builtins without sanity checking them. 
*/ assert(tstate != NULL); f = PyFrame_New(tstate, co, globals, NULL); if (f == NULL) { return NULL; } fastlocals = __Pyx_PyFrame_GetLocalsplus(f); for (i = 0; i < na; i++) { Py_INCREF(*args); fastlocals[i] = *args++; } result = PyEval_EvalFrameEx(f,0); ++tstate->recursion_depth; Py_DECREF(f); --tstate->recursion_depth; return result; } #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); PyObject *globals = PyFunction_GET_GLOBALS(func); PyObject *argdefs = PyFunction_GET_DEFAULTS(func); PyObject *closure; #if PY_MAJOR_VERSION >= 3 PyObject *kwdefs; #endif PyObject *kwtuple, **k; PyObject **d; Py_ssize_t nd; Py_ssize_t nk; PyObject *result; assert(kwargs == NULL || PyDict_Check(kwargs)); nk = kwargs ? PyDict_Size(kwargs) : 0; if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { return NULL; } if ( #if PY_MAJOR_VERSION >= 3 co->co_kwonlyargcount == 0 && #endif likely(kwargs == NULL || nk == 0) && co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { if (argdefs == NULL && co->co_argcount == nargs) { result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); goto done; } else if (nargs == 0 && argdefs != NULL && co->co_argcount == Py_SIZE(argdefs)) { /* function called with no arguments, but all parameters have a default value: use default values as arguments .*/ args = &PyTuple_GET_ITEM(argdefs, 0); result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); goto done; } } if (kwargs != NULL) { Py_ssize_t pos, i; kwtuple = PyTuple_New(2 * nk); if (kwtuple == NULL) { result = NULL; goto done; } k = &PyTuple_GET_ITEM(kwtuple, 0); pos = i = 0; while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { Py_INCREF(k[i]); Py_INCREF(k[i+1]); i += 2; } nk = i / 2; } else { kwtuple = NULL; k = NULL; } closure = PyFunction_GET_CLOSURE(func); #if PY_MAJOR_VERSION >= 3 kwdefs = 
PyFunction_GET_KW_DEFAULTS(func); #endif if (argdefs != NULL) { d = &PyTuple_GET_ITEM(argdefs, 0); nd = Py_SIZE(argdefs); } else { d = NULL; nd = 0; } #if PY_MAJOR_VERSION >= 3 result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, args, (int)nargs, k, (int)nk, d, (int)nd, kwdefs, closure); #else result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, args, (int)nargs, k, (int)nk, d, (int)nd, closure); #endif Py_XDECREF(kwtuple); done: Py_LeaveRecursiveCall(); return result; } #endif #endif /* PyCFunctionFastCall */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { PyCFunctionObject *func = (PyCFunctionObject*)func_obj; PyCFunction meth = PyCFunction_GET_FUNCTION(func); PyObject *self = PyCFunction_GET_SELF(func); int flags = PyCFunction_GET_FLAGS(func); assert(PyCFunction_Check(func)); assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))); assert(nargs >= 0); assert(nargs == 0 || args != NULL); /* _PyCFunction_FastCallDict() must not be called with an exception set, because it may clear it (directly or indirectly) and so the caller loses its exception */ assert(!PyErr_Occurred()); if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL); } else { return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs); } } #endif /* PyObjectCall */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = (*call)(func, arg, kw); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( 
PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyIntCompare */ static CYTHON_INLINE PyObject* __Pyx_PyInt_EqObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, CYTHON_UNUSED long inplace) { if (op1 == op2) { Py_RETURN_TRUE; } #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(op1))) { const long b = intval; long a = PyInt_AS_LONG(op1); if (a == b) Py_RETURN_TRUE; else Py_RETURN_FALSE; } #endif #if CYTHON_USE_PYLONG_INTERNALS if (likely(PyLong_CheckExact(op1))) { int unequal; unsigned long uintval; Py_ssize_t size = Py_SIZE(op1); const digit* digits = ((PyLongObject*)op1)->ob_digit; if (intval == 0) { if (size == 0) Py_RETURN_TRUE; else Py_RETURN_FALSE; } else if (intval < 0) { if (size >= 0) Py_RETURN_FALSE; intval = -intval; size = -size; } else { if (size <= 0) Py_RETURN_FALSE; } uintval = (unsigned long) intval; #if PyLong_SHIFT * 4 < SIZEOF_LONG*8 if (uintval >> (PyLong_SHIFT * 4)) { unequal = (size != 5) || (digits[0] != (uintval & (unsigned long) PyLong_MASK)) | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[2] != ((uintval >> (2 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[3] != ((uintval >> (3 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[4] != ((uintval >> (4 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)); } else #endif #if PyLong_SHIFT * 3 < SIZEOF_LONG*8 if (uintval >> (PyLong_SHIFT * 3)) { unequal = (size != 4) || (digits[0] != (uintval & (unsigned long) PyLong_MASK)) | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[2] != ((uintval >> (2 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[3] != ((uintval >> (3 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)); } else #endif #if PyLong_SHIFT * 2 < SIZEOF_LONG*8 if (uintval >> (PyLong_SHIFT * 2)) { unequal = (size != 3) || (digits[0] != (uintval & (unsigned long) PyLong_MASK)) | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & 
(unsigned long) PyLong_MASK)) | (digits[2] != ((uintval >> (2 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)); } else #endif #if PyLong_SHIFT * 1 < SIZEOF_LONG*8 if (uintval >> (PyLong_SHIFT * 1)) { unequal = (size != 2) || (digits[0] != (uintval & (unsigned long) PyLong_MASK)) | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)); } else #endif unequal = (size != 1) || (((unsigned long) digits[0]) != (uintval & (unsigned long) PyLong_MASK)); if (unequal == 0) Py_RETURN_TRUE; else Py_RETURN_FALSE; } #endif if (PyFloat_CheckExact(op1)) { const long b = intval; double a = PyFloat_AS_DOUBLE(op1); if ((double)a == (double)b) Py_RETURN_TRUE; else Py_RETURN_FALSE; } return ( PyObject_RichCompare(op1, op2, Py_EQ)); } /* GetTopmostException */ #if CYTHON_USE_EXC_INFO_STACK static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate) { _PyErr_StackItem *exc_info = tstate->exc_info; while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) && exc_info->previous_item != NULL) { exc_info = exc_info->previous_item; } return exc_info; } #endif /* SaveResetException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); *type = exc_info->exc_type; *value = exc_info->exc_value; *tb = exc_info->exc_traceback; #else *type = tstate->exc_type; *value = tstate->exc_value; *tb = tstate->exc_traceback; #endif Py_XINCREF(*type); Py_XINCREF(*value); Py_XINCREF(*tb); } static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = type; 
exc_info->exc_value = value; exc_info->exc_traceback = tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = type; tstate->exc_value = value; tstate->exc_traceback = tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } #endif /* PyErrExceptionMatches */ #if CYTHON_FAST_THREAD_STATE static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; icurexc_type; if (exc_type == err) return 1; if (unlikely(!exc_type)) return 0; if (unlikely(PyTuple_Check(err))) return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); } #endif /* GetException */ #if CYTHON_FAST_THREAD_STATE static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) #endif { PyObject *local_type, *local_value, *local_tb; #if CYTHON_FAST_THREAD_STATE PyObject *tmp_type, *tmp_value, *tmp_tb; local_type = tstate->curexc_type; local_value = tstate->curexc_value; local_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(&local_type, &local_value, &local_tb); #endif PyErr_NormalizeException(&local_type, &local_value, &local_tb); #if CYTHON_FAST_THREAD_STATE if (unlikely(tstate->curexc_type)) #else if (unlikely(PyErr_Occurred())) #endif goto bad; #if PY_MAJOR_VERSION >= 3 if (local_tb) { if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) goto bad; } #endif Py_XINCREF(local_tb); Py_XINCREF(local_type); Py_XINCREF(local_value); *type = local_type; *value = local_value; *tb = local_tb; #if CYTHON_FAST_THREAD_STATE #if CYTHON_USE_EXC_INFO_STACK { _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = 
exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = local_type; exc_info->exc_value = local_value; exc_info->exc_traceback = local_tb; } #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = local_type; tstate->exc_value = local_value; tstate->exc_traceback = local_tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_SetExcInfo(local_type, local_value, local_tb); #endif return 0; bad: *type = 0; *value = 0; *tb = 0; Py_XDECREF(local_type); Py_XDECREF(local_value); Py_XDECREF(local_tb); return -1; } /* PyObjectCallMethO */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { PyObject *self, *result; PyCFunction cfunc; cfunc = PyCFunction_GET_FUNCTION(func); self = PyCFunction_GET_SELF(func); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = cfunc(self, arg); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyObjectCallOneArg */ #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_New(1); if (unlikely(!args)) return NULL; Py_INCREF(arg); PyTuple_SET_ITEM(args, 0, arg); result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { #if CYTHON_FAST_PYCALL if (PyFunction_Check(func)) { return __Pyx_PyFunction_FastCall(func, &arg, 1); } #endif if (likely(PyCFunction_Check(func))) { if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { return __Pyx_PyObject_CallMethO(func, arg); #if CYTHON_FAST_PYCCALL } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) { return 
__Pyx_PyCFunction_FastCall(func, &arg, 1); #endif } } return __Pyx__PyObject_CallOneArg(func, arg); } #else static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_Pack(1, arg); if (unlikely(!args)) return NULL; result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } #endif /* BytesEquals */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else if (s1 == s2) { return (equals == Py_EQ); } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { const char *ps1, *ps2; Py_ssize_t length = PyBytes_GET_SIZE(s1); if (length != PyBytes_GET_SIZE(s2)) return (equals == Py_NE); ps1 = PyBytes_AS_STRING(s1); ps2 = PyBytes_AS_STRING(s2); if (ps1[0] != ps2[0]) { return (equals == Py_NE); } else if (length == 1) { return (equals == Py_EQ); } else { int result; #if CYTHON_USE_UNICODE_INTERNALS Py_hash_t hash1, hash2; hash1 = ((PyBytesObject*)s1)->ob_shash; hash2 = ((PyBytesObject*)s2)->ob_shash; if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { return (equals == Py_NE); } #endif result = memcmp(ps1, ps2, (size_t)length); return (equals == Py_EQ) ? 
(result == 0) : (result != 0); } } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { return (equals == Py_NE); } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { return (equals == Py_NE); } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } #endif } /* UnicodeEquals */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else #if PY_MAJOR_VERSION < 3 PyObject* owned_ref = NULL; #endif int s1_is_unicode, s2_is_unicode; if (s1 == s2) { goto return_eq; } s1_is_unicode = PyUnicode_CheckExact(s1); s2_is_unicode = PyUnicode_CheckExact(s2); #if PY_MAJOR_VERSION < 3 if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { owned_ref = PyUnicode_FromObject(s2); if (unlikely(!owned_ref)) return -1; s2 = owned_ref; s2_is_unicode = 1; } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { owned_ref = PyUnicode_FromObject(s1); if (unlikely(!owned_ref)) return -1; s1 = owned_ref; s1_is_unicode = 1; } else if (((!s2_is_unicode) & (!s1_is_unicode))) { return __Pyx_PyBytes_Equals(s1, s2, equals); } #endif if (s1_is_unicode & s2_is_unicode) { Py_ssize_t length; int kind; void *data1, *data2; if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) return -1; length = __Pyx_PyUnicode_GET_LENGTH(s1); if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { goto return_ne; } #if CYTHON_USE_UNICODE_INTERNALS { Py_hash_t hash1, hash2; #if CYTHON_PEP393_ENABLED hash1 = ((PyASCIIObject*)s1)->hash; hash2 = ((PyASCIIObject*)s2)->hash; #else hash1 = ((PyUnicodeObject*)s1)->hash; hash2 = ((PyUnicodeObject*)s2)->hash; #endif if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { goto return_ne; } } #endif kind = __Pyx_PyUnicode_KIND(s1); if (kind != __Pyx_PyUnicode_KIND(s2)) { goto 
return_ne; } data1 = __Pyx_PyUnicode_DATA(s1); data2 = __Pyx_PyUnicode_DATA(s2); if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { goto return_ne; } else if (length == 1) { goto return_eq; } else { int result = memcmp(data1, data2, (size_t)(length * kind)); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ) ? (result == 0) : (result != 0); } } else if ((s1 == Py_None) & s2_is_unicode) { goto return_ne; } else if ((s2 == Py_None) & s1_is_unicode) { goto return_ne; } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } return_eq: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ); return_ne: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_NE); #endif } /* GetBuiltinName */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } /* PyDictVersioning */ #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { PyObject *dict = Py_TYPE(obj)->tp_dict; return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; } static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { PyObject **dictptr = NULL; Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; if (offset) { #if CYTHON_COMPILING_IN_CPYTHON dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); #else dictptr = _PyObject_GetDictPtr(obj); #endif } return (dictptr && *dictptr) ? 
__PYX_GET_DICT_VERSION(*dictptr) : 0; } static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { PyObject *dict = Py_TYPE(obj)->tp_dict; if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) return 0; return obj_dict_version == __Pyx_get_object_dict_version(obj); } #endif /* GetModuleGlobalName */ #if CYTHON_USE_DICT_VERSIONS static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) #else static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) #endif { PyObject *result; #if !CYTHON_AVOID_BORROWED_REFS #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } else if (unlikely(PyErr_Occurred())) { return NULL; } #else result = PyDict_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } #endif #else result = PyObject_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } PyErr_Clear(); #endif return __Pyx_GetBuiltinName(name); } /* PyObjectCallNoArg */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) { #if CYTHON_FAST_PYCALL if (PyFunction_Check(func)) { return __Pyx_PyFunction_FastCall(func, NULL, 0); } #endif #ifdef __Pyx_CyFunction_USED if (likely(PyCFunction_Check(func) || __Pyx_CyFunction_Check(func))) #else if (likely(PyCFunction_Check(func))) #endif { if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) { return __Pyx_PyObject_CallMethO(func, NULL); } } return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL); } 
#endif /* PyErrFetchRestore */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; } #endif /* RaiseException */ #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { __Pyx_PyThreadState_declare Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_PyThreadState_assign __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject 
*value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { int is_subclass = PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause) { PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { #if 
CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif /* RaiseTooManyValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } /* RaiseNeedMoreValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", index, (index == 1) ? "" : "s"); } /* IterFinish */ static CYTHON_INLINE int __Pyx_IterFinish(void) { #if CYTHON_FAST_THREAD_STATE PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject* exc_type = tstate->curexc_type; if (unlikely(exc_type)) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) { PyObject *exc_value, *exc_tb; exc_value = tstate->curexc_value; exc_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; Py_DECREF(exc_type); Py_XDECREF(exc_value); Py_XDECREF(exc_tb); return 0; } else { return -1; } } return 0; #else if (unlikely(PyErr_Occurred())) { if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) { PyErr_Clear(); return 0; } else { return -1; } } return 0; #endif } /* UnpackItemEndCheck */ static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) { if (unlikely(retval)) { Py_DECREF(retval); __Pyx_RaiseTooManyValuesError(expected); return -1; } else { return __Pyx_IterFinish(); } return 0; } /* ImportFrom */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { 
PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Format(PyExc_ImportError, #if PY_MAJOR_VERSION < 3 "cannot import name %.230s", PyString_AS_STRING(name)); #else "cannot import name %S", name); #endif } return value; } /* CLineInTraceback */ #ifndef CYTHON_CLINE_IN_TRACEBACK static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) { PyObject *use_cline; PyObject *ptype, *pvalue, *ptraceback; #if CYTHON_COMPILING_IN_CPYTHON PyObject **cython_runtime_dict; #endif if (unlikely(!__pyx_cython_runtime)) { return c_line; } __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); #if CYTHON_COMPILING_IN_CPYTHON cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); if (likely(cython_runtime_dict)) { __PYX_PY_DICT_LOOKUP_IF_MODIFIED( use_cline, *cython_runtime_dict, __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) } else #endif { PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); if (use_cline_obj) { use_cline = PyObject_Not(use_cline_obj) ? 
Py_False : Py_True; Py_DECREF(use_cline_obj); } else { PyErr_Clear(); use_cline = NULL; } } if (!use_cline) { c_line = 0; PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); } else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { c_line = 0; } __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); return c_line; } #endif /* CodeObjectCache */ static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = start + (end - start) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = 
__pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } /* AddTraceback */ #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, 0, 0, 0, 0, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ 
py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; PyThreadState *tstate = __Pyx_PyThreadState_Current; if (c_line) { c_line = __Pyx_CLineForTraceback(tstate, c_line); } py_code = __pyx_find_code_object(c_line ? -c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? -c_line : py_line, py_code); } py_frame = PyFrame_New( tstate, /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ __pyx_d, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; __Pyx_PyFrame_SetLineNumber(py_frame, py_line); PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } /* CIntFromPyVerify */ #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto raise_overflow;\ }\ }\ return (target_type) value;\ } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; const int 
is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } /* CIntFromPy */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, 
(((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) case -2: if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | 
(int)digits[0]))); } } break; case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -3: if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -4: if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned 
long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; } #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); return (int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } /* CIntFromPy */ static CYTHON_INLINE size_t __Pyx_PyInt_As_size_t(PyObject *x) { const size_t neg_one = (size_t) ((size_t) 0 - (size_t) 1), const_zero = (size_t) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(size_t) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(size_t, long, 
PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (size_t) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (size_t) 0; case 1: __PYX_VERIFY_RETURN_INT(size_t, digit, digits[0]) case 2: if (8 * sizeof(size_t) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(size_t) >= 2 * PyLong_SHIFT) { return (size_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } } break; case 3: if (8 * sizeof(size_t) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(size_t) >= 3 * PyLong_SHIFT) { return (size_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } } break; case 4: if (8 * sizeof(size_t) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(size_t) >= 4 * PyLong_SHIFT) { return (size_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (size_t) -1; if 
(unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(size_t) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(size_t, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(size_t) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(size_t, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (size_t) 0; case -1: __PYX_VERIFY_RETURN_INT(size_t, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(size_t, digit, +digits[0]) case -2: if (8 * sizeof(size_t) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(size_t, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(size_t) - 1 > 2 * PyLong_SHIFT) { return (size_t) (((size_t)-1)*(((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]))); } } break; case 2: if (8 * sizeof(size_t) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(size_t) - 1 > 2 * PyLong_SHIFT) { return (size_t) ((((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]))); } } break; case -3: if (8 * sizeof(size_t) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(size_t, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(size_t) - 1 > 3 * PyLong_SHIFT) { return (size_t) (((size_t)-1)*(((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]))); } } break; case 3: if (8 * sizeof(size_t) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) 
{ __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(size_t) - 1 > 3 * PyLong_SHIFT) { return (size_t) ((((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]))); } } break; case -4: if (8 * sizeof(size_t) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(size_t, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(size_t) - 1 > 4 * PyLong_SHIFT) { return (size_t) (((size_t)-1)*(((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]))); } } break; case 4: if (8 * sizeof(size_t) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(size_t) - 1 > 4 * PyLong_SHIFT) { return (size_t) ((((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]))); } } break; } #endif if (sizeof(size_t) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(size_t, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(size_t) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(size_t, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else size_t val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if 
PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (size_t) -1; } } else { size_t val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (size_t) -1; val = __Pyx_PyInt_As_size_t(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to size_t"); return (size_t) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to size_t"); return (size_t) -1; } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } /* CIntFromPy */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if 
(likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if 
(unlikely(result < 0)) return (long) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) case -2: if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -3: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { 
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -4: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; } #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject 
*tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); return (long) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } /* FastTypeChecks */ #if CYTHON_COMPILING_IN_CPYTHON static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { while (a) { a = a->tp_base; if (a == b) return 1; } return b == &PyBaseObject_Type; } static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { PyObject *mro; if (a == b) return 1; mro = a->tp_mro; if (likely(mro)) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(mro); for (i = 0; i < n; i++) { if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) return 1; } return 0; } return __Pyx_InBases(a, b); } #if PY_MAJOR_VERSION == 2 static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { PyObject *exception, *value, *tb; int res; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&exception, &value, &tb); res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } if (!res) { res = PyObject_IsSubclass(err, exc_type2); if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } } __Pyx_ErrRestore(exception, value, tb); return res; } #else static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { int res = exc_type1 ? 
__Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; if (!res) { res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); } return res; } #endif static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; assert(PyExceptionClass_Check(exc_type)); n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; ip) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; if (PyObject_Hash(*t->p) == -1) return -1; ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); } static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT #if !CYTHON_PEP393_ENABLED static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif *length = PyBytes_GET_SIZE(defenc); return defenc_c; } #else static CYTHON_INLINE const char* 
__Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (likely(PyUnicode_IS_ASCII(o))) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else return PyUnicode_AsUTF8AndSize(o, length); #endif } #endif #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { return __Pyx_PyUnicode_AsStringAndSize(o, length); } else #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { int retval; if (unlikely(!x)) return -1; retval = __Pyx_PyObject_IsTrue(x); Py_DECREF(x); return retval; } static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { #if PY_MAJOR_VERSION >= 3 if (PyLong_Check(result)) { if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, "__int__ returned non-int (type %.200s). 
" "The ability to return an instance of a strict subclass of int " "is deprecated, and may be removed in a future version of Python.", Py_TYPE(result)->tp_name)) { Py_DECREF(result); return NULL; } return result; } #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", type_name, type_name, Py_TYPE(result)->tp_name); Py_DECREF(result); return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { #if CYTHON_USE_TYPE_SLOTS PyNumberMethods *m; #endif const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x) || PyLong_Check(x))) #else if (likely(PyLong_Check(x))) #endif return __Pyx_NewRef(x); #if CYTHON_USE_TYPE_SLOTS m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = m->nb_int(x); } else if (m && m->nb_long) { name = "long"; res = m->nb_long(x); } #else if (likely(m && m->nb_int)) { name = "int"; res = m->nb_int(x); } #endif #else if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { res = PyNumber_Int(x); } #endif if (likely(res)) { #if PY_MAJOR_VERSION < 3 if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { #else if (unlikely(!PyLong_CheckExact(res))) { #endif return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) { if (sizeof(Py_ssize_t) >= sizeof(long)) return PyInt_AS_LONG(b); else return PyInt_AsSsize_t(b); } #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)b)->ob_digit; const Py_ssize_t size = Py_SIZE(b); if (likely(__Pyx_sst_abs(size) <= 1)) { ival = likely(size) ? 
digits[0] : 0; if (size == -1) ival = -ival; return ival; } else { switch (size) { case 2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; } } #endif return PyLong_AsSsize_t(b); } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { return b ? 
__Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { return PyInt_FromSize_t(ival); } #endif /* Py_PYTHON_H */ ================================================ FILE: od_mstar3/cython_od_mstar.pyx ================================================ # distutils: language = c++ # distutils: sources = policy.cpp col_checker.cpp od_mstar.cpp grid_policy.cpp grid_planning.cpp from libcpp cimport bool from libcpp.vector cimport vector from libcpp.pair cimport pair from od_mstar3.col_set_addition import OutOfTimeError, NoSolutionError cdef extern from "grid_planning.hpp" namespace "mstar": vector[vector[pair[int, int]]] find_grid_path( const vector[vector[bool]] &obstacles, const vector[pair[int, int]] &init_pos, const vector[pair[int, int]] &goals, double inflation, int time_limit) except + def find_path(world, init_pos, goals, inflation, time_limit): """Finds a path invoking C++ implementation Uses recursive ODrM* to explore a 4 connected grid world - matrix specifying obstacles, 1 for obstacle, 0 for free init_pos - [[x, y], ...] specifying start position for each robot goals - [[x, y], ...] specifying goal position for each robot inflation - inflation factor for heuristic time_limit - time until failure in seconds returns: [[[x1, y1], ...], [[x2, y2], ...], ...] path in the joint configuration space raises: NoSolutionError if problem has no solution OutOfTimeError if the planner ran out of time """ import resource resource.setrlimit(resource.RLIMIT_AS, (2**33,2**33)) # 8Gb # convert to boolean. 
For some reason coercion doesn't seem to # work properly cdef vector[vector[bool]] obs cdef vector[bool] temp for row in world: temp = vector[bool]() for i in row: temp.push_back(i == 1) obs.push_back(temp) try: return find_grid_path(obs, init_pos, goals, inflation, time_limit) except Exception as e: if str(e) == "Out of Time": raise OutOfTimeError() elif str(e) == "No Solution": raise NoSolutionError() else: raise e ================================================ FILE: od_mstar3/grid_planning.cpp ================================================ #include #include #include #include "grid_planning.hpp" #include "grid_policy.hpp" #include "od_mstar.hpp" #include "mstar_type_defs.hpp" using namespace mstar; /** * Converts from (row, column) coordinates to vertex index */ OdCoord to_internal(std::vector> coord, int cols){ std::vector out; for (auto &c: coord){ out.push_back(c.first * cols + c.second); } return OdCoord(out, {}); }; /** * Converts from vertex index to (row, column) format */ std::vector> from_internal(OdCoord coord, int cols){ std::vector> out; for (auto &c: coord.coord){ out.push_back({c / cols, c % cols}); } return out; }; std::vector>> mstar::find_grid_path( const std::vector> &obstacles, const std::vector> &init_pos, const std::vector> &goals, double inflation, int time_limit){ // compute time limit first, as the policies fully compute // Need to convert time limit to std::chrono format time_point t = std::chrono::system_clock::now(); t += Clock::duration(std::chrono::seconds(time_limit)); int cols = (int) obstacles[0].size(); OdCoord _init = to_internal(init_pos, cols); OdCoord _goal = to_internal(goals, cols); std::vector> policies = {}; for (const auto &goal: goals){ policies.push_back(std::shared_ptr( grid_policy_ptr(obstacles, goal))); } OdMstar planner(policies, _goal, inflation, t, std::shared_ptr(new SimpleGraphColCheck())); OdPath path = planner.find_path(_init); std::vector>> out; for (auto &coord: path){ out.push_back(from_internal(coord, 
cols)); } return out; } ================================================ FILE: od_mstar3/grid_planning.hpp ================================================ #ifndef MSTAR_GRID_PLANNING_H #define MSTAR_GRID_PLANNING_H #include #include /********************************************************************* * Provides convienence functions for planning on 4-connected graphs ********************************************************************/ namespace mstar{ /** * Helper function for finding paths in 4 connected paths * * The world is specified as a matrix where true indicates the presence * of obstacles and false indicates a clear space. Coordinates for * individual robots are indicated as (row, column) * * @param obstacles matrix indicating obstacle positions. True is obstacle * @param init_pos list of (row, column) pairs definining the initial * position of the robots * @param goals list of (row, column) pairs defining the goal configuration * of the robots * @param inflation inflation factor used to weight the heuristic * @param time_limit seconds until the code declares failure * * @return Path in the joint configuration space. 
Each configuration is * a vector of (row, col) pairs specifying the position of * individual robots */ std::vector > > find_grid_path( const std::vector > &obstacles, const std::vector > &init_pos, const std::vector > &goals, double inflation, int time_limit); } #endif ================================================ FILE: od_mstar3/grid_policy.cpp ================================================ #include "grid_policy.hpp" using namespace mstar; Graph get_graph(const std::vector> &world_map, const std::pair &goal){ int rows = (int) world_map.size(); int columns = (int) world_map[0].size(); typedef std::pair E; std::vector edges; std::vector weights; std::vector> offsets = {{-1, 0}, {0, 1}, {1, 0}, {0, -1}, {0, 0}}; for (int row = 0; row < rows; ++row){ for (int col = 0; col < columns; ++col){ if (world_map[row][col]){ continue; } for (auto &off: offsets){ int r = row + off.first; int c = col + off.second; if( r >= 0 && r < rows && c >= 0 && c < columns && ! world_map[r][c]){ // edge from (row, col) to (r, c) // should be a more direct way, but boost is hating me edges.push_back({row * columns + col, r * columns + c}); if (row == r && col == c && r == goal.first && c == goal.second){ weights.push_back(0.); }else{ weights.push_back(1.); } } } } } return Graph(edges.begin(), edges.end(), weights.begin(), rows * columns); } /** * Generates a policy for a 4 connected grid * * The internal coordinates are of the form row * num_rows + col * Allows for weighting at the goal for free * * @param world_map matrix of values describing grid true for obstacle, * false for clear * @param goal (row, column) of goal * * @return Policy object describing problem */ Policy mstar::grid_policy(const std::vector> &world_map, const std::pair &goal){ int columns = (int) world_map[0].size(); return Policy(get_graph(world_map, goal), goal.first * columns + goal.second); } Policy* mstar::grid_policy_ptr(const std::vector> &world_map, const std::pair &goal){ int columns = (int) 
world_map[0].size(); return new Policy(get_graph(world_map, goal), goal.first * columns + goal.second); } ================================================ FILE: od_mstar3/grid_policy.hpp ================================================ #ifndef MSTAR_GRID_POLICY_H #define MSTAR_GRID_POLICY_H /************************************************************************** * Generates policy for grid maps **************************************************************************/ #include #include #include "mstar_type_defs.hpp" #include "policy.hpp" #include #include namespace mstar{ /** * Generates a policy for a 4 connected grid * * The internal coordinates are of the form row * num_rows + col * Allows for weighting at the goal for free * * @param world_map matrix of values describing grid true for obstacle, * false for clear * @param goal (row, column) of goal * * @return Policy object describing problem */ Policy grid_policy(const std::vector> &world_map, const std::pair &goal); Policy* grid_policy_ptr(const std::vector> &world_map, const std::pair &goal); } #endif ================================================ FILE: od_mstar3/interface.py ================================================ """This module defines interfaces for the low-level graphs and policies used in Mstar. In general terms, these classes represent: 1. Graphs representing the configuration space. These graphs are structured so that each node in the graph represents a configuration, and each edge represents a permissible transition between two different configurations. *All of these graphs subclass the Graph_Interface class 2. Policies, which define paths in a configuration space from an initial configuration to a goal configuration. Policies are comprised of nodes, each of which represents a configuration in the configuration space. Each node in a policy has a pointer to its optimal neighbor, i.e., the next node in the optimal path to the goal node. 
Policy classes compute optimal paths by using some search algorithm to search the graphs generated in the classes described above. *All of these graphs subclass the Policy_Interface class 3. Configuration graph edge checking, which determines whether moving between two configurations is permissible. For example, configuration graph edge checking should not allow a robot to move out of bounds of the workspace. 4. Planner edge checking, which determines whether moving between two states of robot positions will result in any collisions. For example, planner edge checking should check to see if two robots pass through each other as they move between positions. """ class Graph_Interface(object): """Interface for configuration space generators This graph interface enumerates the methods that any configuration space generator should implement. These graphs are used by policy graphs such as A*. """ def get_edge_cost(self, coord1, coord2): """Returns edge_cost of going from coord1 to coord2.""" raise NotImplementedError def get_neighbors(self, coord): """Returns the collision free neighbors of the specified coord. Return value is a list of tuples each of which are a coordinate """ raise NotImplementedError # This is a function to return the in neighbors of a coordinate. # Designed by default to handle un-directed graphs get_in_neighbors = get_neighbors class Policy_Interface(object): """Interface showing required implemented functions for all policies This interface enumerates the functions that must be exposed by policies for M* to function correctly. A policy object with this interface provides a route for a single robot. Underneath the policy interface is a graph object which describes the configuration space through which robots can move. 
The underlying graph object does all of the work of calculating the configuration space based on the actual environment in which the robot is moving **All config inputs must be hashable** """ def get_cost(self, config): """Returns the cost of moving from given position to goal""" raise NotImplementedError def get_edge_cost(self, config1, config2): """Returns the cost of traversing an edge in the underlying graph """ raise NotImplementedError def get_step(self, config): """Returns the configurations of the optimal neighbor of config""" raise NotImplementedError def get_neighbors(self, config): """Returns neighboring configurations of config This function returns the configurations which are next to config Return list of tuples, each of which is a coordinate """ raise NotImplementedError def get_graph_size(self, correct_for_size=True): """Returns number of nodes in graph""" raise NotImplementedError def get_limited_offset_neighbors(self, config, max_offset, min_offset=0): """Returns set of neighbors between the offset arguments""" raise NotImplementedError def get_offset_neighbors(self, config, offset): """Returns neighbors of coord with offset specified by argument""" raise NotImplementedError def get_offsets(self, config): """Return the offsets of the neighbors""" raise NotImplementedError class Config_Edge_Checker(object): """Checks robot collisions with objects and edges of workspace""" def col_check(self, state, recursive): """Checks for collisions at a single state state - list of coordinates of robots recursive - generate collisions sets for rM* Returns: M* collision set in type set if recursive false rM* collision set in type set if recursive true """ raise NotImplementedError class Planner_Edge_Checker(object): """Checks for robot collisions on an edge in a planner's graph Currently, no methods have to be implemented because the collision methods change based on the graph. 
""" def pass_through(self, state1, state2, recursive=False): """Detects pass through collisions state1 - list of robot coordinates describing initial state state2 - list of robot coordinates describing final state, Returns: M* collision set in type set if recursive false rM* collision set in type set if recursive true """ raise NotImplementedError def col_check(self, state, recursive): """Checks for collisions at a single state state - list of coordinates of robots recursive - generate collisions sets for rM* Returns: M* collision set in type set if recursive false rM* collision set in type set if recursive true """ raise NotImplementedError def cross_over(self, state1, state2, recursive=False): """Detects cross over and pass through collisions state1 - list of robot coordinates describing initial state state2 - list of robot coordinates describing final state Returns: M* collision set in type set if recursive false rM* collision set in type set if recursive true """ raise NotImplementedError def simple_pass_through(self, state1, state2): """Check for pass through collisions state1 - list of robot coordinates describing initial state state2 - list of robot coordinates describing final state Returns: True if pass through collision False otherwise """ raise NotImplementedError def simple_col_check(self, state): """Checks for robot-robot collisions at state, state - list of robot coordinates returns: True if collision False otherwise """ raise NotImplementedError def simple_cross_over(self, state1, state2): """Check for cross over collisions in 8-connected worlds state1 - list of robot coordinates describing initial state state2 - list of robot coordinates describing final state returns: True if collision exists False otherwise """ raise NotImplementedError def simple_incremental_cross_over(self, state1, state2): """Check for cross over collisions in 8-connected worlds. 
Assumes that collision checking has been performed for everything but the last robot in the coordinates. To be used to save a bit of time for partial expansion approaches state1 - list of robot coordinates describing initial state state2 - list of robot coordinates describing final state returns: True if collision exists False otherwise """ raise NotImplementedError def simple_incremental_col_check(self, state1): """Checks for robot-robot collisions at c1, Assumes that collision checking has been performed for everything but the last robot in the coordinates. To be used to save a bit of time for partial expansion approaches state1 - list of robot coordinates returns: True if collision exists False otherwise """ raise NotImplementedError def single_bot_outpath_check(self, cur_coord, prev_coord, cur_t, paths): """Tests for collisions from prev_coord to cur_coord Checks for cross over collisions and collisions at the same location when moving from cur_coord to prev_coord while robots are moving in paths cur_coord - position of a single robot Returns: True if collision exists False otherwise """ raise NotImplementedError def simple_prio_col_check(self, coord, t, paths, pcoord=None, conn_8=False): """Returns true, if collision is detected, false otherwise at the moment only used to check the obstacle collisions, but didn't want to reject the other code already coord - coord of potential new neighbor t - current time step paths - previously found paths pcoord - previous coordinate of the path Returns: True if collision exists False otherwise """ raise NotImplementedError def incremental_col_check(self, state, recursive): """Checks for robot-robot collisions in state state - list of coordinates of robots recursive - generate collisions sets for rM* Only checks whether the last robot is involved in a collision, for use with incremental methods Returns: M* collision set in type set if recursive false rM* collision set in type set if recursive true """ raise 
NotImplementedError def incremental_cross_over(self, state1, state2, recursive=False): """Detects cross over collisions as well as pass through collisions. Only checks if the last robot is involved in a collision, for use with partial expansion approaches. state1 - list of robot coordinates describing initial state state2 - list of robot coordinates describing final state, Returns: M* collision set in type set if recursive false rM* collision set in type set if recursive true """ raise NotImplementedError def single_bot_cross_over(self, coord1, pcoord1, coord2, pcoord2): """Checks for cross-over and collisions between robots 1 and 2 Robots are moving from pcoord to coord pcoord1 - first position of first robot coord1 - second position of first robot pcoord2 - first position of second robot coord2 - second position of second robot Returns: True if collision False otherwise """ raise NotImplementedError def prio_col_check(self, coord, pcoord, t, paths=None, conn_8=False, recursive=False): """Collision checking with paths passed as constraints coord - current node pcoord - previous node t - timestep paths - paths that need to be avoided Returns: (collision sets are of type set) M* collision set if collision exists and recursive is false rM* collision set if collision exists and recursive is true None if no collision exists """ raise NotImplementedError ================================================ FILE: od_mstar3/mstar_type_defs.hpp ================================================ #ifndef MSTAR_TYPE_DEFS #define MSTAR_TYPE_DEFS /************************************************************************** * Provides type defs that are used in multiple files *************************************************************************/ #include #include #include #include namespace mstar{ /** * Defines the graph type for individual robots. 
* * Assumes robot positions are indicated by integers, costs by doubles, * and assumes that the edge_weight property is filled */ typedef boost::adjacency_list< boost::vecS, boost::vecS, boost::bidirectionalS, boost::no_property, boost::property> Graph; // type that defines the position of the robot typedef int RobCoord; // represents the coordinate of an OD node, also used to index graphs struct OdCoord{ std::vector coord, move_tuple; OdCoord(std::vector in_coord, std::vector in_move){ coord = in_coord; move_tuple = in_move; } OdCoord(): coord(), move_tuple(){} bool operator==(const OdCoord &other) const{ return (coord == other.coord) && (move_tuple == other.move_tuple); } bool is_standard() const{ return move_tuple.size() == 0; } }; // Holds a path in the joint configuration space typedef std::vector OdPath; // defines a single set of mutually colliding robots. // Must be sorted in order of increasing value for logic to hold typedef std::set ColSetElement; // Defines a full collision set typedef std::vector ColSet; // defines times for checking purposes typedef std::chrono::system_clock Clock; typedef Clock::time_point time_point; } #endif ================================================ FILE: od_mstar3/mstar_utils.hpp ================================================ #ifndef MSTAR_UTILS_H #define MSTAR_UTILS_H /** * Defines convinence functions for testing or other purposes not directly * related to the actual planning */ #include #include "mstar_type_defs.hpp" namespace mstar{ void print_od_path(const OdPath &path){ for (const OdCoord &pos: path){ std::cout << "{"; for (const RobCoord &i: pos.coord){ std::cout << i << " "; } std::cout << "}" << std::endl; } }; void print_path(const std::vector>> &path){ for (const auto &coord: path){ std::cout << "{"; for (const auto &c: coord){ std::cout << "(" << c.first << ", " << c.second << ") "; } std::cout << "}" << std::endl; } }; }; #endif ================================================ FILE: od_mstar3/od_mstar.cpp 
================================================

// NOTE(review): the two system header names below were stripped by text
// extraction; restore from upstream before compiling.  Template arguments
// throughout this file (std::vector, std::shared_ptr, std::unordered_map)
// were stripped as well.
#include
#include
#include "od_mstar.hpp"

using namespace mstar;

// Top-level planner constructor: plans for all robots in the system.
OdMstar::OdMstar(std::vector> policies, OdCoord goals,
                 double inflation, time_point end_time,
                 std::shared_ptr col_checker){
  // subplanners_ is heap-allocated and shared with all child subplanners;
  // only the top-level planner deletes it (see destructor)
  subplanners_ = new std::unordered_map>();
  policies_ = policies;
  // top-level planner, so construct a set of all robot ids
  for (int i = 0; i < (int) goals.coord.size(); ++i){
    ids_.push_back(i);
  }
  goals_ = goals;
  end_time_ = end_time;
  inflation_ = inflation;
  planning_iter_ = 0;
  num_bots_ = (int) ids_.size();
  col_checker_ = col_checker;
  top_level_ = true;
}

// Subplanner constructor: plans for the subset of parent's robots named in
// `robots` (indices are local to parent), sharing parent's policies,
// subplanner cache, collision checker, and deadline.
OdMstar::OdMstar(const ColSetElement &robots, OdMstar &parent){
  subplanners_ = parent.subplanners_;
  policies_ = parent.policies_;
  for (int i: robots){
    ids_.push_back(parent.ids_[i]);
    goals_.coord.push_back(parent.goals_.coord[i]);
  }
  end_time_ = parent.end_time_;
  inflation_ = parent.inflation_;
  planning_iter_ = 0;
  num_bots_ = (int) ids_.size();
  col_checker_ = parent.col_checker_;
  top_level_ = false;
}

OdMstar::~OdMstar(){
  // Only the top-level planner owns the shared subplanner map
  if (top_level_){
    delete subplanners_;
  }
}

// A* search over the joint configuration graph from init_pos to goals_.
// Terminates early if a vertex on a previously cached path is reached.
OdPath OdMstar::find_path(OdCoord init_pos){
  reset();
  // Configure the initial vertex
  // identified by setting the back_ptr to itself
  OdVertex *first = get_vertex(init_pos);
  first->reset(planning_iter_);
  first->back_ptr = first;
  first->cost = 0;
  first->open = true;
  OpenList open_list;
  open_list.push(first);
  while (open_list.size() > 0){
    if (std::chrono::system_clock::now() > end_time_){
      throw OutOfTimeError();
    }
    OdVertex *vert = open_list.top();
    open_list.pop();
    vert->open = false;
    if (vert->closed){
      continue;
    }
    // check if this is the goal vertex
    if (vert->coord == goals_){
      vert->forwards_ptr = vert;
    }
    if (vert->forwards_ptr != nullptr){
      // Either the goal or on a previous found path to the goal
      return trace_path(vert);
    }
    expand(vert, open_list);
  }
  throw NoSolutionError();
}

// Bumping the iteration counter lazily invalidates per-vertex search state
// (vertices reset themselves on first touch via OdVertex::reset).
void OdMstar::reset(){
  planning_iter_++;
}

double OdMstar::heuristic(const OdCoord &coord){
  // Heuristic is computed from the assigned move for elements of the
  // move tuple, and from the base coordinate for all others
  double h = 0;
  uint i = 0;
  while (i < coord.move_tuple.size()){
    h += policies_[ids_[i]]->get_cost(coord.move_tuple[i]);
    ++i;
  }
  while (i < coord.coord.size()){
    h += policies_[ids_[i]]->get_cost(coord.coord[i]);
    ++i;
  }
  return h * inflation_;
}

OdVertex* OdMstar::get_vertex(const OdCoord &coord){
  // returns a pair with the first element an interator to a
  // pair and the second to a bool which is true if there was not a
  // preexisting value
  auto p = graph_.emplace(coord, coord);
  p.first->second.reset(planning_iter_);
  if (p.second){
    // new vertex, so need to set heuristic
    p.first->second.h = heuristic(coord);
  }
  return &p.first->second;
}

// Free helper: follow forwards_ptr links from vert until the next standard
// (fully-assigned) coordinate on the cached path is found.
OdCoord get_vertex_step(OdVertex * vert){
  assert(vert != nullptr);
  while (1){
    if (vert->forwards_ptr->coord.is_standard()){
      return vert->forwards_ptr->coord;
    }
    vert = vert->forwards_ptr;
    assert(vert != nullptr);
  }
}

OdCoord OdMstar::get_step(const OdCoord &init_pos){
  OdVertex* vert = OdMstar::get_vertex(init_pos);
  if (vert->forwards_ptr != nullptr){
    return get_vertex_step(vert);
  }
  // No cached path through init_pos: plan, which fills in forwards_ptr.
  // (vert remains valid: unordered_map rehash does not invalidate element
  // pointers.)
  find_path(init_pos);
  return get_vertex_step(vert);
}

void OdMstar::expand(OdVertex *vertex, OpenList &open_list){
  vertex->closed = true;
  ColSet gen_set = col_set_to_expand(vertex->col_set, vertex->gen_set);
  if (gen_set.size() == 1 && (int) gen_set[0].size() == num_bots_){
    // the generating collision set contains all robots, so no caching
    // would be possible.  Therefore, don't use
    gen_set = vertex->col_set;
  }
  std::vector neighbors = get_neighbors(vertex->coord, gen_set);
  // accumulates the collision sets that occur while trying to move to
  // any of the neighbors
  ColSet col_set;
  for (OdCoord &new_coord: neighbors){
    ColSet new_col = col_checker_->check_edge(vertex->coord, new_coord,
                                              ids_);
    if (!new_col.empty()){
      // State not accessible due to collisions
      add_col_set_in_place(new_col, col_set);
      continue;
    }
    OdVertex *new_vert = get_vertex(new_coord);
    new_vert->back_prop_set.insert(vertex);
    // Always need to at the collision set of any vertex we can reach
    // to its successors, as otherwise we would need to wait for another
    // robot to collide downstream before triggering back propagation
    add_col_set_in_place(new_vert->col_set, col_set);
    if (new_vert->closed){
      continue;
    }
    double new_cost = vertex->cost + edge_cost(vertex->coord, new_coord);
    if (new_cost >= new_vert->cost){
      continue;
    }
    new_vert->cost = new_cost;
    new_vert->back_ptr = vertex;
    new_vert->open = true;
    new_vert->gen_set = gen_set;
    open_list.push(new_vert);
    // Add an intermediate vertex's parent's col_set to its col_set, so
    // moves for later robots can be explored.  Not necessary, but should
    // reduce thrashing
    if (!new_vert->coord.is_standard()){
      add_col_set_in_place(vertex->col_set, new_vert->col_set);
    }
  }
  back_prop_col_set(vertex, col_set, open_list);
}

std::vector OdMstar::get_neighbors(const OdCoord &coord,
                                   const ColSet &col_set){
  // If the collision set contains all robots, invoke the non-recursive
  // base case
  if (col_set.size() == 1 && (int) col_set[0].size() == num_bots_){
    return get_all_neighbors(coord);
  }
  assert(coord.is_standard());
  // Generate the step along the joint policy
  std::vector policy_step;
  for (int i = 0; i < num_bots_; i++){
    policy_step.push_back(policies_[ids_[i]]->get_step(coord.coord[i]));
  }
  // Iterate over colliding sets of robots, and integrate the results
  // of the sub planning for each set
  for (const ColSetElement &elem: col_set){
    // The collision set contains the local ids (relative to the robots in
    // this subplanner) of the robots in collision
    // To properly index child subplanners, need to convert to global robot
    // ids, so that the subplanners will be properly globally accessible
    ColSetElement global_col;
    for (auto &local_id: elem){
      global_col.insert(ids_[local_id]);
    }
    // Get, and if necessary construct, the appropriate subplanner.
    // returns a pair where bool is true if a new subplanner
    // was generated, and p is an iterator to a pair
    if (subplanners_->find(global_col) == subplanners_->end()){
      subplanners_->insert(
        {global_col, std::shared_ptr(new OdMstar(elem, *this))});
    }
    OdMstar *planner = subplanners_->at(global_col).get();
    // create the query point
    std::vector new_base;
    for (const int &i: elem){
      new_base.push_back(coord.coord[i]);
    }
    OdCoord step;
    try{
      step = planner->get_step(OdCoord(new_base, {}));
    } catch(NoSolutionError &e){
      // no solution for that subset of robots, so return no neighbors
      // only likely to be relevant on directed graphs
      return {};
    }
    int elem_dex = 0;
    // now need to copy into the relevant positions in policy_step
    for (auto i: elem){
      policy_step[i] = step.coord[elem_dex];
      ++elem_dex;
      // could play with post appending, but don't want to
    }
  }
  return {OdCoord({policy_step}, {})};
}

std::vector OdMstar::get_all_neighbors(const OdCoord &coord){
  // get the coordinate of the robot to assign a new move
  uint move_index = coord.move_tuple.size();
  std::vector> new_moves;
  for (RobCoord &move: policies_[ids_[move_index]]->get_out_neighbors(
         coord.coord[move_index])){
    std::vector new_move(coord.move_tuple);
    new_move.push_back(move);
    new_moves.push_back(new_move);
  }
  std::vector ret;
  if (move_index + 1 < coord.coord.size()){
    // generating intermediate vertices
    for (auto &move_tuple: new_moves){
      ret.push_back(OdCoord(coord.coord, move_tuple));
    }
  } else {
    // generating standard vertices
    for (auto &move_tuple: new_moves){
      ret.push_back(OdCoord(move_tuple, {}));
    }
  }
  return ret;
}

double OdMstar::edge_cost(const OdCoord &source, const OdCoord &target){
  if (source.is_standard() && target.is_standard()){
    // transition between standard vertex, so all robots are assigned moves
    // and incur costs
    double cost = 0;
    for (int i = 0; i < num_bots_; ++i){
      cost += policies_[ids_[i]]->get_edge_cost(source.coord[i],
                                                target.coord[i]);
    }
    return cost;
  } else {
    // transition from intermediate vertex, so only one robot is assigned
    // a move and incurs cost
    uint move_index = source.move_tuple.size();
    if (target.is_standard()){
      return policies_[ids_[move_index]]->get_edge_cost(
        source.coord[move_index], target.coord[move_index]);
    } else{
      return policies_[ids_[move_index]]->get_edge_cost(
        source.coord[move_index], target.move_tuple[move_index]);
    }
  }
}

// Assemble the full standard-vertex path passing through vert by tracing
// backwards via back_ptr and forwards via forwards_ptr.
OdPath OdMstar::trace_path(OdVertex *vert){
  OdPath path;
  back_trace_path(vert, vert->forwards_ptr, path);
  forwards_trace_path(vert, path);
  return path;
}

void OdMstar::back_trace_path(OdVertex *vert, OdVertex *successor,
                              OdPath &path){
  vert->forwards_ptr = successor;
  // check if this is the final, terminal state, which is not required
  // to have a zero-cost self loop, so could get problems
  if (vert != successor){
    vert->h = successor->h + edge_cost(vert->coord, successor->coord);
  } else{
    vert->h = 0;
  }
  if (vert->coord.is_standard()){
    path.insert(path.begin(), vert->coord);
  }
  if (vert->back_ptr != vert){
    back_trace_path(vert->back_ptr, vert, path);
  }
}

void OdMstar::forwards_trace_path(OdVertex *vert, OdPath &path){
  if (vert->forwards_ptr != vert){
    if (vert->forwards_ptr->coord.is_standard()){
      path.push_back(vert->forwards_ptr->coord);
    }
    forwards_trace_path(vert->forwards_ptr, path);
  }
}

void OdMstar::back_prop_col_set(OdVertex *vert, const ColSet &col_set,
                                OpenList &open_list){
  // add_col_set_in_place returns true when vert's collision set grew,
  // in which case vert must be re-expanded and its predecessors updated
  bool further = add_col_set_in_place(col_set, vert->col_set);
  if (further){
    vert->closed = false;
    if (!
vert->open){
      vert->open = true;
      open_list.push(vert);
    }
    // Recursively propagate the (possibly grown) collision set upstream
    for(OdVertex *predecessor: vert->back_prop_set){
      back_prop_col_set(predecessor, vert->col_set, open_list);
    }
  }
}

================================================
FILE: od_mstar3/od_mstar.hpp
================================================

#ifndef MSTAR_OD_MSTAR_H
#define MSTAR_OD_MSTAR_H

// NOTE(review): the six system header names below were stripped by text
// extraction, as were the template arguments of the std::hash
// specializations and the container members of OdMstar.  Restore from the
// upstream od_mstar3 sources before compiling.
#include
#include
#include
#include
#include
#include
#include "mstar_type_defs.hpp"
#include "col_set.hpp"
#include "od_vertex.hpp"
#include "col_checker.hpp"
#include "policy.hpp"

namespace std{
  // Hash specializations so OdCoord / move vectors / collision-set elements
  // can key std::unordered_map / unordered_set
  template <>
  struct hash{
    size_t operator()(const mstar::OdCoord &val) const{
      size_t hash = boost::hash_range(val.coord.cbegin(), val.coord.cend());
      boost::hash_combine(
        hash,
        boost::hash_range(val.move_tuple.cbegin(), val.move_tuple.cend()));
      return hash;
    }
  };

  template <>
  struct hash>{
    size_t operator()(const std::vector &val) const{
      return boost::hash_range(val.cbegin(), val.cend());
    }
  };

  template <>
  struct hash{
    size_t operator()(const mstar::ColSetElement &val) const{
      return boost::hash_range(val.cbegin(), val.cend());
    }
  };
}

namespace mstar{

  // Comparator for the open list; nullptr compares as "greater" so real
  // vertices sort ahead of missing ones
  struct greater_cost{
    bool operator()(const mstar::OdVertex *x,
                    const mstar::OdVertex *y) const{
      if (x == nullptr || y == nullptr){
        return true;
      }
      return *x > *y;
    }
  };

  // Sort in decreasing order to give cheap access to the cheapest elements
  typedef std::priority_queue, greater_cost> OpenList;

  class OdMstar {
  public:
    /**
     * Constructs a new, top level M* planner
     *
     * @param policies pointer to vector of policies.
     *                 OdMstar does not take ownership
     * @param goals goal configuration of entire system
     * @param inflation inflation factor
     * @param end_time time at which M* will declare failure
     * @param checker collision checking object
     */
    OdMstar(
      std::vector> policies, OdCoord goals, double inflation,
      time_point end_time, std::shared_ptr col_checker);

    /**
     * Creates a subplanner for a subsest of the robots
     *
     * robots is a collision set element in the frame of parent, not global
     * robot ids
     */
    OdMstar(const ColSetElement &robots, OdMstar &parent);

    ~OdMstar();

    /**
     * Computes the optimal path to the goal from init_pos
     *
     * @param init_pos coordinate of the initial joint configuration
     *
     * @return the path in the joint configuration graph to the goal
     *
     * @throws OutOfTimeError ran out of planning time
     * @throws NoSolutionError no path to goal from init_pos
     */
    OdPath find_path(OdCoord init_pos);

  private:
    /**TODO: fix
     * This is kind of horrifying, but I cannot store the OdMstar objects
     * directly in the unordered map, as I get ungodly errors that look
     * like they come from an allocator.  Adding copy constructor and
     * assignment operator doesn't work, so its something involved about
     * STL.  Think this works, but annoying
     */
    std::unordered_map> *subplanners_;
    std::vector> policies_;
    // ids of the robots this planner handles.  Assumed to be in ascending
    // order
    std::vector ids_;
    OdCoord goals_;
    // holds the nodes in the joint configuration space
    std::unordered_map graph_;
    time_point end_time_;  // When planning will be halted
    double inflation_;  // inflation factor for heuristic
    int planning_iter_;  // current planning iteration
    int num_bots_;
    std::shared_ptr col_checker_;
    bool top_level_;  // tracks if the top level planner

    OdMstar(const OdMstar &that) = delete;

    /**
     * Resets planning for a new planning iteration.
     *
     * Does not reset forwards_ptrs, as those should be valid across
     * iterations
     */
    void reset();

    /**
     * Computes the heuristic value of a vertex at a given coordinate
     *
     * @param coord coordinate for which to compute a heuristic value
     *
     * @return the (inflated) heuristic value
     */
    double heuristic(const OdCoord &coord);

    /**
     * Returns a reference to the vertex at a given coordinate
     *
     * this->graph retains ownership of the vertex.  Will create the vertex
     * if it does not already exist.
     *
     * @param coord coordinate of the desired vertex
     *
     * @return pointer to the vertex at coord.
     */
    OdVertex* get_vertex(const OdCoord &coord);

    /**
     * Returns the optimal next step from init_pos
     *
     * Will compute the full path if necessary, but preferentially uses
     * cached results in forwards_ptrs.  Expected to only be called from
     * a standard coordinate, and to only return a standard coordinate
     *
     * @param init_pos coordinate to compute the optimal next step from
     *
     * @returns the coordinate of the optimal next step towards the goal
     */
    OdCoord get_step(const OdCoord &init_pos);

    /**
     * Generates the neighbors of vertex and add them to the open list
     *
     * @param vertex OdVertex to expand
     * @param open_list the sorted open list being used
     */
    void expand(OdVertex *vertex, OpenList &open_list);

    /**
     * Returns the limited neighbors of coord using recursive calculation
     *
     * @param coord Coordinates of vertex to generate neighbor thereof
     * @param col_set collision set of vertex to generate neighbors
     *
     * @return list of limited neighbors
     */
    std::vector get_neighbors(
      const OdCoord &coord, const ColSet &col_set);

    /**
     * Returns the limited neighbors of coord using non-recursive computation
     *
     * Called when the collision set contains all of the robots, as a base
     * case for get_neighbors, thus always generate all possible neighbors
     *
     * @param coord Coordinates of vertex to generate neighbor thereof
     *
     * @return list of limited neighbors
     */
    std::vector get_all_neighbors(
      const OdCoord &coord);

    /**
     * Returns the cost of traversing a given edge
     *
     * @param source coordinate of the source vertex
     * @param target coordinate of the target vertex
     *
     * @return the cost of the edge
     */
    double edge_cost(const OdCoord &source, const OdCoord &target);

    /**
     * Returns the path through a vertex
     *
     * Assumes that back_ptr and forwards_ptr are set and non-none at vert
     * Identifies each end of the path by looking for a
     * back_ptr/forwards_ptr pointed at the holder
     *
     * @param vert the vertex to trace a path through
     *
     * @return the path passing through vert containing only standard
     *         vertices
     */
    OdPath trace_path(OdVertex *vert);

    /**
     * Generates the path to the specified vertex
     *
     * Sets forward_ptrs to cache the path, and updates the heuristic
     * values of the vertices on the path so we can end the moment a
     * vertex on a cached path is expanded.
     *
     * TODO: double check that making the heuristic inconsistent in this
     * fashion is OK.
     *
     * @param vert the vertex to trace the path to
     * @param successor the successor of vert on the path
     * @param path place to construct path
     */
    void back_trace_path(OdVertex *vert, OdVertex *successor, OdPath &path);

    /**
     * Genertes the path from the specified vertex to the goal
     *
     * Non-trivial only if vert lies on a previously cached path
     *
     * @param vert the vertex to trace the path from
     * @param path place to construct path
     */
    void forwards_trace_path(OdVertex *vert, OdPath &path);

    /**
     * Backpropagates collision set information to all predecessors of a
     * vertex.
     *
     * Adds vertices whose collision set changes back to the open list
     *
     * @param vertex pointer to the vertex to back propagate from
     * @param col_set the collision set that triggered backpropagation
     * @param open_list the current open list
     */
    void back_prop_col_set(OdVertex *vert, const ColSet &col_set,
                           OpenList &open_list);
  };

  struct OutOfTimeError : public std::exception{
    const char * what () const throw(){
      return "Out of Time";
    }
  };

  struct NoSolutionError : public std::exception{
    const char * what () const throw(){
      return "No Solution";
    }
  };
};

#endif

================================================
FILE: od_mstar3/od_mstar.py
================================================

"""Implementation of subdimensional expansion using operator decomposition
instead of vanilla A*, with better graph abstraction.

All coordinates are to be tuples and all collision sets are to be lists
of immutable sets (frozenset).  This partial rewrite will focus on
converting everything that can be immutable into an immutable structure

Intended to support both mstar and rMstar."""

# NOTE(review): indentation in this file was reconstructed from a
# whitespace-mangled extraction -- verify against upstream
# od_mstar3/od_mstar.py.
from od_mstar3 import workspace_graph
import sys
import time as timer  # So that we can use the time command in ipython
from od_mstar3 import SortedCollection
from od_mstar3.col_set_addition import add_col_set_recursive, add_col_set
from od_mstar3.col_set_addition import effective_col_set
from od_mstar3.col_set_addition import OutOfTimeError, NoSolutionError, col_set_add

try:
    import ipdb as pdb
except ImportError:
    # Default to pdb
    import pdb

MAX_COST = workspace_graph.MAX_COST
PER_ROBOT_COST = 1  # Cost a robot accrues for not being at its goal position
POSITION = 0
MOVE_TUPLE = 1  # Tuple of destination coordinate tuples for each robot's move

global_move_list = []  # Used for visualization


def find_path(obs_map, init_pos, goals, recursive=True, inflation=1.0,
              time_limit=5 * 60.0, astar=False, get_obj=False,
              connect_8=False, full_space=False, return_memory=False,
              flood_fill_policy=False, col_checker=None,
              epemstar=False, makespan=False, col_set_memory=True):
    """Finds a path in the specified obstacle environment from the initial
    position to the goal.

    obs_map - obstacle map, matrix with 0 for free, 1 for obstacle
    init_pos - ((x1, y1), (x2, y2), ...) coordinates of the initial state,
               should be tuples
    goals - ((x1, y1), (x2, y2), ...) coordinates of the goal should be
            tuples
    recursive - True for rM*, false for basic M*
    inflation - factor by which the metric will be inflated
    time_limit - how long to run before raising an error (declaring
                 timeout)
    astar - use basic A* instead of operator decomposition to search the
            graph produced by M* (i.e. run M* not ODM*)
    get_obj - Return the Od_Mstar instance used in path planning, default
              False
    connect_8 - True (default) for 8 connected graph, False for 4
                connected graph
    full_space - If True, run pure A* or OD (depending on the astar flag)
                 instead of subdimensional expansion.  Default False
    return_memory - Returns information on memory useage.  Default False
    flood_fill_policy - compute policy with flood fill instead of
                        resumable A*
    col_checker - Optional custom collision checker object, used for
                  searching non-grid graphs.  Default None
    epemstar - Use EPEA* to search the graph rather than A* or OD
    makespan - minimize makespan (time to solution), instead of
               minimizing time robots spend away from their robots
    col_set_memory - remember previous step collision set, intended to
                     provide more efficient cached path utillization.
                     True by default
    """
    global global_move_list
    # Grid-based planning expects hashable (tuple) coordinates
    if (col_checker is None or
            isinstance(col_checker, workspace_graph.Edge_Checker)):
        goals = tuple(map(tuple, goals))
        init_pos = tuple(map(tuple, init_pos))
    # NOTE(review): whether this reset was inside the preceding `if` in the
    # original source cannot be recovered from the mangled text -- confirm
    # against upstream
    global_move_list = []
    o = Od_Mstar(obs_map, goals, recursive=recursive, inflation=inflation,
                 astar=astar, connect_8=connect_8, full_space=full_space,
                 flood_fill_policy=flood_fill_policy,
                 col_checker=col_checker, epeastar=epemstar,
                 makespan=makespan, col_set_memory=col_set_memory)
    # Need to make sure that the recursion limit is great enough to
    # actually construct the path
    longest = max([o.sub_search[(i, )].get_cost(init_pos[i])
                   for i in range(len(init_pos))])
    # Guess that the longest path will not be any longer than 5 times the
    # longest individual robot path
    sys.setrecursionlimit(max(sys.getrecursionlimit(),
                              longest * 5 * len(init_pos)))
    path = o.find_path(init_pos, time_limit=time_limit)
    num_nodes = o.get_memory_useage(False)
    corrected_mem = o.get_memory_useage(True)
    if get_obj:
        return path, o
    if return_memory:
        return path, num_nodes, corrected_mem
    return path


class Od_Mstar(object):
    """Implements M* and rM* using operator decomposition instead of
    basic M* as the base computation.
    """

    def __init__(self, obs_map, goals, recursive, sub_search=None,
                 col_checker=None, rob_id=None, inflation=1.0,
                 end_time=10 ** 15, connect_8=False, astar=False,
                 full_space=False, flood_fill_policy=False, epeastar=False,
                 offset_increment=1, makespan=False, col_set_memory=False):
        """
        obs_map - obstacle map, matrix with 0 for free, 1 for obstacle
        goals - ((x1, y1), (x2, y2), ...) coordinates of the goal, should
                be tuples
        recursive - True for rM*, false for basic M*
        sub_search - Sub planners, should be None for the full
                     configuration space
        col_checker - object to handle robot-robot collision checking.
                      Should implement the same interface as
                      workspace_graph.Edge_Checker
        rob_id - maps local robot identity to full configuration space
                 identity, should be None for the full configuration space
                 instance
        inflation - how much the metric should be inflated by
        end_time - when the search should be terminated
        connect_8 - True for 8 connected graph, False for 4 connected
                    graph
        astar - use basic A* instead of operator decomposition
        full_space - whether to perform a full configuration space search
        flood_fill_policy - compute policy with flood fill instead of
                            resumable A*
        epeastar - Uses EPEA* instead of OD or A* for graph search
        offset_increment - how much to increase the EPEA* offset after
                           every expansion
        makespan - minimize makespan (time to solution), instead of
                   minimizing time robots spend away from their robots
        col_set_memory - remember previous step collision set, intended to
                         provide more efficient cached path utillization.
                         False by default
        """
        # visualize - turn on visualization code - DISABLED
        self.obs_map = obs_map
        self.recursive = recursive
        self.sub_search = sub_search
        # Stores the global ids of the robots in order of their position
        # in coord
        self.rob_id = rob_id
        self.goals = goals
        # Graph that holds the graph representing the joint configuration
        # space
        self.graph = {}
        self.end_time = end_time
        self.inflation = float(inflation)
        self.connect_8 = connect_8
        self.astar = astar
        self.epeastar = epeastar
        self.offset_increment = offset_increment
        self._makespan = makespan
        # Store some useful values
        self.updated = 0
        self.num_bots = len(goals)
        # self.visualize = visualize
        self.full_space = full_space
        # Need a different key incorporating the offset for EPEM*
        if self.epeastar:
            self.open_list_key = lambda x: (-x.cost - x.h * self.inflation -
                                            x.offset)
        else:
            self.open_list_key = lambda x: -x.cost - x.h * self.inflation
        if self.rob_id is None:
            self.rob_id = tuple(range(len(goals)))
        self.col_checker = col_checker
        if self.col_checker is None:
            self.col_checker = workspace_graph.Edge_Checker()
        self.flood_fill_policy = flood_fill_policy
        # Making everything that can be immutable, immutable
        self._col_set_memory = col_set_memory
        self.gen_policy_planners(sub_search, self.obs_map, self.goals)

    def gen_policy_planners(self, sub_search, obs_map, goals):
        """Creates the sub-planners and necessary policy keys.  This is
        because pretty much every sub-planner I've made requires adjusting
        the graph used to create the policies and passing around dummy
        sub_searches

        side effects to generate self.sub_search and self.policy_keys
        """
        self.policy_keys = tuple([(i, ) for i in self.rob_id])
        self.sub_search = sub_search
        if self.sub_search is None:
            self.sub_search = {}
            # Wrapping the robot number in a tuple so we can use the same
            # structure for planners for compound robots
            if self.flood_fill_policy:
                for dex, key in enumerate(self.policy_keys):
                    self.sub_search[key] = workspace_graph.Workspace_Graph(
                        obs_map, goals[dex], connect_8=self.connect_8)
            else:
                for dex, key in enumerate(self.policy_keys):
                    self.sub_search[key] = workspace_graph.Astar_Graph(
                        obs_map, goals[dex], connect_8=self.connect_8,
                        makespan=self._makespan)

    def get_graph_size(self, correct_for_size=True):
        """Returns the number of nodes in the current graph"""
        if correct_for_size:
            # Weight each joint-configuration node by the number of robots
            return len(self.graph) * len(self.rob_id)
        return len(self.graph)

    def get_memory_useage(self, correct_for_size=True):
        """Returns the total number of nodes allocated in this planner
        and any subplanners.
        """
        temp_sum = self.get_graph_size(correct_for_size)
        for i in self.sub_search:
            temp_sum += self.sub_search[i].get_graph_size()
        return temp_sum

    def reset(self):
        """resets the map for later searches, does not remove
        forwards_pointer
        """
        # Nodes lazily reset themselves when touched with a newer
        # `updated` stamp (see get_node)
        self.updated += 1

    def heuristic(self, coord, standard_node):
        """Returns the heuristic value of the specified coordinate.
        Does not handle inflation naturally so we can update the heuristic
        properly

        coord - coordinate of the node at which to compute the heuristic
        standard_node - whether this is a standard node
        """
        if standard_node:
            cost = sum(self.sub_search[key].get_cost(coord[dex])
                       for dex, key in enumerate(self.policy_keys))
            # return self.inflation * cost
            return cost
        else:
            # Compute heuristic for robots which have moved
            cost = sum(self.sub_search[key].get_cost(coord[MOVE_TUPLE][dex])
                       for dex, key in enumerate(
                           self.policy_keys[:len(coord[MOVE_TUPLE])]))
            # compute heuristic for robots which have not moved
            cost += sum(self.sub_search[key].get_cost(
                coord[POSITION][dex + len(coord[MOVE_TUPLE])])
                        for dex, key in enumerate(self.policy_keys[len(coord[
                            MOVE_TUPLE]):]))
            return cost

    def pass_through(self, coord1, coord2):
        """Tests for a collision during transition from coord 1 to
        coord 2.

        coord1, coord2 - joint coordinates of multirobot system

        returns:
        collision set for the edge, empty list if there are no collisions
        """
        # return self.col_checker.pass_through(coord1, coord2,
        #                                      self.recursive)
        return self.col_checker.cross_over(coord1, coord2, self.recursive)

    def incremental_col_check(self, start_coord, new_coord):
        """Performs an incremental collision check for new coord.

        Assumes that the position of a single new robot has been added to
        a list of coordinates of robots.  Checks whether adding this new
        robot will lead to a collision.  Start coord is the joint state
        before the action being built in new_coord, and may contain more
        robots than new_coord.  counts on the implementation of the
        incremental collision checks to be intelligent to avoid issues

        start_coord - coordinate at which the system starts
        new_coord - coordinate to which the system moves

        returns:
        collision_set formed form the colliding robots during the move
        """
        # Check the edge first; fall back to a static check of new_coord
        col_set = self.col_checker.incremental_cross_over(
            start_coord, new_coord, self.recursive)
        if col_set:
            return col_set
        return self.col_checker.incremental_col_check(
            new_coord, self.recursive)

    def get_node(self, coord, standard_node):
        """Returns the node at the specified coordinates.

        Remember intermediate nodes are of the form (base_coord,
        move_tuple)

        coord - coordinates of the node, potentially an intermediate node
        standard_node - whether this is a standard node or an intermediate
                        node
        """
        if coord in self.graph:
            # Node already exists.  reset if necessary
            t_node = self.graph[coord]
            t_node.reset(self.updated)
            return t_node
        # Need to instantiate the node
        if standard_node:
            col = self.col_checker.col_check(coord, self.recursive)
        else:
            # Only check for collisions between robots whose move has
            # been determined
            col = self.col_checker.col_check(coord[MOVE_TUPLE],
                                             self.recursive)
        free = (len(col) == 0)
        t_node = mstar_node(coord, free, self.recursive, standard_node)
        # Cache the resultant col_set
        t_node.col_set = col
        t_node.updated = self.updated
        t_node.h = self.heuristic(coord, standard_node)
        # Add the node to the graph
        self.graph[coord] = t_node
        return t_node

    def get_step(self, init_pos, standard_node=True):
        """Get the optimal step from init_pos.

        Computes the entire optimal path if necessary, but preferentially
        relying on the cached paths stored in mstar_node.forwards_ptr.

        init_pos - coordinate of the node to compute the step from
        standard_node - standard_node whether init_pos represents a
                        standard node

        returns:
        coordinate of the optimal step towards the goal
        """
        cur_node = self.get_node(init_pos, standard_node)
        temp = cur_node.get_step()
        if temp is not None:
            return temp
        # Use a zero time limit, so the end time will not be modified
        path = self.find_path(init_pos, time_limit=-1)
        return cur_node.get_step()

    def gen_init_nodes(self, init_pos):
        """Generate the initial search nodes.

        Potentially more than one node is generated, but in practice will
        usually just one will be generated

        init_pos - initial position

        returns:
        list of initial nodes
        """
        first = self.get_node(init_pos, True)
        first.open = True
        first.cost = 0
        first.back_ptr = first
        return [first]

    def find_path(self, init_pos, time_limit=5 * 60):
        """Finds a path from init_pos to the goal specified when self was
        instantiated.

        init_pos - ((x1, y1), (x2, y2), ...) coordinates of initial
                   position
        time_limit - time allocated to find a solution.  Will raise an
                     exception if a path cannot be found within this time
                     period
        """
        self.reset()
        if time_limit > 0:
            self.end_time = timer.time() + time_limit
            # For replanning to work correctly, need to update the end
            # time for all subplanners.  Otherwise, the end time of the
            # subplanners will never be updated, so if you make a query
            # more than the original time_limit seconds after the first
            # query to this object, you will always get a timeout,
            # regardless of the time limit used on the second query
            for planner in self.sub_search.values():
                if hasattr(planner, 'end_time'):
                    planner.end_time = self.end_time
        # Configure the goal node
        goal_node = self.get_node(self.goals, True)
        goal_node.forwards_ptr = goal_node
        # Use the negation of the cost, so SortedCollection will put the
        # lowest value item at the right of its internal list
        init_nodes = self.gen_init_nodes(init_pos)
        open_list = SortedCollection.SortedCollection(
            init_nodes, key=self.open_list_key)
        while len(open_list) > 0:
            if timer.time() > self.end_time:
                raise OutOfTimeError(timer.time())
            node, consistent = open_list.consistent_pop()
            if not consistent:
                continue
            node.open = False
            if self.solution_condition(node):
                path = node.get_path()
                return tuple(path)
            self.expand(node, open_list)
        raise NoSolutionError()

    def solution_condition(self, node):
        """Checks whether we have finished finding a path when node has
        been reached

        Checks whether node.forwards_ptr indicates that a path to the
        goal has been found

        node - node to check for indicating a path to the goal

        returns:
        True if goal has been reached or a cached path to the goal has
        been reached, else False
        """
        if node.forwards_ptr is not None:
            return True
        return False

    def expand(self, node, open_list):
        """Handles the expansion of the given node and the addition of its
        neighbors to the open list

        node - node to expand
        open_list - open list used during the search
        """
        node.closed = True
        # ASSUMES THAT get_neighbors HANDLES UPDATING NEIGHBOR COST,
        # AND DOES NOT RETURN NEIGHBORS FOR WHICH THERE IS ALREADY A
        # PATH AT LEAST AS GOOD
        if self.recursive:
            neighbors, col_set = self.get_neighbors_recursive(node)
        else:
            neighbors, col_set = self.get_neighbors_nonrecursive(node)
        # node is the only element in the backpropagation
        # sets of
        # neighbors that has changed, so we can backpropagate from here
        old_col_set = node.col_set
        if not self.full_space:
            node.back_prop_col_set(col_set, open_list,
                                   epeastar=self.epeastar)
        for i in neighbors:
            i.back_ptr = node
            # Even if the node is already in the open list, removing if
            # from its old position (given by the old cost value) is too
            # expensive, requiring an O(N) operation to delete.  Simply
            # add the new value and reject the old copy (which will be
            # marked as closed), when you come to it
            i.open = True
            open_list.insert_right(i)
        if self.epeastar:
            # if running epeastar
            if old_col_set == node.col_set:
                # If the collision set changed, then adding the node
                # back to the open list with properly updated collision
                # set has been handled by the backprop function
                node.offset += self.offset_increment
                open_list.insert(node)

    def od_mstar_neighbors(self, node):
        """Generates the free neighbors of the given node for the
        non-recursive case, using operator decomposition

        Also returns the associated collision set due to neighbors which
        are non-free due to robot-robot collisions.  Only returns nodes
        which can be most cheaply reached through node

        node - node to determine neighbors

        returns:
        (neighbors, col_set)
        neighbors - collision free neighbors which can most efficiently
                    be reached from node
        col_set - collision set for neighbors which are not collision
                  free
        """
        col_set = ()
        if not node.free:
            # Can't have an out neighbor for a node in collision
            return col_set, node.col_set
        rob_dex = 0  # Keeps track of the robot to move in this step
        # split the coordinates into the start coordinate and the move
        # list if the node is standard, doing this so variables are
        # initialized in the preferred namespace, which is probably not
        # necessary
        move_list = ()
        start_coord = node.coord
        if not node.standard_node:
            start_coord = node.coord[POSITION]
            move_list = node.coord[MOVE_TUPLE]
            rob_dex = len(node.coord[MOVE_TUPLE])
        if ((len(node.col_set) > 0 and rob_dex in node.col_set[0]) or
                self.full_space):
            # This robot is in the collision set, so consider all
            # possible neighbors
            neighbors = self.sub_search[
                self.policy_keys[rob_dex]].get_neighbors(
                    start_coord[rob_dex])
        else:
            neighbors = [self.sub_search[self.policy_keys[rob_dex]].get_step(
                start_coord[rob_dex])]
        # check if this is the last robot to be moved
        filled = (rob_dex == (self.num_bots - 1))
        new_neighbors = []
        # visualize_holder = []
        for i in neighbors:
            # Generate the move list with the new robot position
            new_moves = list(move_list)
            new_moves.append(i)
            new_moves = tuple(new_moves)
            # Check for collisions in the transition to the new
            # position, only need to consider the robots in the move list
            # pass through
            pass_col = self.pass_through(start_coord[:rob_dex + 1],
                                         new_moves)
            if len(pass_col) > 0:
                # Have robot-robot collisions
                col_set = col_set_add(pass_col, col_set, self.recursive)
                continue
            # Need to branch on whether we have filled the move list
            if filled:
                # Generate a standard node.  Static collisions are found
                # in self.get_node()
                new_node = self.get_node(new_moves, True)
            else:
                # Generate an intermediate node
                new_node = self.get_node((start_coord, new_moves), False)
            if node not in new_node.back_prop_set:
                new_node.back_prop_set.append(node)
            # Always need to add the col_set of any vertex that we can
            # actually reach, as otherwise, we would need to wait for
            # another robot to collide downstream of the reached vertex
            # before that vertex would back propagate its col_set
            col_set = col_set_add(new_node.col_set, col_set,
                                  self.recursive)
            if not new_node.free:
                continue
            # Skip if closed
            if new_node.closed:
                continue
            # Handle costs, which depends soely on the move list,
            # function to allow for alternate cost functions
            temp_cost = self.od_mstar_transition_cost(start_coord,
                                                      node.cost, i, rob_dex)
            if temp_cost >= new_node.cost:
                continue
            new_node.cost = temp_cost
            new_neighbors.append(new_node)
            # Set the intermediate nod's col_set equal to its parent,
            # so later elements will actually be explored.  Not
            # technically required but will cut back on thrashing
            if not new_node.standard_node:
                new_node.add_col_set(node.col_set)
        return new_neighbors, col_set

    def od_mstar_transition_cost(self, start_coord, prev_cost, neighbor,
                                 rob_dex):
        """Computes the transition cost for a single robot in od_mstar
        neighbor generation

        start_coord - base position of robots (prior to move assignment)
        prev_cost - cost of base node
        neighbor - proposed move assignmetn
        rob_dex - robot move is assigned to

        returns:
        cost of a single robot transitioning state
        """
        prev_cost += self.sub_search[
            self.policy_keys[rob_dex]].get_edge_cost(
                start_coord[rob_dex], neighbor)
        return prev_cost

    def gen_epeastar_coords(self, node):
        """Helper function for generating neighbors of a node using EPEA*

        Uses a two step process.  First the incremental costs are
        computed, then the neighbors fitting those incremental costs.
        More directly matches what was done in the EPEA* paper.  Performs
        incremental collision checking during the generation of
        neighbors, to prune out as many invalid nodes as early as
        possible

        node - node for which to generate neighbors
        """
        adder = add_col_set
        if self.recursive:
            adder = add_col_set_recursive
        offset = node.offset
        coord = node.coord
        if len(node.col_set) == 0:
            # have empty collision set
            new_coord = tuple(
                self.sub_search[self.policy_keys[dex]].get_step(
                    coord[dex]) for dex in range(self.num_bots))
            pass_col = self.pass_through(coord, new_coord)
            if pass_col:
                return [], pass_col
            col = self.col_checker.col_check(new_coord, self.recursive)
            if col:
                return [], col
            return [new_coord], []
        # Enumerate per-robot incremental cost "offsets" whose sum matches
        # the node's current offset (EPEA* expansion budget)
        search_list = [(0, ())]
        assert len(node.col_set) == 1
        node_col = node.col_set[0]
        for rob_dex in range(self.num_bots):
            if rob_dex in node_col:
                offsets = self.sub_search[
                    self.policy_keys[rob_dex]].get_offsets(coord[rob_dex])
            else:
                offsets = (0, )
            new_list = []
            for cost, pos in search_list:
                for off in offsets:
                    if rob_dex < self.num_bots - 1:
                        if off + cost <= offset:
                            new_list.append((off + cost, pos + (off, )))
                    elif off + cost == offset:
                        # For the last robot, only want to keep costs which
                        # match perfectly
                        new_list.append((off + cost, pos + (off, )))
            search_list = new_list
        # Second pass: realize concrete joint coordinates matching each
        # surviving cost vector, collision-checking incrementally
        neighbors = []
        col_set = []
        for offset, costs in search_list:
            gen_list = [()]
            for dex, c in enumerate(costs):
                if dex in node_col:
                    neib = (self.sub_search[
                        self.policy_keys[dex]].get_offset_neighbors(
                            coord[dex], c))
                else:
                    neib = ((0, self.sub_search[
                        self.policy_keys[dex]].get_step(coord[dex])),)
                new_list = []
                for _, n in neib:
                    for old in gen_list:
                        new_coord = old + (n, )
                        # Perform collision checking
                        tcol = self.incremental_col_check(coord, new_coord)
                        if tcol:
                            col_set = adder(col_set, tcol)
                            continue
                        new_list.append(new_coord)
                gen_list = new_list
            neighbors.extend(gen_list)
        return neighbors, col_set

    def get_epeastar_neighbors(self, node):
        """Generates the free neighbors of the given node for the
        non-recursive case.
Also returns the associated collision set due to neighbors which are non-free due to robot-robot collisions. Only returns nodes which can be most cheaply reached through node node - node to be expanded returns: (neighbors, col_set) neighbors - neighbors that can most be efficiently reached from node, that are collision free col_set - collisions incurred when trying to reach non-collision free nodes """ if not node.free: # Can't have an out neighbor for a node in collision return [], node.col_set start_coord = node.coord neighbor_coords, col_set = self.gen_epeastar_coords(node) neighbors = [] for i in neighbor_coords: new_node = self.get_node(i, True) if node not in new_node.back_prop_set: new_node.back_prop_set.append(node) if not new_node.free: continue # update costs if new_node.closed: continue t_cost = self.epeastar_transition_cost(start_coord, node.cost, i) if t_cost < new_node.cost: new_node.cost = t_cost new_node.back_ptr = node neighbors.append(new_node) return neighbors, col_set def epeastar_transition_cost(self, start_coord, prev_cost, new_coord): """Computes the cost of a new node at the specified coordinates, starting from the given position and cost start_coord - node at which the system starts prev_cost - cost of the node at start_coord new_coord - destination node """ for dex, (source, target) in enumerate(zip(start_coord, new_coord)): prev_cost += self.sub_search[self.policy_keys[dex]].get_edge_cost( source, target) return prev_cost def get_neighbors_nonrecursive(self, node): """Generates neighbors using a non-recursive method. 
Note that collision sets will still be generated in the style specified by self.recursive node - node for which to generate neighbors """ if self.astar: return self.get_astar_neighbors(node) elif self.epeastar: return self.get_epeastar_neighbors(node) return self.od_mstar_neighbors(node) def create_sub_search(self, new_goals, rob_id): """Creates a new instance of a subsearch for recursive search new_goals - goals for the subset of the robots rob_ids - ids of the robots involved in the subsearch returns: new OD_Mstar instance to perform search for the specified subset of robots""" return Od_Mstar(self.obs_map, new_goals, self.recursive, sub_search=self.sub_search, col_checker=self.col_checker, rob_id=rob_id, inflation=self.inflation, end_time=self.end_time, connect_8=self.connect_8, astar=self.astar, full_space=self.full_space, epeastar=self.epeastar, makespan=self._makespan, col_set_memory=self._col_set_memory) def get_subplanner_keys(self, col_set): """Returns keys to subplanners required for planning for some subset of robots. col_set - collision set to be solved returns: keys for the necessary subplanners in self.sub_search """ # Convert the collision sets into the global indicies, and # convert to tuples. Assumes self.rob_id is sorted global_col = list(map(lambda y: tuple(map(lambda x: self.rob_id[x], y)), col_set)) # generate the sub planners, if necessary for dex, gc in enumerate(global_col): if gc not in self.sub_search: t_goals = tuple([self.goals[k] for k in col_set[dex]]) self.sub_search[gc] = self.create_sub_search(t_goals, gc) return global_col def get_neighbors_recursive(self, node): """Get the neighbors of node for recursive M*. 
Uses operator decomposition style expansion when necessary, may fail when called on an intermediate node node - node for which to generate neighbors returns: (neighbors, col_set) neighbors - list of coordinates for neighboring, reachable nodes col_set - collisions generated by trying to transition to non-reachable neighbors """ # Handle collision set memory if necessary # use_memory = False if self._col_set_memory: col_set = effective_col_set(node.col_set, node.prev_col_set) effective_set = col_set # if set(col_set) != set(node.col_set): # # using memory # use_memory = True # Sort the collision set, which also converts them into # lists col_set = list(map(sorted, col_set)) else: # Sort the collision set, which also converts them into lists col_set = list(map(sorted, node.col_set)) # Use standard operator decomposition, if appropriate if len(col_set) == 1 and len(col_set[0]) == self.num_bots: # At base of recursion case return self.get_neighbors_nonrecursive(node) start_coord = node.coord if not node.standard_node: assert False # Generate subplanners for new coupled groups of robots and get # their sub_search keys coupled_keys = self.get_subplanner_keys(col_set) # Generate the individually optimal step new_coord = [self.sub_search[self.policy_keys[i]].get_step( start_coord[i]) for i in range(self.num_bots)] # Iterate over the colliding sets of robots, and integrate the # results of the sup planning for each set for i in range(len(col_set)): # if use_memory and frozenset(col_set[i]) in node.prev_col_set: # assert self.sub_search[ # coupled_keys[i]].graph[ # tuple([start_coord[j] # for j in col_set[i]])].forwards_ptr != None try: new_step = self.sub_search[coupled_keys[i]].get_step( tuple([start_coord[j] for j in col_set[i]])) except NoSolutionError: # Can't get to the goal from here return [], [] # Copy the step into position for j in range(len(col_set[i])): new_coord[col_set[i][j]] = new_step[j] new_coord = tuple(new_coord) # process the neighbor pass_col = 
self.pass_through(start_coord, new_coord) if len(pass_col) > 0: # Have collisions before reaching node return [], pass_col new_node = self.get_node(new_coord, True) if node not in new_node.back_prop_set: new_node.back_prop_set.append(node) if not new_node.free: return [], new_node.col_set # Skip if closed if new_node.closed: return [], new_node.col_set # Compute the costs. THIS MAY NOT WORK IF node IS AN INTERMEDIATE # NODE t_cost = self.get_node(start_coord, True).cost t_cost = self.od_rmstar_transition_cost(start_coord, t_cost, new_node.coord) if t_cost < new_node.cost: new_node.cost = t_cost if self._col_set_memory: new_node.prev_col_set = effective_set return [new_node], new_node.col_set return [], new_node.col_set def od_rmstar_transition_cost(self, start_coord, prev_cost, new_coord): """Computes the transition cost for a single robot in od_rmstar neighbor generation start_coord - base position of robots (prior to move assignment) prev_cost - cost of base node new_coord - proposed move assignmetn returns: total cost of reaching new_coord via start_coord """ for dex, (source, target) in enumerate(zip(start_coord, new_coord)): prev_cost += self.sub_search[self.policy_keys[dex]].get_edge_cost( source, target) return prev_cost def alt_get_astar_neighbors(self, node): """Gets neighbors of a specified node using the standard A* approach. 
assumes working with standard nodes node - node for which to generate neighbors returns: (neighbors, col_set) neighbors - coordinates of collision free neighboring nodes col_set - collisions resulting from trying to reach non-collision free neighbors """ start_coord = node.coord # Generate the individually optimal setp base_coord = [self.sub_search[self.policy_keys[i]].get_step( start_coord[i]) for i in range(self.num_bots)] old_coords = [base_coord] assert len(node.col_set) <= 1 to_explore = node.col_set if self.full_space: to_explore = [range(self.num_bots)] for i in to_explore: for bot in i: new_coords = [] neighbors = self.sub_search[self.policy_keys[bot]]\ .get_neighbors(start_coord[bot]) for neigh in neighbors: for k in old_coords: temp = k[:] temp[bot] = neigh new_coords.append(temp) old_coords = new_coords col_set = [] neighbors = [] old_coords = list(map(tuple, old_coords)) for i in old_coords: # Check if we can get there pass_col = self.pass_through(start_coord, i) if len(pass_col) > 0: col_set = col_set_add(pass_col, col_set, self.recursive) continue new_node = self.get_node(i, True) col_set = col_set_add(new_node.col_set, col_set, self.recursive) if node not in new_node.back_prop_set: new_node.back_prop_set.append(node) if not new_node.free: continue # update costs if new_node.closed: continue t_cost = node.cost for j in range(len(start_coord)): t_cost += self.sub_search[self.policy_keys[j]].get_edge_cost( start_coord[i], new_node.coord[j]) if t_cost < new_node.cost: new_node.cost = t_cost new_node.back_ptr = node neighbors.append(new_node) return neighbors, col_set def get_astar_neighbors(self, node): """Gets neighbors of a specified node using the standard A* approach, assumes working with standard nodes node - node for which to generate neighbors returns: (neighbors, col_set) neighbors - coordinates of collision free neighboring nodes col_set - collisions resulting from trying to reach non-collision free neighbors """ start_coord = node.coord # 
Generate the individually optimal setp base_coord = [self.sub_search[self.policy_keys[i]].get_step( start_coord[i]) for i in range(self.num_bots)] old_coords = [base_coord] assert len(node.col_set) <= 1 to_explore = node.col_set if self.full_space: to_explore = [range(self.num_bots)] for i in to_explore: for bot in i: new_coords = [] neighbors = self.sub_search[self.policy_keys[bot]]\ .get_neighbors(start_coord[bot]) for neigh in neighbors: for k in old_coords: temp = k[:] temp[bot] = neigh new_coords.append(temp) old_coords = new_coords col_set = [] neighbors = [] old_coords = list(map(tuple, old_coords)) for i in old_coords: # First check if this path is relevant. I.e. if there is already a # better path to the node, then the search will never try to use # that route, so we don't need to consider collisions new_node = self.get_node(i, True) if node.free: t_cost = node.cost for j in range(len(start_coord)): t_cost += self.sub_search[ self.policy_keys[j]].get_edge_cost(start_coord[j], i[j]) if t_cost >= new_node.cost: continue # Check if we can get there pass_col = self.pass_through(start_coord, i) if len(pass_col) > 0: col_set = col_set_add(pass_col, col_set, self.recursive) continue new_node = self.get_node(i, True) col_set = col_set_add(new_node.col_set, col_set, self.recursive) if node not in new_node.back_prop_set: new_node.back_prop_set.append(node) if not new_node.free: continue # update costs if new_node.closed: continue if t_cost < new_node.cost: new_node.cost = t_cost new_node.back_ptr = node neighbors.append(new_node) return neighbors, col_set class mstar_node(object): """Holds the data needed for a single node in operator decomposition m* coord should be a tuple of tuples. 
Standard nodes have coordinates of the form ((x1, y1), (x2, y2), ...), while intermediate nodes have coordinates of the form (((x1, y1), ...), move_tuple) """ __slots__ = ['free', 'coord', 'updated', 'open', 'closed', 'standard_node', 'h', 'cost', 'back_ptr', 'back_prop_set', 'col_set', 'recursive', 'forwards_ptr', 'assignment', 'colset_changed', 'offset', 'prev_col_set'] def __init__(self, coord, free, recursive, standard_node, back_ptr=None, forwards_ptr=None): """Constructor for mstar_node Assumes the col_set is empty by default coord - tuple giving coordinates, may store partial moves if not standard node free - part of the free configuration space standard_node - represents a standard node, and not a partial move back_ptr - pointer to best node to get to self forwards_ptr - pointer along the best path to the goal """ self.free = free self.coord = coord self.updated = -1 # Whether already in the open list self.open = False # Whether this has been expanded. Note that a node can be added # back to the open list after it has been expanded, but will # still be marked as closed. It cannot have its cost changed, # but it can add neighbors, but not be added as a neighbor self.closed = False self.standard_node = standard_node # Heuristic cost to go, None to ensure it will be properly # calculated self.h = None # Cost to reach self.cost = MAX_COST # Optimal way to reach this node. Point to self to indicate the # initial position self.back_ptr = back_ptr self.back_prop_set = [] # Ways found to reach this node self.col_set = () # store the collision set of back_ptr when the path from # back_ptr to self was first found. Used for hopefully more # efficient cached path access self.prev_col_set = () self.recursive = recursive # Keeps track of solutions that have already been found, # replace forwards_tree. 
Denote the goal node by pointing # forwards_ptr # to itself self.forwards_ptr = forwards_ptr self.assignment = None # Used for multiassignment mstar # Used to track whether new assignments need to be generated for # MURTY mstar self.colset_changed = False # Tracks current offset for multiple re-expansion a la EPEA* self.offset = 0 def reset(self, t): """Resets if t > last update time""" if t > self.updated: self.updated = t self.open = False self.closed = False self.cost = MAX_COST self.back_ptr = None self.back_prop_set = [] self.offset = 0 def get_path(self): """Gets the path passing through path to the goal, assumes that self is either the goal node, or a node connected to the goal node through forwards_pointers """ path = self.backtrack_path() return self.forwards_extend_path(path) def backtrack_path(self, path=None, prev=None): """Finds the path that leads up to this node, updating forwards_ptr so that we can recover this path quickly, only returns standard nodes path - current reconstructed path for use in recusion, must start as None prev - pointer to the last node visited by backtrack_path, used to update forwards_ptr to record the best paths to the goal """ if path is None: path = [] if prev is not None: self.forwards_ptr = prev if isinstance(self.h, tuple): # Needed for constrained od_mstar, and don't feel like # coming up with a better solution for now self.h = (prev.h[0] + prev.cost[0] - self.cost[0], self.h[1]) else: self.h = prev.h + (prev.cost - self.cost) if self.standard_node: assert self.coord not in path path.insert(0, self.coord) if self.back_ptr == self: # Done so that it cannot terminate on a node that wasn't # properly initialized return path return self.back_ptr.backtrack_path(path, self) def forwards_extend_path(self, path): """Extends the path from self to the goal node, following forwards pointers, only includes standard nodes path - current path to extend towards the goal, as list of joint configuration space coordinates """ if 
self.forwards_ptr == self: return path if self.forwards_ptr.standard_node: path.append(self.forwards_ptr.coord) return self.forwards_ptr.forwards_extend_path(path) def add_col_set(self, c): """Adds the contents of c to self.col_set. c - collision set to add to the current node's collision set returns: True if modifications were made, else False """ if len(c) == 0: return False if self.recursive: temp = add_col_set_recursive(c, self.col_set) else: temp = add_col_set(c, self.col_set) modified = (temp != self.col_set) if modified: self.col_set = temp return True return False def back_prop_col_set(self, new_col, open_list, epeastar=False): """Propagates the collision dependencies found by its children to the parent, which adds any new dependencies to this col_set new_col - the new collision set to add open_list - the open list to which nodes with changed collisoin sets are added, assumed to be a SortedCollection """ further = self.add_col_set(new_col) if further: self.colset_changed = True if not self.open: # assert self.closed self.open = True # self.closed = False self.offset = 0 # Inserting to the left of any node with the same key # value, to encourage exploring closer to the collison open_list.insert(self) elif epeastar and self.offset != 0: # Need to reset the offset and reinsert to allow a path # to be found even if the node is already in the open # list self.offset = 0 # Inserting to the left of any node with the same key # value, to encourage exploring closer to the collison open_list.insert(self) for j in self.back_prop_set: j.back_prop_col_set(self.col_set, open_list, epeastar=epeastar) def get_step(self): """Returns the coordinate of the next standard node in the path, returns: None if no such thing """ if self.forwards_ptr is None: return None if self.forwards_ptr.standard_node: return self.forwards_ptr.coord else: return self.forwards_ptr.get_step() def individually_optimal_paths(obs_map, init_pos, goals): """Returns the individually optimal paths for a 
system""" path = [] for i in range(len(init_pos)): path.append(find_path(obs_map, [init_pos[i]], [goals[i]])) # Need to convert to full space max_length = max(list(map(len, path))) for i in path: while len(i) < max_length: i.append(i[-1]) jpath = [] for i in range(max_length): temp = [] for j in path: temp.append(j[i][0]) jpath.append(temp) return jpath def find_path_limited_graph(obs_map, init_pos, goals, recursive=True, inflation=1.0, time_limit=5 * 60.0, astar=False, get_obj=False, connect_8=False, full_space=False, return_memory=False, flood_fill_policy=False, pruning_passes=5): global global_move_list global_move_list = [] o = Od_Mstar(obs_map, goals, recursive=recursive, inflation=inflation, astar=astar, connect_8=connect_8, full_space=full_space, flood_fill_policy=flood_fill_policy) import prune_graph G = prune_graph.to_networkx_graph(obs_map) for i in range(pruning_passes): G = prune_graph.prune_opposing_edge(G, num_edges=5) # Replace the individual policies with limited graphs for i in range(len(o.goals)): o.sub_search[(i, )] = workspace_graph.Networkx_Graph( obs_map, goals[i], graph=G, connect_8=connect_8) # Need to make sure that the recursion limit is great enough to # actually construct the path longest = max([o.sub_search[(i, )].get_cost(init_pos[i]) for i in range(len(init_pos))]) # Guess that the longest path will not be any longer than 5 times the # longest individual robot path sys.setrecursionlimit(max(sys.getrecursionlimit(), longest * 5 * len(init_pos))) path = o.find_path(init_pos, time_limit=time_limit) num_nodes = o.get_memory_useage(False) corrected_mem = o.get_memory_useage(True) if get_obj: return path, o # if visualize: # return path, global_move_list if return_memory: return path, num_nodes, corrected_mem return path ================================================ FILE: od_mstar3/od_vertex.hpp ================================================ #ifndef MSTAR_OD_VERTEX_H #define MSTAR_OD_VERTEX_H #include #include "mstar_type_defs.hpp" 
namespace mstar{ struct OdVertex{ OdCoord coord; ColSet col_set, gen_set; // Collision set and generating collision set int updated; // last planning iteration used bool closed, open; double cost, h; OdVertex* back_ptr; // optimal way to reach this std::set back_prop_set; // all explored ways to reach this OdVertex* forwards_ptr; // way to goal from this OdVertex(OdCoord coord): coord(coord), col_set(), updated(0), closed(false), open(false), cost(std::numeric_limits::max()), h(), back_ptr(nullptr), back_prop_set(), forwards_ptr(nullptr) {}; bool operator>=(const OdVertex &other) const{ return cost + h >= other.cost + other.h; } bool operator>(const OdVertex &other) const{ return cost + h > other.cost + other.h; } bool operator<=(const OdVertex &other) const{ return cost + h <= other.cost + other.h; } bool operator<(const OdVertex &other) const{ return cost + h < other.cost + other.h; } /** * Resets a vertex used in a previous planning iteration * * @param t Current planning iteration */ void reset(int t){ if (t > updated){ updated = t; open = false; closed = false; cost = std::numeric_limits::max(); back_ptr = nullptr; back_prop_set = std::set(); } } }; } #endif ================================================ FILE: od_mstar3/policy.cpp ================================================ #include #include #include "policy.hpp" using namespace mstar; Policy::Policy(const Graph &g, const RobCoord goal){ g_ = g; costs_ = std::vector(num_vertices(g_)); predecessors_.resize(boost::num_vertices(g_)); boost::dijkstra_shortest_paths( boost::make_reverse_graph(g_), goal, boost::predecessor_map(&predecessors_[0]).distance_map(&costs_[0])); edge_weight_map_ = boost::get(boost::edge_weight_t(), g_); } double Policy::get_cost(RobCoord coord){ return costs_[coord]; } double Policy::get_edge_cost(RobCoord u, RobCoord v){ // boost::edge returns pair return boost::get(edge_weight_map_, boost::edge(u, v, g_).first); } std::vector Policy::get_out_neighbors(RobCoord coord){ std::vector 
out; for (auto adj_verts = boost::adjacent_vertices(coord, g_); adj_verts.first != adj_verts.second; adj_verts.first++){ out.push_back(*(adj_verts.first)); } return out; } RobCoord Policy::get_step(RobCoord coord){ return predecessors_[coord]; } ================================================ FILE: od_mstar3/policy.hpp ================================================ #ifndef MSTAR_POLICY_H #define MSTAR_POLICY_H /**************************************************************************** * Provides a wrapper for the Boost graphs ***************************************************************************/ #include #include #include #include "mstar_type_defs.hpp" namespace mstar{ /** * Generates an individual policy for a robot to reach a specified goal */ class Policy{ private: Graph g_; // the boost graph this wraps std::vector costs_; // holds cost to goal from every configuration boost::property_map::type edge_weight_map_; std::vector predecessors_; public: /** * @param g The graph describing the workspace * @param goal The goal coordinate of the robot */ Policy(const Graph &g, const RobCoord goal); /** * Returns the cost-to-go from a vertex * @param vert Vertex to query cost from * * @return the cost to go until the goal is reached */ double get_cost(RobCoord coord); /** * Returns cost of traversing the edge (u, v) * * Does not check whether the edge exists * * @param u Source vertex of the edge * @param v Destination vertex of the dge * * @return the cost of the edge */ double get_edge_cost(RobCoord u, RobCoord v); /** * Returns the out-neighbors of a given coordinate * @param coord Vertex to get out neighbors of */ std::vector get_out_neighbors(RobCoord coord); /** * Returns the successor of the specified coordinate * * @param coord coordinate to compute the successor thereof * * @return coordinate of next step */ RobCoord get_step(RobCoord coord); }; } #endif ================================================ FILE: od_mstar3/prune_graph.py 
================================================ from od_mstar3 import workspace_graph import networkx as nx #Python network analysis module def to_networkx_graph(obs_map): '''Reads in a standard obs_map list and converts it to a networkx digraph obs_map - list of lists, 0 for empty cell, 1 for obstacle''' #Create a workspace_graph object to generate neighbors g = workspace_graph.Astar_Graph(obs_map,[0,0]) G = nx.DiGraph() #Creates the graph object #Populate graph with nodes for x in range(len(obs_map)): for y in range(len(obs_map[x])): if obs_map[x][y] == 0: G.add_node((x,y)) #Add edges for i in G.nodes(): #Stored nodes by their coordinates in G for j in g.get_neighbors(i): G.add_edge(i,j) return G def prune_opposing_edge(G,num_edges=1): '''Reads in a networkx digraph and prunes the edge opposing the most between (i.e. edge on the most shortest path connections). If this edge doesn't have an opposing edge, or if the removal of said edge would reduce the connectivity of the space, the next most between edge is pruned instead. 
Since computing completeness can be expensive, allows multiple edges to be pruned before computing the impact of said prunning on completeness is computed''' #Get the current number of strongly connected components, can't decrease #without preventing some paths from being found num_components = nx.number_strongly_connected_components(G) pruned = 0 # print 'computing betweeness' betweenness = nx.edge_betweenness_centrality(G) # print 'betweenness computed' while pruned < num_edges: max_bet = max(betweenness.values()) if max_bet <= 0: #Set betweeness to -1 if can't prune, set to 0 not between return G edge = betweenness.keys()[betweenness.values().index(max_bet)] if not (edge[1],edge[0]) in G.edges(): #Already been pruned betweenness[edge] = -1 # print 'no edge' continue #Test if pruning the edge will break connectivity temp_graph = G.copy() temp_graph.remove_edge(edge[1],edge[0]) if num_components == nx.number_strongly_connected_components(temp_graph): #Can safely prune this edge G = temp_graph pruned+=1 betweenness[edge] = -1 betweenness.pop((edge[1],edge[0])) # print 'pruned' #Need to prevent further edges from being pruned from this vertex for neighbor in G.neighbors(edge[1]): betweenness[(edge[1],neighbor)] = -1 else: betweenness[edge] = -1 # print 'breaks con %s' %(str(edge)) return G ================================================ FILE: od_mstar3/setup.py ================================================ from distutils.core import setup, Extension from Cython.Build import cythonize setup(ext_modules = cythonize(Extension( "cpp_mstar", sources=["cython_od_mstar.pyx"], extra_compile_args=["-std=c++11"] ))) ================================================ FILE: od_mstar3/workspace_graph.py ================================================ """ workspace_graph.py This module defines all of the classes for the low-level graphs and policies used in Mstar. In general terms, these classes represent: 1. Graphs representing the configuration space. 
These graphs are structured so that each node in the graph represents a configuration, and each edge represents a permissible transition between two different configurations. *All of these graphs subclass the Graph_Interface class 2. Policies, which define paths in a configuration space from an initial configuration to a goal configuration. Policies are comprised of nodes, each of which represents a configuration in the configuration space. Each node in a policy has a pointer to its optimal neighbor, i.e., the next node in the optimal path to the goal node. Policy classes compute optimal paths by using some search algorithm to search the graphs generated in the classes described above. *All of these graphs subclass the Policy_Interface class There are specific implementations of policies and classes within this module. These are: 1. Grid_Graph and Grid_Graph_Conn_8: These subclass Graph_Interface and are used to represent simple configuration spaces in a 2-dimensional grid. Each point on the grid is delegated with either a zero or a one, to represent a free space or an object in that location, respectively. Grid_Graph specifies a configuration space with 4 connectivity; i.e., each robot can only go to the space immediately above its current position, below its current position, or to the left or right of its current position. Grid_Graph_Conn_8 specifies a configuration space with all the moves described in Grid_Graph, but with additional options options of moving diagonally. 2. Flood_Fill_Policy: This subclasses Policy_Interface and generates an optimal path to a goal configuration by using a flood fill. This method of policy generation relies on a series of pointers between nodes to generate a policy. It starts with the goal node on an open list. At each step, the algorithm pops a node off of the open list and calculates its neighbors, appending them to the open list. 
It iterates through the generated neighbors and checks to see if they should point to the popped node, based on the popped node's cost and their own cost. If they should, their pointer is changed and cost is updated. Eventually, the algorithm finds the starting node, and an optimal policy has been generated. 2.1 To reduce the amount of code that has to be copied each time a new workspace is generated, actions that deal with the workspace itself (rather than the configuration graph) are passed into Flood_Fill_Policy as functions 3. Astar_Policy: This subclasses Policy_Interface and generates an optimal policy to a goal configuration by using the A* search algorithm. A* uses a Best-First Search approach to generate optimal paths in lower-order average time than flood fill. 3.1 To reduce code that needs to copied for each new workspace, a scheme similar to that described in 2.1 has also been implemented in Astar. 4. Priority_Graph: This subclasses Policy_Interface and generates an optimal policy to a goal configuration using an Astar_Policy graph. However, Priority_Graph also adds a time slot to each coordinate. This way, routes can be planned for time in addition to space. 5. Back_Priority_Graph: This subclasses Priority_Graph and generates an optimal policy to a goal configuration. Differs from Priority Graph in that time dynamics are configured for planning backwards in time. Finally, an Edge_Checker class is implemented in the bottom of this module. This class checks for collisions occurring when two robots attempt to move past each other. 
Module urrently assumes that all actions have equal cost (including diagonal vs non-diagonal move """ from od_mstar3.col_set_addition import add_col_set_recursive, add_col_set from od_mstar3.col_set_addition import NoSolutionError from od_mstar3 import SortedCollection from collections import defaultdict from functools import wraps try: import ipdb as pdb except ImportError: import pdb from od_mstar3 import interface import math # Define values delegated to free spaces and spaces with obstacles # in the matrix of the workspace descriptor FREE = 0 OBS = 1 # Actions for 4 connected graph CONNECTED_4 = ((0, 0), (1, 0), (0, 1), (-1, 0), (0, -1)) # Actions for 8 connected graph CONNECTED_8 = ((0, 0), (1, 0), (1, 1), (0, 1), (-1, 1), (-1, 0), (-1, -1), (0, -1), (1, -1)) MAX_COST = 1000000 # DIAGONAL_COST, note that team policies imports this value as well #DIAGONAL_COST = 2 ** .5 DIAGONAL_COST = 1.4 class wrk_node(object): """Holds information about a node in a policy's graph Defines __slots__ to decrease program memory usage by allocating a fixed amount of space to wrk_node object, rather than a dictionary holding all attributes Public interface of instance variables defined below: coord - coordinate representing configuration corresponding to this node in the configuration space policy - coordinate of neighboring configuration which is optimal for the policy to get to the goal configuration opt_neighbors - list of all neighbors which lead to paths that are considered optimal by the policy (more than one path can be optimal) h - heuristic cost of configuration specified by coord closed, open - specify when a policy is finalized iteration - current step of policy """ __slots__ = ['coord', 'policy', 'opt_neighbors', 'cost', 'h', 'closed', 'iteration', 'open'] def __init__(self, coord): """Initialization function for nodes of astar policy graph. 
coord - coordinate of configuration which wrk_node represents in astar_policy graph """ self.coord = coord # Want to store as tuples self.policy = None # Holds coordinate of next neighbor to visit # Holds all optimal neighbors, intended to make replanning the # policy to find an optimal, collision avoiding path easier self.opt_neighbors = [] # currently only generated by _road_rules, # also used to store neighbor offsets for EPEA* self.cost = MAX_COST # Cost to goal # Used for extension easier to running resumable A* search self.h = 0 # Used to determine when a policy is finalized self.closed = False self.iteration = -1 self.open = False # Simple memoization decorator, can be used for any function # Although no code in this module has been effectively sped up with this # decorator yet, this will hopefully be useful with more complex graphs # and configuration spaces in the future def memoize(f): memo = {} @wraps(f) def inner(*args, **kwargs): try: return memo[args] except KeyError: z = memo[args] = f(*args) return z return inner def node_cmp(n1, n2): """ Sort nodes by cost """ if n1.cost < n2.cost: return -1 elif n1.cost > n2.cost: return 1 # Returning 0 allows for stable sorting, i.e. equal objects stay in # the same order, which should provide a bit of a performance boost, # as well as a bit of consistency return 0 class Networkx_DiGraph(interface.Graph_Interface): """Simple wrapper for networkx graphs, in particular, supports digraphs. Requires a modified policy which can account for DiGraphs, because the forward and backword neighbors are not the same thing """ def __init__(self, graph): """graph - networkx.DiGraph specifying the configuration space. 
assumes, cost is stored in the cost parameter """ self.graph = graph def get_edge_cost(self, coord1, coord2): """Returns edge_cost of going from coord1 to coord2 coord1, coord2 - node identification returns: edge cost """ return self.graph[coord1][coord2]['cost'] def get_neighbors(self, coord): """Returns the out-neighbors of the specified node coord - identifier of the node to query returns: list of node identifiers of neighboring nodes """ return self.graph.neighbors(coord) def get_in_neighbors(self, coord): """Returns the in-neighbors of the specified node coord - identifier of the node to query returns: list of node identifiers of in-neighbors """ return [c[0] for c in self.graph.in_edges(coord)] class Grid_Graph(interface.Graph_Interface): """ Represents configuration space for grid workspace This graph serves to generate the configuration graph for a gridded workspace. This workspace must be 4-connected, so that a robot can go to the grid spaces located one space up, one space to the left, one space to the right, and one space down from its current coordinate. """ def __init__(self, world_descriptor, diagonal_cost=False): """Initialization for grid graph world_descriptor - Rectangular matrix, 0 for free cell, 1 for obstacle diagonal_cost - Boolean, apply 2**.5 for diagonal cost """ self.world_descriptor = world_descriptor self.width = len(world_descriptor) self.height = len(world_descriptor[0]) self.actions = CONNECTED_4 if diagonal_cost: self._diagonal_cost = DIAGONAL_COST else: self._diagonal_cost = 0 def get_edge_cost(self, coord1, coord2): """Retrieves config edge cost between two configurations Grid_Graph has a fixed edge cost of one, effectively optimizing make-span coord1 - coordinate of source vertex coord2 - coordinate of target vertex Returns edge_cost of going from coord1 to coord2. 
""" if (self._diagonal_cost and coord1[0] != coord2[0] and coord1[1] != coord2[1]): return self._diagonal_cost return 1 def get_neighbors(self, coord): """Returns collision free neighbors of the specified coordinate. coord - (x, y) coordinate of the node for which neighbors are being generated Return value in form of list of (x, y) tuples giving coordinates of neighbors, including self """ neighbors = [] min_cost = MAX_COST for i in self.actions: new_coord = (i[0] + coord[0], i[1] + coord[1]) # check if points to a coordinate in the graph if (new_coord[0] < 0 or new_coord[0] >= self.width or new_coord[1] < 0 or new_coord[1] >= self.height): continue if self.world_descriptor[new_coord[0]][new_coord[1]] == OBS: # Points to obstacle continue # Valid single robot action neighbors.append(new_coord) return neighbors def get_in_neighbors(self, coord): """Returns the collision free in-neighbors of the specified coordinate. Equivalent to get_neighbors, because the graph is undirected coord - (x, y) coordinate of vertex for which to return the in-neighbors Returns: List of coordinates of in-neighbors """ return self.get_neighbors(coord) class Grid_Graph_Conn_8(Grid_Graph): """ Configuration graph for gridded workspace with 8 connection This graph serves to generate the configuration graph for gridded workspace where each point in the grid has eight neighbors. 
""" def __init__(self, world_descriptor, diagonal_cost=False): """Initialization for grid graph with 8 connectivity world_descriptor - Rectangular matrix, 0 for free cell, 1 for obstacle """ super(Grid_Graph_Conn_8, self).__init__(world_descriptor, diagonal_cost=diagonal_cost) self.actions = CONNECTED_8 class GridGraphConn4WaitAtGoal(Grid_Graph): """Variant of workspace_graph.Grid_Graph that allows for the robot to wait at its goal with reduced cost Note: this can not be used directly for CMS to allow reduced waiting cost when a team is not ready to be formed, as cost should not be reduced when a team is ready to be formed. """ def __init__(self, world_descriptor, goal, wait_cost=.0, diagonal_cost=False): """Initialization for grid graph world_descriptor - Rectangular matrix, 0 for free cell, 1 for obstacle goal - goal of the robot wait_cost - cost to incur for waiting at the goal configuration diagonal_cost - incur DIAGONAL_COST for moving diagonally, 1 otherwise. included to support subclasses """ super(GridGraphConn4WaitAtGoal, self).__init__( world_descriptor, diagonal_cost=diagonal_cost) self._goal = goal self._wait_cost = wait_cost def get_edge_cost(self, coord1, coord2): """Retrieves edge cost between two configurations Waiting at the goal incurs cost wait_cost, while any other action incurs cost self._wait_cost coord1 - coordinate of source vertex coord2 - coordinate of target vertex Returns edge_cost of going from coord1 to coord2. """ if coord1 == self._goal and coord2 == self._goal: return self._wait_cost return super(GridGraphConn4WaitAtGoal, self).get_edge_cost(coord1, coord2) class GridGraphConn8WaitAtGoal(GridGraphConn4WaitAtGoal): """Variant of workspace_graph.Grid_Graph__con_8 that allows for the robot to wait at its goal with reduced cost Note: this can not be used directly for CMS to allow reduced waiting cost when a team is not ready to be formed, as cost should not be reduced when a team is ready to be formed. 
""" def __init__(self, world_descriptor, goal, wait_cost=.0, diagonal_cost=False): """Initialization for grid graph world_descriptor - Rectangular matrix, 0 for free cell, 1 for obstacle goal - goal of the robot wait_cost - cost to incur for waiting at the goal configuration diagonal_cost - incur DIAGONAL_COST for moving diagonally if True, incur 1 if False """ super(GridGraphConn8WaitAtGoal, self).__init__( world_descriptor, goal, wait_cost=wait_cost, diagonal_cost=diagonal_cost) self.actions = CONNECTED_8 def Workspace_Graph(world_descriptor, goal=None, connect_8=False, road_rules=True): """Wrapper function for returning Flood_Fill_Policy objects Function returns objects with different args depending on the connect_8 flag world_descriptor - two-dimensional matrix representing the space in which the robot can travel. A value of 1 in the space represents an obstacle, and a value of 0 represents an open space goal - position [x,y] of the goal of the policy connect_8 - boolean determining whether Grid_Graph or Grid_Graph_Conn_8 is used road_rules - boolean supplied to policy object to determine if rightmost neighbor node should always be used """ if connect_8: return Flood_Fill_Policy(world_descriptor, Grid_Graph_Conn_8, goal, road_rules) return Flood_Fill_Policy(world_descriptor, Grid_Graph, goal, road_rules) def compute_heuristic_conn_8(init_pos, coord): """Returns a heuristic for distance between coord and init_pos init_pos - coordinate of position of goal configuration coord - coordinate of configuration for which heuristic is being computed Returns the heuristic distance to goal """ return max(map(lambda x, y: abs(x - y), coord, init_pos)) def compute_heuristic_conn_8_diagonal(init_pos, coord): """Returns a heuristic for distance between coord and init_pos Used when moving diagonally costs DIAGONAL_COST instead of 1 init_pos - coordinate of position of goal configuration coord - coordinate of configuration for which heuristic is being computed Returns the 
heuristic distance to goal """ x_diff = abs(init_pos[0] - coord[0]) y_diff = abs(init_pos[1] - coord[1]) min_dist = min(x_diff, y_diff) max_dist = max(x_diff, y_diff) return DIAGONAL_COST * min_dist + (max_dist - min_dist) def compute_heuristic_conn_4(init_pos, coord): """Returns Manhattan heuristic for distance from coord to init_pos init_pos - coordinate of position of goal configuration coord - coordinate of configuration for which heursitic is being computed Returns the heuristic distance to goal through a Manhattan metric calculation. """ return sum(map(lambda x, y: abs(x - y), coord, init_pos)) def Astar_Graph(world_descriptor, goal=None, connect_8=False, diagonal_cost=False, makespan=False, wait_cost=0.): """Wrapper function for returning Astar_Policy objects Different heuristic functions are given to Astar_Policy object depending on whether the gridworld is 8 connected or not world_descriptor - two-dimensional matrix which describes the gridworld with obstacles. Each point in the matrix is either a zero (no obstacle) or a one (obstacle) goal - position (x, y) of the goal of the policy connect_8 - boolean determining whether each coordinate in the gridworld has eight neighbors (including all diagonal neighbors) or only four (cardinal neighbors) diagonal_cost - boolean, apply DIAGONAL_COST for diagonal costs if True, apply 1 if False makespan - minimize makespan instead of minimizing time wait_cost - cost of waiting at the goal """ if makespan: if connect_8: if diagonal_cost: h_func = compute_heuristic_conn_8_diagonal else: h_func = compute_heuristic_conn_8 return Astar_Policy( world_descriptor, lambda x: Grid_Graph_Conn_8(x, diagonal_cost=diagonal_cost), goal=goal, compute_heuristic=h_func) else: return Astar_Policy(world_descriptor, Grid_Graph, goal=goal, compute_heuristic=compute_heuristic_conn_4) if connect_8: if diagonal_cost: h_func = compute_heuristic_conn_8_diagonal else: h_func = compute_heuristic_conn_8 return Astar_Policy( world_descriptor, lambda 
x: GridGraphConn8WaitAtGoal(x, goal, wait_cost=wait_cost, diagonal_cost=diagonal_cost, ), goal, h_func) return Astar_Policy(world_descriptor, lambda x: GridGraphConn4WaitAtGoal( x, goal, wait_cost=wait_cost, diagonal_cost=diagonal_cost), goal, compute_heuristic_conn_4) class Astar_Policy(interface.Policy_Interface): """Class that implements Astar to search config space Uses resumable A* search instead of the flood fill used in workspace graph, as the optimal policy computation is dominating the time required for rM* when inflated. To avoid copying large amounts of code for each new workspace, all functions interacting with the workspace are passed into this class as arguments. """ def __init__(self, world_descriptor, config_graph, goal=None, compute_heuristic=compute_heuristic_conn_4): """Initialization function for Astar_Policy world_descriptor - two-dimensional matrix which describes the gridworld with obstacles. Each point in the matrix is either a zero (no obstacle) or a one (obstacle) config_graph - a callable that takes a single argument, the world descriptor, and returns an object that represents the configuration graph, which implements the methods defined by Graph_Interface goal - (x, y) target, optional, if not supplied, will not generate policy compute_heuristic - helper function used to calculate the heuristic distance to the goal. 
Passed in because it interacts with the workspace """ self.cspace = config_graph(world_descriptor) self.graph = {} self.iteration = 0 self.goal = goal self.init_pos = self.goal self.compute_heuristic = compute_heuristic self.goal_node = self._get_node(self.goal) # We implicitly assume a self loop by setting the goal node's # policy to be its own coordin self.goal_node.policy = self.goal_node.coord self.goal_node.cost = 0 self.goal_node.open = True self.open_list = SortedCollection.SortedCollection( [self.goal_node], key=lambda x: -x.cost - x.h) def _get_node(self, coord): """Returns node specified by coord In addition, updates its heursitic and iteration values. If no such node exists, it is created. coord - coordinate of node to return """ try: node = self.graph[coord] except KeyError: node = self.graph[coord] = wrk_node(coord) if self.iteration > node.iteration: node.iteration = self.iteration node.h = self.compute_heuristic(self.init_pos, coord) return node def _compute_path(self, coord): """Extends the search to reach the specified node coord - (x,y) coordinate of targeted configuration Tries to compute path from coord to goal. If successful, returns next coordinate in path to goal from coord. If not successful, raises an NoSolutionError. """ if self.init_pos == self.goal: self.init_pos = coord # First need to update the heuristic for nodes in the open # list # Only change the heuristic for the intial coordinate, when the # open list is empty, so don't actually have to resort the open # list # Open list may be empty if trying after trying to find paths to # two unreachable nodes. This will only be done my # multi_assignment_mstar while trying to compute the assignment # cost matrix. 
Besides which, this will trigger a # NoSolutionError in case such a situtation is not supposed to # be found assert len(self.open_list) > 0 while len(self.open_list) > 0: node = self.open_list.pop() if node.closed: continue node.closed = True node.open = False # Need to add the neighbors before checking if this is the # goal, so search can be resumed without being blocked by # this position neighbors = self.get_neighbors(node.coord) for i in neighbors: tnode = self._get_node(i) if (tnode.closed or tnode.cost <= node.cost + self.get_edge_cost(i, node.coord)): continue tnode.cost = node.cost + self.get_edge_cost( i, node.coord) tnode.policy = node.coord tnode.open = True # Can add tnode directly, and will just skip any # inconsistent copies self.open_list.insert_right(tnode) if node.coord == coord: # Done, so return the next step return node.policy raise NoSolutionError('Couldn\'t finish individual policy') def get_step(self, coord): """Gets the policy for the given coordinate If no policy exists, extends planning to reach the coordinate coord - (x, y) configuration Returns a coordinate of the next node in the policy """ node = self._get_node(coord) if node.closed: # Have already computed the optimal policy here return node.policy self.iteration += 1 try: return self._compute_path(coord) except NoSolutionError: # Couldn't find a path to goal, so return None return None def get_cost(self, coord): """Returns the cost of moving from given position to goal Cost is for moving from coordinate specified at coord to the goal configuration. 
coord - (x, y) configuration """ node = self._get_node(coord) if node.closed: return node.cost self.iteration += 1 self._compute_path(coord) assert node.closed return node.cost def get_edge_cost(self, coord1, coord2): """Returns cost of config transition from coord1 to coord2 Wrapper function for returning the config space's get_edge_cost from coord1 to coord2 coord1 - initial coordinate in transition coord2 - final coordinate in transition returns: edge cost of going from coord1 to coord2 """ return self.cspace.get_edge_cost(coord1, coord2) def _gen_limited_offset_neighbors(self, coord): """Stores the neighbors of a node by changes in f-value f-value - the sum of cost to reach and cost to go. coord - (x, y) configuration for which limited offset neighbors are generated """ # Repurposing a preexisting field, so need to change to a # defaultdict node = self._get_node(coord) node.opt_neighbors = defaultdict(lambda: []) base_cost = self.get_cost(coord) # Need to compute offsets for neib in self.get_neighbors(coord): # difference in path cost using different paths, need to # handle staying at the goal seperately if neib == self.goal and neib == coord: offset = 0 else: offset = self.get_cost(neib) - base_cost + 1 node.opt_neighbors[offset].append((offset, neib)) node.opt_neighbors = dict(node.opt_neighbors) def get_limited_offset_neighbors(self, coord, max_offset, min_offset=0): """Returns set of neighbors specified by the offsets More specifically, returns the set of neighbors for which the maximum difference in path cost if passed through is less than the specified value. (i.e. if you are forced to pass through coordinate x, instead of the optimal step, what is the difference in cost)? 
coord - coordinates of the node to find neighbors of max_offset - the maximum increase in path cost to encur in choice of neighbors min_offset - minimum increae in path cost to encur in a neighbor returns: a list of tuples of the form (offset, coordinate) """ node = self._get_node(coord) if not node.opt_neighbors: self._gen_limited_offset_neighbors(coord) # Have already pre-computed the results out = [] for offset, neighbors in node.opt_neighbors.iteritems(): if offset < min_offset: continue if offset > max_offset: return out out.extend(neighbors) return out def get_offset_neighbors(self, coord, offset): """Generates offset neighbors for node specified by coord If no offset neighbors exist, they are created Only offset neighbors at a certain offset are returned coord - (x,y) configuration for which neighbors are being generated offset - value of offset determing which neighbors are included in return value returns: list of tuples of form (offset, neighbor) """ node = self._get_node(coord) if not node.opt_neighbors: self._gen_limited_offset_neighbors(coord) return node.opt_neighbors[offset] def get_offsets(self, coord): """Return the possible offsets of the neighbors. The offset of a neighbor is the difference in the cost of the optimal path from coord to the cost of the best path constrained to pass through a specific neighbor. Used in EPEA* coord - (x,y) configuration for which neighbors are being generated and their offsets returned Returns list of offsets of all neighbor nodes to coord """ node = self._get_node(coord) if not node.opt_neighbors: self._gen_limited_offset_neighbors(coord) return node.opt_neighbors.keys() def get_neighbors(self, coord, opt=False): """Wrapper function for get_neighbors function of underlying config_space graph. 
opt - only optimal neighbors are returned coord - configuration for which neighbors are being returned Returns list of tuples, where each tuple is a coordinate """ neighbors = self.cspace.get_neighbors(coord) if not opt: return neighbors for i in neighbors: if opt: cost = self.get_cost(i) if cost < min_cost: min_cost = cost opt_neighbors = [] for i in neighbors: if self.get_cost(i) == min_cost: opt_neighbors.append(i) return opt_neighbors def get_graph_size(self, correct_for_size=True): """Gets the size of the graph correct_for_size - just intended to match signatures Returns the number of nodes used for this graph """ return sum(map(len, self.graph)) class Astar_DiGraph_Policy(Astar_Policy): """Class that implements Astar to search configuration spaces that are represented as a di graph Differs slightly from Astar_Policy in using the get_in_neighbors function when computing a policy, to explicitly plan back in time Uses resumable A* search instead of the flood fill used in workspace graph, as the optimal policy computation is dominating the time required for rM* when inflated. To avoid copying large amounts of code for each new workspace, all functions interacting with the workspace are passed into this class as arguments. """ def __init__(self, world_descriptor, config_graph, goal=None, compute_heuristic=compute_heuristic_conn_4): """Initialization function for Astar_Policy world_descriptor - two-dimensional matrix which describes the gridworld with obstacles. Each point in the matrix is either a zero (no obstacle) or a one (obstacle) config_graph - a class which is used to represent the config space of the robot goal - (x, y) target, optional, if not supplied, will not generate policy compute_heuristic - helper function used to calculate the heuristic distance to the goal. 
Passed in because it interacts with the workspace """ super(Astar_DiGraph_Policy, self).__init__( world_descriptor, config_graph, goal=goal, compute_heuristic=compute_heuristic) def _compute_path(self, coord): """Extends the search to reach the specified node Explicitly plans in reverse from the goal to the target, using get_in_neighbors to compute node expansion, instead of Astar_Graph, which uses the get_neighbors function. coord - (x,y) coordinate of targeted configuration Tries to compute path from coord to goal. If successful, returns next coordinate in path to goal from coord. If not successful, raises an NoSolutionError. """ if self.init_pos == self.goal: self.init_pos = coord # First need to update the heuristic for nodes in the open # list # Only change the heuristic for the intial coordinate, when the # open list is empty, so don't actually have to resort the open # list # Open list may be empty if trying after trying to find paths to # two unreachable nodes. This will only be done my # multi_assignment_mstar while trying to compute the assignment # cost matrix. 
Besides which, this will trigger a # NoSolutionError in case such a situtation is not supposed to # be found assert len(self.open_list) > 0 while len(self.open_list) > 0: node = self.open_list.pop() if node.closed: continue node.closed = True node.open = False # Need to add the neighbors before checking if this is the # goal, so search can be resumed without being blocked by # this position neighbors = self.get_in_neighbors(node.coord) for i in neighbors: tnode = self._get_node(i) if (tnode.closed or tnode.cost <= node.cost + self.get_edge_cost(i, node.coord)): continue tnode.cost = node.cost + self.get_edge_cost( i, node.coord) tnode.policy = node.coord tnode.open = True # Can add tnode directly, and will just skip any # inconsistent copies self.open_list.insert_right(tnode) if node.coord == coord: # Done, so return the next step return node.policy raise NoSolutionError('Couldn\'t finish individual policy') def get_in_neighbors(self, coord): """Wraper for the get_in_neighbors function of the underlying config_space graph coord - coordinate of whom the predecessors (in neighbors) are returned returns: list of coordinates of the predecessors of coord """ return self.cspace.get_in_neighbors(coord) class Priority_Graph(interface.Policy_Interface): """Simple wrapper for A* graph that uses priority planning. Adds/removes a time coordinate to allow for priority planning. Implemented this way to make Indpendence_Detection happier, as it makes use both of basic Astar_Policy and priority planners of various forms. This way, any work done by the Astar_Policy can be leveraged for the priority planner, and vice versa """ def __init__(self, astar_policy, max_t=None): """initialization for Priority_Graph astar_policy - the graph to wrap max_t - greatest t - value allowed """ self.astar_policy = astar_policy self.max_t = max_t def get_step(self, coord): """Gets the policy for the given coordinate, If necessary, extends planning to reach said coordinate. 
Will increment time by 1 coord - (x, y, t) position and time coordinate for the specified node """ # Can do this by stripping time, querrying the underlying # astar_policy, then appending the appropriate new time t = coord[-1] + 1 # Check if this would exceed maximal value if self.max_t is not None: t = min(self.max_t, t) step = self.astar_policy.get_step(coord[:2]) return step + (t, ) def get_cost(self, coord): """Gets cost of moving to goal from coord coord - (x, y, t) coordinates of node for which to get cost Returns cost of moving from the given position to goal """ return self.astar_policy.get_cost(coord[:2]) def set_max_t(self, max_t): """Sets the maximum time value the graph will use. Allows for easy changes for different constraints """ self.max_t = max_t def get_neighbors(self, coord): """Gets the neighbors of the specified space-time point coord - coordinate of configuration for which neighbors are being returned Returns neighbors of coord in config space, with a time stamp one greater than that of coord """ pos_neighbors = self.astar_policy.get_neighbors((coord[0], coord[1])) return map(lambda x: (x[0], x[1], min(self.max_t, coord[-1] + 1)), pos_neighbors) class Back_Priority_Graph(Priority_Graph): """Simple wrapper for A* graph which just adds/removes a time coordinate to allow for priority planning. Implemented this way to make Indpendence_Detection happier, as it makes use both of basic Astar_Policy and priority planners of various forms. This way, any work done by the Astar_Policy can be leveraged for the priority planner, and vice versa. Differs from Priority Graph in that time dynamics are configured for planning backwards in time. 
Need to query max_t in each instance, as multiple Constrained_Planners will make use of a single Back_Priority Graph, and no other planner should be using one """ def __init__(self, astar_policy, max_t=None, prune_paths=True): """ astar_policy - the graph to wrap max_t - greatest t-value allowed prune_paths - whether to prune neighbors that cannot reach the goal of astar_policy within the time specified. This is the default behavior. Disabling when running task swapping allows for paths to be found to multiple initial configurations """ Priority_Graph.__init__(self, astar_policy, max_t=max_t) self.prune_paths = prune_paths def get_neighbors(self, coord, max_t): """Gets the neighbors of the specified space-time point""" self.max_t = max_t if coord[-1] == 0 and self.max_t != 0: return [] pos_neighbors = self.astar_policy.get_neighbors((coord[0], coord[1])) if coord[-1] == self.max_t: neighbors = [] for pos in pos_neighbors: neighbors.append((pos[0], pos[1], self.max_t)) # Make sure that you can actually get form the initial # position to the suggested vertex in time if self.prune_paths: if (not self.max_t == 0 and self.astar_policy.get_cost(pos) <= coord[-1] - 1): neighbors.append((pos[0], pos[1], coord[-1] - 1)) else: # Don't check on whether there is time to reach the # intial configuration neighbors.append((pos[0], pos[1], coord[-1] - 1)) return neighbors if self.prune_paths: return [(x[0], x[1], coord[-1] - 1) for x in pos_neighbors if self.astar_policy.get_cost(x) <= coord[-1] - 1] else: return [(x[0], x[1], coord[-1] - 1) for x in pos_neighbors] def get_forwards_neighbors(self, coord, max_t): """Gets the forward time dynamics neighbors of this point""" self.max_t = max_t return Priority_Graph.get_neighbors(self, coord) def get_cost(self, coord, max_t): """Returns the cost of moving from given position to goal coord - (x, y, t) coordinates of node for which to get cost """ self.max_t = max_t return Priority_Graph.get_cost(self, coord) def get_step(self, coord, 
max_t): """Gets the policy for the given coordinate, extending planning to reach said coordinate if necessary. Will increment time by 1 coord - (x, y, t) position and time coordinate for the specified node """ self.max_t = max_t return Priority_Graph.get_step(self, coord) class Limited_Astar_Policy(Astar_Policy): """Uses resumable A* search instead of the flood fill used in workspace graph, as the optimal policy computation is dominating the time required for rM* when inflated. Also takes a networkx graph, called limit graph, which specifies the legal edges """ def __init__(self, world_descriptor, goal, limit_graph, connect_8=False): Astar_Policy.__init__(self, world_descriptor, goal, connect_8) self.limit_graph = limit_graph def get_neighbors(self, coord): """Returns the neighbors of the given coordinate in the limit graph """ return self.limit_graph.neighbors(coord) class Edge_Checker(interface.Planner_Edge_Checker): """Used to wrap edge checking so more complex graphs can be cleanly handled (may require keeping track of state for non-trivial graphs """ def __init__(self): """Takes no arguments, because on grid graph, only the coordinates matter """ pass def simple_pass_through(self, c1, c2): """Simply check for collisions, avoid the additional overhead for use with basic OD (op_decomp) c1 - coordinate at time t c2 - coordinate at time t + 1 returns: True if pass through collision, else false """ for i in range(len(c1)): for j in range(i + 1, len(c1)): if c1[i] == c2[j] and c1[j] == c2[i]: return True return False def simple_col_check(self, c1): """Checks for robot-robot collisions at c1, for use with basic OD (op_decomp) returns: True if collision exists """ for i in range(len(c1)): for j in range(i + 1, len(c1)): if c1[i] == c1[j]: return True return False def simple_cross_over(self, c1, c2): """Check for cross over collisions in 8-connected worlds returns: True if collision is detected """ for i in range(len(c1)): for j in range(i + 1, len(c1)): # compute 
displacement vector disp = [c1[i][0] - c1[j][0], c1[i][1] - c1[j][1]] if abs(disp[1]) > 1 or abs(disp[0]) > 1: continue # compute previous? displacement vector. Have a pass # through or cross over collision if the displacement # vector is the opposite if (disp[0] == -(c2[i][0] - c2[j][0]) and disp[1] == -(c2[i][1] - c2[j][1])): return True return False def simple_incremental_cross_over(self, c1, c2): """Check for cross over collisions in 8-connected worlds. Assumes that collision checking has been performed for everything but the last robot in the coordinates. To be used to save a bit of time for partial expansion approaches """ for i in range(len(c1) - 1): disp = [c1[i][0] - c1[-1][0], c1[i][1] - c1[-1][1]] if abs(disp[1]) > 1 or abs(disp[0]) > 1: continue # compute previous? displacement vector. Have a pass through # or cross over collision if the displacement vector is the # opposite if (disp[0] == -(c2[i][0] - c2[-1][0]) and disp[1] == -(c2[i][1] - c2[-1][1])): return True return False def simple_incremental_col_check(self, c1): """Checks for robot-robot collisions at c1, for use with basic OD (op_decomp) returns: True if collision exists """ for i in range(len(c1) - 1): if c1[i] == c1[-1]: return True return False def single_bot_outpath_check(self, cur_coord, prev_coord, cur_t, paths): """Tests for collisions when moving from prev_coord to cur_coord with the robots in paths. 
cur_coord - position of a single robot Returns: True if a collision is found, False otherwise """ if paths is None: return False prev_t = cur_t - 1 check_t = min(cur_t, len(paths) - 1) new_cols = 0 for bot in range(len(paths[0])): # Check for simultaneous occupation if (cur_coord[0] == paths[check_t][bot][0] and cur_coord[1] == paths[check_t][bot][1]): return True if cur_t >= len(paths): # Can't have edge collisions when out-group robots # aren't moving continue # Check for pass-through/cross over collisions disp = [prev_coord[0] - paths[prev_t][bot][0], prev_coord[1] - paths[prev_t][bot][1]] if abs(disp[1]) > 1 or abs(disp[0]) > 1: continue # Compute current displacement vector, and check for # inversion if (disp[0] == -(cur_coord[0] - paths[cur_t][bot][0]) and disp[1] == -(cur_coord[1] - paths[cur_t][bot][1])): return True return False def simple_prio_col_check(self, coord, t, paths, pcoord=None, conn_8=False): """Returns true, if collision is detected, false otherwise at the moment only used to check the obstacle collisions, but didn't want to reject the other code already coord - coord of potential new neighbor t - current time step paths - previously found paths pcoord - previous coordinate of the path """ if not isinstance(coord, tuple): coord = tuple(coord) if paths is not None: t = min(t, len(paths) - 1) # only one path if isinstance(paths[0][0], int): paths = map(lambda x: [x], paths) for bot in range(len(paths[t])): if not isinstance(paths[t][bot], tuple): paths[t][bot] = tuple(paths[t][bot]) # (a) simultaneous occupation of one node if coord == paths[t][bot]: return True # (b) pass through and cross over collision if pcoord is not None: if not isinstance(pcoord, tuple): pcoord = tuple(pcoord) if not isinstance(paths[t - 1][bot], tuple): paths[t - 1][bot] = tuple(paths[t - 1][bot]) if paths[t - 1][bot] == coord and paths[t][bot] == pcoord: return True # (c) cross over collision in case of conn_8 if conn_8: if self.single_bot_cross_over(paths[t][bot], 
paths[t - 1][bot],
                                                      coord, pcoord):
                            return True
        # No collision
        return False

    def col_check(self, c1, recursive):
        """Checks for collisions at a single point.

        Returns either a M* or rM* collision set in the form of sets,
        depending on the setting of recursive.
        """
        col_set = []
        # Select the function to be used for adding collision sets
        adder = add_col_set
        if recursive:
            adder = add_col_set_recursive
        # Pairwise check of every robot pair in the configuration.
        for i in range(len(c1) - 1):
            for j in range(i + 1, len(c1)):
                if c1[i] == c1[j]:
                    col_set = adder([frozenset([i, j])], col_set)
        return col_set

    def incremental_col_check(self, c1, recursive):
        """Checks for collisions at a single point.

        Returns either a M* or rM* collision set in the form of sets,
        depending on the setting of recursive.  Only checks whether the
        last robot is involved in a collision, for use with incremental
        methods
        """
        col_set = []
        # Select the function to be used for adding collision sets
        adder = add_col_set
        if recursive:
            adder = add_col_set_recursive
        # Only pair the newly added robot (last index) with the rest.
        j = len(c1) - 1
        for i in range(len(c1) - 1):
            if c1[i] == c1[j]:
                col_set = adder([frozenset((i, j))], col_set)
        return col_set

    def cross_over(self, c1, c2, recursive=False):
        """Detects cross over collisions as well as pass through
        collisions
        """
        col_set = []
        # Select the function to be used for adding collision sets
        adder = add_col_set
        if recursive:
            adder = add_col_set_recursive
        for i in range(len(c1) - 1):
            for j in range(i + 1, len(c1)):
                # compute current displacement vector
                # None entries mark robots without a position here; skip
                if c1[i] is None or c1[j] is None or c2[i] is None or c2[j] \
                        is None:
                    continue
                disp = (c1[i][0] - c1[j][0], c1[i][1] - c1[j][1])
                if abs(disp[1]) > 1 or abs(disp[0]) > 1:
                    continue
                # Compute previous displacement vector.
# Have a cross over or
                # pass through collision if the two displacement vectors are
                # opposites
                # pdisp = [c2[i][0] - c2[j][0], c2[i][1] - c2[j][1]]
                if (disp[0] == -(c2[i][0] - c2[j][0]) and
                        disp[1] == -(c2[i][1] - c2[j][1])):
                    col_set = adder([frozenset([i, j])], col_set)
        return col_set

    def incremental_cross_over(self, c1, c2, recursive=False):
        """Detects cross over collisions as well as pass through
        collisions.  Only checks if the last robot is involved in a
        collision, for use with partial expansion approaches.

        c1 - the initial configuration.
        c2 - the final configuration.  c1 may include additional
             robots, if necessary
        """
        col_set = []
        # Select the function to be used for adding collision sets
        adder = add_col_set
        if recursive:
            adder = add_col_set_recursive
        # Only pair the newly added robot (last index of c2) with the
        # robots that were already checked.
        j = len(c2) - 1
        for i in range(len(c2) - 1):
            # compute current displacement vector
            disp = (c1[i][0] - c1[j][0], c1[i][1] - c1[j][1])
            if abs(disp[1]) > 1 or abs(disp[0]) > 1:
                continue
            # Compute previous displacement vector. Have a cross over or
            # pass through collision if the two displacement vectors are
            # opposites
            # pdisp = [c2[i][0] - c2[j][0], c2[i][1] - c2[j][1]]
            if (disp[0] == -(c2[i][0] - c2[j][0]) and
                    disp[1] == -(c2[i][1] - c2[j][1])):
                col_set = adder([frozenset([i, j])], col_set)
        return col_set

    def pass_through(self, c1, c2, recursive=False):
        """returns a tuple of colliding robots, or set of tuples if
        recursive
        """
        col_set = []
        # Select the function to be used for adding collision sets
        adder = add_col_set
        if recursive:
            adder = add_col_set_recursive
        # Two robots pass through each other when they swap cells.
        for i in range(len(c1) - 1):
            for j in range(i + 1, len(c1)):
                if c1[i] == c2[j] and c1[j] == c2[i]:
                    col_set = adder([frozenset((i, j))], col_set)
        return col_set

    def single_bot_cross_over(self, coord1, pcoord1, coord2, pcoord2):
        """Checks for cross-over and collisions between robots one and 2
        moving from pcoord to coord
        """
        disp = (pcoord1[0] - pcoord2[0], pcoord1[1] - pcoord2[1])
        if abs(disp[1]) > 1 or abs(disp[0]) > 1:
            return False
        if (disp[0] == -(coord1[0] -
coord2[0]) and disp[1] == -(coord1[1] - coord2[1])): return True return False def prio_col_check(self, coord, pcoord, t, paths=None, conn_8=False, recursive=False): """Collision checking with paths passed as constraints coord - current node pcoord - previous node t - timestep paths - paths that need to be avoided """ if not isinstance(coord, tuple): coord = tuple(coord) if not isinstance(pcoord, tuple): pcoord = tuple(pcoord) if paths is not None: col_set = [] adder = add_col_set if recursive: adder = add_col_set_recursive else: for i in range(len(coord)): for j in range(len(paths[t])): # simultaneous occupation if coord[i] == paths[t][j]: col_set = adder([frozenset([i])], col_set) return col_set # pass-through and cross-over disp = [pcoord[i][0] - paths[t - 1][j][0], pcoord[i][1] - paths[t - 1][j][1]] if abs(disp[1]) > 1 or abs(disp[0]) > 1: continue if (disp[0] == -(coord[i][0] - paths[t][j][0]) and disp[1] == -(coord[i][0] - paths[t][j][1])): col_set = adder([frozenset([i])], col_set) return col_set return None class NoRotationChecker(interface.Planner_Edge_Checker): """Used to wrap edge checking so more complex graphs can be cleanly handled (may require keeping track of state for non-trivial graphs Collision checking that doesn't allow rotations (i.e. robots moving into the place that was just vacated """ def __init__(self): """Takes no arguments, because on grid graph, only the coordinates matter """ pass def col_check(self, c1, recursive): """Checks for collisions at a single point. Returns either a M* or rM* collision set in the form of sets, depending on the setting of recursive. 
""" col_set = [] # Select the function to be used for adding collision sets adder = add_col_set if recursive: adder = add_col_set_recursive for i in range(len(c1) - 1): for j in range(i + 1, len(c1)): if c1[i] == c1[j]: col_set = adder([frozenset([i, j])], col_set) return col_set def cross_over(self, c1, c2, recursive=False): """Detects cross over collisions as well as pass through collisions """ col_set = [] # Select the function to be used for adding collision sets adder = add_col_set if recursive: adder = add_col_set_recursive for i in range(len(c1) - 1): for j in range(i + 1, len(c1)): # compute current displacement vector if c1[i] is None or c1[j] is None or c2[i] is None or c2[j] \ is None: continue disp = (c1[i][0] - c1[j][0], c1[i][1] - c1[j][1]) if abs(disp[1]) > 1 or abs(disp[0]) > 1: continue # Compute previous displacement vector. Have a cross over or # pass through collision if the two displacement vectors are # opposites # pdisp = [c2[i][0] - c2[j][0], c2[i][1] - c2[j][1]] if (disp[0] == -(c2[i][0] - c2[j][0]) and disp[1] == -(c2[i][1] - c2[j][1])): col_set = adder([frozenset([i, j])], col_set) elif c1[i] == c2[j] or c1[j] == c2[i]: # There is a rotation, which is banned col_set = adder([frozenset([i, j])], col_set) return col_set class Lazy_Edge_Checker(interface.Planner_Edge_Checker): """Used to wrap edge checking so more complex graphs can be cleanly handled (may require keeping track of state for non-trivial graphs """ def __init__(self): """Takes no arguments, because on grid graph, only the coordinates matter """ pass def col_check(self, c1, recursive): """Checks for collisions at a single point. Returns either a M* or rM* collision set in the form of sets, depending on the setting of recursive. 
""" col_set = [] # Select the function to be used for adding collision sets adder = add_col_set if recursive: adder = add_col_set_recursive for i in range(len(c1) - 1): for j in range(i + 1, len(c1)): if c1[i] == c1[j]: col_set = adder([frozenset([i, j])], col_set) return col_set return col_set def pass_through(self, c1, c2, recursive=False): """returns a tuple of colliding robots, or set of tuples if recursive """ col_set = [] # Select the function to be used for adding collision sets adder = add_col_set if recursive: adder = add_col_set_recursive for i in range(len(c1) - 1): for j in range(i + 1, len(c1)): if c1[i] == c2[j] and c1[j] == c2[i]: col_set = adder([frozenset([i, j])], col_set) return col_set return col_set def cross_over(self, c1, c2, recursive=False): """Detects cross over collisions as well as pass through collisions """ col_set = [] # Select the function to be used for adding collision sets adder = add_col_set if recursive: adder = add_col_set_recursive for i in range(len(c1) - 1): for j in range(i + 1, len(c1)): # compute current displacement vector disp = [c1[i][0] - c1[j][0], c1[i][1] - c1[j][1]] if abs(disp[1]) > 1 or abs(disp[0]) > 1: continue # Compute previous displacement vector. 
# Have a cross
                # over or pass through collision if the two displacement
                # vectors are opposites
                # pdisp = [c2[i][0] - c2[j][0], c2[i][1] - c2[j][1]]
                if (disp[0] == -(c2[i][0] - c2[j][0]) and
                        disp[1] == -(c2[i][1] - c2[j][1])):
                    col_set = adder([frozenset([i, j])], col_set)
                    # Lazy: report the first collision found immediately
                    return col_set
        return col_set


================================================
FILE: parameters.py
================================================
import numpy as np

# Learning parameters
gamma = .95  # discount rate for advantage estimation and reward discounting
LR_Q = 2.e-5  # 8.e-5 / NUM_THREADS # default: 1e-5
ADAPT_LR = True
ADAPT_COEFF = 5.e-5  # the coefficient A in LR_Q/sqrt(A*steps+1) for calculating LR
EXPERIENCE_BUFFER_SIZE = 256
max_episode_length = 256
IL_MAX_EP_LENGTH = 64
episode_count = 0

# observer parameters
OBS_SIZE = 11  # the size of the FOV grid to apply to each agent
NUM_FUTURE_STEPS = 3

# environment parameters
ENVIRONMENT_SIZE = (10, 60)  # the total size of the environment (length of one side) , Starting Point of Curriculum Only
WALL_COMPONENTS = (1, 21)  # Starting Params of Curriculum = TRUE
OBSTACLE_DENSITY = (0, 0.75)  # range of densities Starting Params of Curriculum = TRUE
DIAG_MVMT = False  # Diagonal movements allowed?
a_size = 5 + int(DIAG_MVMT) * 4
NUM_META_AGENTS = 9
NUM_IL_META_AGENTS = 4
NUM_THREADS = 8  # int(multiprocessing.cpu_count() / (2 * NUM_META_AGENTS))
NUM_BUFFERS = 1  # NO EXPERIENCE REPLAY int(NUM_THREADS / 2)

# training parameters
SUMMARY_WINDOW = 10
load_model = False
RESET_TRAINER = False
training_version = 'astar3_continuous_0.5IL_ray2'
model_path = 'model_' + training_version
gifs_path = 'gifs_' + training_version
train_path = 'train_' + training_version
OUTPUT_GIFS = False  # Only for RL gifs
GIFS_FREQUENCY_RL = 512
OUTPUT_IL_GIFS = False
IL_GIF_PROB = 0.
# Imitation options PRIMING_LENGTH = 0 # number of episodes at the beginning to train only on demonstrations MSTAR_CALL_FREQUENCY = 1 # observation variables NUM_CHANNEL = 8 + NUM_FUTURE_STEPS # others EPISODE_START = episode_count TRAINING = True EPISODE_SAMPLES = EXPERIENCE_BUFFER_SIZE # 64 GLOBAL_NET_SCOPE = 'global' swarm_reward = [0] * NUM_META_AGENTS swarm_targets = [0] * NUM_META_AGENTS # Shared arrays for tensorboard episode_rewards = [[] for _ in range(NUM_META_AGENTS)] episode_finishes = [[] for _ in range(NUM_META_AGENTS)] episode_lengths = [[] for _ in range(NUM_META_AGENTS)] episode_mean_values = [[] for _ in range(NUM_META_AGENTS)] episode_invalid_ops = [[] for _ in range(NUM_META_AGENTS)] episode_stop_ops = [[] for _ in range(NUM_META_AGENTS)] episode_wrong_blocking = [[] for _ in range(NUM_META_AGENTS)] rollouts = [None for _ in range(NUM_META_AGENTS)] GIF_frames = [] # Joint variables joint_actions = [{} for _ in range(NUM_META_AGENTS)] joint_env = [None for _ in range(NUM_META_AGENTS)] joint_observations =[{} for _ in range(NUM_META_AGENTS)] joint_rewards = [{} for _ in range(NUM_META_AGENTS)] joint_done = [{} for _ in range(NUM_META_AGENTS)] env_params = [[ [WALL_COMPONENTS[0], WALL_COMPONENTS[1]] , [OBSTACLE_DENSITY[0],OBSTACLE_DENSITY[1]]] for _ in range(NUM_META_AGENTS)] class JOB_OPTIONS: getExperience = 1 getGradient = 2 class COMPUTE_OPTIONS: multiThreaded = 1 synchronous = 2 JOB_TYPE = JOB_OPTIONS.getGradient COMPUTE_TYPE = COMPUTE_OPTIONS.multiThreaded ================================================ FILE: requirements.txt ================================================ absl-py==0.9.0 aiohttp==3.6.2 aioredis==1.3.1 appdirs==1.4.4 astor==0.8.1 async-timeout==3.0.1 attrs==19.3.0 backcall==0.1.0 beautifulsoup4==4.9.1 blessings==1.7 cachetools==4.1.1 CairoSVG==2.4.2 certifi==2020.4.5.2 cffi==1.14.0 chardet==3.0.4 click==7.1.2 cloudpickle==1.2.2 colorama==0.4.3 colorful==0.5.4 contextvars==2.4 crowdai-api==0.1.22 cssselect2==0.3.0 
cycler==0.10.0
Cython==0.29.21
decorator==4.4.2
defusedxml==0.6.0
dill==0.3.2
distlib==0.3.0
filelock==3.0.12
Flask==1.1.2
Flask-Cors==3.0.8
Flask-SocketIO==4.3.0
future==0.18.2
gast==0.2.2
google==3.0.0
google-api-core==1.22.1
google-auth==1.20.1
google-pasta==0.2.0
googleapis-common-protos==1.52.0
gpustat==0.6.0
grpcio==1.28.1
gym==0.17.3
h5py==2.10.0
hiredis==1.1.0
idna==2.9
idna-ssl==1.1.0
imagecodecs==2020.2.18
imageio==2.8.0
immutables==0.14
importlib-metadata==1.6.1
importlib-resources==1.5.0
ipython==7.14.0
ipython-genutils==0.2.0
itsdangerous==1.1.0
jedi==0.17.0
Jinja2==2.11.2
jsonschema==3.2.0
Keras==2.0.0
Keras-Applications==1.0.8
Keras-Preprocessing==1.1.0
kiwisolver==1.2.0
lxml==4.5.1
Markdown==3.2.1
MarkupSafe==1.1.1
matplotlib==3.2.1
mkl-fft==1.1.0
mkl-random==1.1.1
mkl-service==2.3.0
mock==4.0.2
more-itertools==8.3.0
msgpack==1.0.0
msgpack-numpy==0.4.6.post0
multidict==4.7.6
multiprocess==0.70.10
networkx==2.4
numpy==1.18.2
nvidia-ml-py3==7.352.0
opencensus==0.7.10
opencensus-context==0.1.1
opt-einsum==3.2.1
packaging==20.4
pandas==1.0.4
parso==0.7.0
pathos==0.2.6
pexpect==4.8.0
pickleshare==0.7.5
Pillow==7.1.2
pluggy==0.13.1
pox==0.2.8
ppft==1.6.6.2
prometheus-client==0.8.0
prompt-toolkit==3.0.5
protobuf==3.11.3
psutil==5.7.2
ptyprocess==0.6.0
py==1.8.1
py-spy==0.3.3
pyarrow==0.17.1
pyasn1==0.4.8
pyasn1-modules==0.2.8
pycparser==2.20
pyglet==1.5.0
Pygments==2.6.1
pyparsing==2.4.7
pyrsistent==0.16.0
pytest==5.4.3
pytest-runner==5.2
python-dateutil==2.8.1
python-engineio==3.13.0
python-gitlab==2.3.1
python-socketio==4.6.0
pytz==2020.1
PyWavelets==1.1.1
PyYAML==5.3.1
ray==0.8.7
recordtype==1.3
redis==3.4.1
requests==2.23.0
rsa==4.6
scikit-image==0.17.2
scipy==1.4.1
six==1.14.0
soupsieve==2.0.1
sumolib==1.6.0
svgutils==0.3.1
tensorboard==1.11.0
tensorflow==1.11.0
tensorflow-estimator==1.15.1
termcolor==1.1.0
Theano==1.0.4
tifffile==2020.5.11
timeout-decorator==0.4.1
tinycss2==1.0.2
toml==0.10.1
tox==3.15.2
tqdm==4.50.2
traci==1.6.0
traitlets==4.3.3
typing-extensions==3.7.4.2
urllib3==1.25.9
virtualenv==20.0.21
wcwidth==0.1.9
webencodings==0.5.1
Werkzeug==1.0.1
wrapt==1.12.1
yarl==1.5.1
zipp==3.1.0