# Repository: robin-shaun/Multi-UAV-Task-Assignment-Benchmark Branch: master Commit: 60e1e8c2535c Files: 9 Total size: 62.2 KB
# Directory structure: gitextract_hou5iokn/
# ├── .gitignore
# ├── aco.py
# ├── evaluate.py
# ├── ga.py
# ├── large_size_result.csv
# ├── medium_size_result.csv
# ├── pso.py
# ├── readme.md
# └── small_size_result.csv
# ================================================
# FILE CONTENTS
# ================================================

# ================================================
# FILE: .gitignore
# ================================================
# __pycache__

# ================================================
# FILE: aco.py
# ================================================
import random
import numpy as np
import math
import time
import os


class ACO():
    """Ant Colony Optimization solver for the multi-UAV task-assignment benchmark.

    One "ant type" exists per vehicle; `self.group` cooperative groups of
    `vehicle_num` ants each build joint tours over the targets, sharing one
    unvisited-target list per group so no two vehicles in a group visit the
    same target.
    """

    def __init__(self, vehicle_num, target_num, vehicle_speed, target, time_lim):
        self.num_type_ant = vehicle_num
        self.num_city = target_num + 1          # number of cities (targets + depot row 0)
        self.group = 200                        # cooperative ant groups per iteration
        self.num_ant = self.group * self.num_type_ant   # total ants per iteration
        self.ant_vel = vehicle_speed            # per-vehicle speeds, indexable by ant type
        self.cut_time = time_lim                # mission time budget per vehicle
        self.oneee = np.zeros((4, 1))           # NOTE(review): unused; kept for compatibility
        self.target = target                    # rows: [x, y, value, service_delay]
        self.alpha = 1                          # pheromone exponent
        self.beta = 2                           # heuristic-desirability exponent
        self.k1 = 0.03                          # NOTE(review): unused; kept for compatibility
        self.iter_max = 150                     # number of colony iterations

    def distance_matrix(self):
        """Return the (num_city x num_city) Euclidean distance matrix as nested lists."""
        dis_mat = []
        for i in range(self.num_city):
            dis_mat_each = []
            for j in range(self.num_city):
                dis = math.sqrt(pow(self.target[i][0] - self.target[j][0], 2)
                                + pow(self.target[i][1] - self.target[j][1], 2))
                dis_mat_each.append(dis)
            dis_mat.append(dis_mat_each)
        return dis_mat

    def run(self):
        """Run the colony and return (best_paths, elapsed_seconds).

        best_paths is a list with one target-index sequence per vehicle
        (depot 0 removed from the front of each sequence).
        """
        print("ACO start, pid: %s" % os.getpid())
        start_time = time.time()
        dis_mat = np.array(self.distance_matrix())
        value_init = self.target[:, 2].transpose()   # target rewards
        delay_init = self.target[:, 3].transpose()   # target service delays
        # One pheromone layer per vehicle (ant type).
        pheromone_mat = np.ones((self.num_type_ant, self.num_city, self.num_city))
        path_new = [[0] for i in range(self.num_type_ant)]
        count_iter = 0
        while count_iter < self.iter_max:
            path_sum = np.zeros((self.num_ant, 1))
            time_sum = np.zeros((self.num_ant, 1))
            value_sum = np.zeros((self.num_ant, 1))
            path_mat = [[0] for i in range(self.num_ant)]
            value = np.zeros((self.group, 1))
            atten = np.ones((self.num_type_ant, 1)) * 0.2   # per-type evaporation rate
            for ant in range(self.num_ant):
                ant_type = ant % self.num_type_ant
                visit = 0
                if ant_type == 0:
                    # A fresh unvisited list is shared by all ants of one group,
                    # so the group's vehicles never duplicate a target.
                    unvisit_list = list(range(1, self.num_city))
                for j in range(1, self.num_city):
                    # Roulette-wheel choice of the next city: desirability combines
                    # pheromone with reward-per-travel-and-service-time.
                    trans_list = []
                    tran_sum = 0
                    trans = 0
                    for k in range(len(unvisit_list)):
                        trans += np.power(pheromone_mat[ant_type][visit][unvisit_list[k]], self.alpha) \
                            * np.power(value_init[unvisit_list[k]] * self.ant_vel[ant_type]
                                       / (dis_mat[visit][unvisit_list[k]] * delay_init[unvisit_list[k]]),
                                       self.beta)
                        trans_list.append(trans)
                    tran_sum = trans
                    rand = random.uniform(0, tran_sum)
                    for t in range(len(trans_list)):
                        if rand <= trans_list[t]:
                            visit_next = unvisit_list[t]
                            break
                    path_mat[ant].append(visit_next)
                    path_sum[ant] += dis_mat[path_mat[ant][j - 1]][path_mat[ant][j]]
                    # NOTE(review): path_sum is cumulative tour length, so dividing the
                    # whole sum by speed each step double-counts earlier legs — this
                    # reproduces the original accounting; verify against intent.
                    time_sum[ant] += path_sum[ant] / self.ant_vel[ant_type] + delay_init[visit_next]
                    if time_sum[ant] > self.cut_time:
                        # Over budget: roll back the last step and stop this ant.
                        time_sum[ant] -= path_sum[ant] / self.ant_vel[ant_type] + delay_init[visit_next]
                        path_mat[ant].pop()
                        break
                    value_sum[ant] += value_init[visit_next]
                    unvisit_list.remove(visit_next)
                    visit = visit_next
                if ant_type == self.num_type_ant - 1:
                    # Last vehicle of a group: total the group's collected reward.
                    small_group = int(ant / self.num_type_ant)
                    for k in range(self.num_type_ant):
                        value[small_group] += value_sum[ant - k]
            # Track the best group's paths across iterations.
            if count_iter == 0:
                value_new = max(value)
                value = value.tolist()
                for k in range(0, self.num_type_ant):
                    path_new[k] = path_mat[value.index(value_new) * self.num_type_ant + k]
                    # NOTE(review): remove(0) mutates the shared path_mat row, so the
                    # pheromone update below sees the best group's paths without the
                    # leading depot — preserved as-is; verify this is intended.
                    path_new[k].remove(0)
            else:
                if max(value) > value_new:
                    value_new = max(value)
                    value = value.tolist()
                    for k in range(0, self.num_type_ant):
                        path_new[k] = path_mat[value.index(value_new) * self.num_type_ant + k]
                        path_new[k].remove(0)
            # Pheromone update: deposit along every ant's edges, then evaporate
            # with a rate boosted by how close each ant came to the best value.
            pheromone_change = np.zeros((self.num_type_ant, self.num_city, self.num_city))
            for i in range(self.num_ant):
                length = len(path_mat[i])
                m = i % self.num_type_ant
                n = int(i / self.num_type_ant)
                for j in range(length - 1):
                    pheromone_change[m][path_mat[i][j]][path_mat[i][j + 1]] += \
                        value_init[path_mat[i][j + 1]] * self.ant_vel[m] \
                        / (dis_mat[path_mat[i][j]][path_mat[i][j + 1]] * delay_init[path_mat[i][j + 1]])
                atten[m] += (value_sum[i] / (np.power((value_new - value[n]), 4) + 1)) / self.group
            for k in range(self.num_type_ant):
                pheromone_mat[k] = (1 - atten[k]) * pheromone_mat[k] + pheromone_change[k]
            count_iter += 1
        print("ACO result:", path_new)
        end_time = time.time()
        print("ACO time:", end_time - start_time)
        return path_new, end_time - start_time


# ================================================
# FILE: evaluate.py
# ================================================
import numpy as np
import matplotlib.pyplot as plt
import random
import pandas as pd
import copy
from multiprocessing import Pool
from ga import GA
from aco import ACO
from pso import PSO


class Env():
    """Benchmark environment: random targets + scoring of a task assignment.

    targets rows are [x, y, reward, service_time]; row 0 is the depot (all
    vehicles start at the origin).
    """

    def __init__(self, vehicle_num, target_num, map_size, visualized=True, time_cost=None, repeat_cost=None):
        self.vehicles_position = np.zeros(vehicle_num, dtype=np.int32)
        self.vehicles_speed = np.zeros(vehicle_num, dtype=np.int32)
        self.targets = np.zeros(shape=(target_num + 1, 4), dtype=np.int32)
        if vehicle_num == 5:
            self.size = 'small'
        if vehicle_num == 10:
            self.size = 'medium'
        if vehicle_num == 15:
            self.size = 'large'
        self.map_size = map_size
        self.speed_range = [10, 15, 30]
        # Mission time budget scales with the map and the middle speed tier.
        self.time_lim = self.map_size / self.speed_range[1]
        self.vehicles_lefttime = np.ones(vehicle_num, dtype=np.float32) * self.time_lim
        self.distant_mat = np.zeros((target_num + 1, target_num + 1), dtype=np.float32)
        self.total_reward = 0
        self.reward = 0
        self.visualized = visualized
        self.time = 0
        self.time_cost = time_cost
        self.repeat_cost = repeat_cost
        self.end = False
        self.assignment = [[] for i in range(vehicle_num)]
        self.task_generator()

    def task_generator(self):
        """Randomize vehicle speeds and target positions/rewards/durations."""
        for i in range(self.vehicles_speed.shape[0]):
            choose = random.randint(0, 2)
            self.vehicles_speed[i] = self.speed_range[choose]
        for i in range(self.targets.shape[0] - 1):
            self.targets[i + 1, 0] = random.randint(1, self.map_size) - 0.5 * self.map_size  # x position
            self.targets[i + 1, 1] = random.randint(1, self.map_size) - 0.5 * self.map_size  # y position
            self.targets[i + 1, 2] = random.randint(1, 10)   # reward
            self.targets[i + 1, 3] = random.randint(5, 30)   # time consumption to finish the mission
        for i in range(self.targets.shape[0]):
            for j in range(self.targets.shape[0]):
                self.distant_mat[i, j] = np.linalg.norm(self.targets[i, :2] - self.targets[j, :2])
        # Keep a pristine copy of rewards so reset() can restore them.
        self.targets_value = copy.deepcopy((self.targets[:, 2]))

    def step(self, action):
        """Advance every vehicle one action (one target index each).

        NOTE(review): this incremental API is not used by evaluate(); its reward
        formula adds targets[k, 2] twice — preserved as-is, verify intent before
        relying on it.
        """
        count = 0
        for j in range(len(action)):
            k = action[j]
            delta_time = self.distant_mat[self.vehicles_position[j], k] / self.vehicles_speed[j] \
                + self.targets[k, 3]
            self.vehicles_lefttime[j] = self.vehicles_lefttime[j] - delta_time
            if self.vehicles_lefttime[j] < 0:
                count = count + 1
                continue
            else:
                if k == 0:
                    self.reward = - self.repeat_cost
                else:
                    self.reward = self.targets[k, 2] - delta_time * self.time_cost + self.targets[k, 2]
                    if self.targets[k, 2] == 0:
                        self.reward = self.reward - self.repeat_cost
                    self.vehicles_position[j] = k
                    self.targets[k, 2] = 0
                self.total_reward = self.total_reward + self.reward
                self.assignment[j].append(action)
        if count == len(action):
            self.end = True

    def run(self, assignment, algorithm, play, rond):
        """Score a full assignment (list of target sequences) and optionally plot it."""
        self.assignment = assignment
        self.algorithm = algorithm
        self.play = play
        self.rond = rond
        self.get_total_reward()
        if self.visualized:
            self.visualize()

    def reset(self):
        """Restore positions, time budgets, rewards and flags for the next run."""
        self.vehicles_position = np.zeros(self.vehicles_position.shape[0], dtype=np.int32)
        self.vehicles_lefttime = np.ones(self.vehicles_position.shape[0], dtype=np.float32) * self.time_lim
        self.targets[:, 2] = self.targets_value
        self.total_reward = 0
        self.reward = 0
        self.end = False

    def get_total_reward(self):
        """Sum rewards along each vehicle's route; zero everything if any vehicle
        exceeds its time budget."""
        for i in range(len(self.assignment)):
            speed = self.vehicles_speed[i]
            for j in range(len(self.assignment[i])):
                position = self.targets[self.assignment[i][j], :4]
                self.total_reward = self.total_reward + position[2]
                if j == 0:
                    # First leg starts at the origin.
                    self.vehicles_lefttime[i] = self.vehicles_lefttime[i] \
                        - np.linalg.norm(position[:2]) / speed - position[3]
                else:
                    self.vehicles_lefttime[i] = self.vehicles_lefttime[i] \
                        - np.linalg.norm(position[:2] - position_last[:2]) / speed - position[3]
                position_last = position
                # BUG FIX: the original tested `> self.time_lim`, which can never be
                # true (lefttime starts AT time_lim and only decreases), so the time
                # limit was never enforced.  A vehicle is over budget when its
                # remaining time goes negative.
                if self.vehicles_lefttime[i] < 0:
                    self.end = True
                    break
            if self.end:
                self.total_reward = 0
                break

    def visualize(self):
        """Save a scatter/route plot under task_pic/<size>/."""
        if self.assignment is None:
            plt.scatter(x=0, y=0, s=200, c='k')
            plt.scatter(x=self.targets[1:, 0], y=self.targets[1:, 1], s=self.targets[1:, 2] * 10, c='r')
            plt.title('Target distribution')
            plt.savefig('task_pic/' + self.size + '/' + self.algorithm + "-%d-%d.png" % (self.play, self.rond))
            plt.cla()
        else:
            plt.title('Task assignment by ' + self.algorithm + ', total reward : ' + str(self.total_reward))
            plt.scatter(x=0, y=0, s=200, c='k')
            plt.scatter(x=self.targets[1:, 0], y=self.targets[1:, 1], s=self.targets[1:, 2] * 10, c='r')
            for i in range(len(self.assignment)):
                trajectory = np.array([[0, 0, 20]])
                for j in range(len(self.assignment[i])):
                    position = self.targets[self.assignment[i][j], :3]
                    trajectory = np.insert(trajectory, j + 1, values=position, axis=0)
                plt.scatter(x=trajectory[1:, 0], y=trajectory[1:, 1], s=trajectory[1:, 2] * 10, c='b')
                plt.plot(trajectory[:, 0], trajectory[:, 1])
            plt.savefig('task_pic/' + self.size + '/' + self.algorithm + "-%d-%d.png" % (self.play, self.rond))
            plt.cla()


def evaluate(vehicle_num, target_num, map_size):
    """Run GA/ACO/PSO on 10 random scenarios x 10 repeats, plot and save results."""
    if vehicle_num == 5:
        size = 'small'
    if vehicle_num == 10:
        size = 'medium'
    if vehicle_num == 15:
        size = 'large'
    re_ga = [[] for i in range(10)]
    re_aco = [[] for i in range(10)]
    re_pso = [[] for i in range(10)]
    for i in range(10):
        env = Env(vehicle_num, target_num, map_size, visualized=True)
        for j in range(10):
            # Run the three solvers concurrently in worker processes.
            p = Pool(3)
            ga = GA(vehicle_num, env.vehicles_speed, target_num, env.targets, env.time_lim)
            aco = ACO(vehicle_num, target_num, env.vehicles_speed, env.targets, env.time_lim)
            pso = PSO(vehicle_num, target_num, env.targets, env.vehicles_speed, env.time_lim)
            ga_result = p.apply_async(ga.run)
            aco_result = p.apply_async(aco.run)
            pso_result = p.apply_async(pso.run)
            p.close()
            p.join()
            ga_task_assignment = ga_result.get()[0]
            env.run(ga_task_assignment, 'GA', i + 1, j + 1)
            re_ga[i].append((env.total_reward, ga_result.get()[1]))
            env.reset()
            aco_task_assignment = aco_result.get()[0]
            env.run(aco_task_assignment, 'ACO', i + 1, j + 1)
            re_aco[i].append((env.total_reward, aco_result.get()[1]))
            env.reset()
            pso_task_assignment = pso_result.get()[0]
            env.run(pso_task_assignment, 'PSO', i + 1, j + 1)
            re_pso[i].append((env.total_reward, pso_result.get()[1]))
            env.reset()
    # Aggregate per-scenario max/mean of reward and runtime for each solver.
    x_index = np.arange(10)
    ymax11 = []
    ymax12 = []
    ymax21 = []
    ymax22 = []
    ymax31 = []
    ymax32 = []
    ymean11 = []
    ymean12 = []
    ymean21 = []
    ymean22 = []
    ymean31 = []
    ymean32 = []
    for i in range(10):
        tmp1 = [re_ga[i][j][0] for j in range(10)]
        tmp2 = [re_ga[i][j][1] for j in range(10)]
        ymax11.append(np.amax(tmp1))
        ymax12.append(np.amax(tmp2))
        ymean11.append(np.mean(tmp1))
        ymean12.append(np.mean(tmp2))
        tmp1 = [re_aco[i][j][0] for j in range(10)]
        tmp2 = [re_aco[i][j][1] for j in range(10)]
        ymax21.append(np.amax(tmp1))
        ymax22.append(np.amax(tmp2))
        ymean21.append(np.mean(tmp1))
        ymean22.append(np.mean(tmp2))
        tmp1 = [re_pso[i][j][0] for j in range(10)]
        tmp2 = [re_pso[i][j][1] for j in range(10)]
        ymax31.append(np.amax(tmp1))
        ymax32.append(np.amax(tmp2))
        ymean31.append(np.mean(tmp1))
        ymean32.append(np.mean(tmp2))
    rects1 = plt.bar(x_index, ymax11, width=0.1, color='b', label='ga_max_reward')
    rects2 = plt.bar(x_index + 0.1, ymax21, width=0.1, color='r', label='aco_max_reward')
    rects3 = plt.bar(x_index + 0.2, ymax31, width=0.1, color='g', label='pso_max_reward')
    plt.xticks(x_index + 0.1, x_index)
    plt.legend()
    plt.title('max_reward_for_' + size + '_size')
    plt.savefig('max_reward_' + size + '.png')
    plt.cla()
    rects1 = plt.bar(x_index, ymax12, width=0.1, color='b', label='ga_max_time')
    rects2 = plt.bar(x_index + 0.1, ymax22, width=0.1, color='r', label='aco_max_time')
    rects3 = plt.bar(x_index + 0.2, ymax32, width=0.1, color='g', label='pso_max_time')
    plt.xticks(x_index + 0.1, x_index)
    plt.legend()
    plt.title('max_time_for_' + size + '_size')
    plt.savefig('max_time_' + size + '.png')
    plt.cla()
    rects1 = plt.bar(x_index, ymean11, width=0.1, color='b', label='ga_mean_reward')
    rects2 = plt.bar(x_index + 0.1, ymean21, width=0.1, color='r', label='aco_mean_reward')
    rects3 = plt.bar(x_index + 0.2, ymean31, width=0.1, color='g', label='pso_mean_reward')
    plt.xticks(x_index + 0.1, x_index)
    plt.legend()
    plt.title('mean_reward_for_' + size + '_size')
    plt.savefig('mean_reward_' + size + '.png')
    plt.cla()
    rects1 = plt.bar(x_index, ymean12, width=0.1, color='b', label='ga_mean_time')
    rects2 = plt.bar(x_index + 0.1, ymean22, width=0.1, color='r', label='aco_mean_time')
    rects3 = plt.bar(x_index + 0.2, ymean32, width=0.1, color='g', label='pso_mean_time')
    plt.xticks(x_index + 0.1, x_index)
    plt.legend()
    plt.title('mean_time_for_' + size + '_size')
    plt.savefig('mean_time_' + size + '.png')
    plt.cla()
    # Flatten all 100 runs per solver and dump to CSV.
    t_ga = []
    r_ga = []
    t_aco = []
    r_aco = []
    t_pso = []
    r_pso = []
    for i in range(10):
        for j in range(10):
            t_ga.append(re_ga[i][j][1])
            r_ga.append(re_ga[i][j][0])
            t_aco.append(re_aco[i][j][1])
            r_aco.append(re_aco[i][j][0])
            t_pso.append(re_pso[i][j][1])
            r_pso.append(re_pso[i][j][0])
    dataframe = pd.DataFrame({'ga_time': t_ga, 'ga_reward': r_ga, 'aco_time': t_aco,
                              'aco_reward': r_aco, 'pso_time': t_pso, 'pso_reward': r_pso})
    dataframe.to_csv(size + '_size_result.csv', sep=',')


if __name__ == '__main__':
    # small scale
    evaluate(5, 30, 5e3)
    # medium scale
    evaluate(10, 60, 1e4)
    # large scale
    evaluate(15, 90, 1.5e4)


# ================================================
# FILE: ga.py
# ================================================
import numpy as np
import random
import time
import os


class GA():
    """Genetic algorithm for the multi-UAV task-assignment benchmark.

    Chromosome layout (length vehicle_num-1 + target_num-1):
      * gene[0 : vehicle_num-1]   — "break" positions (0..target_num): how many
        vehicle switches occur at each slot of the decoded sequence;
      * gene[vehicle_num-1 : ]    — a permutation of the targets encoded as
        indices into a shrinking candidate list.
    """

    def __init__(self, vehicle_num, vehicles_speed, target_num, targets, time_lim):
        # vehicles_speed, targets in the type of ndarray
        self.vehicle_num = vehicle_num
        self.vehicles_speed = vehicles_speed
        self.target_num = target_num
        self.targets = targets
        self.time_lim = time_lim
        self.map = np.zeros(shape=(target_num + 1, target_num + 1), dtype=float)
        self.pop_size = 50
        self.p_cross = 0.6
        self.p_mutate = 0.005
        # Symmetric distance matrix between depot/targets.
        for i in range(target_num + 1):
            self.map[i, i] = 0
            for j in range(i):
                self.map[j, i] = self.map[i, j] = np.linalg.norm(
                    targets[i, :2] - targets[j, :2])
        self.pop = np.zeros(
            shape=(self.pop_size, vehicle_num - 1 + target_num - 1), dtype=np.int32)
        self.ff = np.zeros(self.pop_size, dtype=float)
        for i in range(self.pop_size):
            for j in range(vehicle_num - 1):
                self.pop[i, j] = random.randint(0, target_num)
            for j in range(target_num - 1):
                self.pop[i, vehicle_num + j - 1] = random.randint(0, target_num - j - 1)
            self.ff[i] = self.fitness(self.pop[i, :])
        self.tmp_pop = np.array([])
        self.tmp_ff = np.array([])
        self.tmp_size = 0

    def fitness(self, gene):
        """Decode a chromosome and return the total reward collected within time_lim."""
        ins = np.zeros(self.target_num + 1, dtype=np.int32)   # break counts per slot
        seq = np.zeros(self.target_num, dtype=np.int32)       # decoded target order
        ins[self.target_num] = 1   # sentinel break so the last vehicle terminates
        for i in range(self.vehicle_num - 1):
            ins[gene[i]] += 1
        rest = np.array(range(1, self.target_num + 1))
        for i in range(self.target_num - 1):
            seq[i] = rest[gene[i + self.vehicle_num - 1]]
            rest = np.delete(rest, gene[i + self.vehicle_num - 1])
        seq[self.target_num - 1] = rest[0]
        i = 0      # index of vehicle
        pre = 0    # index of last target
        post = 0   # index of ins/seq
        t = 0
        reward = 0
        while i < self.vehicle_num:
            if ins[post] > 0:
                # Consume one vehicle break: next vehicle restarts at the depot.
                i += 1
                ins[post] -= 1
                pre = 0
                t = 0
            else:
                t += self.targets[pre, 3]
                past = self.map[pre, seq[post]] / self.vehicles_speed[i]
                t += past
                if t < self.time_lim:
                    reward += self.targets[seq[post], 2]
                    pre = seq[post]
                post += 1
        return reward

    def selection(self):
        """Roulette-wheel selection: refill pop from tmp_pop proportionally to fitness."""
        roll = np.zeros(self.tmp_size, dtype=float)
        roll[0] = self.tmp_ff[0]
        for i in range(1, self.tmp_size):
            roll[i] = roll[i - 1] + self.tmp_ff[i]
        for i in range(self.pop_size):
            xx = random.uniform(0, roll[self.tmp_size - 1])
            j = 0
            while xx > roll[j]:
                j += 1
            self.pop[i, :] = self.tmp_pop[j, :]
            self.ff[i] = self.tmp_ff[j]

    def mutation(self):
        """Per-gene mutation on tmp_pop; re-evaluate fitness only when changed."""
        for i in range(self.tmp_size):
            flag = False
            for j in range(self.vehicle_num - 1):
                if random.random() < self.p_mutate:
                    self.tmp_pop[i, j] = random.randint(0, self.target_num)
                    flag = True
            for j in range(self.target_num - 1):
                if random.random() < self.p_mutate:
                    self.tmp_pop[i, self.vehicle_num + j - 1] = random.randint(0, self.target_num - j - 1)
                    flag = True
            if flag:
                self.tmp_ff[i] = self.fitness(self.tmp_pop[i, :])

    def crossover(self):
        """Two-point crossover of adjacent pairs; offspring appended to tmp_pop."""
        new_pop = []
        new_ff = []
        new_size = 0
        for i in range(0, self.pop_size, 2):
            if random.random() < self.p_cross:
                x1 = random.randint(0, self.vehicle_num - 2)
                x2 = random.randint(0, self.target_num - 2) + self.vehicle_num
                # BUG FIX: the original used views (self.pop[i, :]) instead of
                # copies.  Writing into g1 then mutated parent i in place, and the
                # second swap copied the already-swapped values, so the second
                # child was never actually crossed and the parent population was
                # corrupted.  Copy rows before swapping the segment.
                g1 = self.pop[i, :].copy()
                g2 = self.pop[i + 1, :].copy()
                g1[x1:x2] = self.pop[i + 1, x1:x2]
                g2[x1:x2] = self.pop[i, x1:x2]
                new_pop.append(g1)
                new_pop.append(g2)
                new_ff.append(self.fitness(g1))
                new_ff.append(self.fitness(g2))
                new_size += 2
        self.tmp_size = self.pop_size + new_size
        self.tmp_pop = np.zeros(
            shape=(self.tmp_size, self.vehicle_num - 1 + self.target_num - 1), dtype=np.int32)
        self.tmp_pop[0:self.pop_size, :] = self.pop
        self.tmp_ff = np.zeros(self.tmp_size, dtype=float)
        self.tmp_ff[0:self.pop_size] = self.ff
        if new_size:
            # ROBUSTNESS: when no pair crossed over, np.array([]) has shape (0,)
            # and cannot broadcast into a (0, L) slice — skip the assignment.
            self.tmp_pop[self.pop_size:self.tmp_size, :] = np.array(new_pop)
            self.tmp_ff[self.pop_size:self.tmp_size] = np.array(new_ff)

    def run(self):
        """Evolve until 500 generations pass without improvement; return
        (task_assignment, elapsed_seconds)."""
        print("GA start, pid: %s" % os.getpid())
        start_time = time.time()
        cut = 0
        count = 0
        # ROBUSTNESS: seed `gene` so it is bound even if the very first
        # generations never improve on fitness 0 (original risked a NameError).
        gene = self.pop[np.argmax(self.ff)]
        while count < 500:
            self.crossover()
            self.mutation()
            self.selection()
            new_cut = self.tmp_ff.max()
            if cut < new_cut:
                cut = new_cut
                count = 0
                gene = self.tmp_pop[np.argmax(self.tmp_ff)]
            else:
                count += 1
        # Decode the best chromosome (same scheme as fitness()) into per-vehicle
        # target lists.
        ins = np.zeros(self.target_num + 1, dtype=np.int32)
        seq = np.zeros(self.target_num, dtype=np.int32)
        ins[self.target_num] = 1
        for i in range(self.vehicle_num - 1):
            ins[gene[i]] += 1
        rest = np.array(range(1, self.target_num + 1))
        for i in range(self.target_num - 1):
            seq[i] = rest[gene[i + self.vehicle_num - 1]]
            rest = np.delete(rest, gene[i + self.vehicle_num - 1])
        seq[self.target_num - 1] = rest[0]
        task_assignment = [[] for i in range(self.vehicle_num)]
        i = 0      # index of vehicle
        pre = 0    # index of last target
        post = 0   # index of ins/seq
        t = 0
        reward = 0
        while i < self.vehicle_num:
            if ins[post] > 0:
                i += 1
                ins[post] -= 1
                pre = 0
                t = 0
            else:
                t += self.targets[pre, 3]
                past = self.map[pre, seq[post]] / self.vehicles_speed[i]
                t += past
                if t < self.time_lim:
                    task_assignment[i].append(seq[post])
                    reward += self.targets[seq[post], 2]
                    pre = seq[post]
                post += 1
        print("GA result:", task_assignment)
        end_time = time.time()
        print("GA time:", end_time - start_time)
        return task_assignment, end_time - start_time


# ================================================
# FILE: large_size_result.csv
# ================================================
# (data preserved verbatim from the digest; remaining rows follow on later
#  lines of the dump)
# ,aco_reward,aco_time,ga_reward,ga_time,pso_reward,pso_time
# 0,296,908.7814054489136,247,55.92802929878235,263,475.8543794155121
# 1,291,915.7366240024567,259,66.25442147254944,263,472.1400876045227
# 2,292,917.8597526550293,254,74.90953588485718,263,474.662939786911
# 3,289,929.387636423111,260,103.34872436523438,263,478.1414213180542
# 4,289,923.429899930954,251,60.22564744949341,263,468.2993767261505
# 5,288,921.4861361980438,256,101.24155569076538,263,480.2911822795868
# 6,293,899.2834107875824,255,80.45500588417053,263,467.8873429298401
# 7,289,920.9990880489349,258,104.65078663825989,263,473.22990322113037
# 8,289,915.464262008667,252,55.428141355514526,263,470.7831304073334
# 9,292,908.4859659671783,242,54.579482316970825,263,468.63810992240906
# 10,273,951.7187411785126,249,53.21723532676697,267,471.1878535747528
# 11,273,967.2745745182037,265,127.98299217224121,267,489.7042224407196
# 12,270,974.3173124790192,260,195.45258331298828,267,495.3276345729828
# 13,275,963.2233729362488,246,50.30011963844299,267,470.9858467578888
# 14,271,962.010968208313,248,73.91375541687012,267,471.8012545108795
# 15,271,962.2612085342407,252,77.44514584541321,267,481.26786375045776
# 16,267,945.5351057052612,245,58.64225435256958,267,477.9602701663971
# 17,277,948.2806112766266,255,67.99747490882874,267,468.89854645729065
# 18,271,967.6417164802551,252,129.3072385787964,267,478.41221261024475
19,274,963.3537228107452,253,133.60208249092102,267,479.5689525604248 20,290,966.2115695476532,250,122.1526083946228,260,489.93380999565125 21,287,943.0081570148468,250,62.05885910987854,260,477.671777009964 22,290,947.1903564929962,254,59.37166452407837,260,467.77480578422546 23,286,959.2566442489624,276,91.52135014533997,260,477.249960899353 24,288,954.6075274944305,268,48.26884913444519,260,464.37573313713074 25,284,936.1187009811401,273,57.24583983421326,260,477.16314125061035 26,286,954.3773159980774,259,82.08687591552734,260,475.90100502967834 27,289,949.4377288818359,254,53.36060166358948,260,470.7586085796356 28,290,952.0964720249176,273,65.8470516204834,260,474.4114272594452 29,290,944.5154075622559,275,43.74000549316406,260,468.4188332557678 30,327,994.9844787120819,294,81.43704128265381,301,474.0977404117584 31,323,1018.6526775360107,273,114.55374956130981,301,498.5897653102875 32,321,1009.1453545093536,285,94.25002026557922,301,486.3076343536377 33,327,1019.1480383872986,278,55.407536029815674,301,488.1904435157776 34,325,1007.914253950119,293,83.80115604400635,301,491.72301745414734 35,325,1024.5869517326355,282,148.9419755935669,301,497.10984230041504 36,323,1020.457249879837,295,108.69291090965271,301,496.7013669013977 37,326,1013.691241979599,271,73.25992369651794,301,494.8483748435974 38,323,1020.1873610019684,278,48.434046030044556,301,490.01100039482117 39,325,1021.3731291294098,292,115.58995175361633,301,488.44017720222473 40,278,976.9366610050201,264,138.9213318824768,273,499.79298758506775 41,275,965.3231558799744,262,145.9869430065155,273,498.0558907985687 42,280,962.862530708313,271,91.95361614227295,273,489.8705041408539 43,279,959.0885939598083,236,54.67619323730469,273,486.26035809516907 44,276,973.558468580246,255,105.7680230140686,273,491.402090549469 45,279,967.0545673370361,248,83.93505239486694,273,485.08744978904724 46,276,957.583824634552,239,93.60761904716492,273,494.0147354602814 
47,275,965.79727602005,264,104.30339407920837,273,489.8355438709259 48,280,971.2357912063599,247,81.43815469741821,273,485.04322052001953 49,278,973.0727701187134,254,91.01016688346863,273,489.8650426864624 50,291,952.5716059207916,250,101.36161661148071,275,494.92200326919556 51,294,946.210232257843,254,66.17070937156677,275,481.7474868297577 52,294,946.330258846283,256,88.53258848190308,275,489.36784863471985 53,294,940.2625517845154,248,63.820109605789185,275,485.3837275505066 54,291,951.8322811126709,256,68.07165431976318,275,488.7123284339905 55,299,958.7923038005829,265,68.89578294754028,275,491.05396008491516 56,296,951.3731620311737,245,73.54164481163025,275,486.9369945526123 57,291,958.3713037967682,258,89.91634559631348,275,491.3002550601959 58,290,945.0339353084564,246,56.91977286338806,275,481.18742632865906 59,291,947.7742516994476,261,71.559574842453,275,481.191358089447 60,305,981.3974587917328,291,92.71349763870239,269,488.7560610771179 61,304,957.5966999530792,275,82.32165241241455,269,491.32160925865173 62,307,968.3465480804443,266,79.71064329147339,269,492.24424958229065 63,309,978.8897063732147,269,78.40533828735352,269,490.47363781929016 64,305,976.8462386131287,263,96.9474766254425,269,497.1219482421875 65,310,973.8594441413879,257,72.08272051811218,269,493.34489607810974 66,306,964.727823972702,276,96.79535627365112,269,496.8235650062561 67,309,980.2682957649231,271,110.70756220817566,269,489.6940174102783 68,304,985.0895121097565,266,146.9937801361084,269,510.74099040031433 69,304,970.6574778556824,259,80.50431251525879,269,480.4831213951111 70,331,1002.6550228595734,276,71.99679160118103,282,486.46082282066345 71,330,1068.6351492404938,297,123.51885652542114,282,506.458402633667 72,332,1023.2077965736389,285,78.46024966239929,282,492.73976039886475 73,331,1017.5172808170319,280,48.53227877616882,282,495.07808446884155 74,332,1011.5874664783478,309,67.04316973686218,282,494.5928440093994 
75,330,1028.534333705902,274,72.79688119888306,282,492.562472820282 76,332,996.4580583572388,310,69.33413505554199,282,489.1693527698517 77,332,1006.4915940761566,293,63.24198341369629,282,486.631254196167 78,332,1007.7951982021332,278,54.046175479888916,282,487.0864179134369 79,327,1008.2880766391754,306,101.87895131111145,282,491.83132791519165 80,342,1024.3724205493927,311,84.77551889419556,307,490.72554993629456 81,346,1037.724690914154,321,93.26548743247986,307,497.81773042678833 82,345,1045.6583635807037,300,86.76090788841248,307,484.0392985343933 83,344,1043.5157623291016,310,91.16247344017029,307,490.33659172058105 84,344,1042.54065823555,322,59.16938662528992,307,484.3666524887085 85,344,1039.1224558353424,301,86.92423415184021,307,493.7003331184387 86,340,1043.6719121932983,320,77.55270719528198,307,483.9702203273773 87,344,1042.995453596115,306,104.2813949584961,307,494.0151512622833 88,347,1051.3341484069824,321,152.23194694519043,307,496.49487829208374 89,349,1040.0653417110443,349,86.07370352745056,307,498.4700348377228 90,327,981.0026612281799,277,82.25102162361145,310,486.8569030761719 91,327,981.1793773174286,291,83.74289011955261,310,487.12745547294617 92,327,976.752777338028,306,72.23682999610901,310,489.5701353549957 93,326,980.6637990474701,300,54.929299116134644,310,483.11501002311707 94,322,969.3974039554596,284,70.095294713974,310,484.54920268058777 95,324,970.2287967205048,277,90.13793587684631,310,491.29523491859436 96,323,978.8446981906891,279,76.42075634002686,310,490.77434039115906 97,325,968.8402066230774,275,64.63985276222229,310,498.89036893844604 98,324,979.2253255844116,286,160.1146640777588,310,496.08253359794617 99,325,982.7923681735992,275,45.25343370437622,310,482.38406586647034 ================================================ FILE: medium_size_result.csv ================================================ ,aco_reward,aco_time,ga_reward,ga_time,pso_reward,pso_time 
0,216,448.3831191062927,206,49.305721282958984,193,148.66212391853333 1,215,450.0390589237213,201,47.92679977416992,193,148.18085885047913 2,215,446.2680039405823,197,44.56689190864563,193,147.05394744873047 3,214,439.6638705730438,206,30.478964805603027,193,145.65527486801147 4,216,446.25123286247253,213,49.142014026641846,193,146.94575667381287 5,215,449.3586504459381,191,35.995574951171875,193,147.08190488815308 6,215,444.68290305137634,199,47.1325957775116,193,149.1070830821991 7,215,440.68888783454895,191,40.503169775009155,193,146.32232356071472 8,216,444.11085629463196,207,30.60081696510315,193,145.07146883010864 9,216,442.4932496547699,203,41.649266719818115,193,148.24117517471313 10,181,394.1740880012512,182,41.235496282577515,179,145.92154598236084 11,179,398.2101173400879,182,58.84644627571106,179,149.12411260604858 12,182,396.67690896987915,185,28.317508935928345,179,146.20850467681885 13,181,402.9303925037384,178,50.20690608024597,179,150.27634143829346 14,183,399.2702867984772,198,61.198126792907715,179,150.24325561523438 15,181,392.7786009311676,193,27.70851445198059,179,145.60873198509216 16,186,394.2873070240021,187,40.75690317153931,179,146.5531108379364 17,182,400.4948661327362,189,68.48983502388,179,151.8164780139923 18,181,396.15389823913574,176,36.01111102104187,179,146.96473789215088 19,183,407.0688331127167,177,74.86308264732361,179,152.32446765899658 20,257,535.1680011749268,242,45.81318497657776,257,162.57035994529724 21,248,477.0249879360199,257,59.37494111061096,257,151.88625025749207 22,248,481.47851395606995,248,73.29448699951172,257,153.8765308856964 23,248,476.1490092277527,251,46.956031799316406,257,150.61592316627502 24,248,472.28077578544617,245,35.464972496032715,257,147.88373970985413 25,247,465.2167932987213,248,33.261672258377075,257,147.64871048927307 26,248,465.4674003124237,243,46.813090562820435,257,149.02625226974487 27,253,475.7856402397156,253,62.99959635734558,257,152.69138717651367 
28,248,471.5851991176605,249,62.63740587234497,257,151.3696005344391 29,248,468.71898126602173,242,40.11980128288269,257,149.03825902938843 30,196,404.4883544445038,190,26.050060272216797,194,144.73951077461243 31,200,409.98085474967957,196,49.781771659851074,194,148.48935222625732 32,197,411.36246395111084,197,45.22318196296692,194,149.27837538719177 33,197,413.2939670085907,198,49.13954973220825,194,149.69017028808594 34,200,408.55559182167053,208,41.79141283035278,194,149.21917819976807 35,196,409.9758207798004,189,27.64315676689148,194,147.62873792648315 36,197,408.1155550479889,194,47.21198105812073,194,147.90398573875427 37,199,412.9177339076996,217,75.3260293006897,194,152.389484167099 38,195,413.65162658691406,197,56.222227573394775,194,148.9140920639038 39,196,406.7831847667694,196,33.56608486175537,194,146.55531525611877 40,162,376.0339472293854,166,32.595866680145264,170,144.9636528491974 41,162,377.5476269721985,183,36.15443444252014,170,147.16523098945618 42,162,380.8216028213501,169,73.12481117248535,170,151.93030548095703 43,163,377.19195222854614,167,26.03180742263794,170,146.38755416870117 44,162,375.4290704727173,165,31.497214555740356,170,145.87686395645142 45,162,381.92375802993774,177,65.6746084690094,170,151.02717685699463 46,162,377.6920804977417,167,41.89390730857849,170,149.11691117286682 47,162,379.4388999938965,168,47.881861448287964,170,146.81237983703613 48,163,386.09958243370056,176,90.63225603103638,170,154.9767725467682 49,162,376.02294278144836,178,49.68327236175537,170,147.34447741508484 50,163,395.1006145477295,170,60.02331042289734,166,149.5303943157196 51,164,398.0407438278198,173,58.35295605659485,166,148.20625829696655 52,161,387.7398009300232,160,36.16767406463623,166,146.93824124336243 53,160,397.3074097633362,181,87.42074513435364,166,151.60932040214539 54,161,394.0010869503021,161,35.39233636856079,166,145.9512619972229 55,168,397.33205008506775,180,93.66990065574646,166,153.4657781124115 
56,165,391.67463517189026,177,34.15181350708008,166,146.88838911056519 57,160,394.11439633369446,179,75.18911981582642,166,150.38912177085876 58,163,391.3193950653076,169,50.267693281173706,166,146.59030938148499 59,161,389.8388180732727,169,43.60353207588196,166,146.73621797561646 60,214,458.1954791545868,224,53.538702964782715,213,148.77755308151245 61,216,457.9216077327728,217,74.3746817111969,213,172.45012044906616 62,210,419.3990144729614,198,35.978588342666626,213,148.06180047988892 63,219,429.85972452163696,218,87.86180424690247,213,153.94083642959595 64,214,424.77522015571594,209,65.07919359207153,213,151.17670392990112 65,218,418.9645538330078,201,33.109193086624146,213,146.59753608703613 66,213,419.9968156814575,211,60.90353274345398,213,151.33484530448914 67,213,419.75107407569885,219,41.44067192077637,213,148.6664433479309 68,215,418.3867540359497,215,53.756091594696045,213,149.20613074302673 69,217,426.778751373291,207,55.369181394577026,213,150.33477354049683 70,199,410.5372402667999,215,60.27366662025452,208,149.8002016544342 71,198,409.2937562465668,204,59.25821495056152,208,151.39743947982788 72,196,409.5259928703308,215,35.602235317230225,208,145.2328040599823 73,201,406.344420671463,223,48.74324607849121,208,148.4908196926117 74,199,408.3418302536011,220,70.71654105186462,208,151.31224751472473 75,198,404.72881293296814,194,29.9748272895813,208,145.87402033805847 76,200,407.07944345474243,199,32.814154624938965,208,148.0822048187256 77,198,410.07060742378235,221,83.80286979675293,208,152.27385711669922 78,198,403.81956219673157,205,46.65635085105896,208,149.30409383773804 79,197,410.29728984832764,225,53.520286560058594,208,148.7795009613037 80,152,380.2354018688202,157,35.35539889335632,169,147.90625596046448 81,150,384.5501811504364,159,48.30092000961304,169,149.15482306480408 82,151,376.554048538208,157,25.061031818389893,169,146.199316740036 83,152,379.9583477973938,160,32.2420814037323,169,147.88309359550476 
84,154,381.87709069252014,159,46.78597569465637,169,148.42677807807922 85,150,384.82411456108093,176,71.57334589958191,169,152.0538854598999 86,154,380.4787724018097,153,32.610721588134766,169,145.2583565711975 87,154,381.19555377960205,160,35.47093892097473,169,146.1291468143463 88,154,375.84812235832214,158,32.85135459899902,169,146.2271864414215 89,150,377.0024824142456,172,31.29044246673584,169,146.2692093849182 90,188,408.0042040348053,172,33.111896276474,172,146.74985241889954 91,188,415.29174041748047,183,46.34089708328247,172,148.31623196601868 92,186,408.48268270492554,171,65.71342968940735,172,151.4460847377777 93,187,408.3678331375122,173,50.35272192955017,172,149.03712511062622 94,188,410.88290190696716,177,33.343971252441406,172,147.3882360458374 95,187,414.69072556495667,181,56.616501331329346,172,149.22642374038696 96,185,413.3330612182617,178,45.60746383666992,172,148.60242438316345 97,187,406.33209466934204,181,42.515186071395874,172,146.75966262817383 98,187,405.92414903640747,183,27.310251474380493,172,145.60900115966797 99,186,415.5781035423279,190,51.28600215911865,172,146.98746609687805 ================================================ FILE: pso.py ================================================ # coding: utf-8 import numpy as np import random import math import cmath import time import os # ----------------------Optimization scheme---------------------------------- # Optimization ideas: # 1. Increase the convergence factor k; # 2. Dynamic change of inertia factor W; # 3. Using PSO local search algorithm(Ring method) # 4. 
# The probability of position variation is added
# ----------------------Set PSO Parameter---------------------------------


class PSO():
    """Particle Swarm Optimization for the multi-UAV task-assignment problem.

    A particle is a continuous vector of length ``dim + uav_num - 1`` with
    entries in [0, 1): the first ``dim`` entries encode a visiting order over
    all targets, the remaining ``uav_num - 1`` entries encode split points
    that partition that order into one route per UAV.  Fitness is the total
    reward of the targets each UAV reaches within the global time limit.

    Interface used by evaluate.py: construct, then call ``run()``, which
    returns ``(task_assignment, elapsed_seconds)``.
    """

    def __init__(self, uav_num, target_num, targets, vehicles_speed, time_lim):
        """Store problem data and allocate the swarm.

        uav_num        -- number of vehicles
        target_num     -- number of targets (excluding the depot)
        targets        -- (target_num+1) x 4 array: row 0 is the depot;
                          columns are x, y, reward, stay time
        vehicles_speed -- per-UAV speed, length uav_num
        time_lim       -- mission time limit shared by all UAVs
        """
        self.uav_num = uav_num
        self.dim = target_num
        self.targets = targets
        self.vehicles_speed = vehicles_speed
        self.time_all = time_lim
        self.pN = 2 * (self.uav_num + self.dim)   # number of particles
        self.max_iter = 0                         # set in fun_get_initial_parameter
        # Pairwise distance table (dim+1)x(dim+1); filled in fun_get_initial_parameter.
        self.Distance = np.zeros((target_num + 1, target_num + 1))
        self.Value = np.zeros(target_num + 1)     # reward of each target
        self.Stay_time = []                       # stay time of each target
        # PSO coefficients.
        self.w = 0.8       # inertia (re-computed every iteration)
        self.c1 = 2        # cognitive coefficient
        self.c2 = 2        # social coefficient
        self.r1 = 0.6      # fixed weighting factors, as in the original design
        self.r2 = 0.3
        self.k = 0         # convergence factor, computed in init_Population
        self.wini = 0.9    # inertia schedule endpoints (wini -> wend)
        self.wend = 0.4
        n = self.dim + self.uav_num - 1           # particle length
        self.X = np.zeros((self.pN, n))           # positions of all particles
        self.V = np.zeros((self.pN, n))           # velocities of all particles
        self.pbest = np.zeros((self.pN, n))       # personal best positions
        # FIX: gbest is 1-D so it always has the shape of one particle.
        # (It used to start as (1, n), which broke broadcasting in the
        # velocity update if no particle ever improved on fitness 0.)
        self.gbest = np.zeros(n)                  # global best position
        self.gbest_ring = np.zeros((self.pN, n))  # per-particle neighborhood best
        self.p_fit = np.zeros(self.pN)            # personal best fitness
        self.fit = 0                              # global best fitness
        self.ring = []                            # neighborhood index lists
        self.ring_fit = np.zeros(self.pN)         # current fitness of each particle
        # Mutation parameters.
        self.p1 = 0.4      # probability that a mutation step happens at all
        self.p2 = 0.5      # fraction of particles that mutate
        self.p3 = 0.5      # fraction of coordinates mutated in a particle
        self.TEST = []
        self.test_num = 0
        self.uav_best = []                        # best decoded plan so far
        self.time_out = np.zeros(self.uav_num)    # per-UAV elapsed time of the plan
        self.cal_time = 0

    # ------------------Get Initial parameter------------------
    def fun_get_initial_parameter(self):
        """Derive the iteration budget and build distance/value/stay tables."""
        # Iteration budget grows with problem size, capped at 4100.
        self.max_iter = 40 * (self.uav_num + self.dim)
        if self.max_iter > 4100:
            self.max_iter = 4100
        Targets = self.targets
        self.Stay_time = Targets[:, 3]
        self.Distance = np.zeros((self.dim + 1, self.dim + 1))
        self.Value = np.zeros(self.dim + 1)
        for i in range(self.dim + 1):
            self.Value[i] = Targets[i, 2]
            for j in range(i):
                # Euclidean distance, stored symmetrically.
                dx = Targets[i, 0] - Targets[j, 0]
                dy = Targets[i, 1] - Targets[j, 1]
                self.Distance[i][j] = math.sqrt(dx * dx + dy * dy)
                self.Distance[j][i] = self.Distance[i][j]

    # ------------------Transfer_Function---------------------
    def fun_Transfer(self, X):
        """Decode a continuous particle into discrete order keys and splits.

        Returns (X_path, Sep): X_path[i] indexes into the shrinking list of
        unvisited targets (see position()); Sep[i] is the number of targets
        assigned to UAV i (Sep sums to dim).
        """
        # First dim entries: order keys.  Entry i selects among dim-i
        # remaining targets, so floor(x * (dim - i)) is a valid index
        # because every coordinate stays below 1.
        X1 = X[0:self.dim]
        X_path = [math.floor(X1[i] * (self.dim - i)) for i in range(len(X1))]
        # Remaining uav_num-1 entries: split points in [0, dim].
        X2 = X[self.dim:]
        X_rank = [math.floor(x * (self.dim + 1)) for x in X2]
        # Sort the split points and pad with 0 and dim so consecutive
        # differences give each UAV's route length.
        bounds = [0] + sorted(X_rank) + [self.dim]
        Sep = [bounds[i + 1] - bounds[i] for i in range(len(bounds) - 1)]
        return X_path, Sep

    # -------------------Obtain the Real Flight Path Sequence of Particles---
    def position(self, X):
        """Turn order keys into an actual permutation of targets 1..dim."""
        remaining = list(range(1, self.dim + 1))
        path = []
        for key in X:
            idx = int(key)
            path.append(remaining[idx])
            del remaining[idx]
        return path

    # ---------------------Fitness_Computing Function------------------------
    def function(self, X):
        """Fitness of particle X: summed reward of targets reached in time.

        Each UAV flies its decoded route starting at the depot; a target's
        reward counts only while the UAV's accumulated time (travel + stay)
        is still within the global limit.
        """
        X_path, Sep = self.fun_Transfer(X)
        order = self.position(X_path)
        # Split the global visiting order into one route per UAV.
        UAV = []
        l = 0
        for i in range(self.uav_num):
            UAV.append(order[l:l + Sep[i]])
            l += Sep[i]
        fitness = 0
        for i in range(self.uav_num):
            t = 0
            for j in range(Sep[i]):
                m1 = UAV[i][j]
                if j == 0:
                    # First leg starts at the depot (index 0).
                    t = t + self.Distance[0, m1] / self.vehicles_speed[i] + self.Stay_time[m1]
                else:
                    m2 = UAV[i][j - 1]
                    t = t + self.Distance[m1][m2] / self.vehicles_speed[i] + self.Stay_time[m1]
                if t <= self.time_all:
                    fitness = fitness + self.Value[m1]
        return fitness

    # ----------------------------variation----------------------------------
    def variation_fun(self):
        """Randomly mutate part of the swarm, then refresh the best records."""
        p1 = np.random.uniform(0, 1)  # does a mutation step happen at all?
        if p1 < self.p1:
            for i in range(self.pN):
                p2 = np.random.uniform(0, 1)
                if p2 < self.p2:  # this particle mutates
                    # Number of coordinates to overwrite with fresh values.
                    m = int(self.p3 * (self.dim + self.uav_num - 1))
                    for j in range(m):
                        replace_position = math.floor(
                            np.random.uniform(0, 1) * (self.dim + self.uav_num - 1))
                        replace_value = np.random.uniform(0, 1)
                        self.X[i][replace_position] = replace_value
        # Update pbest & gbest so the records stay consistent with X
        # (also primes ring_fit before the first neighborhood pass).
        for i in range(self.pN):
            temp = self.function(self.X[i])
            self.ring_fit[i] = temp
            if temp > self.p_fit[i]:
                self.p_fit[i] = temp
                self.pbest[i] = self.X[i]
                if self.p_fit[i] > self.fit:
                    # FIX: copy -- assigning the row itself would alias gbest
                    # to X[i], which keeps moving in later iterations.
                    self.gbest = self.X[i].copy()
                    self.fit = self.p_fit[i]

    # ---------------------Population Initialization-------------------------
    def init_Population(self):
        """Randomize positions/velocities; seed pbest/gbest and the ring."""
        for i in range(self.pN):
            x = np.random.uniform(0, 1, self.dim + self.uav_num - 1)
            self.X[i, :] = x
            v = np.random.uniform(0, 0.4, self.dim + self.uav_num - 1)
            self.V[i, :] = v
            self.pbest[i] = self.X[i]
            tmp = self.function(self.X[i])
            self.p_fit[i] = tmp
            if tmp > self.fit:
                self.fit = tmp
                # FIX: copy instead of aliasing the live row of X.
                self.gbest = self.X[i].copy()
        # Convergence factor k = 2 / |2 - phi - sqrt(phi^2 - 4*phi)|.
        # With c1 = c2 = 2, phi = 4 and k evaluates to 1.0.
        phi = self.c1 + self.c2
        k = abs(phi * phi - 4 * phi)
        k = cmath.sqrt(k)
        k = abs(2 - phi - k)
        self.k = 2 / k
        # Each particle starts with itself as its only ring neighbor.
        for i in range(self.pN):
            self.ring.append([i])
        # Optional fixed test set (empty unless test_num is set).
        self.TEST = np.zeros((self.test_num, self.dim + self.uav_num - 1))
        for i in range(self.test_num):
            test = np.random.uniform(0, 1, self.dim + self.uav_num - 1)
            self.TEST[i, :] = test

    # ----------------------Update Particle Position-------------------------
    def iterator(self):
        """Main PSO loop; returns the best-fitness history per iteration."""
        fitness = []
        fitness_old = 0
        k = 0  # current ring radius
        for t in range(self.max_iter):
            # Linearly decaying inertia from wini to wend.
            w = (self.wini - self.wend) * (self.max_iter - t) / self.max_iter + self.wend
            self.w = w
            # Variation
            self.variation_fun()
            l1 = len(self.ring[0])
            if l1 < self.pN:
                # ---- Local PSO (ring topology) ----
                # Every other iteration, widen each particle's ring by one
                # neighbor on each side (indices wrap around the swarm).
                if not (t % 2):
                    k = k + 1
                    for i in range(self.pN):
                        m1 = i - k
                        if m1 < 0:
                            m1 = self.pN + m1
                        m2 = i + k
                        if m2 > self.pN - 1:
                            m2 = m2 - self.pN
                        self.ring[i].append(m1)
                        self.ring[i].append(m2)
                # Best known position within each particle's ring.
                l_ring = len(self.ring[0])
                for i in range(self.pN):
                    fitness1 = 0
                    for j in range(l_ring):
                        m1 = self.ring[i][j]
                        fitness2 = self.ring_fit[m1]
                        if fitness2 > fitness1:
                            self.gbest_ring[i] = self.X[m1]
                            fitness1 = fitness2
                # Velocity and position update against the ring best.
                for i in range(self.pN):
                    self.V[i] = self.k * (self.w * self.V[i] + self.c1 * self.r1 * (self.pbest[i] - self.X[i])) + \
                        self.c2 * self.r2 * (self.gbest_ring[i] - self.X[i])
                    self.X[i] = self.X[i] + self.V[i]
            else:
                # ---- Global PSO (ring has grown to cover the swarm) ----
                for i in range(self.pN):
                    self.V[i] = self.k * (self.w * self.V[i] + self.c1 * self.r1 * (self.pbest[i] - self.X[i])) + \
                        self.c2 * self.r2 * (self.gbest - self.X[i])
                    self.X[i] = self.X[i] + self.V[i]
            # Clamp positions into [0, 1) so decoding stays valid.
            for i in range(self.pN):
                for j in range(self.dim + self.uav_num - 1):
                    if self.X[i][j] >= 1:
                        self.X[i][j] = 0.999
                    if self.X[i][j] < 0:
                        self.X[i][j] = 0
            # Update pbest & gbest.
            for i in range(self.pN):
                temp = self.function(self.X[i])
                self.ring_fit[i] = temp
                if temp > self.p_fit[i]:
                    self.p_fit[i] = temp
                    self.pbest[i] = self.X[i]
                    if self.p_fit[i] > self.fit:
                        # FIX: copy -- see variation_fun.
                        self.gbest = self.X[i].copy()
                        self.fit = self.p_fit[i]
            self.uav_best = self.fun_Data()
            fitness.append(self.fit)
            fitness_old = self.fit  # kept for parity; self.fit never decreases
        return fitness

    # ---------------------Data_Processing Function--------------------------
    def fun_Data(self):
        """Decode gbest into per-UAV target lists, dropping late targets.

        Also records each UAV's accumulated time in self.time_out
        (time of the last target kept within the limit).
        """
        X_path, Sep = self.fun_Transfer(self.gbest)
        order = self.position(X_path)
        UAV = []
        l = 0
        for i in range(self.uav_num):
            UAV.append(order[l:l + Sep[i]])
            l += Sep[i]
        UAV_Out = []
        for i in range(self.uav_num):
            t = 0
            UAV_Out.append([])
            for j in range(Sep[i]):
                m1 = UAV[i][j]
                if j == 0:
                    t = t + self.Distance[0, m1] / self.vehicles_speed[i] + self.Stay_time[m1]
                else:
                    m2 = UAV[i][j - 1]
                    t = t + self.Distance[m2][m1] / self.vehicles_speed[i] + self.Stay_time[m1]
                # Only targets reached within the time limit are kept.
                if t <= self.time_all:
                    UAV_Out[i].append(m1)
                    self.time_out[i] = t
        return UAV_Out

    # ---------------------TEST Function------------------------------
    def fun_TEST(self):
        """Evaluate the fixed TEST set (empty unless test_num was set)."""
        return [self.function(self.TEST[i]) for i in range(self.test_num)]

    # ---------------------Main----------------------------------------
    def run(self):
        """Set up, optimize, and return (best task assignment, wall time)."""
        print("PSO start, pid: %s" % os.getpid())
        start_time = time.time()
        self.fun_get_initial_parameter()
        self.init_Population()
        self.iterator()
        end_time = time.time()
        print("PSO result:", self.uav_best)
        print("PSO time:", end_time - start_time)
        return self.uav_best, end_time - start_time
The modeled problem and the evaluation results constitute a benchmark, which can be used to evaluate other algorithms used for multi-UAV task assignment problems. Notice that three algorithms run at three CPU cores respectively, which means that there is no parallel optimization in this benchmark. Please refer to the paper for more detail. K. Xiao, J. Lu, Y. Nie, L. Ma, X. Wang and G. Wang, "A Benchmark for Multi-UAV Task Assignment of an Extended Team Orienteering Problem," 2022 China Automation Congress (CAC), Xiamen, China, 2022, pp. 6966-6970, doi: 10.1109/CAC57257.2022.10054991. ArXiv preprint **[arXiv:2009.00363](https://arxiv.org/abs/2009.00363)** ## Usage ### 1. Algorithm input and output Algorithm input includes vehicle number (scalar), speeds of vehicles ($n\times1$ array), target number (scalar $n$), targets ($(n+1)\times4$ array, the first line is depot, the first column is x position, the second column is y position, the third column is reward and the fourth column is time consumption to finish the mission), time limit (scalar). The code below is the initialization of the class GA in `ga.py`. ```python def __init__(self, vehicle_num, vehicles_speed, target_num, targets, time_lim) ``` There should be a function called `run()` in the algorithm class, and the function should return a task assignment plan (array, e.g. [[28, 19, 11], [25, 22, 7, 16, 17, 23], [21, 26, 12, 9, 6, 3], [5, 15, 1], [18, 20, 29]], each subset is a vehicle path) and computational time usage (scalar). ### 2. Evaluate You can replace one algorithm below with another algorithm in `evaluate.py`, and then `python evaluate.py`. If you don't want to evaluate three algorithms together, you should modify the code properly (this is easy).
```python ga = GA(vehicle_num,env.vehicles_speed,target_num,env.targets,env.time_lim) aco = ACO(vehicle_num,target_num,env.vehicles_speed,env.targets,env.time_lim) pso = PSO(vehicle_num,target_num ,env.targets,env.vehicles_speed,env.time_lim) ga_result=p.apply_async(ga.run) aco_result=p.apply_async(aco.run) pso_result=p.apply_async(pso.run) p.close() p.join() ga_task_assignmet = ga_result.get()[0] env.run(ga_task_assignmet,'GA',i+1,j+1) re_ga[i].append((env.total_reward,ga_result.get()[1])) env.reset() aco_task_assignmet = aco_result.get()[0] env.run(aco_task_assignmet,'ACO',i+1,j+1) re_aco[i].append((env.total_reward,aco_result.get()[1])) env.reset() pso_task_assignmet = pso_result.get()[0] env.run(pso_task_assignmet,'PSO',i+1,j+1) re_pso[i].append((env.total_reward,pso_result.get()[1])) ``` ### 3. About reinforcement learning In `Env()` in `evaluate.py`, function `step` is used for reinforcement learning. Because this is still being developed, we cannot supply a demo. If your algorithm is reinforcement learning, you can try to train it with `Env()`. Your pull request and issue are welcome. ## Enhancement This [repository](https://github.com/dietmarwo/Multi-UAV-Task-Assignment-Benchmark) does great enhancement and you can use it for high performance. Thanks to [dietmarwo](https://github.com/dietmarwo) for the nice work. 1) GA uses [numba](https://numba.pydata.org/) for a dramatic speedup. Parameters are adapted so that the execution time remains the same: popsize 50 -> 300, iterations 500 -> 6000 For this reason GA now performs much better compared to the original version. 2) Experiments are configured so that wall time for small size is balanced. This means: increased effort for GA, decreased effort for ACO. For medium / large problem size you see which algorithms scale badly: Increase execution time superlinear in relation to the problem size. Avoid these for large problems. 
3) Adds a standard continuous optimization algorithm: [BiteOpt](https://github.com/avaneev/biteopt) from Aleksey Vaneev - using the same fitness function as GA.py. BiteOpt is the only algorithm included which works well with a large problem size. It is by far the simplest implementation, only the fitness function needs to be coded, since we can apply a continuous optimization library [fcmaes](https://github.com/dietmarwo/fast-cma-es). Execute "pip install fcmaes" to use it. 4) Uses NestablePool to enable BiteOpt multiprocessing: Many BiteOpt optimization runs are performed in parallel and the best result is returned. Set workers=1 if you want to test BiteOpt single threaded. 5) All results are created using an AMD 5950x 16 core processor utilizing all cores: 29 parallel BiteOpt threads, the other 3 algorithms remain single threaded. 6) Added test_bite.py where you can monitor the progress of BiteOpt applied to the problem. 7) Added test_mode.py where you can monitor the progress of fcmaes-MODE applied to the problem and compare it to BiteOpt for the same instance. fcmaes-MODE is a multi-objective optimizer applied to a multi-objective variant of the problem. Objectives are: reward (to be maximized), maximal time (to be minimized), energy (to be minimized). The maximal time constraint from the single objective case is still valid. 
Energy consumption is approximated by `sum(dt*v*v)` ================================================ FILE: small_size_result.csv ================================================ ,aco_reward,aco_time,ga_reward,ga_time,pso_reward,pso_time 0,89,88.16137266159058,119,12.685494184494019,107,13.414397716522217 1,88,91.05341100692749,106,9.964252710342407,107,16.068947315216064 2,89,91.91174960136414,109,14.250640153884888,107,16.815466165542603 3,89,92.22553205490112,110,21.893151998519897,107,17.22126531600952 4,89,92.13908076286316,101,10.614777565002441,107,16.337905406951904 5,89,92.50885510444641,121,15.014703512191772,107,17.107726097106934 6,90,92.41064667701721,109,17.080315113067627,107,17.380303859710693 7,89,92.84331011772156,119,20.32515835762024,107,16.93877387046814 8,88,92.46119904518127,114,19.172377824783325,107,16.968574047088623 9,90,94.00208711624146,127,21.729254484176636,107,17.066615104675293 10,96,89.81863832473755,117,22.284271717071533,107,17.06852388381958 11,99,88.96507263183594,113,19.54985523223877,107,17.087736129760742 12,96,88.38604950904846,109,14.674241304397583,107,16.747975826263428 13,97,89.2066752910614,110,11.188013553619385,107,16.291066884994507 14,96,89.45146775245667,115,18.54390525817871,107,17.09095311164856 15,97,91.43690013885498,118,34.3544921875,107,17.089507579803467 16,101,87.81503558158875,116,16.678542375564575,107,17.032145738601685 17,95,87.75125932693481,116,13.018948793411255,107,16.583208084106445 18,97,87.48667025566101,103,11.756951570510864,107,16.502718687057495 19,96,89.22999024391174,117,23.240997076034546,107,17.057995319366455 20,96,90.63653469085693,113,20.940587043762207,102,17.042691230773926 21,91,90.38828539848328,104,17.667251586914062,102,17.154610633850098 22,87,90.0098135471344,98,17.66662073135376,102,16.96206521987915 23,87,89.37335777282715,97,12.019228219985962,102,16.48302674293518 24,85,90.64067506790161,114,23.917268753051758,102,17.08442449569702 
25,90,91.06858992576599,108,20.25761079788208,102,17.10637593269348 26,87,89.10331749916077,103,10.527708768844604,102,16.272404670715332 27,86,90.21255588531494,111,17.25125241279602,102,17.043729782104492 28,88,90.96662783622742,111,24.268309354782104,102,17.005598783493042 29,88,90.03387355804443,109,13.798240184783936,102,16.55376434326172 30,67,85.81624722480774,75,19.326607704162598,72,17.03825044631958 31,67,85.53707933425903,80,22.211409330368042,72,17.13922429084778 32,67,85.78650307655334,79,19.998062133789062,72,17.11928629875183 33,67,84.82013392448425,81,11.410378456115723,72,16.370493173599243 34,67,86.44931316375732,79,28.120026111602783,72,17.011059761047363 35,67,84.43776869773865,78,14.097299098968506,72,16.78164315223694 36,67,84.38073658943176,75,10.875871896743774,72,16.290704488754272 37,67,85.27193593978882,77,18.085707426071167,72,17.11751627922058 38,67,86.35783362388611,77,20.6546049118042,72,17.204835653305054 39,67,84.17668747901917,70,9.783939123153687,72,16.234663009643555 40,90,81.11320877075195,109,25.354978561401367,94,17.01652693748474 41,92,81.4474401473999,115,25.54797601699829,94,17.27031111717224 42,90,82.45976829528809,105,21.565988302230835,94,17.12051248550415 43,86,81.89453530311584,116,25.38959002494812,94,17.148070096969604 44,90,80.1114649772644,108,12.730968475341797,94,16.551698684692383 45,88,80.46094584465027,112,16.600263595581055,94,17.025696277618408 46,92,80.10696768760681,108,10.806996822357178,94,16.190462350845337 47,94,82.78089380264282,106,23.92259168624878,94,17.245839834213257 48,87,82.29063534736633,110,17.997102975845337,94,17.09163522720337 49,94,79.32447719573975,106,10.254770755767822,94,16.159439086914062 50,63,81.13122916221619,85,19.498087406158447,80,17.062880754470825 51,64,78.95552730560303,73,11.968117952346802,80,16.42030930519104 52,64,80.55506777763367,83,19.04611301422119,80,17.044256925582886 53,64,80.00666880607605,79,11.748615503311157,80,16.27301836013794 
54,66,80.77283143997192,88,11.66093134880066,80,16.391525506973267 55,64,82.00660061836243,88,24.136553049087524,80,17.06270742416382 56,66,81.55947065353394,85,16.429919242858887,80,16.999276161193848 57,63,81.114262342453,87,22.340787172317505,80,17.043132543563843 58,63,81.04138517379761,78,18.89263892173767,80,17.08504867553711 59,64,80.52739334106445,93,13.616387367248535,80,16.57269597053528 60,82,86.7540352344513,99,13.066121578216553,94,16.544466018676758 61,81,87.89562153816223,104,25.433122873306274,94,17.053762197494507 62,82,86.26548409461975,99,12.918514966964722,94,16.66313886642456 63,83,87.24347186088562,106,21.98454189300537,94,17.103214263916016 64,82,86.60797476768494,98,12.473426580429077,94,16.57561159133911 65,84,87.11871242523193,103,10.223820686340332,94,16.257148027420044 66,81,88.71003365516663,94,24.597357988357544,94,17.143748998641968 67,82,86.51209807395935,96,14.048967838287354,94,16.72418999671936 68,81,86.93004393577576,91,17.69091010093689,94,17.053284645080566 69,81,87.69031119346619,89,13.707175254821777,94,16.6680109500885 70,79,85.87792015075684,86,9.261616945266724,91,16.168455839157104 71,79,88.14101362228394,96,23.5689058303833,91,17.132070302963257 72,79,88.25272393226624,96,26.78891372680664,91,17.096915006637573 73,79,89.53712511062622,96,28.207839250564575,91,17.064720630645752 74,79,87.57802844047546,97,14.012932777404785,91,16.740302085876465 75,79,87.26567029953003,86,12.413164615631104,91,16.392486333847046 76,79,86.66206288337708,89,11.749331951141357,91,16.469265460968018 77,79,87.8359739780426,85,17.515364170074463,91,17.102705717086792 78,80,88.0450918674469,94,23.927947282791138,91,17.082552194595337 79,79,87.65122652053833,93,17.091141939163208,91,17.03953456878662 80,74,84.19559216499329,91,26.049842834472656,83,16.9984188079834 81,75,82.51725816726685,85,16.961402416229248,83,17.110384464263916 82,75,82.8316662311554,89,23.545618057250977,83,17.114362478256226 
83,73,82.98265886306763,84,23.422379732131958,83,17.07512879371643 84,73,81.70284986495972,92,18.57567572593689,83,17.09586262702942 85,76,83.29867267608643,90,24.692944288253784,83,17.056284427642822 86,76,81.04190707206726,93,11.550926208496094,83,16.41378617286682 87,74,82.97272729873657,94,18.214133262634277,83,17.02610754966736 88,72,81.47475123405457,86,10.687882900238037,83,16.277140855789185 89,73,82.54533529281616,89,17.926280975341797,83,17.130127668380737 90,88,89.74022126197815,128,20.152071475982666,107,17.193649530410767 91,88,88.83557057380676,115,14.007786512374878,107,16.752809762954712 92,89,89.99075937271118,120,21.46599245071411,107,17.087929248809814 93,89,90.57273888587952,116,22.19508957862854,107,17.231003284454346 94,91,89.41200828552246,109,13.885975360870361,107,16.87487244606018 95,88,89.83750343322754,115,20.845364570617676,107,17.157454013824463 96,91,91.00219821929932,124,27.728176832199097,107,17.07404375076294 97,93,91.0686559677124,117,25.557191371917725,107,17.23515510559082 98,87,90.42128872871399,112,15.197402715682983,107,16.944777965545654 99,90,89.05649876594543,100,14.382555484771729,107,16.822237491607666