[
  {
    "path": ".gitignore",
    "content": "__pycache__"
  },
  {
    "path": "aco.py",
    "content": "import random\nimport numpy as np\nimport math\nimport time\nimport os\n\nclass ACO():\n    def __init__(self, vehicle_num, target_num,vehicle_speed, target, time_lim):\n        self.num_type_ant = vehicle_num\n        self.num_city = target_num+1 #number of cities\n        self.group = 200\n        self.num_ant = self.group*self.num_type_ant #number of ants\n        self.ant_vel = vehicle_speed\n        self.cut_time = time_lim\n        self.oneee = np.zeros((4,1))\n        self.target = target\n        self.alpha = 1 #pheromone \n        self.beta = 2  \n        self.k1 = 0.03\n        self.iter_max = 150\n    #matrix of the distances between cities \n    def distance_matrix(self):\n        dis_mat = []\n        for i in range(self.num_city):\n            dis_mat_each = []\n            for j in range(self.num_city):\n                dis = math.sqrt(pow(self.target[i][0]-self.target[j][0],2)+pow(self.target[i][1]-self.target[j][1],2))\n                dis_mat_each.append(dis)\n            dis_mat.append(dis_mat_each)\n        return dis_mat\n    def run(self):\n        print(\"ACO start, pid: %s\" % os.getpid())\n        start_time = time.time()\n        #distances of nodes\n        dis_list = self.distance_matrix()\n        dis_mat = np.array(dis_list)\n        value_init = self.target[:,2].transpose()\n        delay_init = self.target[:,3].transpose()        \n        pheromone_mat = np.ones((self.num_type_ant,self.num_city,self.num_city))\n        #velocity of ants\n        path_new = [[0]for i in range (self.num_type_ant)]\n        count_iter = 0\n        while count_iter < self.iter_max:\n            path_sum = np.zeros((self.num_ant,1))\n            time_sum = np.zeros((self.num_ant,1))\n            value_sum = np.zeros((self.num_ant,1))\n            path_mat=[[0]for i in range (self.num_ant)]\n            value = np.zeros((self.group,1))\n            atten = np.ones((self.num_type_ant,1)) * 0.2\n            for ant in range(self.num_ant):\n  
              ant_type = ant % self.num_type_ant\n                visit = 0\n                if ant_type == 0:\n                    unvisit_list=list(range(1,self.num_city))#have not visit\n                for j in range(1,self.num_city):\n            #choice of next city\n                    trans_list=[]\n                    tran_sum=0\n                    trans=0\n                    #if len(unvisit_list)==0:\n                        #print('len(unvisit_list)==0')\n                    for k in range(len(unvisit_list)):  # to decide which node to visit\n                        trans +=np.power(pheromone_mat[ant_type][visit][unvisit_list[k]],self.alpha)*np.power(value_init[unvisit_list[k]]*self.ant_vel[ant_type]/(dis_mat[visit][unvisit_list[k]]*delay_init[unvisit_list[k]]),self.beta)\n                        #trans +=np.power(pheromone_mat[ant_type][unvisit_list[k]],self.alpha)*np.power(0.05*value_init[unvisit_list[k]],self.beta)\n                        trans_list.append(trans)\n                    tran_sum = trans        \n                    rand = random.uniform(0,tran_sum)\n                    for t in range(len(trans_list)):\n                        if(rand <= trans_list[t]):\n                            visit_next = unvisit_list[t]\n                            break\n                        else:        \n                            continue\n                    path_mat[ant].append(visit_next)\n                    path_sum[ant] += dis_mat[path_mat[ant][j-1]][path_mat[ant][j]]\n                    time_sum[ant] += path_sum[ant] / self.ant_vel[ant_type] + delay_init[visit_next]\n                    if time_sum[ant] > self.cut_time:\n                        time_sum[ant]-=path_sum[ant] / self.ant_vel[ant_type] + delay_init[visit_next]                      \n                        path_mat[ant].pop()                \n                        break\n                    value_sum[ant] += value_init[visit_next]\n                    
unvisit_list.remove(visit_next)#update\n                    visit = visit_next\n                if (ant_type) == self.num_type_ant-1:\n                    small_group = int(ant/self.num_type_ant)\n                    for k in range (self.num_type_ant):\n                        value[small_group]+= value_sum[ant-k]\n            #iteration\n            if count_iter == 0:\n                value_new = max(value)\n                value = value.tolist()\n                for k in range (0,self.num_type_ant):\n                    path_new[k] = path_mat[value.index(value_new)*self.num_type_ant+k]\n                    path_new[k].remove(0)\n            else:\n                if max(value) > value_new:\n                    value_new = max(value)\n                    value = value.tolist()\n                    for k in range (0,self.num_type_ant):\n                        path_new[k] = path_mat[value.index(value_new)*self.num_type_ant+k]\n                        path_new[k].remove(0)\n\n            #update pheromone\n            pheromone_change = np.zeros((self.num_type_ant,self.num_city,self.num_city))\n            for i in range(self.num_ant):\n                length = len(path_mat[i])\n                m = i%self.num_type_ant\n                n = int(i/self.num_type_ant)\n                for j in range(length-1):   \n                    pheromone_change[m][path_mat[i][j]][path_mat[i][j+1]]+= value_init[path_mat[i][j+1]]*self.ant_vel[m]/(dis_mat[path_mat[i][j]][path_mat[i][j+1]]*delay_init[path_mat[i][j+1]])\n                atten[m] += (value_sum[i]/(np.power((value_new-value[n]),4)+1))/self.group\n\n            for k in range (self.num_type_ant):\n                pheromone_mat[k]=(1-atten[k])*pheromone_mat[k]+pheromone_change[k]\n            count_iter += 1\n\n        print(\"ACO result:\", path_new)\n        end_time = time.time()\n        print(\"ACO time:\", end_time - start_time)\n        return path_new, end_time - start_time\n\n"
  },
  {
    "path": "evaluate.py",
    "content": "import numpy as np\nimport matplotlib.pyplot as plt\nimport random\nimport pandas as pd\nimport copy\nfrom multiprocessing import Pool\nfrom ga import GA\nfrom aco import ACO\nfrom pso import PSO\n\nclass Env():\n    def __init__(self, vehicle_num, target_num, map_size, visualized=True, time_cost=None, repeat_cost=None):\n        self.vehicles_position = np.zeros(vehicle_num,dtype=np.int32)\n        self.vehicles_speed = np.zeros(vehicle_num,dtype=np.int32)\n        self.targets = np.zeros(shape=(target_num+1,4),dtype=np.int32)\n        if vehicle_num==5:\n            self.size='small'\n        if vehicle_num==10:\n            self.size='medium'\n        if vehicle_num==15:\n            self.size='large'\n        self.map_size = map_size\n        self.speed_range = [10, 15, 30]\n        #self.time_lim = 1e6\n        self.time_lim = self.map_size / self.speed_range[1]\n        self.vehicles_lefttime = np.ones(vehicle_num,dtype=np.float32) * self.time_lim\n        self.distant_mat = np.zeros((target_num+1,target_num+1),dtype=np.float32)\n        self.total_reward = 0\n        self.reward = 0\n        self.visualized = visualized\n        self.time = 0\n        self.time_cost = time_cost\n        self.repeat_cost = repeat_cost\n        self.end = False\n        self.assignment = [[] for i in range(vehicle_num)]\n        self.task_generator()\n        \n    def task_generator(self):\n        for i in range(self.vehicles_speed.shape[0]):\n            choose = random.randint(0,2)\n            self.vehicles_speed[i] = self.speed_range[choose]\n        for i in range(self.targets.shape[0]-1):\n            self.targets[i+1,0] = random.randint(1,self.map_size) - 0.5*self.map_size # x position\n            self.targets[i+1,1] = random.randint(1,self.map_size) - 0.5*self.map_size # y position\n            self.targets[i+1,2] = random.randint(1,10) # reward\n            self.targets[i+1,3] = random.randint(5,30) # time consumption to finish the mission  \n       
 for i in range(self.targets.shape[0]):\n            for j in range(self.targets.shape[0]):\n                self.distant_mat[i,j] = np.linalg.norm(self.targets[i,:2]-self.targets[j,:2])\n        self.targets_value = copy.deepcopy((self.targets[:,2]))\n        \n    def step(self, action):\n        count = 0\n        for j in range(len(action)):\n            k = action[j]\n            delta_time = self.distant_mat[self.vehicles_position[j],k] / self.vehicles_speed[j] + self.targets[k,3]\n            self.vehicles_lefttime[j] = self.vehicles_lefttime[j] - delta_time\n            if self.vehicles_lefttime[j] < 0:\n                count = count + 1\n                continue\n            else:\n                if k == 0:\n                    self.reward = - self.repeat_cost\n                else:\n                    self.reward = self.targets[k,2] - delta_time * self.time_cost + self.targets[k,2]\n                    if self.targets[k,2] == 0:\n                        self.reward = self.reward - self.repeat_cost\n                    self.vehicles_position[j] = k\n                    self.targets[k,2] = 0\n                self.total_reward = self.total_reward + self.reward\n            self.assignment[j].append(action)\n        if count == len(action):\n            self.end = True\n        \n    def run(self, assignment, algorithm, play, rond):\n        self.assignment = assignment\n        self.algorithm = algorithm\n        self.play = play\n        self.rond = rond\n        self.get_total_reward()\n        if self.visualized:\n            self.visualize()        \n            \n    def reset(self):\n        self.vehicles_position = np.zeros(self.vehicles_position.shape[0],dtype=np.int32)\n        self.vehicles_lefttime = np.ones(self.vehicles_position.shape[0],dtype=np.float32) * self.time_lim\n        self.targets[:,2] = self.targets_value\n        self.total_reward = 0\n        self.reward = 0\n        self.end = False\n        \n    def get_total_reward(self):\n  
      for i in range(len(self.assignment)):\n            speed = self.vehicles_speed[i]\n            for j in range(len(self.assignment[i])):\n                position = self.targets[self.assignment[i][j],:4]\n                self.total_reward = self.total_reward + position[2]\n                if j == 0:\n                    self.vehicles_lefttime[i] = self.vehicles_lefttime[i] - np.linalg.norm(position[:2]) / speed - position[3]\n                else:\n                    self.vehicles_lefttime[i] = self.vehicles_lefttime[i] - np.linalg.norm(position[:2]-position_last[:2]) / speed - position[3]\n                position_last = position\n                if self.vehicles_lefttime[i] > self.time_lim:\n                    self.end = True\n                    break\n            if self.end:\n                self.total_reward = 0\n                break\n            \n    def visualize(self):\n        if self.assignment == None:\n            plt.scatter(x=0,y=0,s=200,c='k')\n            plt.scatter(x=self.targets[1:,0],y=self.targets[1:,1],s=self.targets[1:,2]*10,c='r')\n            plt.title('Target distribution')\n            plt.savefig('task_pic/'+self.size+'/'+self.algorithm+ \"-%d-%d.png\" % (self.play,self.rond))\n            plt.cla()\n        else:\n            plt.title('Task assignment by '+self.algorithm +', total reward : '+str(self.total_reward))     \n            plt.scatter(x=0,y=0,s=200,c='k')\n            plt.scatter(x=self.targets[1:,0],y=self.targets[1:,1],s=self.targets[1:,2]*10,c='r')\n            for i in range(len(self.assignment)):\n                trajectory = np.array([[0,0,20]])\n                for j in range(len(self.assignment[i])):\n                    position = self.targets[self.assignment[i][j],:3]\n                    trajectory = np.insert(trajectory,j+1,values=position,axis=0)  \n                plt.scatter(x=trajectory[1:,0],y=trajectory[1:,1],s=trajectory[1:,2]*10,c='b')\n                plt.plot(trajectory[:,0], trajectory[:,1]) 
\n            plt.savefig('task_pic/'+self.size+'/'+self.algorithm+ \"-%d-%d.png\" % (self.play,self.rond))\n            plt.cla()\n            \ndef evaluate(vehicle_num, target_num, map_size):\n    if vehicle_num==5:\n        size='small'\n    if vehicle_num==10:\n        size='medium'\n    if vehicle_num==15:\n        size='large'\n    re_ga=[[] for i in range(10)]\n    re_aco=[[] for i in range(10)]\n    re_pso=[[] for i in range(10)]\n    for i in range(10):\n        env = Env(vehicle_num,target_num,map_size,visualized=True)\n        for j in range(10):\n            p=Pool(3)\n            ga = GA(vehicle_num,env.vehicles_speed,target_num,env.targets,env.time_lim)\n            aco = ACO(vehicle_num,target_num,env.vehicles_speed,env.targets,env.time_lim)\n            pso = PSO(vehicle_num,target_num ,env.targets,env.vehicles_speed,env.time_lim)\n            ga_result=p.apply_async(ga.run)\n            aco_result=p.apply_async(aco.run)\n            pso_result=p.apply_async(pso.run)\n            p.close()\n            p.join()\n            ga_task_assignmet = ga_result.get()[0]\n            env.run(ga_task_assignmet,'GA',i+1,j+1)\n            re_ga[i].append((env.total_reward,ga_result.get()[1]))\n            env.reset()\n            aco_task_assignmet = aco_result.get()[0]\n            env.run(aco_task_assignmet,'ACO',i+1,j+1)\n            re_aco[i].append((env.total_reward,aco_result.get()[1]))\n            env.reset()\n            pso_task_assignmet = pso_result.get()[0]\n            env.run(pso_task_assignmet,'PSO',i+1,j+1)\n            re_pso[i].append((env.total_reward,pso_result.get()[1]))\n            env.reset()\n    x_index=np.arange(10)\n    ymax11=[]\n    ymax12=[]\n    ymax21=[]\n    ymax22=[]\n    ymax31=[]\n    ymax32=[]\n    ymean11=[]\n    ymean12=[]\n    ymean21=[]\n    ymean22=[]\n    ymean31=[]\n    ymean32=[]\n    for i in range(10):\n        tmp1=[re_ga[i][j][0] for j in range(10)]\n        tmp2=[re_ga[i][j][1] for j in range(10)]\n        
ymax11.append(np.amax(tmp1))\n        ymax12.append(np.amax(tmp2))\n        ymean11.append(np.mean(tmp1))\n        ymean12.append(np.mean(tmp2))\n        tmp1=[re_aco[i][j][0] for j in range(10)]\n        tmp2=[re_aco[i][j][1] for j in range(10)]\n        ymax21.append(np.amax(tmp1))\n        ymax22.append(np.amax(tmp2))\n        ymean21.append(np.mean(tmp1))\n        ymean22.append(np.mean(tmp2))\n        tmp1=[re_pso[i][j][0] for j in range(10)]\n        tmp2=[re_pso[i][j][1] for j in range(10)]\n        ymax31.append(np.amax(tmp1))\n        ymax32.append(np.amax(tmp2))\n        ymean31.append(np.mean(tmp1))\n        ymean32.append(np.mean(tmp2))\n    rects1=plt.bar(x_index,ymax11,width=0.1,color='b',label='ga_max_reward')\n    rects2=plt.bar(x_index+0.1,ymax21,width=0.1,color='r',label='aco_max_reward')\n    rects3=plt.bar(x_index+0.2,ymax31,width=0.1,color='g',label='pso_max_reward')\n    plt.xticks(x_index+0.1,x_index)\n    plt.legend()\n    plt.title('max_reward_for_'+size+'_size')\n    plt.savefig('max_reward_'+size+'.png')\n    plt.cla()\n    \n    rects1=plt.bar(x_index,ymax12,width=0.1,color='b',label='ga_max_time')\n    rects2=plt.bar(x_index+0.1,ymax22,width=0.1,color='r',label='aco_max_time')\n    rects3=plt.bar(x_index+0.2,ymax32,width=0.1,color='g',label='pso_max_time')\n    plt.xticks(x_index+0.1,x_index)\n    plt.legend()\n    plt.title('max_time_for_'+size+'_size')\n    plt.savefig('max_time_'+size+'.png')\n    plt.cla()\n    \n    rects1=plt.bar(x_index,ymean11,width=0.1,color='b',label='ga_mean_reward')\n    rects2=plt.bar(x_index+0.1,ymean21,width=0.1,color='r',label='aco_mean_reward')\n    rects3=plt.bar(x_index+0.2,ymean31,width=0.1,color='g',label='pso_mean_reward')\n    plt.xticks(x_index+0.1,x_index)\n    plt.legend()\n    plt.title('mean_reward_for_'+size+'_size')\n    plt.savefig('mean_reward_'+size+'.png')\n    plt.cla()\n    \n    rects1=plt.bar(x_index,ymean12,width=0.1,color='b',label='ga_mean_time')\n    
rects2=plt.bar(x_index+0.1,ymean22,width=0.1,color='r',label='aco_mean_time')\n    rects3=plt.bar(x_index+0.2,ymean32,width=0.1,color='g',label='pso_mean_time')\n    plt.xticks(x_index+0.1,x_index)\n    plt.legend()\n    plt.title('mean_time_for_'+size+'_size')\n    plt.savefig('mean_time_'+size+'.png')\n    plt.cla()\n    \n    t_ga=[]\n    r_ga=[]\n    t_aco=[]\n    r_aco=[]\n    t_pso=[]\n    r_pso=[]\n    for i in range(10):\n        for j in range(10):\n            t_ga.append(re_ga[i][j][1])\n            r_ga.append(re_ga[i][j][0])\n            t_aco.append(re_aco[i][j][1])\n            r_aco.append(re_aco[i][j][0])\n            t_pso.append(re_pso[i][j][1])\n            r_pso.append(re_pso[i][j][0])\n    dataframe = pd.DataFrame({'ga_time':t_ga,'ga_reward':r_ga,'aco_time':t_aco,'aco_reward':r_aco,'pso_time':t_pso,'pso_reward':r_pso})\n    dataframe.to_csv(size+'_size_result.csv',sep=',')\n    \n    \nif __name__=='__main__':\n    # small scale\n    evaluate(5,30,5e3)\n    # medium scale\n    evaluate(10,60,1e4)\n    # large scale\n    evaluate(15,90,1.5e4)\n"
  },
  {
    "path": "ga.py",
    "content": "import numpy as np\nimport random\nimport time\nimport os\n\n\nclass GA():\n    def __init__(self, vehicle_num, vehicles_speed, target_num, targets, time_lim):\n        # vehicles_speed,targets in the type of narray\n        self.vehicle_num = vehicle_num\n        self.vehicles_speed = vehicles_speed\n        self.target_num = target_num\n        self.targets = targets\n        self.time_lim = time_lim\n        self.map = np.zeros(shape=(target_num+1, target_num+1), dtype=float)\n        self.pop_size = 50\n        self.p_cross = 0.6\n        self.p_mutate = 0.005\n        for i in range(target_num+1):\n            self.map[i, i] = 0\n            for j in range(i):\n                self.map[j, i] = self.map[i, j] = np.linalg.norm(\n                    targets[i, :2]-targets[j, :2])\n        self.pop = np.zeros(\n            shape=(self.pop_size, vehicle_num-1+target_num-1), dtype=np.int32)\n        self.ff = np.zeros(self.pop_size, dtype=float)\n        for i in range(self.pop_size):\n            for j in range(vehicle_num-1):\n                self.pop[i, j] = random.randint(0, target_num)\n            for j in range(target_num-1):\n                self.pop[i, vehicle_num+j -\n                         1] = random.randint(0, target_num-j-1)\n            self.ff[i] = self.fitness(self.pop[i, :])\n        self.tmp_pop = np.array([])\n        self.tmp_ff = np.array([])\n        self.tmp_size = 0\n\n    def fitness(self, gene):\n        ins = np.zeros(self.target_num+1, dtype=np.int32)\n        seq = np.zeros(self.target_num, dtype=np.int32)\n        ins[self.target_num] = 1\n        for i in range(self.vehicle_num-1):\n            ins[gene[i]] += 1\n        rest = np.array(range(1, self.target_num+1))\n        for i in range(self.target_num-1):\n            seq[i] = rest[gene[i+self.vehicle_num-1]]\n            rest = np.delete(rest, gene[i+self.vehicle_num-1])\n        seq[self.target_num-1] = rest[0]\n        i = 0  # index of vehicle\n        pre = 0 
 # index of last target\n        post = 0  # index of ins/seq\n        t = 0\n        reward = 0\n        while i < self.vehicle_num:\n            if ins[post] > 0:\n                i += 1\n                ins[post] -= 1\n                pre = 0\n                t = 0\n            else:\n                t += self.targets[pre, 3]\n                past = self.map[pre, seq[post]]/self.vehicles_speed[i]\n                t += past\n                if t < self.time_lim:\n                    reward += self.targets[seq[post], 2]\n                pre = seq[post]\n                post += 1\n        return reward\n\n    def selection(self):\n        roll = np.zeros(self.tmp_size, dtype=float)\n        roll[0] = self.tmp_ff[0]\n        for i in range(1, self.tmp_size):\n            roll[i] = roll[i-1]+self.tmp_ff[i]\n        for i in range(self.pop_size):\n            xx = random.uniform(0, roll[self.tmp_size-1])\n            j = 0\n            while xx > roll[j]:\n                j += 1\n            self.pop[i, :] = self.tmp_pop[j, :]\n            self.ff[i] = self.tmp_ff[j]\n\n    def mutation(self):\n        for i in range(self.tmp_size):\n            flag = False\n            for j in range(self.vehicle_num-1):\n                if random.random() < self.p_mutate:\n                    self.tmp_pop[i, j] = random.randint(0, self.target_num)\n                    flag = True\n            for j in range(self.target_num-1):\n                if random.random() < self.p_mutate:\n                    self.tmp_pop[i, self.vehicle_num+j -\n                                 1] = random.randint(0, self.target_num-j-1)\n                    flag = True\n            if flag:\n                self.tmp_ff[i] = self.fitness(self.tmp_pop[i, :])\n\n    def crossover(self):\n        new_pop = []\n        new_ff = []\n        new_size = 0\n        for i in range(0, self.pop_size, 2):\n            if random.random() < self.p_cross:\n                x1 = random.randint(0, self.vehicle_num-2)\n       
         x2 = random.randint(0, self.target_num-2)+self.vehicle_num\n                g1 = self.pop[i, :]\n                g2 = self.pop[i+1, :]\n                g1[x1:x2] = self.pop[i+1, x1:x2]\n                g2[x1:x2] = self.pop[i, x1:x2]\n                new_pop.append(g1)\n                new_pop.append(g2)\n                new_ff.append(self.fitness(g1))\n                new_ff.append(self.fitness(g2))\n                new_size += 2\n        self.tmp_size = self.pop_size+new_size\n        self.tmp_pop = np.zeros(\n            shape=(self.tmp_size, self.vehicle_num-1+self.target_num-1), dtype=np.int32)\n        self.tmp_pop[0:self.pop_size, :] = self.pop\n        self.tmp_pop[self.pop_size:self.tmp_size, :] = np.array(new_pop)\n        self.tmp_ff = np.zeros(self.tmp_size, dtype=float)\n        self.tmp_ff[0:self.pop_size] = self.ff\n        self.tmp_ff[self.pop_size:self.tmp_size] = np.array(new_ff)\n\n    def run(self):\n        print(\"GA start, pid: %s\" % os.getpid())\n        start_time = time.time()\n        cut = 0\n        count = 0\n        while count < 500:\n            self.crossover()\n            self.mutation()\n            self.selection()\n            new_cut = self.tmp_ff.max()\n            if cut < new_cut:\n                cut = new_cut\n                count = 0\n                gene = self.tmp_pop[np.argmax(self.tmp_ff)]\n            else:\n                count += 1\n\n        ins = np.zeros(self.target_num+1, dtype=np.int32)\n        seq = np.zeros(self.target_num, dtype=np.int32)\n        ins[self.target_num] = 1\n        for i in range(self.vehicle_num-1):\n            ins[gene[i]] += 1\n        rest = np.array(range(1, self.target_num+1))\n        for i in range(self.target_num-1):\n            seq[i] = rest[gene[i+self.vehicle_num-1]]\n            rest = np.delete(rest, gene[i+self.vehicle_num-1])\n        seq[self.target_num-1] = rest[0]\n        task_assignment = [[] for i in range(self.vehicle_num)]\n        i = 0  # index of 
vehicle\n        pre = 0  # index of last target\n        post = 0  # index of ins/seq\n        t = 0\n        reward = 0\n        while i < self.vehicle_num:\n            if ins[post] > 0:\n                i += 1\n                ins[post] -= 1\n                pre = 0\n                t = 0\n            else:\n                t += self.targets[pre, 3]\n                past = self.map[pre, seq[post]]/self.vehicles_speed[i]\n                t += past\n                if t < self.time_lim:\n                    task_assignment[i].append(seq[post])\n                    reward += self.targets[seq[post], 2]\n                pre = seq[post]\n                post += 1\n        print(\"GA result:\", task_assignment)\n        end_time = time.time()\n        print(\"GA time:\", end_time - start_time)\n        return task_assignment, end_time - start_time\n\n"
  },
  {
    "path": "large_size_result.csv",
    "content": ",aco_reward,aco_time,ga_reward,ga_time,pso_reward,pso_time\n0,296,908.7814054489136,247,55.92802929878235,263,475.8543794155121\n1,291,915.7366240024567,259,66.25442147254944,263,472.1400876045227\n2,292,917.8597526550293,254,74.90953588485718,263,474.662939786911\n3,289,929.387636423111,260,103.34872436523438,263,478.1414213180542\n4,289,923.429899930954,251,60.22564744949341,263,468.2993767261505\n5,288,921.4861361980438,256,101.24155569076538,263,480.2911822795868\n6,293,899.2834107875824,255,80.45500588417053,263,467.8873429298401\n7,289,920.9990880489349,258,104.65078663825989,263,473.22990322113037\n8,289,915.464262008667,252,55.428141355514526,263,470.7831304073334\n9,292,908.4859659671783,242,54.579482316970825,263,468.63810992240906\n10,273,951.7187411785126,249,53.21723532676697,267,471.1878535747528\n11,273,967.2745745182037,265,127.98299217224121,267,489.7042224407196\n12,270,974.3173124790192,260,195.45258331298828,267,495.3276345729828\n13,275,963.2233729362488,246,50.30011963844299,267,470.9858467578888\n14,271,962.010968208313,248,73.91375541687012,267,471.8012545108795\n15,271,962.2612085342407,252,77.44514584541321,267,481.26786375045776\n16,267,945.5351057052612,245,58.64225435256958,267,477.9602701663971\n17,277,948.2806112766266,255,67.99747490882874,267,468.89854645729065\n18,271,967.6417164802551,252,129.3072385787964,267,478.41221261024475\n19,274,963.3537228107452,253,133.60208249092102,267,479.5689525604248\n20,290,966.2115695476532,250,122.1526083946228,260,489.93380999565125\n21,287,943.0081570148468,250,62.05885910987854,260,477.671777009964\n22,290,947.1903564929962,254,59.37166452407837,260,467.77480578422546\n23,286,959.2566442489624,276,91.52135014533997,260,477.249960899353\n24,288,954.6075274944305,268,48.26884913444519,260,464.37573313713074\n25,284,936.1187009811401,273,57.24583983421326,260,477.16314125061035\n26,286,954.3773159980774,259,82.08687591552734,260,475.90100502967834\n27,289,949.4377288818359,254,53.3
6060166358948,260,470.7586085796356\n28,290,952.0964720249176,273,65.8470516204834,260,474.4114272594452\n29,290,944.5154075622559,275,43.74000549316406,260,468.4188332557678\n30,327,994.9844787120819,294,81.43704128265381,301,474.0977404117584\n31,323,1018.6526775360107,273,114.55374956130981,301,498.5897653102875\n32,321,1009.1453545093536,285,94.25002026557922,301,486.3076343536377\n33,327,1019.1480383872986,278,55.407536029815674,301,488.1904435157776\n34,325,1007.914253950119,293,83.80115604400635,301,491.72301745414734\n35,325,1024.5869517326355,282,148.9419755935669,301,497.10984230041504\n36,323,1020.457249879837,295,108.69291090965271,301,496.7013669013977\n37,326,1013.691241979599,271,73.25992369651794,301,494.8483748435974\n38,323,1020.1873610019684,278,48.434046030044556,301,490.01100039482117\n39,325,1021.3731291294098,292,115.58995175361633,301,488.44017720222473\n40,278,976.9366610050201,264,138.9213318824768,273,499.79298758506775\n41,275,965.3231558799744,262,145.9869430065155,273,498.0558907985687\n42,280,962.862530708313,271,91.95361614227295,273,489.8705041408539\n43,279,959.0885939598083,236,54.67619323730469,273,486.26035809516907\n44,276,973.558468580246,255,105.7680230140686,273,491.402090549469\n45,279,967.0545673370361,248,83.93505239486694,273,485.08744978904724\n46,276,957.583824634552,239,93.60761904716492,273,494.0147354602814\n47,275,965.79727602005,264,104.30339407920837,273,489.8355438709259\n48,280,971.2357912063599,247,81.43815469741821,273,485.04322052001953\n49,278,973.0727701187134,254,91.01016688346863,273,489.8650426864624\n50,291,952.5716059207916,250,101.36161661148071,275,494.92200326919556\n51,294,946.210232257843,254,66.17070937156677,275,481.7474868297577\n52,294,946.330258846283,256,88.53258848190308,275,489.36784863471985\n53,294,940.2625517845154,248,63.820109605789185,275,485.3837275505066\n54,291,951.8322811126709,256,68.07165431976318,275,488.7123284339905\n55,299,958.7923038005829,265,68.89578294754028,275,491.053
96008491516\n56,296,951.3731620311737,245,73.54164481163025,275,486.9369945526123\n57,291,958.3713037967682,258,89.91634559631348,275,491.3002550601959\n58,290,945.0339353084564,246,56.91977286338806,275,481.18742632865906\n59,291,947.7742516994476,261,71.559574842453,275,481.191358089447\n60,305,981.3974587917328,291,92.71349763870239,269,488.7560610771179\n61,304,957.5966999530792,275,82.32165241241455,269,491.32160925865173\n62,307,968.3465480804443,266,79.71064329147339,269,492.24424958229065\n63,309,978.8897063732147,269,78.40533828735352,269,490.47363781929016\n64,305,976.8462386131287,263,96.9474766254425,269,497.1219482421875\n65,310,973.8594441413879,257,72.08272051811218,269,493.34489607810974\n66,306,964.727823972702,276,96.79535627365112,269,496.8235650062561\n67,309,980.2682957649231,271,110.70756220817566,269,489.6940174102783\n68,304,985.0895121097565,266,146.9937801361084,269,510.74099040031433\n69,304,970.6574778556824,259,80.50431251525879,269,480.4831213951111\n70,331,1002.6550228595734,276,71.99679160118103,282,486.46082282066345\n71,330,1068.6351492404938,297,123.51885652542114,282,506.458402633667\n72,332,1023.2077965736389,285,78.46024966239929,282,492.73976039886475\n73,331,1017.5172808170319,280,48.53227877616882,282,495.07808446884155\n74,332,1011.5874664783478,309,67.04316973686218,282,494.5928440093994\n75,330,1028.534333705902,274,72.79688119888306,282,492.562472820282\n76,332,996.4580583572388,310,69.33413505554199,282,489.1693527698517\n77,332,1006.4915940761566,293,63.24198341369629,282,486.631254196167\n78,332,1007.7951982021332,278,54.046175479888916,282,487.0864179134369\n79,327,1008.2880766391754,306,101.87895131111145,282,491.83132791519165\n80,342,1024.3724205493927,311,84.77551889419556,307,490.72554993629456\n81,346,1037.724690914154,321,93.26548743247986,307,497.81773042678833\n82,345,1045.6583635807037,300,86.76090788841248,307,484.0392985343933\n83,344,1043.5157623291016,310,91.16247344017029,307,490.33659172058105\n84,344,
1042.54065823555,322,59.16938662528992,307,484.3666524887085\n85,344,1039.1224558353424,301,86.92423415184021,307,493.7003331184387\n86,340,1043.6719121932983,320,77.55270719528198,307,483.9702203273773\n87,344,1042.995453596115,306,104.2813949584961,307,494.0151512622833\n88,347,1051.3341484069824,321,152.23194694519043,307,496.49487829208374\n89,349,1040.0653417110443,349,86.07370352745056,307,498.4700348377228\n90,327,981.0026612281799,277,82.25102162361145,310,486.8569030761719\n91,327,981.1793773174286,291,83.74289011955261,310,487.12745547294617\n92,327,976.752777338028,306,72.23682999610901,310,489.5701353549957\n93,326,980.6637990474701,300,54.929299116134644,310,483.11501002311707\n94,322,969.3974039554596,284,70.095294713974,310,484.54920268058777\n95,324,970.2287967205048,277,90.13793587684631,310,491.29523491859436\n96,323,978.8446981906891,279,76.42075634002686,310,490.77434039115906\n97,325,968.8402066230774,275,64.63985276222229,310,498.89036893844604\n98,324,979.2253255844116,286,160.1146640777588,310,496.08253359794617\n99,325,982.7923681735992,275,45.25343370437622,310,482.38406586647034\n"
  },
  {
    "path": "medium_size_result.csv",
    "content": ",aco_reward,aco_time,ga_reward,ga_time,pso_reward,pso_time\n0,216,448.3831191062927,206,49.305721282958984,193,148.66212391853333\n1,215,450.0390589237213,201,47.92679977416992,193,148.18085885047913\n2,215,446.2680039405823,197,44.56689190864563,193,147.05394744873047\n3,214,439.6638705730438,206,30.478964805603027,193,145.65527486801147\n4,216,446.25123286247253,213,49.142014026641846,193,146.94575667381287\n5,215,449.3586504459381,191,35.995574951171875,193,147.08190488815308\n6,215,444.68290305137634,199,47.1325957775116,193,149.1070830821991\n7,215,440.68888783454895,191,40.503169775009155,193,146.32232356071472\n8,216,444.11085629463196,207,30.60081696510315,193,145.07146883010864\n9,216,442.4932496547699,203,41.649266719818115,193,148.24117517471313\n10,181,394.1740880012512,182,41.235496282577515,179,145.92154598236084\n11,179,398.2101173400879,182,58.84644627571106,179,149.12411260604858\n12,182,396.67690896987915,185,28.317508935928345,179,146.20850467681885\n13,181,402.9303925037384,178,50.20690608024597,179,150.27634143829346\n14,183,399.2702867984772,198,61.198126792907715,179,150.24325561523438\n15,181,392.7786009311676,193,27.70851445198059,179,145.60873198509216\n16,186,394.2873070240021,187,40.75690317153931,179,146.5531108379364\n17,182,400.4948661327362,189,68.48983502388,179,151.8164780139923\n18,181,396.15389823913574,176,36.01111102104187,179,146.96473789215088\n19,183,407.0688331127167,177,74.86308264732361,179,152.32446765899658\n20,257,535.1680011749268,242,45.81318497657776,257,162.57035994529724\n21,248,477.0249879360199,257,59.37494111061096,257,151.88625025749207\n22,248,481.47851395606995,248,73.29448699951172,257,153.8765308856964\n23,248,476.1490092277527,251,46.956031799316406,257,150.61592316627502\n24,248,472.28077578544617,245,35.464972496032715,257,147.88373970985413\n25,247,465.2167932987213,248,33.261672258377075,257,147.64871048927307\n26,248,465.4674003124237,243,46.813090562820435,257,149.02625226974487\n27,2
53,475.7856402397156,253,62.99959635734558,257,152.69138717651367\n28,248,471.5851991176605,249,62.63740587234497,257,151.3696005344391\n29,248,468.71898126602173,242,40.11980128288269,257,149.03825902938843\n30,196,404.4883544445038,190,26.050060272216797,194,144.73951077461243\n31,200,409.98085474967957,196,49.781771659851074,194,148.48935222625732\n32,197,411.36246395111084,197,45.22318196296692,194,149.27837538719177\n33,197,413.2939670085907,198,49.13954973220825,194,149.69017028808594\n34,200,408.55559182167053,208,41.79141283035278,194,149.21917819976807\n35,196,409.9758207798004,189,27.64315676689148,194,147.62873792648315\n36,197,408.1155550479889,194,47.21198105812073,194,147.90398573875427\n37,199,412.9177339076996,217,75.3260293006897,194,152.389484167099\n38,195,413.65162658691406,197,56.222227573394775,194,148.9140920639038\n39,196,406.7831847667694,196,33.56608486175537,194,146.55531525611877\n40,162,376.0339472293854,166,32.595866680145264,170,144.9636528491974\n41,162,377.5476269721985,183,36.15443444252014,170,147.16523098945618\n42,162,380.8216028213501,169,73.12481117248535,170,151.93030548095703\n43,163,377.19195222854614,167,26.03180742263794,170,146.38755416870117\n44,162,375.4290704727173,165,31.497214555740356,170,145.87686395645142\n45,162,381.92375802993774,177,65.6746084690094,170,151.02717685699463\n46,162,377.6920804977417,167,41.89390730857849,170,149.11691117286682\n47,162,379.4388999938965,168,47.881861448287964,170,146.81237983703613\n48,163,386.09958243370056,176,90.63225603103638,170,154.9767725467682\n49,162,376.02294278144836,178,49.68327236175537,170,147.34447741508484\n50,163,395.1006145477295,170,60.02331042289734,166,149.5303943157196\n51,164,398.0407438278198,173,58.35295605659485,166,148.20625829696655\n52,161,387.7398009300232,160,36.16767406463623,166,146.93824124336243\n53,160,397.3074097633362,181,87.42074513435364,166,151.60932040214539\n54,161,394.0010869503021,161,35.39233636856079,166,145.9512619972229\n55,168,397.
33205008506775,180,93.66990065574646,166,153.4657781124115\n56,165,391.67463517189026,177,34.15181350708008,166,146.88838911056519\n57,160,394.11439633369446,179,75.18911981582642,166,150.38912177085876\n58,163,391.3193950653076,169,50.267693281173706,166,146.59030938148499\n59,161,389.8388180732727,169,43.60353207588196,166,146.73621797561646\n60,214,458.1954791545868,224,53.538702964782715,213,148.77755308151245\n61,216,457.9216077327728,217,74.3746817111969,213,172.45012044906616\n62,210,419.3990144729614,198,35.978588342666626,213,148.06180047988892\n63,219,429.85972452163696,218,87.86180424690247,213,153.94083642959595\n64,214,424.77522015571594,209,65.07919359207153,213,151.17670392990112\n65,218,418.9645538330078,201,33.109193086624146,213,146.59753608703613\n66,213,419.9968156814575,211,60.90353274345398,213,151.33484530448914\n67,213,419.75107407569885,219,41.44067192077637,213,148.6664433479309\n68,215,418.3867540359497,215,53.756091594696045,213,149.20613074302673\n69,217,426.778751373291,207,55.369181394577026,213,150.33477354049683\n70,199,410.5372402667999,215,60.27366662025452,208,149.8002016544342\n71,198,409.2937562465668,204,59.25821495056152,208,151.39743947982788\n72,196,409.5259928703308,215,35.602235317230225,208,145.2328040599823\n73,201,406.344420671463,223,48.74324607849121,208,148.4908196926117\n74,199,408.3418302536011,220,70.71654105186462,208,151.31224751472473\n75,198,404.72881293296814,194,29.9748272895813,208,145.87402033805847\n76,200,407.07944345474243,199,32.814154624938965,208,148.0822048187256\n77,198,410.07060742378235,221,83.80286979675293,208,152.27385711669922\n78,198,403.81956219673157,205,46.65635085105896,208,149.30409383773804\n79,197,410.29728984832764,225,53.520286560058594,208,148.7795009613037\n80,152,380.2354018688202,157,35.35539889335632,169,147.90625596046448\n81,150,384.5501811504364,159,48.30092000961304,169,149.15482306480408\n82,151,376.554048538208,157,25.061031818389893,169,146.199316740036\n83,152,379.95834
77973938,160,32.2420814037323,169,147.88309359550476\n84,154,381.87709069252014,159,46.78597569465637,169,148.42677807807922\n85,150,384.82411456108093,176,71.57334589958191,169,152.0538854598999\n86,154,380.4787724018097,153,32.610721588134766,169,145.2583565711975\n87,154,381.19555377960205,160,35.47093892097473,169,146.1291468143463\n88,154,375.84812235832214,158,32.85135459899902,169,146.2271864414215\n89,150,377.0024824142456,172,31.29044246673584,169,146.2692093849182\n90,188,408.0042040348053,172,33.111896276474,172,146.74985241889954\n91,188,415.29174041748047,183,46.34089708328247,172,148.31623196601868\n92,186,408.48268270492554,171,65.71342968940735,172,151.4460847377777\n93,187,408.3678331375122,173,50.35272192955017,172,149.03712511062622\n94,188,410.88290190696716,177,33.343971252441406,172,147.3882360458374\n95,187,414.69072556495667,181,56.616501331329346,172,149.22642374038696\n96,185,413.3330612182617,178,45.60746383666992,172,148.60242438316345\n97,187,406.33209466934204,181,42.515186071395874,172,146.75966262817383\n98,187,405.92414903640747,183,27.310251474380493,172,145.60900115966797\n99,186,415.5781035423279,190,51.28600215911865,172,146.98746609687805\n"
  },
  {
    "path": "pso.py",
    "content": "# coding: utf-8\nimport numpy as np\nimport random\nimport math\nimport cmath\nimport time\nimport os\n# ----------------------Optimization scheme----------------------------------\n# Optimization ideas：\n# 1. Increase the convergence factor k；\n# 2. Dynamic change of inertia factor W；\n# 3. Using PSO local search algorithm(Ring method)\n# 4. The probability of position variation is added\n# ----------------------Set PSO Parameter---------------------------------\n\n\nclass PSO():\n    def __init__(self, uav_num, target_num, targets, vehicles_speed, time_lim):\n        self.uav_num = uav_num\n        self.dim = target_num\n        self.targets = targets\n        self.vehicles_speed = vehicles_speed\n        self.time_all = time_lim\n        self.pN = 2*(self.uav_num+self.dim)  # Number of particles\n        self.max_iter = 0  # Number of iterations\n        # Target distance list (dim+1）*（dim+1)\n        self.Distance = np.zeros((target_num+1, target_num+1))\n        self.Value = np.zeros(target_num+1)   # Value list of targets 1*dim+1\n        self.Stay_time = []\n        # UAV flight speed matrix\n        self.w = 0.8\n        self.c1 = 2\n        self.c2 = 2\n        self.r1 = 0.6\n        self.r2 = 0.3\n        self.k = 0   # Convergence factor\n        self.wini = 0.9\n        self.wend = 0.4\n\n        self.X = np.zeros((self.pN, self.dim+self.uav_num-1)\n                          )  # Position of all particles\n        self.V = np.zeros((self.pN, self.dim+self.uav_num-1)\n                          )  # Velocity of all particles\n        # The historical optimal position of each individual\n        self.pbest = np.zeros((self.pN, self.dim+self.uav_num-1))\n        self.gbest = np.zeros((1, self.dim+self.uav_num-1))\n        # Global optimal position\n        self.gbest_ring = np.zeros((self.pN, self.dim+self.uav_num-1))\n        # Historical optimal fitness of each individual\n        self.p_fit = np.zeros(self.pN)\n        self.fit = 0  # 
Global optimal fitness\n        self.ring = []\n        self.ring_fit = np.zeros(self.pN)\n        # variation parameter\n        self.p1 = 0.4  # Probability of mutation\n        self.p2 = 0.5  # Proportion of individuals with variation in population\n        self.p3 = 0.5  # Proportion of locations where variation occurs\n        self.TEST = []\n        self.test_num = 0\n        self.uav_best = []\n\n        self.time_out = np.zeros(self.uav_num)\n        \n        self.cal_time = 0\n    # ------------------Get Initial parameter------------------\n\n    def fun_get_initial_parameter(self):\n        self.max_iter = 40*(self.uav_num+self.dim)\n        if self.max_iter > 4100:\n            self.max_iter = 4100\n\n        # Get Stay_time Arrary & Distance Arrary & Value Arrary\n        Targets = self.targets\n        self.Stay_time = Targets[:, 3]\n        self.Distance = np.zeros((self.dim+1, self.dim+1))\n        self.Value = np.zeros(self.dim+1)\n        for i in range(self.dim+1):\n            self.Value[i] = Targets[i, 2]\n            for j in range(i):\n                self.Distance[i][j] = (\n                    Targets[i, 0]-Targets[j, 0])*(Targets[i, 0]-Targets[j, 0])\n                self.Distance[i][j] = self.Distance[i][j] + \\\n                    (Targets[i, 1]-Targets[j, 1])*(Targets[i, 1]-Targets[j, 1])\n                self.Distance[i][j] = math.sqrt(self.Distance[i][j])\n                self.Distance[j][i] = self.Distance[i][j]\n    # ------------------Transfer_Function---------------------\n\n    def fun_Transfer(self, X):\n        # Converting continuous sequence X into discrete sequence X_path\n        X1 = X[0:self.dim]\n        X_path = []\n        l1 = len(X1)\n        for i in range(l1):\n            m = X1[i]*(self.dim-i)\n            m = math.floor(m)\n            X_path.append(m)\n        # Converting the continuous interpolation sequence X into discrete interpolation sequence X_rank\n        X2 = X[self.dim:]\n        l1 = len(X2)\n      
  X_rank = []\n        for i in range(l1):\n\n            m = X2[i]*(self.dim+1)\n\n            m1 = math.floor(m)\n            X_rank.append(m1)\n        # Rank and Complement\n        c = sorted(X_rank)\n        l1 = len(c)\n        Rank = []\n        Rank.append(0)\n        for i in range(l1):\n            Rank.append(c[i])\n        Rank.append(self.dim)\n        # Get Separate_Arrary\n        Sep = []\n        for i in range(l1+1):\n            sep = Rank[i+1]-Rank[i]\n            Sep.append(sep)\n        return X_path, Sep\n\n    # -------------------Obtain the Real Flight Path Sequence of Particles--------------------------\n    def position(self, X):\n        Position_All = list(range(1, self.dim+1))\n        X2 = []\n        for i in range(self.dim):\n            m1 = X[i]\n            m1 = int(m1)\n            X2.append(Position_All[m1])\n            del Position_All[m1]\n        return X2\n    # ---------------------Fitness_Computing Function-----------------------------\n\n    def function(self, X):\n        X_path, Sep = self.fun_Transfer(X)\n\n        # Obtain the Real Flight Path Sequence of Particles\n        X = self.position(X_path)\n        # Get the search sequence of each UAV\n        UAV = []\n        l = 0\n        for i in range(self.uav_num):\n            UAV.append([])\n            k = Sep[i]\n            for j in range(k):\n                UAV[i].append(X[l])\n                l = l+1\n\n        # Calculate Fitness\n        fitness = 0\n        for i in range(self.uav_num):\n            k = Sep[i]\n            t = 0\n            for j in range(k):\n                m1 = UAV[i][j]\n\n                if j == 0:\n                    t = t+self.Distance[0, m1] / \\\n                        self.vehicles_speed[i]+self.Stay_time[m1]\n                else:\n                    m1 = UAV[i][j]\n                    m2 = UAV[i][j-1]\n                    t = t+self.Distance[m1][m2] / \\\n                        
self.vehicles_speed[i]+self.Stay_time[m1]\n                if t <= self.time_all:\n                    fitness = fitness+self.Value[m1]\n        return fitness\n    # ----------------------------variation-------------------------------------------\n\n    def variation_fun(self):\n        p1 = np.random.uniform(0, 1)  # Probability of mutation\n        if p1 < self.p1:\n            for i in range(self.pN):\n                # Proportion of individuals with variation in population\n                p2 = np.random.uniform(0, 1)\n                if p2 < self.p2:\n                    # Numbers of locations where variation occurs\n                    m = int(self.p3*(self.dim+self.uav_num-1))\n                    for j in range(m):\n                        replace_position = math.floor(\n                            np.random.uniform(0, 1)*(self.dim+self.uav_num-1))\n                        replace_value = np.random.uniform(0, 1)\n                        self.X[i][replace_position] = replace_value\n            # Update pbest & gbest\n            for i in range(self.pN):\n                temp = self.function(self.X[i])\n                self.ring_fit[i] = temp\n                if temp > self.p_fit[i]:\n                    self.p_fit[i] = temp\n                    self.pbest[i] = self.X[i]\n                    # Update gbest\n                    if self.p_fit[i] > self.fit:\n                        self.gbest = self.X[i]\n                        self.fit = self.p_fit[i]\n\n    # ---------------------Population Initialization----------------------------------\n\n    def init_Population(self):\n        # Initialization of position(X), speed(V), history optimal(pbest) and global optimal(gbest)\n        for i in range(self.pN):\n            x = np.random.uniform(0, 1, self.dim+self.uav_num-1)\n            self.X[i, :] = x\n            v = np.random.uniform(0, 0.4, self.dim+self.uav_num-1)\n            self.V[i, :] = v\n            self.pbest[i] = self.X[i]\n\n            tmp = 
self.function(self.X[i])\n            self.p_fit[i] = tmp\n            if tmp > self.fit:\n                self.fit = tmp\n                self.gbest = self.X[i]\n        # Calculate the convergence factor k\n        phi = self.c1+self.c2\n        k = abs(phi*phi-4*phi)\n        k = cmath.sqrt(k)\n        k = abs(2-phi-k)\n        k = 2/k\n        self.k = k\n        # Initialize ring_matrix\n        for i in range(self.pN):\n            self.ring.append([])\n            self.ring[i].append(i)\n        # Initialize test_set\n        self.TEST = np.zeros((self.test_num, self.dim+self.uav_num-1))\n        for i in range(self.test_num):\n            test = np.random.uniform(0, 1, self.dim+self.uav_num-1)\n            self.TEST[i, :] = test\n\n    # ----------------------Update Particle Position----------------------------------\n\n    def iterator(self):\n        fitness = []\n        fitness_old = 0\n        k = 0\n        for t in range(self.max_iter):\n            w = (self.wini-self.wend)*(self.max_iter-t)/self.max_iter+self.wend\n            self.w = w\n            # Variation\n            self.variation_fun()\n            l1 = len(self.ring[0])\n            # Local PSO algorithm\n            # Update ring_arrary\n            if l1 < self.pN:\n                if not(t % 2):\n                    k = k+1\n                    for i in range(self.pN):\n                        m1 = i-k\n                        if m1 < 0:\n                            m1 = self.pN+m1\n                        m2 = i+k\n                        if m2 > self.pN-1:\n                            m2 = m2-self.pN\n                        self.ring[i].append(m1)\n                        self.ring[i].append(m2)\n                # Update gbest_ring\n                l_ring = len(self.ring[0])\n                for i in range(self.pN):\n                    fitness1 = 0\n                    for j in range(l_ring):\n                        m1 = self.ring[i][j]\n                        fitness2 = 
self.ring_fit[m1]\n                        if fitness2 > fitness1:\n                            self.gbest_ring[i] = self.X[m1]\n                            fitness1 = fitness2\n                # Update velocity\n                for i in range(self.pN):\n                    self.V[i] = self.k*(self.w * self.V[i] + self.c1 * self.r1 * (self.pbest[i] - self.X[i])) + \\\n                        self.c2 * self.r2 * (self.gbest_ring[i] - self.X[i])\n                # Update position\n                    self.X[i] = self.X[i] + self.V[i]\n\n            # Global PSO algorithm\n            else:\n                # Update velocity\n                for i in range(self.pN):\n                    self.V[i] = self.k*(self.w * self.V[i] + self.c1 * self.r1 * (self.pbest[i] - self.X[i])) + \\\n                        self.c2 * self.r2 * (self.gbest - self.X[i])\n                # Update position\n                    self.X[i] = self.X[i] + self.V[i]\n\n            # Set position boundary\n            for i in range(self.pN):\n                for j in range(self.dim+self.uav_num-1):\n                    if self.X[i][j] >= 1:\n                        self.X[i][j] = 0.999\n                    if self.X[i][j] < 0:\n                        self.X[i][j] = 0\n            # Update pbest & gbest\n            for i in range(self.pN):\n                temp = self.function(self.X[i])\n                self.ring_fit[i] = temp\n                if temp > self.p_fit[i]:\n                    self.p_fit[i] = temp\n                    self.pbest[i] = self.X[i]\n                    # Update gbest\n                    if self.p_fit[i] > self.fit:\n                        self.gbest = self.X[i]\n                        self.fit = self.p_fit[i]\n                        self.uav_best = self.fun_Data()\n\n            # print\n            fitness.append(self.fit)\n            if self.fit == fitness_old:\n                continue\n            else:\n                fitness_old = self.fit\n        return 
fitness\n\n    # ---------------------Data_Processing Function---------------------------\n    def fun_Data(self):\n        X_path, Sep = self.fun_Transfer(self.gbest)\n        # Obtain the Real Flight Path Sequence of Particles\n        X = self.position(X_path)\n        # Get the search sequence of each UAV\n        UAV = []\n        l = 0\n        for i in range(self.uav_num):\n            UAV.append([])\n            k = Sep[i]\n            for j in range(k):\n                UAV[i].append(X[l])\n                l = l+1\n        # Calculate UAV_Out\n        UAV_Out = []\n        for i in range(self.uav_num):\n            k = Sep[i]\n            t = 0\n            UAV_Out.append([])\n            for j in range(k):\n                m1 = UAV[i][j]\n                if j == 0:\n                    t = t+self.Distance[0, m1] / \\\n                        self.vehicles_speed[i]+self.Stay_time[m1]\n                else:\n                    m2 = UAV[i][j-1]\n                    t = t+self.Distance[m2][m1] / \\\n                        self.vehicles_speed[i]+self.Stay_time[m1]\n                if t <= self.time_all:\n                    UAV_Out[i].append(m1)\n                    self.time_out[i] = t\n        return UAV_Out\n    # ---------------------TEST Function------------------------------\n\n    def fun_TEST(self):\n        Test_Value = []\n        for i in range(self.test_num):\n            Test_Value.append(self.function(self.TEST[i]))\n        return Test_Value\n    # ---------------------Main----------------------------------------\n\n    def run(self):\n        print(\"PSO start, pid: %s\" % os.getpid())\n        start_time = time.time()\n        self.fun_get_initial_parameter()\n        self.init_Population()\n        fitness = self.iterator()\n        end_time = time.time()\n        #self.cal_time  = end_time - start_time\n        #self.task_assignment = self.uav_best\n        print(\"PSO result:\", self.uav_best)\n        print(\"PSO time:\", end_time - 
start_time)\n        return self.uav_best, end_time - start_time\n        \n\n"
  },
  {
    "path": "readme.md",
    "content": "# Multi-UAV Task Assignment Benchmark\n## 多无人机任务分配算法测试基准\n\n## Introduction\nA benchmark for multi-UAV task assignment is presented in order to evaluate different algorithms. An extended Team Orienteering Problem is modeled for a kind of multi-UAV task assignment problem. Three intelligent algorithms, i.e., Genetic Algorithm, Ant Colony Optimization and Particle Swarm Optimization are implemented to solve the problem. A series of experiments with different settings are conducted to evaluate three algorithms. The modeled problem and the evaluation results constitute a benchmark, which can be used to evaluate other algorithms used for multi-UAV task assignment problems.\n\nNotice that three algorithms run at three CPU cores respectively, which means that there is no parallel optimization in this benchmark.\n\n<img src=\"./task_pic/large/ACO-1-1.png\" width=\"640\" height=\"368\" />  \n\n<img src=\"./mean_reward_large.png\" width=\"640\" height=\"368\" />  \n\n<img src=\"./mean_time_large.png\" width=\"640\" height=\"368\" />  \n\nPlease refer to the paper to see more detail.\n\nK. Xiao, J. Lu, Y. Nie, L. Ma, X. Wang and G. Wang, \"A Benchmark for Multi-UAV Task Assignment of an Extended Team Orienteering Problem,\" 2022 China Automation Congress (CAC), Xiamen, China, 2022, pp. 6966-6970, doi: 10.1109/CAC57257.2022.10054991.\n\nArXiv preprint **[ arXiv:2009.00363](https://arxiv.org/abs/2009.00363)** \n\n\n## Usage\n\n### 1. Algorithm input and output\n\nAlgorithm input includes vehicle number (scalar),  speeds of vehicles ($n\\times1$ array), target  number (scalar $n$),  targets ($(n+1)\\times4$ array, the first line is depot, the first column is x position, the second column is y position, the third column is reward and the fourth column is time consumption to finish the mission), time limit (scalar).  
The code below is the initialization of the class GA in `ga.py`.\n\n```python\ndef __init__(self, vehicle_num, vehicles_speed, target_num, targets, time_lim)\n```\n\nThere should be a function called `run()` in the algorithm class, and the function should return task assignment plan(array, e.g. [[28, 19, 11], [25, 22, 7, 16, 17, 23], [21, 26, 12, 9, 6, 3], [5, 15, 1], [18, 20, 29]], each subset is a vehicle path) and computational time usage (scalar). \n\n### 2. Evaluate\n\nYou can replace one algorithm  below with another algorithm in `evaluate.py`, and then `python evaluate.py`. If you don't want to evaluate three algorithm together, you should modify the code properly( this is easy).    \n\n```python\nga = GA(vehicle_num,env.vehicles_speed,target_num,env.targets,env.time_lim)\naco = ACO(vehicle_num,target_num,env.vehicles_speed,env.targets,env.time_lim)\npso = PSO(vehicle_num,target_num ,env.targets,env.vehicles_speed,env.time_lim)\nga_result=p.apply_async(ga.run)\naco_result=p.apply_async(aco.run)\npso_result=p.apply_async(pso.run)\np.close()\np.join()\nga_task_assignmet = ga_result.get()[0]\nenv.run(ga_task_assignmet,'GA',i+1,j+1)\nre_ga[i].append((env.total_reward,ga_result.get()[1]))\nenv.reset()\naco_task_assignmet = aco_result.get()[0]\nenv.run(aco_task_assignmet,'ACO',i+1,j+1)\nre_aco[i].append((env.total_reward,aco_result.get()[1]))\nenv.reset()\npso_task_assignmet = pso_result.get()[0]\nenv.run(pso_task_assignmet,'PSO',i+1,j+1)\nre_pso[i].append((env.total_reward,pso_result.get()[1]))\n```\n\n### 3. About reinforcement learning\n\nIn `Env()` in `evaluate.py`, function `step` is used for reinforcement learning. Because this is still being developed, we cannot supply a demo. If your algorithm is reinforcement learning, you can try to train it with `Env()`. 
Your pull request and issue are welcome.\n\n## Enhancement\n\nThis [repository](https://github.com/dietmarwo/Multi-UAV-Task-Assignment-Benchmark) does great enhancement and you can use it for high performance. Thanks to [dietmarwo](https://github.com/dietmarwo) for the nice work.\n\n1) GA uses [numba](https://numba.pydata.org/) for a dramatic speedup. Parameters are adapted so that the\n    execution time remains the same: popsize 50 -> 300, iterations 500 -> 6000\n    For this reason GA now performs much better compared to the original version.\n\n2) Experiments are configured so that wall time for small size is balanced. This means:\n    increased effort for GA, decreased effort for ACO. For medium / large \n    problem size you see which algorithms scale badly: Increase execution time superlinear\n    in relation to the problem size. Avoid these for large problems. \n\n3) Adds a standard continuous optimization algorithm: [BiteOpt](https://github.com/avaneev/biteopt) \n    from Aleksey Vaneev - using the same fitness function as GA.py. \n    BiteOpt is the only algorithm included which works well with a large problem size. \n    It is by far the simplest implementation, only the fitness function needs\n    to be coded, since we can apply a continuous optimization library \n    [fcmaes](https://github.com/dietmarwo/fast-cma-es). Execute \"pip install fcmaes\" to use it. \n\n4) Uses NestablePool to enable BiteOpt multiprocessing: Many BiteOpt optimization runs\n   are performed in parallel and the best result is returned. Set workers=1 \n   if you want to test BiteOpt single threaded. \n   \n5) All results are created using an AMD 5950x 16 core processor\n    utilizing all cores: 29 parallel BiteOpt threads, the other 3 algorithms remain single threaded. 
\n\n6) Added test_bite.py where you can monitor the progress of BiteOpt applied to the problem.\n\n7) Added test_mode.py where you can monitor the progress of fcmaes-MODE applied to the problem and compare it\n   to BiteOpt for the same instance. fcmaes-MODE is a multi-objective optimizer applied to a \n   multi-objective variant of the problem.\n   Objectives are: reward (to be maximized), maximal time (to be minimized), energy (to be minimized).\n   The maximal time constraint from the single objective case is still valid.\n   Energy consumption is approximated by `sum(dt*v*v)`\n\n\n \n\n"
  },
  {
    "path": "small_size_result.csv",
    "content": ",aco_reward,aco_time,ga_reward,ga_time,pso_reward,pso_time\n0,89,88.16137266159058,119,12.685494184494019,107,13.414397716522217\n1,88,91.05341100692749,106,9.964252710342407,107,16.068947315216064\n2,89,91.91174960136414,109,14.250640153884888,107,16.815466165542603\n3,89,92.22553205490112,110,21.893151998519897,107,17.22126531600952\n4,89,92.13908076286316,101,10.614777565002441,107,16.337905406951904\n5,89,92.50885510444641,121,15.014703512191772,107,17.107726097106934\n6,90,92.41064667701721,109,17.080315113067627,107,17.380303859710693\n7,89,92.84331011772156,119,20.32515835762024,107,16.93877387046814\n8,88,92.46119904518127,114,19.172377824783325,107,16.968574047088623\n9,90,94.00208711624146,127,21.729254484176636,107,17.066615104675293\n10,96,89.81863832473755,117,22.284271717071533,107,17.06852388381958\n11,99,88.96507263183594,113,19.54985523223877,107,17.087736129760742\n12,96,88.38604950904846,109,14.674241304397583,107,16.747975826263428\n13,97,89.2066752910614,110,11.188013553619385,107,16.291066884994507\n14,96,89.45146775245667,115,18.54390525817871,107,17.09095311164856\n15,97,91.43690013885498,118,34.3544921875,107,17.089507579803467\n16,101,87.81503558158875,116,16.678542375564575,107,17.032145738601685\n17,95,87.75125932693481,116,13.018948793411255,107,16.583208084106445\n18,97,87.48667025566101,103,11.756951570510864,107,16.502718687057495\n19,96,89.22999024391174,117,23.240997076034546,107,17.057995319366455\n20,96,90.63653469085693,113,20.940587043762207,102,17.042691230773926\n21,91,90.38828539848328,104,17.667251586914062,102,17.154610633850098\n22,87,90.0098135471344,98,17.66662073135376,102,16.96206521987915\n23,87,89.37335777282715,97,12.019228219985962,102,16.48302674293518\n24,85,90.64067506790161,114,23.917268753051758,102,17.08442449569702\n25,90,91.06858992576599,108,20.25761079788208,102,17.10637593269348\n26,87,89.10331749916077,103,10.527708768844604,102,16.272404670715332\n27,86,90.21255588531494,111,17.25125241
279602,102,17.043729782104492\n28,88,90.96662783622742,111,24.268309354782104,102,17.005598783493042\n29,88,90.03387355804443,109,13.798240184783936,102,16.55376434326172\n30,67,85.81624722480774,75,19.326607704162598,72,17.03825044631958\n31,67,85.53707933425903,80,22.211409330368042,72,17.13922429084778\n32,67,85.78650307655334,79,19.998062133789062,72,17.11928629875183\n33,67,84.82013392448425,81,11.410378456115723,72,16.370493173599243\n34,67,86.44931316375732,79,28.120026111602783,72,17.011059761047363\n35,67,84.43776869773865,78,14.097299098968506,72,16.78164315223694\n36,67,84.38073658943176,75,10.875871896743774,72,16.290704488754272\n37,67,85.27193593978882,77,18.085707426071167,72,17.11751627922058\n38,67,86.35783362388611,77,20.6546049118042,72,17.204835653305054\n39,67,84.17668747901917,70,9.783939123153687,72,16.234663009643555\n40,90,81.11320877075195,109,25.354978561401367,94,17.01652693748474\n41,92,81.4474401473999,115,25.54797601699829,94,17.27031111717224\n42,90,82.45976829528809,105,21.565988302230835,94,17.12051248550415\n43,86,81.89453530311584,116,25.38959002494812,94,17.148070096969604\n44,90,80.1114649772644,108,12.730968475341797,94,16.551698684692383\n45,88,80.46094584465027,112,16.600263595581055,94,17.025696277618408\n46,92,80.10696768760681,108,10.806996822357178,94,16.190462350845337\n47,94,82.78089380264282,106,23.92259168624878,94,17.245839834213257\n48,87,82.29063534736633,110,17.997102975845337,94,17.09163522720337\n49,94,79.32447719573975,106,10.254770755767822,94,16.159439086914062\n50,63,81.13122916221619,85,19.498087406158447,80,17.062880754470825\n51,64,78.95552730560303,73,11.968117952346802,80,16.42030930519104\n52,64,80.55506777763367,83,19.04611301422119,80,17.044256925582886\n53,64,80.00666880607605,79,11.748615503311157,80,16.27301836013794\n54,66,80.77283143997192,88,11.66093134880066,80,16.391525506973267\n55,64,82.00660061836243,88,24.136553049087524,80,17.06270742416382\n56,66,81.55947065353394,85,16.429919242858887,
80,16.999276161193848\n57,63,81.114262342453,87,22.340787172317505,80,17.043132543563843\n58,63,81.04138517379761,78,18.89263892173767,80,17.08504867553711\n59,64,80.52739334106445,93,13.616387367248535,80,16.57269597053528\n60,82,86.7540352344513,99,13.066121578216553,94,16.544466018676758\n61,81,87.89562153816223,104,25.433122873306274,94,17.053762197494507\n62,82,86.26548409461975,99,12.918514966964722,94,16.66313886642456\n63,83,87.24347186088562,106,21.98454189300537,94,17.103214263916016\n64,82,86.60797476768494,98,12.473426580429077,94,16.57561159133911\n65,84,87.11871242523193,103,10.223820686340332,94,16.257148027420044\n66,81,88.71003365516663,94,24.597357988357544,94,17.143748998641968\n67,82,86.51209807395935,96,14.048967838287354,94,16.72418999671936\n68,81,86.93004393577576,91,17.69091010093689,94,17.053284645080566\n69,81,87.69031119346619,89,13.707175254821777,94,16.6680109500885\n70,79,85.87792015075684,86,9.261616945266724,91,16.168455839157104\n71,79,88.14101362228394,96,23.5689058303833,91,17.132070302963257\n72,79,88.25272393226624,96,26.78891372680664,91,17.096915006637573\n73,79,89.53712511062622,96,28.207839250564575,91,17.064720630645752\n74,79,87.57802844047546,97,14.012932777404785,91,16.740302085876465\n75,79,87.26567029953003,86,12.413164615631104,91,16.392486333847046\n76,79,86.66206288337708,89,11.749331951141357,91,16.469265460968018\n77,79,87.8359739780426,85,17.515364170074463,91,17.102705717086792\n78,80,88.0450918674469,94,23.927947282791138,91,17.082552194595337\n79,79,87.65122652053833,93,17.091141939163208,91,17.03953456878662\n80,74,84.19559216499329,91,26.049842834472656,83,16.9984188079834\n81,75,82.51725816726685,85,16.961402416229248,83,17.110384464263916\n82,75,82.8316662311554,89,23.545618057250977,83,17.114362478256226\n83,73,82.98265886306763,84,23.422379732131958,83,17.07512879371643\n84,73,81.70284986495972,92,18.57567572593689,83,17.09586262702942\n85,76,83.29867267608643,90,24.692944288253784,83,17.056284427642822\
n86,76,81.04190707206726,93,11.550926208496094,83,16.41378617286682\n87,74,82.97272729873657,94,18.214133262634277,83,17.02610754966736\n88,72,81.47475123405457,86,10.687882900238037,83,16.277140855789185\n89,73,82.54533529281616,89,17.926280975341797,83,17.130127668380737\n90,88,89.74022126197815,128,20.152071475982666,107,17.193649530410767\n91,88,88.83557057380676,115,14.007786512374878,107,16.752809762954712\n92,89,89.99075937271118,120,21.46599245071411,107,17.087929248809814\n93,89,90.57273888587952,116,22.19508957862854,107,17.231003284454346\n94,91,89.41200828552246,109,13.885975360870361,107,16.87487244606018\n95,88,89.83750343322754,115,20.845364570617676,107,17.157454013824463\n96,91,91.00219821929932,124,27.728176832199097,107,17.07404375076294\n97,93,91.0686559677124,117,25.557191371917725,107,17.23515510559082\n98,87,90.42128872871399,112,15.197402715682983,107,16.944777965545654\n99,90,89.05649876594543,100,14.382555484771729,107,16.822237491607666\n"
  }
]