[
  {
    "path": "LICENSE",
    "content": "MIT License\n\nCopyright (c) 2018 Ahmed Qureshi\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "MPNet/AE/CAE.py",
    "content": "import argparse\nimport os\nimport torch\nimport torchvision\nfrom torch import nn\nfrom torch.autograd import Variable\nfrom data_loader import load_dataset\n\n\nclass Encoder(nn.Module):\n\tdef __init__(self):\n\t\tsuper(Encoder, self).__init__()\n\t\tself.encoder = nn.Sequential(nn.Linear(2800, 512),nn.PReLU(),nn.Linear(512, 256),nn.PReLU(),nn.Linear(256, 128),nn.PReLU(),nn.Linear(128, 28))\n\t\t\t\n\tdef forward(self, x):\n\t\tx = self.encoder(x)\n\t\treturn x\n\nclass Decoder(nn.Module):\n\tdef __init__(self):\n\t\tsuper(Decoder, self).__init__()\n\t\tself.decoder = nn.Sequential(nn.Linear(28, 128),nn.PReLU(),nn.Linear(128, 256),nn.PReLU(),nn.Linear(256, 512),nn.PReLU(),nn.Linear(512, 2800))\n\tdef forward(self, x):\n\t\tx = self.decoder(x)\n\t\treturn x\n\n\n\nmse_loss = nn.MSELoss()\nlam=1e-3\ndef loss_function(W, x, recons_x, h):\n\tmse = mse_loss(recons_x, x)\n\t\"\"\"\n\tW is shape of N_hidden x N. So, we do not need to transpose it as opposed to http://wiseodd.github.io/techblog/2016/12/05/contractive-autoencoder/\n\t\"\"\"\n\tdh = h*(1-h) # N_batch x N_hidden\n\tcontractive_loss = torch.sum(Variable(W)**2, dim=1).sum().mul_(lam)\n\treturn mse + contractive_loss\n\n\ndef main(args):\t\n\t\n\tif not os.path.exists(args.model_path):\n\t\tos.makedirs(args.model_path)\n\n\n\tobs = load_dataset()\n\n\tencoder = Encoder()\n\tdecoder = Decoder()\n\tif torch.cuda.is_available():\n\t\tencoder.cuda()\n\t\tdecoder.cuda()\n\n\t\n\tparams = list(encoder.parameters())+list(decoder.parameters())\n\toptimizer = torch.optim.Adagrad(params)\n\ttotal_loss=[]\n\tfor epoch in range(args.num_epochs):\n\t\tprint \"epoch\" + str(epoch)\n\t\tavg_loss=0\n\t\tfor i in range(0, len(obs), args.batch_size):\n\t\t\tdecoder.zero_grad()\n\t\t\tencoder.zero_grad()\n\t\t\tif i+args.batch_size<len(obs):\n\t\t\t\tinp = obs[i:i+args.batch_size]\n\t\t\telse:\n\t\t\t\tinp = obs[i:]\n\t\t\tinp=torch.from_numpy(inp)\n\t\t\tinp =Variable(inp).cuda()\n\t\t\t# 
===================forward=====================\n\t\t\th = encoder(inp)\n\t\t\toutput = decoder(h)\n\t\t\tkeys=encoder.state_dict().keys()\n\t\t\tW=encoder.state_dict()['encoder.6.weight'] # regularize or contracting last layer of encoder. Print keys to displace the layers name. \n\t\t\tloss = loss_function(W,inp,output,h)\n\t\t\tavg_loss=avg_loss+loss.data[0]\n\t\t\t# ===================backward====================\n\t\t\tloss.backward()\n\t\t\toptimizer.step()\n\t\tprint \"--average loss:\"\n\t\tprint avg_loss/(len(obs)/args.batch_size)\n\t\ttotal_loss.append(avg_loss/(len(obs)/args.batch_size))\n\n\tavg_loss=0\n\tfor i in range(len(obs)-5000, len(obs), args.batch_size):\n\t\tinp = obs[i:i+args.batch_size]\n\t\tinp=torch.from_numpy(inp)\n\t\tinp =Variable(inp).cuda()\n\t\t# ===================forward=====================\n\t\toutput = encoder(inp)\n\t\toutput = decoder(output)\n\t\tloss = mse_loss(output,inp)\n\t\tavg_loss=avg_loss+loss.data[0]\n\t\t# ===================backward====================\n\tprint \"--Validation average loss:\"\n\tprint avg_loss/(5000/args.batch_size)\n\n\n    \n\ttorch.save(encoder.state_dict(),os.path.join(args.model_path,'cae_encoder.pkl'))\n\ttorch.save(decoder.state_dict(),os.path.join(args.model_path,'cae_decoder.pkl'))\n\ttorch.save(total_loss,'total_loss.dat')\n\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('--model_path', type=str, default='./models/',help='path for saving trained models')\n\tparser.add_argument('--no_env', type=int, default=50,help='directory for obstacle images')\n\tparser.add_argument('--no_motion_paths', type=int,default=2000,help='number of optimal paths in each environment')\n\tparser.add_argument('--log_step', type=int , default=10,help='step size for prining log info')\n\tparser.add_argument('--save_step', type=int , default=1000,help='step size for saving trained models')\n\n\t# Model parameters\n\tparser.add_argument('--input_size', type=int , default=18, 
help='dimension of the input vector')\n\tparser.add_argument('--output_size', type=int , default=2, help='dimension of the input vector')\n\tparser.add_argument('--hidden_size', type=int , default=256, help='dimension of lstm hidden states')\n\tparser.add_argument('--num_layers', type=int , default=4, help='number of layers in lstm')\n\n\tparser.add_argument('--num_epochs', type=int, default=400)\n\tparser.add_argument('--batch_size', type=int, default=100)\n\tparser.add_argument('--learning_rate', type=float, default=0.001)\n\targs = parser.parse_args()\n\tprint(args)\n\tmain(args)\n"
  },
  {
    "path": "MPNet/AE/data_loader.py",
    "content": "import torch\nimport torch.utils.data as data\nimport os\nimport pickle\nimport numpy as np\nimport nltk\nfrom PIL import Image\nimport os.path\nimport random\n\n\ndef load_dataset(N=30000,NP=1800):\n\n\tobstacles=np.zeros((N,2800),dtype=np.float32)\n\tfor i in range(0,N):\n\t\ttemp=np.fromfile('../dataset2/obs_cloud/obc'+str(i)+'.dat')\n\t\ttemp=temp.reshape(len(temp)/2,2)\n\t\tobstacles[i]=temp.flatten()\n\n\t\n\treturn \tobstacles\t\n"
  },
  {
    "path": "MPNet/data_loader.py",
    "content": "import torch\nimport torch.utils.data as data\nimport os\nimport pickle\nimport numpy as np\nimport nltk\nfrom PIL import Image\nimport os.path\nimport random\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport math\n\n# Environment Encoder\n\nclass Encoder(nn.Module):\n\tdef __init__(self):\n\t\tsuper(Encoder, self).__init__()\n\t\tself.encoder = nn.Sequential(nn.Linear(2800, 512),nn.PReLU(),nn.Linear(512, 256),nn.PReLU(),nn.Linear(256, 128),nn.PReLU(),nn.Linear(128, 28))\n\t\t\t\n\tdef forward(self, x):\n\t\tx = self.encoder(x)\n\t\treturn x\n\n#N=number of environments; NP=Number of Paths\ndef load_dataset(N=100,NP=4000):\n\n\tQ = Encoder()\n\tQ.load_state_dict(torch.load('../models/cae_encoder.pkl'))\n\tif torch.cuda.is_available():\n\t\tQ.cuda()\n\n\t\t\n\tobs_rep=np.zeros((N,28),dtype=np.float32)\n\tfor i in range(0,N):\n\t\t#load obstacle point cloud\n\t\ttemp=np.fromfile('../../dataset/obs_cloud/obc'+str(i)+'.dat')\n\t\ttemp=temp.reshape(len(temp)/2,2)\n\t\tobstacles=np.zeros((1,2800),dtype=np.float32)\n\t\tobstacles[0]=temp.flatten()\n\t\tinp=torch.from_numpy(obstacles)\n\t\tinp=Variable(inp).cuda()\n\t\toutput=Q(inp)\n\t\toutput=output.data.cpu()\n\t\tobs_rep[i]=output.numpy()\n\n\n\n\t\n\t## calculating length of the longest trajectory\n\tmax_length=0\n\tpath_lengths=np.zeros((N,NP),dtype=np.int8)\n\tfor i in range(0,N):\n\t\tfor j in range(0,NP):\n\t\t\tfname='../../dataset/e'+str(i)+'/path'+str(j)+'.dat'\n\t\t\tif os.path.isfile(fname):\n\t\t\t\tpath=np.fromfile(fname)\n\t\t\t\tpath=path.reshape(len(path)/2,2)\n\t\t\t\tpath_lengths[i][j]=len(path)\t\n\t\t\t\tif len(path)> max_length:\n\t\t\t\t\tmax_length=len(path)\n\t\t\t\n\n\tpaths=np.zeros((N,NP,max_length,2), dtype=np.float32)   ## padded paths\n\n\tfor i in range(0,N):\n\t\tfor j in range(0,NP):\n\t\t\tfname='../../dataset/e'+str(i)+'/path'+str(j)+'.dat'\n\t\t\tif 
os.path.isfile(fname):\n\t\t\t\tpath=np.fromfile(fname)\n\t\t\t\tpath=path.reshape(len(path)/2,2)\n\t\t\t\tfor k in range(0,len(path)):\n\t\t\t\t\tpaths[i][j][k]=path[k]\n\t\n\t\t\t\t\t\n\n\tdataset=[]\n\ttargets=[]\n\tfor i in range(0,N):\n\t\tfor j in range(0,NP):\n\t\t\tif path_lengths[i][j]>0:\t\t\t\t\n\t\t\t\tfor m in range(0, path_lengths[i][j]-1):\n\t\t\t\t\tdata=np.zeros(32,dtype=np.float32)\n\t\t\t\t\tfor k in range(0,28):\n\t\t\t\t\t\tdata[k]=obs_rep[i][k]\n\t\t\t\t\tdata[28]=paths[i][j][m][0]\n\t\t\t\t\tdata[29]=paths[i][j][m][1]\n\t\t\t\t\tdata[30]=paths[i][j][path_lengths[i][j]-1][0]\n\t\t\t\t\tdata[31]=paths[i][j][path_lengths[i][j]-1][1]\n\t\t\t\t\t\t\n\t\t\t\t\ttargets.append(paths[i][j][m+1])\n\t\t\t\t\tdataset.append(data)\n\t\t\t\n\tdata=zip(dataset,targets)\n\trandom.shuffle(data)\t\n\tdataset,targets=zip(*data)\n\treturn \tnp.asarray(dataset),np.asarray(targets) \n\n#N=number of environments; NP=Number of Paths; s=starting environment no.; sp=starting_path_no\n#Unseen_environments==> N=10, NP=2000,s=100, sp=0\n#seen_environments==> N=100, NP=200,s=0, sp=4000\ndef load_test_dataset(N=100,NP=200, s=0,sp=4000):\n\n\tobc=np.zeros((N,7,2),dtype=np.float32)\n\ttemp=np.fromfile('../../dataset/obs.dat')\n\tobs=temp.reshape(len(temp)/2,2)\n\n\ttemp=np.fromfile('../../dataset/obs_perm2.dat',np.int32)\n\tperm=temp.reshape(77520,7)\n\n\t## loading obstacles\n\tfor i in range(0,N):\n\t\tfor j in range(0,7):\n\t\t\tfor k in range(0,2):\n\t\t\t\tobc[i][j][k]=obs[perm[i+s][j]][k]\n\t\n\t\t\t\t\t\n\tQ = Encoder()\n\tQ.load_state_dict(torch.load('../models/cae_encoder.pkl'))\n\tif torch.cuda.is_available():\n\t\tQ.cuda()\n\t\n\tobs_rep=np.zeros((N,28),dtype=np.float32)\t\n\tk=0\n\tfor i in 
range(s,s+N):\n\t\ttemp=np.fromfile('../../dataset/obs_cloud/obc'+str(i)+'.dat')\n\t\ttemp=temp.reshape(len(temp)/2,2)\n\t\tobstacles=np.zeros((1,2800),dtype=np.float32)\n\t\tobstacles[0]=temp.flatten()\n\t\tinp=torch.from_numpy(obstacles)\n\t\tinp=Variable(inp).cuda()\n\t\toutput=Q(inp)\n\t\toutput=output.data.cpu()\n\t\tobs_rep[k]=output.numpy()\n\t\tk=k+1\n\t## calculating length of the longest trajectory\n\tmax_length=0\n\tpath_lengths=np.zeros((N,NP),dtype=np.int8)\n\tfor i in range(0,N):\n\t\tfor j in range(0,NP):\n\t\t\tfname='../../dataset/e'+str(i+s)+'/path'+str(j+sp)+'.dat'\n\t\t\tif os.path.isfile(fname):\n\t\t\t\tpath=np.fromfile(fname)\n\t\t\t\tpath=path.reshape(len(path)/2,2)\n\t\t\t\tpath_lengths[i][j]=len(path)\t\n\t\t\t\tif len(path)> max_length:\n\t\t\t\t\tmax_length=len(path)\n\t\t\t\n\n\tpaths=np.zeros((N,NP,max_length,2), dtype=np.float32)   ## padded paths\n\n\tfor i in range(0,N):\n\t\tfor j in range(0,NP):\n\t\t\tfname='../../dataset/e'+str(i+s)+'/path'+str(j+sp)+'.dat'\n\t\t\tif os.path.isfile(fname):\n\t\t\t\tpath=np.fromfile(fname)\n\t\t\t\tpath=path.reshape(len(path)/2,2)\n\t\t\t\tfor k in range(0,len(path)):\n\t\t\t\t\tpaths[i][j][k]=path[k]\n\t\n\t\t\t\t\t\n\n\n\n\treturn \tobc,obs_rep,paths,path_lengths\n\t\n\n\n"
  },
  {
    "path": "MPNet/model.py",
    "content": "import torch\nimport torch.nn as nn\nimport torchvision.models as models\nfrom torch.nn.utils.rnn import pack_padded_sequence\nfrom torch.autograd import Variable\n\n\n# DMLP Model-Path Generator \nclass MLP(nn.Module):\n\tdef __init__(self, input_size, output_size):\n\t\tsuper(MLP, self).__init__()\n\t\tself.fc = nn.Sequential(\n\t\tnn.Linear(input_size, 1280),nn.PReLU(),nn.Dropout(),\n\t\tnn.Linear(1280, 1024),nn.PReLU(),nn.Dropout(),\n\t\tnn.Linear(1024, 896),nn.PReLU(),nn.Dropout(),\n\t\tnn.Linear(896, 768),nn.PReLU(),nn.Dropout(),\n\t\tnn.Linear(768, 512),nn.PReLU(),nn.Dropout(),\n\t\tnn.Linear(512, 384),nn.PReLU(),nn.Dropout(),\n\t\tnn.Linear(384, 256),nn.PReLU(), nn.Dropout(),\n\t\tnn.Linear(256, 256),nn.PReLU(), nn.Dropout(),\n\t\tnn.Linear(256, 128),nn.PReLU(), nn.Dropout(),\n\t\tnn.Linear(128, 64),nn.PReLU(), nn.Dropout(),\n\t\tnn.Linear(64, 32),nn.PReLU(),\n\t\tnn.Linear(32, output_size))\n\t\t\n        \n\tdef forward(self, x):\n\t\tout = self.fc(x)\n\t\treturn out\n\n \n"
  },
  {
    "path": "MPNet/neuralplanner.py",
    "content": "import argparse\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport os\nimport pickle\nfrom data_loader import load_test_dataset \nfrom model import MLP \nfrom torch.autograd import Variable \nimport math\nimport time\n\nsize=5.0\n\n# Load trained model for path generation\nmlp = MLP(32, 2) # simple @D\nmlp.load_state_dict(torch.load('models/mlp_100_4000_PReLU_ae_dd150.pkl'))\n\nif torch.cuda.is_available():\n\tmlp.cuda()\n\n#load test dataset\nobc,obstacles, paths, path_lengths= load_test_dataset() \n\ndef IsInCollision(x,idx):\n\ts=np.zeros(2,dtype=np.float32)\n\ts[0]=x[0]\n\ts[1]=x[1]\n\tfor i in range(0,7):\n\t\tcf=True\n\t\tfor j in range(0,2):\n\t\t\tif abs(obc[idx][i][j] - s[j]) > size/2.0:\n\t\t\t\tcf=False\n\t\t\t\tbreak\n\t\tif cf==True:\t\t\t\t\t\t\n\t\t\treturn True\n\treturn False\n\n\ndef steerTo (start, end, idx):\n\n\tDISCRETIZATION_STEP=0.01\n\tdists=np.zeros(2,dtype=np.float32)\n\tfor i in range(0,2): \n\t\tdists[i] = end[i] - start[i]\n\n\tdistTotal = 0.0\n\tfor i in range(0,2): \n\t\tdistTotal =distTotal+ dists[i]*dists[i]\n\n\tdistTotal = math.sqrt(distTotal)\n\tif distTotal>0:\n\t\tincrementTotal = distTotal/DISCRETIZATION_STEP\n\t\tfor i in range(0,2): \n\t\t\tdists[i] =dists[i]/incrementTotal\n\n\n\n\t\tnumSegments = int(math.floor(incrementTotal))\n\n\t\tstateCurr = np.zeros(2,dtype=np.float32)\n\t\tfor i in range(0,2): \n\t\t\tstateCurr[i] = start[i]\n\t\tfor i in range(0,numSegments):\n\n\t\t\tif IsInCollision(stateCurr,idx):\n\t\t\t\treturn 0\n\n\t\t\tfor j in range(0,2):\n\t\t\t\tstateCurr[j] = stateCurr[j]+dists[j]\n\n\n\t\tif IsInCollision(end,idx):\n\t\t\treturn 0\n\n\n\treturn 1\n\n# checks the feasibility of entire path including the path edges\ndef feasibility_check(path,idx):\n\n\tfor i in range(0,len(path)-1):\n\t\tind=steerTo(path[i],path[i+1],idx)\n\t\tif ind==0:\n\t\t\treturn 0\n\treturn 1\n\n\n# checks the feasibility of path nodes only\ndef collision_check(path,idx):\n\n\tfor i in 
range(0,len(path)):\n\t\tif IsInCollision(path[i],idx):\n\t\t\treturn 0\n\treturn 1\n\ndef to_var(x, volatile=False):\n\tif torch.cuda.is_available():\n\t\tx = x.cuda()\n\treturn Variable(x, volatile=volatile)\n\ndef get_input(i,dataset,targets,seq,bs):\n\tbi=np.zeros((bs,18),dtype=np.float32)\n\tbt=np.zeros((bs,2),dtype=np.float32)\n\tk=0\t\n\tfor b in range(i,i+bs):\n\t\tbi[k]=dataset[seq[i]].flatten()\n\t\tbt[k]=targets[seq[i]].flatten()\n\t\tk=k+1\n\treturn torch.from_numpy(bi),torch.from_numpy(bt)\n\n\n\ndef is_reaching_target(start1,start2):\n\ts1=np.zeros(2,dtype=np.float32)\n\ts1[0]=start1[0]\n\ts1[1]=start1[1]\n\n\ts2=np.zeros(2,dtype=np.float32)\n\ts2[0]=start2[0]\n\ts2[1]=start2[1]\n\n\n\tfor i in range(0,2):\n\t\tif abs(s1[i]-s2[i]) > 1.0: \n\t\t\treturn False\n\treturn True\n\n#lazy vertex contraction \ndef lvc(path,idx):\n\n\tfor i in range(0,len(path)-1):\n\t\tfor j in range(len(path)-1,i+1,-1):\n\t\t\tind=0\n\t\t\tind=steerTo(path[i],path[j],idx)\n\t\t\tif ind==1:\n\t\t\t\tpc=[]\n\t\t\t\tfor k in range(0,i+1):\n\t\t\t\t\tpc.append(path[k])\n\t\t\t\tfor k in range(j,len(path)):\n\t\t\t\t\tpc.append(path[k])\n\n\t\t\t\treturn lvc(pc,idx)\n\t\t\t\t\n\treturn path\n\ndef re_iterate_path2(p,g,idx,obs):\n\tstep=0\n\tpath=[]\n\tpath.append(p[0])\n\tfor i in range(1,len(p)-1):\n\t\tif not IsInCollision(p[i],idx):\n\t\t\tpath.append(p[i])\n\tpath.append(g)\t\t\t\n\tnew_path=[]\n\tfor i in range(0,len(path)-1):\n\t\ttarget_reached=False\n\n\t \n\t\tst=path[i]\n\t\tgl=path[i+1]\n\t\tsteer=steerTo(st, gl, idx)\n\t\tif steer==1:\n\t\t\tnew_path.append(st)\n\t\t\tnew_path.append(gl)\n\t\telse:\n\t\t\titr=0\n\t\t\ttarget_reached=False\n\t\t\twhile (not target_reached) and itr<50 :\n\t\t\t\tnew_path.append(st)\n\t\t\t\titr=itr+1\n\t\t\t\tip=torch.cat((obs,st,gl))\n\t\t\t\tip=to_var(ip)\n\t\t\t\tst=mlp(ip)\n\t\t\t\tst=st.data.cpu()\t\t\n\t\t\t\ttarget_reached=is_reaching_target(st,gl)\n\t\t\tif target_reached==False:\n\t\t\t\treturn 
0\n\n\t#new_path.append(g)\n\treturn new_path\n\ndef replan_path(p,g,idx,obs):\n\tstep=0\n\tpath=[]\n\tpath.append(p[0])\n\tfor i in range(1,len(p)-1):\n\t\tif not IsInCollision(p[i],idx):\n\t\t\tpath.append(p[i])\n\tpath.append(g)\t\t\t\n\tnew_path=[]\n\tfor i in range(0,len(path)-1):\n\t\ttarget_reached=False\n\n\t \n\t\tst=path[i]\n\t\tgl=path[i+1]\n\t\tsteer=steerTo(st, gl, idx)\n\t\tif steer==1:\n\t\t\tnew_path.append(st)\n\t\t\tnew_path.append(gl)\n\t\telse:\n\t\t\titr=0\n\t\t\tpA=[]\n\t\t\tpA.append(st)\n\t\t\tpB=[]\n\t\t\tpB.append(gl)\n\t\t\ttarget_reached=0\n\t\t\ttree=0\n\t\t\twhile target_reached==0 and itr<50 :\n\t\t\t\titr=itr+1\n\t\t\t\tif tree==0:\n\t\t\t\t\tip1=torch.cat((obs,st,gl))\n\t\t\t\t\tip1=to_var(ip1)\n\t\t\t\t\tst=mlp(ip1)\n\t\t\t\t\tst=st.data.cpu()\n\t\t\t\t\tpA.append(st)\n\t\t\t\t\ttree=1\n\t\t\t\telse:\n\t\t\t\t\tip2=torch.cat((obs,gl,st))\n\t\t\t\t\tip2=to_var(ip2)\n\t\t\t\t\tgl=mlp(ip2)\n\t\t\t\t\tgl=gl.data.cpu()\n\t\t\t\t\tpB.append(gl)\n\t\t\t\t\ttree=0\t\t\n\t\t\t\ttarget_reached=steerTo(st, gl, idx)\n\t\t\tif target_reached==0:\n\t\t\t\treturn 0\n\t\t\telse:\n\t\t\t\tfor p1 in range(0,len(pA)):\n\t\t\t\t\tnew_path.append(pA[p1])\n\t\t\t\tfor p2 in range(len(pB)-1,-1,-1):\n\t\t\t\t\tnew_path.append(pB[p2])\n\n\treturn new_path\t\n    \ndef main(args):\n\t# Create model directory\n\tif not os.path.exists(args.model_path):\n\t\tos.makedirs(args.model_path)\n\t\n\n\t\n\ttp=0\n\tfp=0\n\ttot=[]\n\tfor i in range(0,1):\n\t\tet=[]\n\t\tfor j in range(0,2):\n\t\t\tprint (\"step: i=\"+str(i)+\" j=\"+str(j))\n\t\t\tp1_ind=0\n\t\t\tp2_ind=0\n\t\t\tp_ind=0\t\n\t\t\tif path_lengths[i][j]>0:\t\t\t\t\t\t\t\t\n\t\t\t\tstart=np.zeros(2,dtype=np.float32)\n\t\t\t\tgoal=np.zeros(2,dtype=np.float32)\n\t\t\t\tfor l in range(0,2):\n\t\t\t\t\tstart[l]=paths[i][j][0][l]\n\t\t\t\t\n\t\t\t\tfor l in range(0,2):\n\t\t\t\t\tgoal[l]=paths[i][j][path_lengths[i][j]-1][l]\n\t\t\t\t#start and goal for bidirectional generation\n\t\t\t\t## starting 
point\n\t\t\t\tstart1=torch.from_numpy(start)\n\t\t\t\tgoal2=torch.from_numpy(start)\n\t\t\t\t##goal point\n\t\t\t\tgoal1=torch.from_numpy(goal)\n\t\t\t\tstart2=torch.from_numpy(goal)\n\t\t\t\t##obstacles\n\t\t\t\tobs=obstacles[i]\n\t\t\t\tobs=torch.from_numpy(obs)\n\t\t\t\t##generated paths\n\t\t\t\tpath1=[] \n\t\t\t\tpath1.append(start1)\n\t\t\t\tpath2=[]\n\t\t\t\tpath2.append(start2)\n\t\t\t\tpath=[]\n\t\t\t\ttarget_reached=0\n\t\t\t\tstep=0\t\n\t\t\t\tpath=[] # stores end2end path by concatenating path1 and path2\n\t\t\t\ttree=0\t\n\t\t\t\ttic = time.clock()\t\n\t\t\t\twhile target_reached==0 and step<80 :\n\t\t\t\t\tstep=step+1\n\t\t\t\t\tif tree==0:\n\t\t\t\t\t\tinp1=torch.cat((obs,start1,start2))\n\t\t\t\t\t\tinp1=to_var(inp1)\n\t\t\t\t\t\tstart1=mlp(inp1)\n\t\t\t\t\t\tstart1=start1.data.cpu()\n\t\t\t\t\t\tpath1.append(start1)\n\t\t\t\t\t\ttree=1\n\t\t\t\t\telse:\n\t\t\t\t\t\tinp2=torch.cat((obs,start2,start1))\n\t\t\t\t\t\tinp2=to_var(inp2)\n\t\t\t\t\t\tstart2=mlp(inp2)\n\t\t\t\t\t\tstart2=start2.data.cpu()\n\t\t\t\t\t\tpath2.append(start2)\n\t\t\t\t\t\ttree=0\n\t\t\t\t\ttarget_reached=steerTo(start1,start2,i);\n\t\t\t\ttp=tp+1\n\n\t\t\t\tif target_reached==1:\n\t\t\t\t\tfor p1 in range(0,len(path1)):\n\t\t\t\t\t\tpath.append(path1[p1])\n\t\t\t\t\tfor p2 in range(len(path2)-1,-1,-1):\n\t\t\t\t\t\tpath.append(path2[p2])\n\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\tpath=lvc(path,i)\n\t\t\t\t\tindicator=feasibility_check(path,i)\n\t\t\t\t\tif indicator==1:\n\t\t\t\t\t\ttoc = time.clock()\n\t\t\t\t\t\tt=toc-tic\n\t\t\t\t\t\tet.append(t)\n\t\t\t\t\t\tfp=fp+1\n\t\t\t\t\t\tprint (\"path[0]:\")\n\t\t\t\t\t\tfor p in range(0,len(path)):\n\t\t\t\t\t\t\tprint (path[p][0])\n\t\t\t\t\t\tprint (\"path[1]:\")\n\t\t\t\t\t\tfor p in range(0,len(path)):\n\t\t\t\t\t\t\tprint (path[p][1])\n\t\t\t\t\t\tprint (\"Actual path[0]:\")\n\t\t\t\t\t\tfor p in range(0,path_lengths[i][j]):\n\t\t\t\t\t\t\tprint (paths[i][j][p][0])\n\t\t\t\t\t\tprint (\"Actual 
path[1]:\")\n\t\t\t\t\t\tfor p in range(0,path_lengths[i][j]):\n\t\t\t\t\t\t\tprint (paths[i][j][p][1])\n\t\t\t\t\telse:\n\t\t\t\t\t\tsp=0\n\t\t\t\t\t\tindicator=0\n\t\t\t\t\t\twhile indicator==0 and sp<10 and path !=0:\n\t\t\t\t\t\t\tsp=sp+1\n\t\t\t\t\t\t\tg=np.zeros(2,dtype=np.float32)\n\t\t\t\t\t\t\tg=torch.from_numpy(paths[i][j][path_lengths[i][j]-1])\n\t\t\t\t\t\t\tpath=replan_path(path,g,i,obs) #replanning at coarse level\n\t\t\t\t\t\t\tif path !=0:\n\t\t\t\t\t\t\t\tpath=lvc(path,i)\n\t\t\t\t\t\t\t\tindicator=feasibility_check(path,i)\n\t\t\t\t\t\n\t\t\t\t\t\t\tif indicator==1:\n\t\t\t\t\t\t\t\ttoc = time.clock()\n\t\t\t\t\t\t\t\tt=toc-tic\n\t\t\t\t\t\t\t\tet.append(t)\n\t\t\t\t\t\t\t\tfp=fp+1\n\t\t\t\t\t\t\t\tif len(path)<20:\n\t\t\t\t\t\t\t\t\tprint (\"new_path[0]:\")\n\t\t\t\t\t\t\t\t\tfor p in range(0,len(path)):\n\t\t\t\t\t\t\t\t\t\tprint (path[p][0])\n\t\t\t\t\t\t\t\t\tprint (\"new_path[1]:\")\n\t\t\t\t\t\t\t\t\tfor p in range(0,len(path)):\n\t\t\t\t\t\t\t\t\t\tprint (path[p][1])\n\t\t\t\t\t\t\t\t\tprint (\"Actual path[0]:\")\n\t\t\t\t\t\t\t\t\tfor p in range(0,path_lengths[i][j]):\n\t\t\t\t\t\t\t\t\t\tprint (paths[i][j][p][0])\n\t\t\t\t\t\t\t\t\tprint (\"Actual path[1]:\")\n\t\t\t\t\t\t\t\t\tfor p in range(0,path_lengths[i][j]):\n\t\t\t\t\t\t\t\t\t\tprint (paths[i][j][p][1])\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tprint \"path found, dont worry\"\t\n\n\t\t\t\t\n\t\ttot.append(et)\t\t\t\t\t\n\tpickle.dump(tot, open(\"time_s2D_unseen_mlp.p\", \"wb\" ))\t\n\n\n\tprint (\"total paths\")\n\tprint (tp)\n\tprint (\"feasible paths\")\n\tprint (fp)\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('--model_path', type=str, default='./models/',help='path for saving trained models')\n\tparser.add_argument('--no_env', type=int, default=50,help='directory for obstacle images')\n\tparser.add_argument('--no_motion_paths', type=int,default=2000,help='number of optimal paths in each 
environment')\n\tparser.add_argument('--log_step', type=int , default=10,help='step size for prining log info')\n\tparser.add_argument('--save_step', type=int , default=1000,help='step size for saving trained models')\n\n\t# Model parameters\n\tparser.add_argument('--input_size', type=int , default=68, help='dimension of the input vector')\n\tparser.add_argument('--output_size', type=int , default=2, help='dimension of the input vector')\n\tparser.add_argument('--hidden_size', type=int , default=256, help='dimension of lstm hidden states')\n\tparser.add_argument('--num_layers', type=int , default=4, help='number of layers in lstm')\n\n\tparser.add_argument('--num_epochs', type=int, default=100)\n\tparser.add_argument('--batch_size', type=int, default=28)\n\tparser.add_argument('--learning_rate', type=float, default=0.001)\n\targs = parser.parse_args()\n\tprint(args)\n\tmain(args)\n\n\n"
  },
  {
    "path": "MPNet/train.py",
    "content": "import argparse\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport os\nimport pickle\nfrom data_loader import load_dataset \nfrom model import MLP \nfrom torch.autograd import Variable \nimport math\n\ndef to_var(x, volatile=False):\n\tif torch.cuda.is_available():\n\t\tx = x.cuda()\n\treturn Variable(x, volatile=volatile)\n\ndef get_input(i,data,targets,bs):\n\n\tif i+bs<len(data):\n\t\tbi=data[i:i+bs]\n\t\tbt=targets[i:i+bs]\t\n\telse:\n\t\tbi=data[i:]\n\t\tbt=targets[i:]\n\t\t\n\treturn torch.from_numpy(bi),torch.from_numpy(bt)\n\n\n    \ndef main(args):\n\t# Create model directory\n\tif not os.path.exists(args.model_path):\n\t\tos.makedirs(args.model_path)\n    \n    \n\t# Build data loader\n\tdataset,targets= load_dataset() \n\t\n\t# Build the models\n\tmlp = MLP(args.input_size, args.output_size)\n    \n\tif torch.cuda.is_available():\n\t\tmlp.cuda()\n\n\t# Loss and Optimizer\n\tcriterion = nn.MSELoss()\n\toptimizer = torch.optim.Adagrad(mlp.parameters()) \n    \n\t# Train the Models\n\ttotal_loss=[]\n\tprint len(dataset)\n\tprint len(targets)\n\tsm=100 # start saving models after 100 epochs\n\tfor epoch in range(args.num_epochs):\n\t\tprint \"epoch\" + str(epoch)\n\t\tavg_loss=0\n\t\tfor i in range (0,len(dataset),args.batch_size):\n\t\t\t# Forward, Backward and Optimize\n\t\t\tmlp.zero_grad()\t\t\t\n\t\t\tbi,bt= get_input(i,dataset,targets,args.batch_size)\n\t\t\tbi=to_var(bi)\n\t\t\tbt=to_var(bt)\n\t\t\tbo = mlp(bi)\n\t\t\tloss = criterion(bo,bt)\n\t\t\tavg_loss=avg_loss+loss.data[0]\n\t\t\tloss.backward()\n\t\t\toptimizer.step()\n\t\tprint \"--average loss:\"\n\t\tprint avg_loss/(len(dataset)/args.batch_size)\n\t\ttotal_loss.append(avg_loss/(len(dataset)/args.batch_size))\n\t\t# Save the models\n\t\tif epoch==sm:\n\t\t\tmodel_path='mlp_100_4000_PReLU_ae_dd'+str(sm)+'.pkl'\n\t\t\ttorch.save(mlp.state_dict(),os.path.join(args.model_path,model_path))\n\t\t\tsm=sm+50 # save model after every 50 epochs from 100 epoch 
ownwards\n\ttorch.save(total_loss,'total_loss.dat')\n\tmodel_path='mlp_100_4000_PReLU_ae_dd_final.pkl'\n\ttorch.save(mlp.state_dict(),os.path.join(args.model_path,model_path))\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('--model_path', type=str, default='./models/',help='path for saving trained models')\n\tparser.add_argument('--no_env', type=int, default=50,help='directory for obstacle images')\n\tparser.add_argument('--no_motion_paths', type=int,default=2000,help='number of optimal paths in each environment')\n\tparser.add_argument('--log_step', type=int , default=10,help='step size for prining log info')\n\tparser.add_argument('--save_step', type=int , default=1000,help='step size for saving trained models')\n\n\t# Model parameters\n\tparser.add_argument('--input_size', type=int , default=32, help='dimension of the input vector')\n\tparser.add_argument('--output_size', type=int , default=2, help='dimension of the input vector')\n\tparser.add_argument('--hidden_size', type=int , default=256, help='dimension of lstm hidden states')\n\tparser.add_argument('--num_layers', type=int , default=4, help='number of layers in lstm')\n\n\tparser.add_argument('--num_epochs', type=int, default=500)\n\tparser.add_argument('--batch_size', type=int, default=100)\n\tparser.add_argument('--learning_rate', type=float, default=0.0001)\n\targs = parser.parse_args()\n\tprint(args)\n\tmain(args)\n\n\n\n"
  },
  {
    "path": "README.md",
    "content": "# Motion Planning Networks\nImplementation of [MPNet: Motion Planning Networks](https://sites.google.com/view/mpnet). [[arXiv1]](https://arxiv.org/abs/1806.05767) [[arXiv2]](https://arxiv.org/abs/1907.06013) \n\n\nThe code can easily be adapted for [Informed Neural Sampling](https://arxiv.org/abs/1809.10252).\n\n## Contains\n* Data Generation\n\t* Any existing classical motion planner can be used to generate datasets. However, we provide following implementations in C++:\n\t\t* [P-RRT*](https://link.springer.com/article/10.1007/s10514-015-9518-0)\n\t\t* [RRT*](https://arxiv.org/abs/1105.1186)\n\t\t* Example dataset: [simple2D](https://drive.google.com/open?id=1oADJ85qxb3WKHXE4Bj6lwio-soGOktRa)\n\t\t* Example dataset: [Complex3D](https://drive.google.com/file/d/1wNPfdVGkkZ-7haTUhdzT0sGnZAkAJEol/view?usp=sharing)\n* MPNet algorithm\n* Naive python visualization files\n\n\n## Data Description\n* Simple 2D has 7 blocks each of size 5x5 that are placed randomly.\n* Complex 3D contains 10 blocks with sizes as follows:\n\t* shape=[[5.0,5.0,10.0],[5.0,10.0,5.0],[5.0,10.0,10.0],\n              [10.0,5.0,5.0],[10.0,5.0,10.0],[10.0,10.0,5.0],\n              [10.0,10.0,10.0],[5.0,5.0,5.0],[10.0,10.0,10.0],[5.0,5.0,5.0]]\n* e0-109 has the training and testing paths in 110 different environments.\n\t* 0-100 environments and 0-4000 paths/environment are for training.\n\t* Seen test dataset: 0-100 envs and 4000-4200=200 paths/env.\n\t* Unseen test dataset: 100-110 envs and 0-2000 paths/env.\n* obs_cloud is the point-cloud of randomly generated 30,000 environments.\n\t* 0-110 corresponds to the same environments for which path data is provided.\n\t* You may use full dataset to train encoder network via unsupervised learning.\n* obs.dat contains the center location (x,y) of each obstacle in the environments.\n* obs_perm2.dat contains the order in which the blocks should be placed in preset locations given by obs.dat file to setup environments.\n\t* For instance, in 
complex 3D, the permutation 8342567901 indicates obstacle #8 of size 10x10x10 should be placed at the location #0 given by obs.dat.\n\n## Generating your own data\n* Define a region of operation, for instance in simple2D, it is 20x20\n* Decide how many obstacles (r) you would like to place in the region. In the case of simple2D, we have r=7 5x5 blocks.\n* Generate random N locations to place r obstacles in the region. In the case of simple2D, we generated N=20.\n* For N locations and r obstacles, apply combinatorics, to generate NCr different environments i.e., in simple 2D NCr= 20C7= 77520\n \t* The obs_perm2 file contains the combinations, for instance 6432150 indicates to place obstacle#6 at location #0.\n* Once obstacles are placed, randomly generate collision-free samples and use them in pairs as start-goal to generate paths using any classical planner for the training. For classical planners, we recommend using [OMPL](https://ompl.kavrakilab.org/) implementations.\n\n\n## Requirements\n* Data Generation\n\n\t1. Install [libbot2]( https://github.com/libbot2/libbot2)\n\t\t* Make sure all dependencies of libbot2 (e.g., lcm) are installed.\n\t\t* Install libbot2 with the local installation procedure.\n\t\t* Run \"make\" in the data_generation folder where the README file is located.\n\n\t2. Use any compiler such as Netbeans to load the precompiled code.\n\t\t* data_generation/src/rrts_main.cpp contains the main rrt/prrt code. \t\n\t\t* data_generation/viewer/src/viewer_main.cpp contains the visualization code.\n\t\t\t* Also checkout comments in data_generation/viewer/src/renderers/graph_renderer.cpp\n\n\t\t* Note: main_viewer and rrts_main should run in parallel as:\n\t\t\t* rrts_main sends the path solution as well as the tree to the main_viewer to publish through local network.\n\t\t\t* data is transmitted through LCM network protocol.\n\n* MPNet\n\t* [PyTorch](http://pytorch.org/) \n\n\n## Examples\n\n1. 
Assuming paths to obstacles point-cloud are declared, train obstacle-encoder:\n```python MPNET/AE/CAE.py```\n\n2. Assuming paths to demonstration dataset and obstacle-encoder are declared, run mpnet_trainer:\n\t\n    ```python MPNET/train.py```\n    \n3. Run tests by first loading the trained models:\n\t\n    ```python MPNET/neuralplanner.py``` \n\n## References\n\n```\n@inproceedings{qureshi2019motion,\n  title={Motion planning networks},\n  author={Qureshi, Ahmed H and Simeonov, Anthony and Bency, Mayur J and Yip, Michael C},\n  booktitle={2019 International Conference on Robotics and Automation (ICRA)},\n  pages={2118--2124},\n  year={2019},\n  organization={IEEE}\n}\n@inproceedings{qureshi2018deeply,\n  title={Deeply Informed Neural Sampling for Robot Motion Planning},\n  author={Qureshi, Ahmed H and Yip, Michael C},\n  booktitle={2018 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},\n  pages={6582--6588},\n  year={2018},\n  organization={IEEE}\n}\n@article{qureshi2019motion,\n  title={Motion Planning Networks: Bridging the Gap Between Learning-based and Classical Motion Planners},\n  author={Qureshi, Ahmed H and Miao, Yinglong and Simeonov, Anthony and Yip, Michael C},\n  journal={arXiv preprint arXiv:1907.06013},\n  year={2019}\n}\n```\n\n\n"
  },
  {
    "path": "data_generation/Makefile",
    "content": "default_target: all\n\n# get a list of subdirs to build by reading tobuild.txt\nSUBDIRS:=$(shell grep -v \"^\\#\" tobuild.txt)\n\n#force to build on the top level build dir\nBUILD_PREFIX=`pwd`/build\n\n\n# build quietly by default.  For a verbose build, run \"make VERBOSE=1\"\n$(VERBOSE).SILENT:\n\nall: \n\t@[ -d $(BUILD_PREFIX) ] || mkdir -p $(BUILD_PREFIX) || exit 1\n\t@for subdir in $(SUBDIRS); do \\\n\t\techo \"\\n-------------------------------------------\"; \\\n\t\techo \"-- $$subdir\"; \\\n\t\techo \"-------------------------------------------\"; \\\n\t\t$(MAKE) -C $$subdir all || exit 2; \\\n\tdone\n\t@# Place additional commands here if you have any\n\nclean:\n\t@for subdir in $(SUBDIRS); do \\\n\t\techo \"\\n-------------------------------------------\"; \\\n\t\techo \"-- $$subdir\"; \\\n\t\techo \"-------------------------------------------\"; \\\n\t\t$(MAKE) -C $$subdir clean; \\\n\tdone\n\trm -rf build/bin\n\trm -rf build/include\n\trm -rf build/lib\n\trm -rf build/share\n\t@# Place additional commands here if you have any\n"
  },
  {
    "path": "data_generation/README",
    "content": "To install with libbot2:\n\n1. Check out libbot2 from \nhttps://github.com/libbot2/libbot2\n\n2. Make sure all dependencies of libbot2 (e.g., lcm) are installed.\n\n3. Install libbot2 with the local installation procedure.\n\n4. Run \"make\" in the folder where this README file is located.\n\n5. Once compiled, one can use Netbeans C/C++ IDE to load the pre-compiled project.\n\n6- Run viewer for visualization as follows or build&run using netbeans:\n```g++ viewer/src/main_viewer.cpp```\n7- Run rrtstar/prrtstar as follows or build&run using netbeans:\n```g++ rrtstar/src/rrts_main.cpp```\n\n#Note: main_viewer and rrts_main should run in parallel as:\n-rrts_main sends the path solution as well as the tree to the main_viewer to publish through local network.\n-data is transmitted through LCM network protocol.\n\n\n\n\n\n"
  },
  {
    "path": "data_generation/lcmtypes/CMakeLists.txt",
    "content": "SET(ENV{PKG_CONFIG_PATH} \"$ENV{PKG_CONFIG_PATH}:/usr/local/lib/pkgconfig:/opt/local/lib/pkgconfig:/usr/local/share/pkgconfig\")\n\ncmake_minimum_required(VERSION 2.6.0)\n\nset(POD_NAME lcmtypes)\ninclude(cmake/pods.cmake)\n\n# automatically build LCM types.  This also defines a number of CMake\n# variables, see cmake/lcmtypes.cmake for details\ninclude(cmake/lcmtypes.cmake)\nlcmtypes_build()\n\ninclude_directories(${LCMTYPES_INCLUDE_DIRS})\n\npods_install_pkg_config_file(lcmtypes\n    CFLAGS\n    LIBS -llcmtypes_lcmtypes\n    VERSION 0.0.1)\n"
  },
  {
    "path": "data_generation/lcmtypes/Makefile",
    "content": "# Default makefile distributed with pods version: 11.02.09\n\ndefault_target: all\n\n# Default to a less-verbose build.  If you want all the gory compiler output,\n# run \"make VERBOSE=1\"\n$(VERBOSE).SILENT:\n\n# Figure out where to build the software.\n#   Use BUILD_PREFIX if it was passed in.\n#   If not, search up to four parent directories for a 'build' directory.\n#   Otherwise, use ./build.\nifeq \"$(BUILD_PREFIX)\" \"\"\nBUILD_PREFIX:=$(shell for pfx in .. ../.. ../../.. ../../../..; do d=`pwd`/$$pfx/build;\\\n               if [ -d $$d ]; then echo $$d; exit 0; fi; done; echo `pwd`/build)\nendif\n# create the build directory if needed, and normalize its path name\nBUILD_PREFIX:=$(shell mkdir -p $(BUILD_PREFIX) && cd $(BUILD_PREFIX) && echo `pwd`)\n\n# Default to a release build.  If you want to enable debugging flags, run\n# \"make BUILD_TYPE=Debug\"\nifeq \"$(BUILD_TYPE)\" \"\"\nBUILD_TYPE=\"Release\"\nendif\n\nall: pod-build/Makefile\n\t$(MAKE) -C pod-build all install\n\npod-build/Makefile:\n\t$(MAKE) configure\n\n.PHONY: configure\nconfigure:\n\t@echo \"\\nBUILD_PREFIX: $(BUILD_PREFIX)\\n\\n\"\n\n\t# create the temporary build directory if needed\n\t@mkdir -p pod-build\n\n\t# run CMake to generate and configure the build scripts\n\t@cd pod-build && cmake -DCMAKE_INSTALL_PREFIX=$(BUILD_PREFIX) \\\n\t\t   -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) ..\n\nclean:\n\t-if [ -e pod-build/install_manifest.txt ]; then rm -f `cat pod-build/install_manifest.txt`; fi\n\t-if [ -d pod-build ]; then $(MAKE) -C pod-build clean; rm -rf pod-build; fi\n"
  },
  {
    "path": "data_generation/lcmtypes/cmake/lcmtypes.cmake",
    "content": "# Macros for automatically compiling LCM types into C, Java, and Python\n# libraries.\n#\n# The primary macro is:\n#     lcmtypes_build([C_AGGREGATE_HEADER header_fname] \n#                    [C_LIBNAME lib_name]\n#                    [JAVA_DEST_DIR dir_name]\n#                    [PY_DEST_DIR dir_name]\n#                    )\n# \n# It expects that the directory ${PROJECT_SOURCE_DIR}/lcmtypes contains all\n# the LCM types used by the system.  The macro generates C, Java, and Python\n# bindings.  See the C, Java, and Python sections below for information on\n# language specific options and generated results.\n#\n# After invoking this macro, the following variables will be set:\n#\n#   LCMTYPES_INCLUDE_DIRS\n#   LCMTYPES_LIBS\n#   LCMTYPES_JAR\n#\n# \n# C\n# ==\n# \n# C bindings will be placed in ${PROJECT_SOURCE_DIR}/lcmtypes/c.  This\n# directory is also stored in LCMTYPES_INCLUDE_DIRS on output.\n#\n# The autogenerated C bindings also get compiled to a static and shared\n# library.  The library prefix will be stored in LCMTYPES_LIBS on output.\n# This prefix can be manually set using the C_LIBNAME option.\n# \n# Additionally, a header file will be generated that automatically includes\n# all of the other automatically generated header files.  The name of this\n# header file defaults to a cleaned-up version of \"${PROJECT_NAME}.h\" \n# (non-alphanumeric characters replaced with underscores), but can\n# be manually set using the C_AGGREGATE_HEADER option.\n#\n#\n# Java\n# ====\n#\n# If Java is available, then Java bindings are be generated and placed in \n#    ${PROJECT_SOURCE_DIR}/lcmtypes/java\n#\n# This directory can be changed using the JAVA_DEST_DIR option.\n# \n# Additionally, targets are added to automatically compile the .java files to a\n# .jar file. 
The location of this jar file is stored in LCMTYPES_JAR\n#\n# and the .jar file will be installed to \n#   ${CMAKE_INSTALL_PREFIX}/share/java\n#\n#\n# Python\n# ======\n#\n# If Python is enabled, then python bindings will be generated and placed in \n#    ${PROJECT_SOURCE_DIR}/lcmtypes/python\n# \n# This directory can be changed by setting the PY_DEST_DIR option.\n#\n# Additionally, the .py files will be installed to \n#   ${CMAKE_INSTALL_PREFIX}/lib/python{X.Y}/site-packages\n#   \n# where {X.Y} refers to the python version used to build the .py files.\n#\n# ----\n# File: lcmtypes.cmake\n# Distributed with pods version: 11.02.09\n\ncmake_minimum_required(VERSION 2.6.0)\n\n# Policy settings to prevent warnings on 2.6 but ensure proper operation on\n# 2.4.\nif(COMMAND cmake_policy)\n    # Logical target names must be globally unique.\n    cmake_policy(SET CMP0002 OLD)\n    # Libraries linked via full path no longer produce linker search paths.\n    cmake_policy(SET CMP0003 OLD)\n    # Preprocessor definition values are now escaped automatically.\n    cmake_policy(SET CMP0005 OLD)\n    if(POLICY CMP0011)\n        # Included scripts do automatic cmake_policy PUSH and POP.\n        cmake_policy(SET CMP0011 OLD)\n    endif(POLICY CMP0011)\nendif()\n\nmacro(lcmtypes_get_types msgvar)\n    # get a list of all LCM types\n    file(GLOB __tmplcmtypes \"${PROJECT_SOURCE_DIR}/lcmtypes/*.lcm\")\n    set(${msgvar} \"\")\n    foreach(_msg ${__tmplcmtypes})\n        # Try to filter out temporary and backup files\n        if(${_msg} MATCHES \"^[^\\\\.].*\\\\.lcm$\")\n            list(APPEND ${msgvar} ${_msg})\n        endif(${_msg} MATCHES \"^[^\\\\.].*\\\\.lcm$\")\n    endforeach(_msg)\nendmacro()\n\nfunction(lcmgen)\n    execute_process(COMMAND lcm-gen ${ARGV} RESULT_VARIABLE lcmgen_result)\n    if(NOT lcmgen_result EQUAL 0)\n        message(FATAL_ERROR \"lcm-gen failed\")\n    endif()\nendfunction()\n\nfunction(lcmtypes_add_clean_dir clean_dir)\n    
get_directory_property(acfiles ADDITIONAL_MAKE_CLEAN_FILES)\n    list(APPEND acfiles ${clean_dir})\n    set_directory_properties(PROPERTIES ADDITIONAL_MAKE_CLEAN_FILES \"${acfiles}\")\nendfunction()\n\nfunction(lcmtypes_build_c)\n    lcmtypes_get_types(_lcmtypes)\n    list(LENGTH _lcmtypes _num_lcmtypes)\n    if(_num_lcmtypes EQUAL 0)\n        return()\n    endif()\n\n    string(REGEX REPLACE \"[^a-zA-Z0-9]\" \"_\" __sanitized_project_name \"${PROJECT_NAME}\")\n\n    # set some defaults\n\n    # library name\n    set(libname \"lcmtypes_${PROJECT_NAME}\")\n\n    # header file that includes all other generated header files\n    set(agg_h_bname \"${__sanitized_project_name}.h\")\n\n    # allow defaults to be overriden by function parameters\n    set(modewords C_LIBNAME C_AGGREGATE_HEADER)\n    set(curmode \"\")\n    foreach(word ${ARGV})\n        list(FIND modewords ${word} mode_index)\n        if(${mode_index} GREATER -1)\n            set(curmode ${word})\n        elseif(curmode STREQUAL C_AGGREGATE_HEADER)\n            set(agg_h_bname \"${word}\")\n            set(curmode \"\")\n        elseif(curmode STREQUAL C_LIBNAME)\n            set(libname \"${word}\")\n            set(curmode \"\")\n        endif()\n    endforeach()\n\n    # generate C bindings for LCM types\n    set(_lcmtypes_c_dir ${PROJECT_SOURCE_DIR}/lcmtypes/c/lcmtypes)\n\n    # blow away any existing auto-generated files.\n    file(REMOVE_RECURSE ${_lcmtypes_c_dir})\n\n    # run lcm-gen now\n    execute_process(COMMAND mkdir -p ${_lcmtypes_c_dir})\n    lcmgen(--lazy -c --c-cpath ${_lcmtypes_c_dir} --c-hpath ${_lcmtypes_c_dir} --cinclude lcmtypes ${_lcmtypes})\n\n    # run lcm-gen at compile time\n    add_custom_target(lcmgen_c ALL \n        COMMAND sh -c '[ -d ${_lcmtypes_c_dir} ] || mkdir -p ${_lcmtypes_c_dir}'\n        COMMAND sh -c 'lcm-gen --lazy -c ${_lcmtypes} --c-cpath ${_lcmtypes_c_dir} --c-hpath ${_lcmtypes_c_dir}')\n\n    # get a list of all generated .c and .h files\n    file(GLOB 
_lcmtypes_c_files ${_lcmtypes_c_dir}/*.c)\n    file(GLOB _lcmtypes_h_files ${_lcmtypes_c_dir}/*.h)\n\n    include_directories(BEFORE ${PROJECT_SOURCE_DIR}/lcmtypes/c)\n    include_directories(${LCM_INCLUDE_DIRS})\n\n    # aggregate into a static library\n    add_library(${libname} STATIC ${_lcmtypes_c_files})\n    set_source_files_properties(${_lcmtypes_c_files} PROPERTIES COMPILE_FLAGS \"-fPIC\")\n    #    set_target_properties(\"${libname}-static\" PROPERTIES OUTPUT_NAME \"${libname}\")\n    set_target_properties(${libname} PROPERTIES PREFIX \"lib\")\n    set_target_properties(${libname} PROPERTIES CLEAN_DIRECT_OUTPUT 1)\n    add_dependencies(${libname} lcmgen_c)\n\n    #    add_library(\"${libname}-static\" STATIC ${_lcmtypes_c_files})\n    #    set_source_files_properties(${_lcmtypes_c_files} PROPERTIES COMPILE_FLAGS \"-I${PROJECT_SOURCE_DIR}/lcmtypes/c\")\n    #    set_target_properties(\"${libname}-static\" PROPERTIES OUTPUT_NAME \"${libname}\")\n    #    set_target_properties(\"${libname}-static\" PROPERTIES PREFIX \"lib\")\n    #    set_target_properties(\"${libname}-static\" PROPERTIES CLEAN_DIRECT_OUTPUT 1)\n    #    add_dependencies(\"${libname}-static\" lcmgen_c)\n\n    # XXX don't build a shared library, as it makes using 3rd-party/external\n    # LCM types awkward (linker will try to link external symbols at library link time, \n    # rather than executable link time)\n\n    #    # aggregate into a shared library\n    #    add_library(${libname} SHARED ${_lcmtypes_c_files})\n    #    set_target_properties(\"${libname}\" PROPERTIES CLEAN_DIRECT_OUTPUT 1)\n    #    add_dependencies(\"${libname}\" lcmgen_c)\n    #    target_link_libraries(${libname} ${LCM_LDFLAGS})\n\n    # create a header file aggregating all of the autogenerated .h files\n    set(__agg_h_fname \"${_lcmtypes_c_dir}/${agg_h_bname}\")\n    file(WRITE ${__agg_h_fname}\n        \"#ifndef __lcmtypes_${__sanitized_project_name}_h__\\n\"\n        \"#define 
__lcmtypes_${__sanitized_project_name}_h__\\n\\n\")\n    foreach(h_file ${_lcmtypes_h_files})\n        file(RELATIVE_PATH __tmp_path ${_lcmtypes_c_dir} ${h_file})\n        file(APPEND ${__agg_h_fname} \"#include \\\"${__tmp_path}\\\"\\n\")\n    endforeach()\n    file(APPEND ${__agg_h_fname} \"\\n#endif\\n\")\n    list(APPEND _lcmtypes_h_files ${__agg_h_fname})\n    unset(__sanitized_project_name)\n    unset(__agg_h_fname)\n\n    # make header files and libraries public\n    #install(TARGETS \"${libname}-static\" ARCHIVE DESTINATION lib)\n    install(TARGETS ${libname} ARCHIVE DESTINATION lib)\n    #install(TARGETS \"${libname}\" LIBRARY DESTINATION lib)\n    install(FILES ${_lcmtypes_h_files} DESTINATION include/lcmtypes)\n\n    # set some compilation variables\n    set(LCMTYPES_INCLUDE_DIRS ${PROJECT_SOURCE_DIR}/lcmtypes/c PARENT_SCOPE)\n    set(LCMTYPES_LIBS ${libname} PARENT_SCOPE)\n\n    # create a pkg-config file\n    set(pc_fname \"${CMAKE_BINARY_DIR}/lib/pkgconfig/${libname}.pc\")\n    file(WRITE ${pc_fname}\n        \"prefix=${CMAKE_INSTALL_PREFIX}\\n\"\n        \"exec_prefix=\\${prefix}\\n\"\n        \"libdir=\\${exec_prefix}/lib\\n\"\n        \"includedir=\\${prefix}/include\\n\"\n        \"\\n\"\n        \"Name: ${libname}\\n\"\n        \"Description: LCM types for ${PROJECT_NAME}\\n\"\n        \"Version: 0.0.0\\n\"\n        \"Requires: lcm\\n\"\n        \"Libs: -L\\${exec_prefix}/lib -l${libname}\\n\")\n\n    # mark the pkg-config file for installation to the lib/pkgconfig directory\n    install(FILES ${pc_fname} DESTINATION lib/pkgconfig)\n\n    lcmtypes_add_clean_dir(\"${PROJECT_SOURCE_DIR}/lcmtypes/c\")\nendfunction()\n\nfunction(lcmtypes_build_java)\n    lcmtypes_get_types(_lcmtypes)\n    list(LENGTH _lcmtypes _num_lcmtypes)\n    if(_num_lcmtypes EQUAL 0)\n        return()\n    endif()\n\n    find_package(Java)\n    if(JAVA_COMPILE STREQUAL JAVA_COMPILE-NOTFOUND OR\n       JAVA_ARCHIVE STREQUAL JAVA_ARCHIVE-NOTFOUND)\n        message(STATUS \"Not 
building Java LCM type bindings (Can't find Java)\")\n        return()\n    endif()\n\n    # generate Java bindings for LCM types\n    set(_lcmtypes_java_dir ${PROJECT_SOURCE_DIR}/lcmtypes/java)\n    set(auto_manage_files YES)\n\n    set(modewords JAVA_DEST_DIR)\n    set(curmode \"\")\n    foreach(word ${ARGV})\n        list(FIND modewords ${word} mode_index)\n        if(${mode_index} GREATER -1)\n            set(curmode ${word})\n        elseif(curmode STREQUAL JAVA_DEST_DIR)\n            set(_lcmtypes_java_dir \"${word}\")\n            set(auto_manage_files NO)\n            set(curmode \"\")\n        endif()\n    endforeach()\n\n    # blow away any existing auto-generated files?\n    if(auto_manage_files)\n        file(REMOVE_RECURSE ${_lcmtypes_java_dir})\n    endif()\n\n    # run lcm-gen now\n    execute_process(COMMAND mkdir -p ${_lcmtypes_java_dir})\n    lcmgen(--lazy -j ${_lcmtypes} --jpath ${_lcmtypes_java_dir})\n\n    # run lcm-gen at compile time\n    add_custom_target(lcmgen_java ALL\n        COMMAND sh -c '[ -d ${_lcmtypes_java_dir} ] || mkdir -p ${_lcmtypes_java_dir}'\n        COMMAND sh -c 'lcm-gen --lazy -j ${_lcmtypes} --jpath ${_lcmtypes_java_dir}')\n\n    if(NOT auto_manage_files)\n        return()\n    endif()\n\n    # get a list of all generated .java files\n    file(GLOB_RECURSE _lcmtypes_java_files ${_lcmtypes_java_dir}/*.java)\n\n    # where is lcm.jar?\n    execute_process(COMMAND pkg-config --variable=classpath lcm-java OUTPUT_VARIABLE LCM_JAR_FILE)\n    string(STRIP ${LCM_JAR_FILE} LCM_JAR_FILE)\n    set(LCMTYPES_JAR ${CMAKE_CURRENT_BINARY_DIR}/lcmtypes_${PROJECT_NAME}.jar)\n\n    set(java_classpath ${_lcmtypes_java_dir}:${LCM_JAR_FILE})\n\n    # search for lcmtypes_*.jar files in well-known places and add them to the\n    # classpath\n    foreach(pfx /usr /usr/local ${CMAKE_INSTALL_PREFIX})\n        file(GLOB_RECURSE jarfiles ${pfx}/share/java/lcmtypes_*.jar)\n        foreach(jarfile ${jarfiles})\n            set(java_classpath 
${java_classpath}:${jarfile})\n            #            message(\"found ${jarfile}\")\n        endforeach()\n    endforeach()\n\n    # convert the list of .java filenames to a list of .class filenames\n    foreach(javafile ${_lcmtypes_java_files})\n        string(REPLACE .java .class __tmp_class_fname ${javafile})\n        #        add_custom_command(OUTPUT ${__tmp_class_fname} COMMAND\n        #            ${JAVA_COMPILE} -source 6 -cp ${_lcmtypes_java_dir}:${lcm_jar} ${javafile} VERBATIM DEPENDS ${javafile})\n        list(APPEND _lcmtypes_class_files ${__tmp_class_fname})\n        unset(__tmp_class_fname)\n    endforeach()\n\n    # add a rule to build the .class files from from the .java files\n    add_custom_command(OUTPUT ${_lcmtypes_class_files} COMMAND \n        ${JAVA_COMPILE} -source 6 -cp ${java_classpath} ${_lcmtypes_java_files} \n        DEPENDS ${_lcmtypes_java_files} VERBATIM)\n\n    # add a rule to build a .jar file from the .class files\n    add_custom_command(OUTPUT lcmtypes_${PROJECT_NAME}.jar COMMAND\n        ${JAVA_ARCHIVE} cf ${LCMTYPES_JAR} -C ${_lcmtypes_java_dir} . 
DEPENDS ${_lcmtypes_class_files} VERBATIM)\n    add_custom_target(lcmtypes_${PROJECT_NAME}_jar ALL DEPENDS ${LCMTYPES_JAR})\n\n    add_dependencies(lcmtypes_${PROJECT_NAME}_jar lcmgen_java)\n\n    install(FILES ${LCMTYPES_JAR} DESTINATION share/java)\n    set(LCMTYPES_JAR ${LCMTYPES_JAR} PARENT_SCOPE)\n\n    lcmtypes_add_clean_dir(${_lcmtypes_java_dir})\nendfunction()\n\nfunction(lcmtypes_build_python)\n    lcmtypes_get_types(_lcmtypes)\n    list(LENGTH _lcmtypes _num_lcmtypes)\n    if(_num_lcmtypes EQUAL 0)\n        return()\n    endif()\n\n    find_package(PythonInterp)\n    if(NOT PYTHONINTERP_FOUND)\n        message(STATUS \"Not building Python LCM type bindings (Can't find Python)\")\n        return()\n    endif()\n\n    set(_lcmtypes_python_dir ${PROJECT_SOURCE_DIR}/lcmtypes/python)\n    set(auto_manage_files YES)\n\n    set(modewords PY_DEST_DIR)\n    set(curmode \"\")\n    foreach(word ${ARGV})\n        list(FIND modewords ${word} mode_index)\n        if(${mode_index} GREATER -1)\n            set(curmode ${word})\n        elseif(curmode STREQUAL PY_DEST_DIR)\n            set(_lcmtypes_python_dir \"${word}\")\n            set(auto_manage_files NO)\n            set(curmode \"\")\n        endif()\n    endforeach()\n\n    # purge existing files?\n    if(auto_manage_files)\n        file(REMOVE_RECURSE ${_lcmtypes_python_dir})\n    endif()\n\n    # generate Python bindings for LCM types\n    execute_process(COMMAND mkdir -p ${_lcmtypes_python_dir})\n    execute_process(COMMAND lcm-gen --lazy -p ${_lcmtypes} --ppath ${_lcmtypes_python_dir})\n\n    # run lcm-gen at compile time\n    add_custom_target(lcmgen_python ALL\n        COMMAND sh -c 'lcm-gen --lazy -p ${_lcmtypes} --ppath ${_lcmtypes_python_dir}')\n\n    if(NOT auto_manage_files)\n        return()\n    endif()\n\n    # get a list of all generated .py files\n    file(GLOB_RECURSE _lcmtypes_python_files RELATIVE ${_lcmtypes_python_dir} ${_lcmtypes_python_dir}/*.py )\n\n    # add rules for byte-compiling .py 
--> .pyc\n    foreach(py_file ${_lcmtypes_python_files})\n        set(full_py_fname ${_lcmtypes_python_dir}/${py_file})\n        add_custom_command(OUTPUT \"${full_py_fname}c\" COMMAND \n            ${PYTHON_EXECUTABLE} -m py_compile ${full_py_fname} DEPENDS ${full_py_fname} VERBATIM)\n        list(APPEND pyc_files \"${full_py_fname}c\")\n    endforeach()\n    add_custom_target(pyc_files ALL DEPENDS ${pyc_files})\n\n    # install python files\n    execute_process(COMMAND \n        ${PYTHON_EXECUTABLE} -c \"import sys; sys.stdout.write(sys.version[:3])\"\n        OUTPUT_VARIABLE pyversion)\n    install(DIRECTORY ${_lcmtypes_python_dir}/ DESTINATION lib/python${pyversion}/site-packages)\n\n    lcmtypes_add_clean_dir(${_lcmtypes_python_dir})\nendfunction()\n\nfunction(lcmtypes_install_types)\n    lcmtypes_get_types(_lcmtypes)\n    list(LENGTH _lcmtypes _num_lcmtypes)\n    if(_num_lcmtypes EQUAL 0)\n        return()\n    endif()\n\n    install(FILES ${_lcmtypes} DESTINATION share/lcmtypes)\nendfunction()\n\nmacro(lcmtypes_build)\n    find_package(PkgConfig REQUIRED)\n    pkg_check_modules(LCM REQUIRED lcm)\n    lcmtypes_build_c(${ARGV})\n    include_directories(${LCMTYPES_INCLUDE_DIRS})\n\n    lcmtypes_build_java(${ARGV})\n    lcmtypes_build_python(${ARGV})\n    lcmtypes_install_types()\nendmacro()\n"
  },
  {
    "path": "data_generation/lcmtypes/cmake/pods.cmake",
    "content": "# Macros to simplify compliance with the pods build policies.\n#\n# To enable the macros, add the following lines to CMakeLists.txt:\n#   set(POD_NAME <pod-name>)\n#   include(cmake/pods.cmake)\n#\n# If POD_NAME is not set, then the CMake source directory is used as POD_NAME\n#\n# Next, any of the following macros can be used.  See the individual macro\n# definitions in this file for individual documentation.\n#\n# C/C++\n#   pods_install_headers(...)\n#   pods_install_libraries(...)\n#   pods_install_executables(...)\n#   pods_install_pkg_config_file(...)\n#\n#   pods_use_pkg_config_packages(...)\n#\n# Python\n#   pods_install_python_packages(...)\n#   pods_install_python_script(...)\n#\n# Java\n#   None yet\n#\n# ----\n# File: pods.cmake\n# Distributed with pods version: 11.02.09\n\n# pods_install_headers(<header1.h> ... DESTINATION <subdir_name>)\n# \n# Install a (list) of header files.\n#\n# Header files will all be installed to include/<subdir_name>\n#\n# example:\n#   add_library(perception detector.h sensor.h)\n#   pods_install_headers(detector.h sensor.h DESTINATION perception)\n#\nfunction(pods_install_headers)\n    list(GET ARGV -2 checkword)\n    if(NOT checkword STREQUAL DESTINATION)\n        message(FATAL_ERROR \"pods_install_headers missing DESTINATION parameter\")\n    endif()\n\n    list(GET ARGV -1 dest_dir)\n    list(REMOVE_AT ARGV -1)\n    list(REMOVE_AT ARGV -1)\n    #copy the headers to the INCLUDE_OUTPUT_PATH (${CMAKE_BINARY_DIR}/include)\n    foreach(header ${ARGV})\n        get_filename_component(_header_name ${header} NAME)\n        configure_file(${header} ${INCLUDE_OUTPUT_PATH}/${dest_dir}/${_header_name} COPYONLY)\n\tendforeach(header)\n\t#mark them to be installed\n\tinstall(FILES ${ARGV} DESTINATION include/${dest_dir})\n\n\nendfunction(pods_install_headers)\n\n# pods_install_executables(<executable1> ...)\n#\n# Install a (list) of executables to bin/\nfunction(pods_install_executables)\n    install(TARGETS ${ARGV} 
RUNTIME DESTINATION bin)\nendfunction(pods_install_executables)\n\n# pods_install_libraries(<library1> ...)\n#\n# Install a (list) of libraries to lib/\nfunction(pods_install_libraries)\n    install(TARGETS ${ARGV} LIBRARY DESTINATION lib ARCHIVE DESTINATION lib)\nendfunction(pods_install_libraries)\n\n\n# pods_install_pkg_config_file(<package-name> \n#                              [VERSION <version>]\n#                              [DESCRIPTION <description>]\n#                              [CFLAGS <cflag> ...]\n#                              [LIBS <lflag> ...]\n#                              [REQUIRES <required-package-name> ...])\n# \n# Create and install a pkg-config .pc file.\n#\n# example:\n#    add_library(mylib mylib.c)\n#    pods_install_pkg_config_file(mylib LIBS -lmylib REQUIRES glib-2.0)\nfunction(pods_install_pkg_config_file)\n    list(GET ARGV 0 pc_name)\n    # TODO error check\n\n    set(pc_version 0.0.1)\n    set(pc_description ${pc_name})\n    set(pc_requires \"\")\n    set(pc_libs \"\")\n    set(pc_cflags \"\")\n    set(pc_fname \"${PKG_CONFIG_OUTPUT_PATH}/${pc_name}.pc\")\n    \n    set(modewords LIBS CFLAGS REQUIRES VERSION DESCRIPTION)\n    set(curmode \"\")\n\n    # parse function arguments and populate pkg-config parameters\n    list(REMOVE_AT ARGV 0)\n    foreach(word ${ARGV})\n        list(FIND modewords ${word} mode_index)\n        if(${mode_index} GREATER -1)\n            set(curmode ${word})\n        elseif(curmode STREQUAL LIBS)\n            set(pc_libs \"${pc_libs} ${word}\")\n        elseif(curmode STREQUAL CFLAGS)\n            set(pc_cflags \"${pc_cflags} ${word}\")\n        elseif(curmode STREQUAL REQUIRES)\n            set(pc_requires \"${pc_requires} ${word}\")\n        elseif(curmode STREQUAL VERSION)\n            set(pc_version ${word})\n            set(curmode \"\")\n        elseif(curmode STREQUAL DESCRIPTION)\n            set(pc_description \"${word}\")\n            set(curmode \"\")\n        else(${mode_index} GREATER -1)\n  
          message(\"WARNING incorrect use of pods_add_pkg_config (${word})\")\n            break()\n        endif(${mode_index} GREATER -1)\n    endforeach(word)\n\n    # write the .pc file out\n    file(WRITE ${pc_fname}\n        \"prefix=${CMAKE_INSTALL_PREFIX}\\n\"\n        \"exec_prefix=\\${prefix}\\n\"\n        \"libdir=\\${exec_prefix}/lib\\n\"\n        \"includedir=\\${prefix}/include\\n\"\n        \"\\n\"\n        \"Name: ${pc_name}\\n\"\n        \"Description: ${pc_description}\\n\"\n        \"Requires: ${pc_requires}\\n\"\n        \"Version: ${pc_version}\\n\"\n        \"Libs: -L\\${exec_prefix}/lib ${pc_libs}\\n\"\n        \"Cflags: ${pc_cflags}\\n\")\n\n    # mark the .pc file for installation to the lib/pkgconfig directory\n    install(FILES ${pc_fname} DESTINATION lib/pkgconfig)\n    \n    # find targets that this pkg-config file depends on\n    string(REPLACE \" \" \";\" split_lib ${pc_libs})\n    foreach(lib ${split_lib})\n        string(REGEX REPLACE \"^-l\" \"\" libname ${lib})\n        get_target_property(IS_TARGET ${libname} LOCATION)\n        if (NOT IS_TARGET STREQUAL \"IS_TARGET-NOTFOUND\")\n            set_property(GLOBAL APPEND PROPERTY \"PODS_PKG_CONFIG_TARGETS-${pc_name}\" ${libname})\n        endif() \n    endforeach()\n    \nendfunction(pods_install_pkg_config_file)\n\n\n# pods_install_python_script(<script_name> <python_module>)\n#\n# Create and install a script that invokes the python interpreter with a\n# specified module.\n#\n# A script will be installed to bin/<script_name>.  
The script simply\n# adds <install-prefix>/lib/pythonX.Y/site-packages to the python path, and\n# then invokes `python -m <python_module>`.\n#\n# example:\n#    pods_install_python_script(run-pdb pdb)\nfunction(pods_install_python_script script_name py_module)\n    find_package(PythonInterp REQUIRED)\n\n    # which python version?\n    execute_process(COMMAND \n        ${PYTHON_EXECUTABLE} -c \"import sys; sys.stdout.write(sys.version[:3])\"\n        OUTPUT_VARIABLE pyversion)\n\n    # where do we install .py files to?\n    set(python_install_dir \n        ${CMAKE_INSTALL_PREFIX}/lib/python${pyversion}/site-packages)\n\n    # write the script file\n    file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/${script_name} \"#!/bin/sh\\n\"\n        \"export PYTHONPATH=${python_install_dir}:\\${PYTHONPATH}\\n\"\n        \"exec ${PYTHON_EXECUTABLE} -m ${py_module} $*\\n\")\n\n    # install it...\n    install(PROGRAMS ${CMAKE_CURRENT_BINARY_DIR}/${script_name} DESTINATION bin)\nendfunction()\n\n# pods_install_python_packages(<src_dir>)\n#\n# Install python packages to lib/pythonX.Y/site-packages, where X.Y refers to\n# the current python version (e.g., 2.6)\n#\n# Recursively searches <src_dir> for .py files, byte-compiles them, and\n# installs them\nfunction(pods_install_python_packages py_src_dir)\n    find_package(PythonInterp REQUIRED)\n\n    # which python version?\n    execute_process(COMMAND \n        ${PYTHON_EXECUTABLE} -c \"import sys; sys.stdout.write(sys.version[:3])\"\n        OUTPUT_VARIABLE pyversion)\n\n    # where do we install .py files to?\n    set(python_install_dir \n        ${CMAKE_INSTALL_PREFIX}/lib/python${pyversion}/site-packages)\n\n    if(ARGC GREATER 1)\n        message(FATAL_ERROR \"NYI\")\n    else()\n        # get a list of all .py files\n        file(GLOB_RECURSE py_files RELATIVE ${py_src_dir} ${py_src_dir}/*.py)\n\n        # add rules for byte-compiling .py --> .pyc\n        foreach(py_file ${py_files})\n            get_filename_component(py_dirname 
${py_file} PATH)\n            add_custom_command(OUTPUT \"${py_src_dir}/${py_file}c\" \n                COMMAND ${PYTHON_EXECUTABLE} -m py_compile ${py_src_dir}/${py_file} \n                DEPENDS ${py_src_dir}/${py_file})\n            list(APPEND pyc_files \"${py_src_dir}/${py_file}c\")\n\n            # install python file and byte-compiled file\n            install(FILES ${py_src_dir}/${py_file} ${py_src_dir}/${py_file}c\n                DESTINATION \"${python_install_dir}/${py_dirname}\")\n#            message(\"${py_src_dir}/${py_file} -> ${python_install_dir}/${py_dirname}\")\n        endforeach()\n        string(REGEX REPLACE \"[^a-zA-Z0-9]\" \"_\" san_src_dir \"${py_src_dir}\")\n        add_custom_target(\"pyc_${san_src_dir}\" ALL DEPENDS ${pyc_files})\n    endif()\nendfunction()\n\n\n# pods_use_pkg_config_packages(<target> <package-name> ...)\n#\n# Convenience macro to get compiler and linker flags from pkg-config and apply them\n# to the specified target.\n#\n# Invokes `pkg-config --cflags-only-I <package-name> ...` and adds the result to the\n# include directories.\n#\n# Additionally, invokes `pkg-config --libs <package-name> ...` and adds the result to\n# the target's link flags (via target_link_libraries)\n#\n# example:\n#   add_executable(myprogram main.c)\n#   pods_use_pkg_config_packages(myprogram glib-2.0 opencv)\nmacro(pods_use_pkg_config_packages target)\n    if(${ARGC} LESS 2)\n        message(WARNING \"Useless invocation of pods_use_pkg_config_packages\")\n        return()\n    endif()\n    find_package(PkgConfig REQUIRED)\n    execute_process(COMMAND \n        ${PKG_CONFIG_EXECUTABLE} --cflags-only-I ${ARGN}\n        OUTPUT_VARIABLE _pods_pkg_include_flags)\n    string(STRIP ${_pods_pkg_include_flags} _pods_pkg_include_flags)\n    string(REPLACE \"-I\" \"\" _pods_pkg_include_flags \"${_pods_pkg_include_flags}\")\n\tseparate_arguments(_pods_pkg_include_flags)\n    #    message(\"include: ${_pods_pkg_include_flags}\")\n    
execute_process(COMMAND \n        ${PKG_CONFIG_EXECUTABLE} --libs ${ARGN}\n        OUTPUT_VARIABLE _pods_pkg_ldflags)\n    string(STRIP ${_pods_pkg_ldflags} _pods_pkg_ldflags)\n    #    message(\"ldflags: ${_pods_pkg_ldflags}\")\n    include_directories(${_pods_pkg_include_flags})\n    target_link_libraries(${target} ${_pods_pkg_ldflags})\n   \n    # make the target depend on libraries being installed by this source build\n    foreach(_pkg ${ARGN})\n        get_property(_has_dependencies GLOBAL PROPERTY \"PODS_PKG_CONFIG_TARGETS-${_pkg}\" SET)\n        if(_has_dependencies)\n            get_property(_dependencies GLOBAL PROPERTY \"PODS_PKG_CONFIG_TARGETS-${_pkg}\")\n            add_dependencies(${target} ${_dependencies})\n            #            message(\"Found dependencies for ${_pkg}: ${dependencies}\")\n        endif()\n        unset(_has_dependencies)\n        unset(_dependencies)\n    endforeach()\n\n    unset(_pods_pkg_include_flags)\n    unset(_pods_pkg_ldflags)\nendmacro()\n\n\n# pods_config_search_paths()\n#\n# Setup include, linker, and pkg-config paths according to the pods core\n# policy.  
This macro is automatically invoked, there is no need to do so\n# manually.\nmacro(pods_config_search_paths)\n    if(NOT DEFINED __pods_setup)\n\t\t#set where files should be output locally\n\t    set(LIBRARY_OUTPUT_PATH ${CMAKE_BINARY_DIR}/lib)\n\t    set(EXECUTABLE_OUTPUT_PATH ${CMAKE_BINARY_DIR}/bin)\n\t    set(INCLUDE_OUTPUT_PATH ${CMAKE_BINARY_DIR}/include)\n\t    set(PKG_CONFIG_OUTPUT_PATH ${CMAKE_BINARY_DIR}/lib/pkgconfig)\n\t\t\n\t\t#set where files should be installed to\n\t    set(LIBRARY_INSTALL_PATH ${CMAKE_INSTALL_PREFIX}/lib)\n\t    set(EXECUTABLE_INSTALL_PATH ${CMAKE_INSTALL_PREFIX}/bin)\n\t    set(INCLUDE_INSTALL_PATH ${CMAKE_INSTALL_PREFIX}/include)\n\t    set(PKG_CONFIG_INSTALL_PATH ${CMAKE_INSTALL_PREFIX}/lib/pkgconfig)\n\n\n        # add build/lib/pkgconfig to the pkg-config search path\n        set(ENV{PKG_CONFIG_PATH} ${PKG_CONFIG_INSTALL_PATH}:$ENV{PKG_CONFIG_PATH})\n        set(ENV{PKG_CONFIG_PATH} ${PKG_CONFIG_OUTPUT_PATH}:$ENV{PKG_CONFIG_PATH})\n\n        # add build/include to the compiler include path\n        include_directories(BEFORE ${INCLUDE_OUTPUT_PATH})\n        include_directories(${INCLUDE_INSTALL_PATH})\n\n        # add build/lib to the link path\n        link_directories(${LIBRARY_INSTALL_PATH})\n        link_directories(${LIBRARY_OUTPUT_PATH})\n\n        # abuse RPATH\n        if(${CMAKE_INSTALL_RPATH})\n            set(CMAKE_INSTALL_RPATH ${LIBRARY_INSTALL_PATH}:${CMAKE_INSTALL_RPATH})\n        else(${CMAKE_INSTALL_RPATH})\n            set(CMAKE_INSTALL_RPATH ${LIBRARY_INSTALL_PATH})\n        endif(${CMAKE_INSTALL_RPATH})\n\n        # for osx, which uses \"install name\" path rather than rpath\n        #set(CMAKE_INSTALL_NAME_DIR ${LIBRARY_OUTPUT_PATH})\n        set(CMAKE_INSTALL_NAME_DIR ${CMAKE_INSTALL_RPATH})\n        \n        # hack to force cmake always create install and clean targets \n        install(FILES DESTINATION)\n        add_custom_target(tmp)\n\n        set(__pods_setup true)\n    endif(NOT DEFINED 
__pods_setup)\nendmacro(pods_config_search_paths)\n\nmacro(enforce_out_of_source)\n    if(CMAKE_BINARY_DIR STREQUAL PROJECT_SOURCE_DIR)\n      message(FATAL_ERROR \n      \"\\n\n      Do not run cmake directly in the pod directory. \n      use the supplied Makefile instead!  You now need to\n      remove CMakeCache.txt and the CMakeFiles directory.\n\n      Then to build, simply type: \n       $ make\n      \")\n    endif()\nendmacro(enforce_out_of_source)\n\n#set the variable POD_NAME to the directory path, and set the cmake PROJECT_NAME\nif(NOT POD_NAME)\n    get_filename_component(POD_NAME ${CMAKE_SOURCE_DIR} NAME)\n    message(STATUS \"POD_NAME is not set... Defaulting to directory name: ${POD_NAME}\") \nendif(NOT POD_NAME)\nproject(${POD_NAME})\n\n#make sure we're running an out-of-source build\nenforce_out_of_source()\n\n#call the function to setup paths\npods_config_search_paths()\n"
  },
  {
    "path": "data_generation/lcmtypes/lcmtypes/c/lcmtypes/lcmtypes.h",
    "content": "#ifndef __lcmtypes_lcmtypes_h__\n#define __lcmtypes_lcmtypes_h__\n\n#include \"lcmtypes_trajectory_t.h\"\n#include \"lcmtypes_state_t.h\"\n#include \"lcmtypes_environment_t.h\"\n#include \"lcmtypes_vertex_t.h\"\n#include \"lcmtypes_graph_t.h\"\n#include \"lcmtypes_edge_t.h\"\n#include \"lcmtypes_region_3d_t.h\"\n\n#endif\n"
  },
  {
    "path": "data_generation/lcmtypes/lcmtypes/c/lcmtypes/lcmtypes_edge_t.c",
    "content": "// THIS IS AN AUTOMATICALLY GENERATED FILE.  DO NOT MODIFY\n// BY HAND!!\n//\n// Generated by lcm-gen\n\n#include <string.h>\n#include \"lcmtypes/lcmtypes_edge_t.h\"\n\nstatic int __lcmtypes_edge_t_hash_computed;\nstatic uint64_t __lcmtypes_edge_t_hash;\n\nuint64_t __lcmtypes_edge_t_hash_recursive(const __lcm_hash_ptr *p)\n{\n    const __lcm_hash_ptr *fp;\n    for (fp = p; fp != NULL; fp = fp->parent)\n        if (fp->v == __lcmtypes_edge_t_get_hash)\n            return 0;\n\n    __lcm_hash_ptr cp;\n    cp.parent =  p;\n    cp.v = (void*)__lcmtypes_edge_t_get_hash;\n    (void) cp;\n\n    uint64_t hash = (uint64_t)0x1fae492d71eedf94LL\n         + __lcmtypes_vertex_t_hash_recursive(&cp)\n         + __lcmtypes_vertex_t_hash_recursive(&cp)\n         + __lcmtypes_trajectory_t_hash_recursive(&cp)\n        ;\n\n    return (hash<<1) + ((hash>>63)&1);\n}\n\nint64_t __lcmtypes_edge_t_get_hash(void)\n{\n    if (!__lcmtypes_edge_t_hash_computed) {\n        __lcmtypes_edge_t_hash = (int64_t)__lcmtypes_edge_t_hash_recursive(NULL);\n        __lcmtypes_edge_t_hash_computed = 1;\n    }\n\n    return __lcmtypes_edge_t_hash;\n}\n\nint __lcmtypes_edge_t_encode_array(void *buf, int offset, int maxlen, const lcmtypes_edge_t *p, int elements)\n{\n    int pos = 0, element;\n    int thislen;\n\n    for (element = 0; element < elements; element++) {\n\n        thislen = __lcmtypes_vertex_t_encode_array(buf, offset + pos, maxlen - pos, &(p[element].vertex_src), 1);\n        if (thislen < 0) return thislen; else pos += thislen;\n\n        thislen = __lcmtypes_vertex_t_encode_array(buf, offset + pos, maxlen - pos, &(p[element].vertex_dst), 1);\n        if (thislen < 0) return thislen; else pos += thislen;\n\n        thislen = __lcmtypes_trajectory_t_encode_array(buf, offset + pos, maxlen - pos, &(p[element].trajectory), 1);\n        if (thislen < 0) return thislen; else pos += thislen;\n\n    }\n    return pos;\n}\n\nint lcmtypes_edge_t_encode(void *buf, int offset, int maxlen, 
const lcmtypes_edge_t *p)\n{\n    int pos = 0, thislen;\n    int64_t hash = __lcmtypes_edge_t_get_hash();\n\n    thislen = __int64_t_encode_array(buf, offset + pos, maxlen - pos, &hash, 1);\n    if (thislen < 0) return thislen; else pos += thislen;\n\n    thislen = __lcmtypes_edge_t_encode_array(buf, offset + pos, maxlen - pos, p, 1);\n    if (thislen < 0) return thislen; else pos += thislen;\n\n    return pos;\n}\n\nint __lcmtypes_edge_t_encoded_array_size(const lcmtypes_edge_t *p, int elements)\n{\n    int size = 0, element;\n    for (element = 0; element < elements; element++) {\n\n        size += __lcmtypes_vertex_t_encoded_array_size(&(p[element].vertex_src), 1);\n\n        size += __lcmtypes_vertex_t_encoded_array_size(&(p[element].vertex_dst), 1);\n\n        size += __lcmtypes_trajectory_t_encoded_array_size(&(p[element].trajectory), 1);\n\n    }\n    return size;\n}\n\nint lcmtypes_edge_t_encoded_size(const lcmtypes_edge_t *p)\n{\n    return 8 + __lcmtypes_edge_t_encoded_array_size(p, 1);\n}\n\nint __lcmtypes_edge_t_decode_array(const void *buf, int offset, int maxlen, lcmtypes_edge_t *p, int elements)\n{\n    int pos = 0, thislen, element;\n\n    for (element = 0; element < elements; element++) {\n\n        thislen = __lcmtypes_vertex_t_decode_array(buf, offset + pos, maxlen - pos, &(p[element].vertex_src), 1);\n        if (thislen < 0) return thislen; else pos += thislen;\n\n        thislen = __lcmtypes_vertex_t_decode_array(buf, offset + pos, maxlen - pos, &(p[element].vertex_dst), 1);\n        if (thislen < 0) return thislen; else pos += thislen;\n\n        thislen = __lcmtypes_trajectory_t_decode_array(buf, offset + pos, maxlen - pos, &(p[element].trajectory), 1);\n        if (thislen < 0) return thislen; else pos += thislen;\n\n    }\n    return pos;\n}\n\nint __lcmtypes_edge_t_decode_array_cleanup(lcmtypes_edge_t *p, int elements)\n{\n    int element;\n    for (element = 0; element < elements; element++) {\n\n        
__lcmtypes_vertex_t_decode_array_cleanup(&(p[element].vertex_src), 1);\n\n        __lcmtypes_vertex_t_decode_array_cleanup(&(p[element].vertex_dst), 1);\n\n        __lcmtypes_trajectory_t_decode_array_cleanup(&(p[element].trajectory), 1);\n\n    }\n    return 0;\n}\n\nint lcmtypes_edge_t_decode(const void *buf, int offset, int maxlen, lcmtypes_edge_t *p)\n{\n    int pos = 0, thislen;\n    int64_t hash = __lcmtypes_edge_t_get_hash();\n\n    int64_t this_hash;\n    thislen = __int64_t_decode_array(buf, offset + pos, maxlen - pos, &this_hash, 1);\n    if (thislen < 0) return thislen; else pos += thislen;\n    if (this_hash != hash) return -1;\n\n    thislen = __lcmtypes_edge_t_decode_array(buf, offset + pos, maxlen - pos, p, 1);\n    if (thislen < 0) return thislen; else pos += thislen;\n\n    return pos;\n}\n\nint lcmtypes_edge_t_decode_cleanup(lcmtypes_edge_t *p)\n{\n    return __lcmtypes_edge_t_decode_array_cleanup(p, 1);\n}\n\nint __lcmtypes_edge_t_clone_array(const lcmtypes_edge_t *p, lcmtypes_edge_t *q, int elements)\n{\n    int element;\n    for (element = 0; element < elements; element++) {\n\n        __lcmtypes_vertex_t_clone_array(&(p[element].vertex_src), &(q[element].vertex_src), 1);\n\n        __lcmtypes_vertex_t_clone_array(&(p[element].vertex_dst), &(q[element].vertex_dst), 1);\n\n        __lcmtypes_trajectory_t_clone_array(&(p[element].trajectory), &(q[element].trajectory), 1);\n\n    }\n    return 0;\n}\n\nlcmtypes_edge_t *lcmtypes_edge_t_copy(const lcmtypes_edge_t *p)\n{\n    lcmtypes_edge_t *q = (lcmtypes_edge_t*) malloc(sizeof(lcmtypes_edge_t));\n    __lcmtypes_edge_t_clone_array(p, q, 1);\n    return q;\n}\n\nvoid lcmtypes_edge_t_destroy(lcmtypes_edge_t *p)\n{\n    __lcmtypes_edge_t_decode_array_cleanup(p, 1);\n    free(p);\n}\n\nint lcmtypes_edge_t_publish(lcm_t *lc, const char *channel, const lcmtypes_edge_t *p)\n{\n      int max_data_size = lcmtypes_edge_t_encoded_size (p);\n      uint8_t *buf = (uint8_t*) malloc (max_data_size);\n      if 
(!buf) return -1;\n      int data_size = lcmtypes_edge_t_encode (buf, 0, max_data_size, p);\n      if (data_size < 0) {\n          free (buf);\n          return data_size;\n      }\n      int status = lcm_publish (lc, channel, buf, data_size);\n      free (buf);\n      return status;\n}\n\nstruct _lcmtypes_edge_t_subscription_t {\n    lcmtypes_edge_t_handler_t user_handler;\n    void *userdata;\n    lcm_subscription_t *lc_h;\n};\nstatic\nvoid lcmtypes_edge_t_handler_stub (const lcm_recv_buf_t *rbuf,\n                            const char *channel, void *userdata)\n{\n    int status;\n    lcmtypes_edge_t p;\n    memset(&p, 0, sizeof(lcmtypes_edge_t));\n    status = lcmtypes_edge_t_decode (rbuf->data, 0, rbuf->data_size, &p);\n    if (status < 0) {\n        fprintf (stderr, \"error %d decoding lcmtypes_edge_t!!!\\n\", status);\n        return;\n    }\n\n    lcmtypes_edge_t_subscription_t *h = (lcmtypes_edge_t_subscription_t*) userdata;\n    h->user_handler (rbuf, channel, &p, h->userdata);\n\n    lcmtypes_edge_t_decode_cleanup (&p);\n}\n\nlcmtypes_edge_t_subscription_t* lcmtypes_edge_t_subscribe (lcm_t *lcm,\n                    const char *channel,\n                    lcmtypes_edge_t_handler_t f, void *userdata)\n{\n    lcmtypes_edge_t_subscription_t *n = (lcmtypes_edge_t_subscription_t*)\n                       malloc(sizeof(lcmtypes_edge_t_subscription_t));\n    n->user_handler = f;\n    n->userdata = userdata;\n    n->lc_h = lcm_subscribe (lcm, channel,\n                                 lcmtypes_edge_t_handler_stub, n);\n    if (n->lc_h == NULL) {\n        fprintf (stderr,\"couldn't reg lcmtypes_edge_t LCM handler!\\n\");\n        free (n);\n        return NULL;\n    }\n    return n;\n}\n\nint lcmtypes_edge_t_subscription_set_queue_capacity (lcmtypes_edge_t_subscription_t* subs,\n                              int num_messages)\n{\n    return lcm_subscription_set_queue_capacity (subs->lc_h, num_messages);\n}\n\nint lcmtypes_edge_t_unsubscribe(lcm_t *lcm, 
lcmtypes_edge_t_subscription_t* hid)\n{\n    int status = lcm_unsubscribe (lcm, hid->lc_h);\n    if (0 != status) {\n        fprintf(stderr,\n           \"couldn't unsubscribe lcmtypes_edge_t_handler %p!\\n\", hid);\n        return -1;\n    }\n    free (hid);\n    return 0;\n}\n\n"
  },
  {
    "path": "data_generation/lcmtypes/lcmtypes/c/lcmtypes/lcmtypes_edge_t.h",
    "content": "// THIS IS AN AUTOMATICALLY GENERATED FILE.  DO NOT MODIFY\n// BY HAND!!\n//\n// Generated by lcm-gen\n\n#include <stdint.h>\n#include <stdlib.h>\n#include <lcm/lcm_coretypes.h>\n#include <lcm/lcm.h>\n\n#ifndef _lcmtypes_edge_t_h\n#define _lcmtypes_edge_t_h\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n#include \"lcmtypes/lcmtypes_vertex_t.h\"\n#include \"lcmtypes/lcmtypes_vertex_t.h\"\n#include \"lcmtypes/lcmtypes_trajectory_t.h\"\ntypedef struct _lcmtypes_edge_t lcmtypes_edge_t;\nstruct _lcmtypes_edge_t\n{\n    lcmtypes_vertex_t vertex_src;\n    lcmtypes_vertex_t vertex_dst;\n    lcmtypes_trajectory_t trajectory;\n};\n\n/**\n * Create a deep copy of a lcmtypes_edge_t.\n * When no longer needed, destroy it with lcmtypes_edge_t_destroy()\n */\nlcmtypes_edge_t* lcmtypes_edge_t_copy(const lcmtypes_edge_t* to_copy);\n\n/**\n * Destroy an instance of lcmtypes_edge_t created by lcmtypes_edge_t_copy()\n */\nvoid lcmtypes_edge_t_destroy(lcmtypes_edge_t* to_destroy);\n\n/**\n * Identifies a single subscription.  This is an opaque data type.\n */\ntypedef struct _lcmtypes_edge_t_subscription_t lcmtypes_edge_t_subscription_t;\n\n/**\n * Prototype for a callback function invoked when a message of type\n * lcmtypes_edge_t is received.\n */\ntypedef void(*lcmtypes_edge_t_handler_t)(const lcm_recv_buf_t *rbuf,\n             const char *channel, const lcmtypes_edge_t *msg, void *userdata);\n\n/**\n * Publish a message of type lcmtypes_edge_t using LCM.\n *\n * @param lcm The LCM instance to publish with.\n * @param channel The channel to publish on.\n * @param msg The message to publish.\n * @return 0 on success, <0 on error.  
Success means LCM has transferred\n * responsibility of the message data to the OS.\n */\nint lcmtypes_edge_t_publish(lcm_t *lcm, const char *channel, const lcmtypes_edge_t *msg);\n\n/**\n * Subscribe to messages of type lcmtypes_edge_t using LCM.\n *\n * @param lcm The LCM instance to subscribe with.\n * @param channel The channel to subscribe to.\n * @param handler The callback function invoked by LCM when a message is received.\n *                This function is invoked by LCM during calls to lcm_handle() and\n *                lcm_handle_timeout().\n * @param userdata An opaque pointer passed to @p handler when it is invoked.\n * @return 0 on success, <0 if an error occured\n */\nlcmtypes_edge_t_subscription_t* lcmtypes_edge_t_subscribe(lcm_t *lcm, const char *channel, lcmtypes_edge_t_handler_t handler, void *userdata);\n\n/**\n * Removes and destroys a subscription created by lcmtypes_edge_t_subscribe()\n */\nint lcmtypes_edge_t_unsubscribe(lcm_t *lcm, lcmtypes_edge_t_subscription_t* hid);\n\n/**\n * Sets the queue capacity for a subscription.\n * Some LCM providers (e.g., the default multicast provider) are implemented\n * using a background receive thread that constantly revceives messages from\n * the network.  As these messages are received, they are buffered on\n * per-subscription queues until dispatched by lcm_handle().  This function\n * how many messages are queued before dropping messages.\n *\n * @param subs the subscription to modify.\n * @param num_messages The maximum number of messages to queue\n *  on the subscription.\n * @return 0 on success, <0 if an error occured\n */\nint lcmtypes_edge_t_subscription_set_queue_capacity(lcmtypes_edge_t_subscription_t* subs,\n                              int num_messages);\n\n/**\n * Encode a message of type lcmtypes_edge_t into binary form.\n *\n * @param buf The output buffer.\n * @param offset Encoding starts at this byte offset into @p buf.\n * @param maxlen Maximum number of bytes to write.  
This should generally\n *               be equal to lcmtypes_edge_t_encoded_size().\n * @param msg The message to encode.\n * @return The number of bytes encoded, or <0 if an error occured.\n */\nint lcmtypes_edge_t_encode(void *buf, int offset, int maxlen, const lcmtypes_edge_t *p);\n\n/**\n * Decode a message of type lcmtypes_edge_t from binary form.\n * When decoding messages containing strings or variable-length arrays, this\n * function may allocate memory.  When finished with the decoded message,\n * release allocated resources with lcmtypes_edge_t_decode_cleanup().\n *\n * @param buf The buffer containing the encoded message\n * @param offset The byte offset into @p buf where the encoded message starts.\n * @param maxlen The maximum number of bytes to read while decoding.\n * @param msg Output parameter where the decoded message is stored\n * @return The number of bytes decoded, or <0 if an error occured.\n */\nint lcmtypes_edge_t_decode(const void *buf, int offset, int maxlen, lcmtypes_edge_t *msg);\n\n/**\n * Release resources allocated by lcmtypes_edge_t_decode()\n * @return 0\n */\nint lcmtypes_edge_t_decode_cleanup(lcmtypes_edge_t *p);\n\n/**\n * Check how many bytes are required to encode a message of type lcmtypes_edge_t\n */\nint lcmtypes_edge_t_encoded_size(const lcmtypes_edge_t *p);\n\n// LCM support functions. 
Users should not call these\nint64_t __lcmtypes_edge_t_get_hash(void);\nuint64_t __lcmtypes_edge_t_hash_recursive(const __lcm_hash_ptr *p);\nint     __lcmtypes_edge_t_encode_array(void *buf, int offset, int maxlen, const lcmtypes_edge_t *p, int elements);\nint     __lcmtypes_edge_t_decode_array(const void *buf, int offset, int maxlen, lcmtypes_edge_t *p, int elements);\nint     __lcmtypes_edge_t_decode_array_cleanup(lcmtypes_edge_t *p, int elements);\nint     __lcmtypes_edge_t_encoded_array_size(const lcmtypes_edge_t *p, int elements);\nint     __lcmtypes_edge_t_clone_array(const lcmtypes_edge_t *p, lcmtypes_edge_t *q, int elements);\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif\n"
  },
  {
    "path": "data_generation/lcmtypes/lcmtypes/c/lcmtypes/lcmtypes_environment_t.c",
    "content": "// THIS IS AN AUTOMATICALLY GENERATED FILE.  DO NOT MODIFY\n// BY HAND!!\n//\n// Generated by lcm-gen\n\n#include <string.h>\n#include \"lcmtypes/lcmtypes_environment_t.h\"\n\nstatic int __lcmtypes_environment_t_hash_computed;\nstatic uint64_t __lcmtypes_environment_t_hash;\n\nuint64_t __lcmtypes_environment_t_hash_recursive(const __lcm_hash_ptr *p)\n{\n    const __lcm_hash_ptr *fp;\n    for (fp = p; fp != NULL; fp = fp->parent)\n        if (fp->v == __lcmtypes_environment_t_get_hash)\n            return 0;\n\n    __lcm_hash_ptr cp;\n    cp.parent =  p;\n    cp.v = (void*)__lcmtypes_environment_t_get_hash;\n    (void) cp;\n\n    uint64_t hash = (uint64_t)0x8caabc2a2ba0f9c7LL\n         + __lcmtypes_region_3d_t_hash_recursive(&cp)\n         + __lcmtypes_region_3d_t_hash_recursive(&cp)\n         + __int32_t_hash_recursive(&cp)\n         + __lcmtypes_region_3d_t_hash_recursive(&cp)\n        ;\n\n    return (hash<<1) + ((hash>>63)&1);\n}\n\nint64_t __lcmtypes_environment_t_get_hash(void)\n{\n    if (!__lcmtypes_environment_t_hash_computed) {\n        __lcmtypes_environment_t_hash = (int64_t)__lcmtypes_environment_t_hash_recursive(NULL);\n        __lcmtypes_environment_t_hash_computed = 1;\n    }\n\n    return __lcmtypes_environment_t_hash;\n}\n\nint __lcmtypes_environment_t_encode_array(void *buf, int offset, int maxlen, const lcmtypes_environment_t *p, int elements)\n{\n    int pos = 0, element;\n    int thislen;\n\n    for (element = 0; element < elements; element++) {\n\n        thislen = __lcmtypes_region_3d_t_encode_array(buf, offset + pos, maxlen - pos, &(p[element].operating), 1);\n        if (thislen < 0) return thislen; else pos += thislen;\n\n        thislen = __lcmtypes_region_3d_t_encode_array(buf, offset + pos, maxlen - pos, &(p[element].goal), 1);\n        if (thislen < 0) return thislen; else pos += thislen;\n\n        thislen = __int32_t_encode_array(buf, offset + pos, maxlen - pos, &(p[element].num_obstacles), 1);\n        if (thislen < 
0) return thislen; else pos += thislen;\n\n        thislen = __lcmtypes_region_3d_t_encode_array(buf, offset + pos, maxlen - pos, p[element].obstacles, p[element].num_obstacles);\n        if (thislen < 0) return thislen; else pos += thislen;\n\n    }\n    return pos;\n}\n\nint lcmtypes_environment_t_encode(void *buf, int offset, int maxlen, const lcmtypes_environment_t *p)\n{\n    int pos = 0, thislen;\n    int64_t hash = __lcmtypes_environment_t_get_hash();\n\n    thislen = __int64_t_encode_array(buf, offset + pos, maxlen - pos, &hash, 1);\n    if (thislen < 0) return thislen; else pos += thislen;\n\n    thislen = __lcmtypes_environment_t_encode_array(buf, offset + pos, maxlen - pos, p, 1);\n    if (thislen < 0) return thislen; else pos += thislen;\n\n    return pos;\n}\n\nint __lcmtypes_environment_t_encoded_array_size(const lcmtypes_environment_t *p, int elements)\n{\n    int size = 0, element;\n    for (element = 0; element < elements; element++) {\n\n        size += __lcmtypes_region_3d_t_encoded_array_size(&(p[element].operating), 1);\n\n        size += __lcmtypes_region_3d_t_encoded_array_size(&(p[element].goal), 1);\n\n        size += __int32_t_encoded_array_size(&(p[element].num_obstacles), 1);\n\n        size += __lcmtypes_region_3d_t_encoded_array_size(p[element].obstacles, p[element].num_obstacles);\n\n    }\n    return size;\n}\n\nint lcmtypes_environment_t_encoded_size(const lcmtypes_environment_t *p)\n{\n    return 8 + __lcmtypes_environment_t_encoded_array_size(p, 1);\n}\n\nint __lcmtypes_environment_t_decode_array(const void *buf, int offset, int maxlen, lcmtypes_environment_t *p, int elements)\n{\n    int pos = 0, thislen, element;\n\n    for (element = 0; element < elements; element++) {\n\n        thislen = __lcmtypes_region_3d_t_decode_array(buf, offset + pos, maxlen - pos, &(p[element].operating), 1);\n        if (thislen < 0) return thislen; else pos += thislen;\n\n        thislen = __lcmtypes_region_3d_t_decode_array(buf, offset + pos, 
maxlen - pos, &(p[element].goal), 1);\n        if (thislen < 0) return thislen; else pos += thislen;\n\n        thislen = __int32_t_decode_array(buf, offset + pos, maxlen - pos, &(p[element].num_obstacles), 1);\n        if (thislen < 0) return thislen; else pos += thislen;\n\n        p[element].obstacles = (lcmtypes_region_3d_t*) lcm_malloc(sizeof(lcmtypes_region_3d_t) * p[element].num_obstacles);\n        thislen = __lcmtypes_region_3d_t_decode_array(buf, offset + pos, maxlen - pos, p[element].obstacles, p[element].num_obstacles);\n        if (thislen < 0) return thislen; else pos += thislen;\n\n    }\n    return pos;\n}\n\nint __lcmtypes_environment_t_decode_array_cleanup(lcmtypes_environment_t *p, int elements)\n{\n    int element;\n    for (element = 0; element < elements; element++) {\n\n        __lcmtypes_region_3d_t_decode_array_cleanup(&(p[element].operating), 1);\n\n        __lcmtypes_region_3d_t_decode_array_cleanup(&(p[element].goal), 1);\n\n        __int32_t_decode_array_cleanup(&(p[element].num_obstacles), 1);\n\n        __lcmtypes_region_3d_t_decode_array_cleanup(p[element].obstacles, p[element].num_obstacles);\n        if (p[element].obstacles) free(p[element].obstacles);\n\n    }\n    return 0;\n}\n\nint lcmtypes_environment_t_decode(const void *buf, int offset, int maxlen, lcmtypes_environment_t *p)\n{\n    int pos = 0, thislen;\n    int64_t hash = __lcmtypes_environment_t_get_hash();\n\n    int64_t this_hash;\n    thislen = __int64_t_decode_array(buf, offset + pos, maxlen - pos, &this_hash, 1);\n    if (thislen < 0) return thislen; else pos += thislen;\n    if (this_hash != hash) return -1;\n\n    thislen = __lcmtypes_environment_t_decode_array(buf, offset + pos, maxlen - pos, p, 1);\n    if (thislen < 0) return thislen; else pos += thislen;\n\n    return pos;\n}\n\nint lcmtypes_environment_t_decode_cleanup(lcmtypes_environment_t *p)\n{\n    return __lcmtypes_environment_t_decode_array_cleanup(p, 1);\n}\n\nint 
__lcmtypes_environment_t_clone_array(const lcmtypes_environment_t *p, lcmtypes_environment_t *q, int elements)\n{\n    int element;\n    for (element = 0; element < elements; element++) {\n\n        __lcmtypes_region_3d_t_clone_array(&(p[element].operating), &(q[element].operating), 1);\n\n        __lcmtypes_region_3d_t_clone_array(&(p[element].goal), &(q[element].goal), 1);\n\n        __int32_t_clone_array(&(p[element].num_obstacles), &(q[element].num_obstacles), 1);\n\n        q[element].obstacles = (lcmtypes_region_3d_t*) lcm_malloc(sizeof(lcmtypes_region_3d_t) * q[element].num_obstacles);\n        __lcmtypes_region_3d_t_clone_array(p[element].obstacles, q[element].obstacles, p[element].num_obstacles);\n\n    }\n    return 0;\n}\n\nlcmtypes_environment_t *lcmtypes_environment_t_copy(const lcmtypes_environment_t *p)\n{\n    lcmtypes_environment_t *q = (lcmtypes_environment_t*) malloc(sizeof(lcmtypes_environment_t));\n    __lcmtypes_environment_t_clone_array(p, q, 1);\n    return q;\n}\n\nvoid lcmtypes_environment_t_destroy(lcmtypes_environment_t *p)\n{\n    __lcmtypes_environment_t_decode_array_cleanup(p, 1);\n    free(p);\n}\n\nint lcmtypes_environment_t_publish(lcm_t *lc, const char *channel, const lcmtypes_environment_t *p)\n{\n      int max_data_size = lcmtypes_environment_t_encoded_size (p);\n      uint8_t *buf = (uint8_t*) malloc (max_data_size);\n      if (!buf) return -1;\n      int data_size = lcmtypes_environment_t_encode (buf, 0, max_data_size, p);\n      if (data_size < 0) {\n          free (buf);\n          return data_size;\n      }\n      int status = lcm_publish (lc, channel, buf, data_size);\n      free (buf);\n      return status;\n}\n\nstruct _lcmtypes_environment_t_subscription_t {\n    lcmtypes_environment_t_handler_t user_handler;\n    void *userdata;\n    lcm_subscription_t *lc_h;\n};\nstatic\nvoid lcmtypes_environment_t_handler_stub (const lcm_recv_buf_t *rbuf,\n                            const char *channel, void *userdata)\n{\n    int 
status;\n    lcmtypes_environment_t p;\n    memset(&p, 0, sizeof(lcmtypes_environment_t));\n    status = lcmtypes_environment_t_decode (rbuf->data, 0, rbuf->data_size, &p);\n    if (status < 0) {\n        fprintf (stderr, \"error %d decoding lcmtypes_environment_t!!!\\n\", status);\n        return;\n    }\n\n    lcmtypes_environment_t_subscription_t *h = (lcmtypes_environment_t_subscription_t*) userdata;\n    h->user_handler (rbuf, channel, &p, h->userdata);\n\n    lcmtypes_environment_t_decode_cleanup (&p);\n}\n\nlcmtypes_environment_t_subscription_t* lcmtypes_environment_t_subscribe (lcm_t *lcm,\n                    const char *channel,\n                    lcmtypes_environment_t_handler_t f, void *userdata)\n{\n    lcmtypes_environment_t_subscription_t *n = (lcmtypes_environment_t_subscription_t*)\n                       malloc(sizeof(lcmtypes_environment_t_subscription_t));\n    n->user_handler = f;\n    n->userdata = userdata;\n    n->lc_h = lcm_subscribe (lcm, channel,\n                                 lcmtypes_environment_t_handler_stub, n);\n    if (n->lc_h == NULL) {\n        fprintf (stderr,\"couldn't reg lcmtypes_environment_t LCM handler!\\n\");\n        free (n);\n        return NULL;\n    }\n    return n;\n}\n\nint lcmtypes_environment_t_subscription_set_queue_capacity (lcmtypes_environment_t_subscription_t* subs,\n                              int num_messages)\n{\n    return lcm_subscription_set_queue_capacity (subs->lc_h, num_messages);\n}\n\nint lcmtypes_environment_t_unsubscribe(lcm_t *lcm, lcmtypes_environment_t_subscription_t* hid)\n{\n    int status = lcm_unsubscribe (lcm, hid->lc_h);\n    if (0 != status) {\n        fprintf(stderr,\n           \"couldn't unsubscribe lcmtypes_environment_t_handler %p!\\n\", hid);\n        return -1;\n    }\n    free (hid);\n    return 0;\n}\n\n"
  },
  {
    "path": "data_generation/lcmtypes/lcmtypes/c/lcmtypes/lcmtypes_environment_t.h",
    "content": "// THIS IS AN AUTOMATICALLY GENERATED FILE.  DO NOT MODIFY\n// BY HAND!!\n//\n// Generated by lcm-gen\n\n#include <stdint.h>\n#include <stdlib.h>\n#include <lcm/lcm_coretypes.h>\n#include <lcm/lcm.h>\n\n#ifndef _lcmtypes_environment_t_h\n#define _lcmtypes_environment_t_h\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n#include \"lcmtypes/lcmtypes_region_3d_t.h\"\n#include \"lcmtypes/lcmtypes_region_3d_t.h\"\n#include \"lcmtypes/lcmtypes_region_3d_t.h\"\ntypedef struct _lcmtypes_environment_t lcmtypes_environment_t;\nstruct _lcmtypes_environment_t\n{\n    lcmtypes_region_3d_t operating;\n    lcmtypes_region_3d_t goal;\n    int32_t    num_obstacles;\n    lcmtypes_region_3d_t *obstacles;\n};\n\n/**\n * Create a deep copy of a lcmtypes_environment_t.\n * When no longer needed, destroy it with lcmtypes_environment_t_destroy()\n */\nlcmtypes_environment_t* lcmtypes_environment_t_copy(const lcmtypes_environment_t* to_copy);\n\n/**\n * Destroy an instance of lcmtypes_environment_t created by lcmtypes_environment_t_copy()\n */\nvoid lcmtypes_environment_t_destroy(lcmtypes_environment_t* to_destroy);\n\n/**\n * Identifies a single subscription.  This is an opaque data type.\n */\ntypedef struct _lcmtypes_environment_t_subscription_t lcmtypes_environment_t_subscription_t;\n\n/**\n * Prototype for a callback function invoked when a message of type\n * lcmtypes_environment_t is received.\n */\ntypedef void(*lcmtypes_environment_t_handler_t)(const lcm_recv_buf_t *rbuf,\n             const char *channel, const lcmtypes_environment_t *msg, void *userdata);\n\n/**\n * Publish a message of type lcmtypes_environment_t using LCM.\n *\n * @param lcm The LCM instance to publish with.\n * @param channel The channel to publish on.\n * @param msg The message to publish.\n * @return 0 on success, <0 on error.  
Success means LCM has transferred\n * responsibility of the message data to the OS.\n */\nint lcmtypes_environment_t_publish(lcm_t *lcm, const char *channel, const lcmtypes_environment_t *msg);\n\n/**\n * Subscribe to messages of type lcmtypes_environment_t using LCM.\n *\n * @param lcm The LCM instance to subscribe with.\n * @param channel The channel to subscribe to.\n * @param handler The callback function invoked by LCM when a message is received.\n *                This function is invoked by LCM during calls to lcm_handle() and\n *                lcm_handle_timeout().\n * @param userdata An opaque pointer passed to @p handler when it is invoked.\n * @return 0 on success, <0 if an error occured\n */\nlcmtypes_environment_t_subscription_t* lcmtypes_environment_t_subscribe(lcm_t *lcm, const char *channel, lcmtypes_environment_t_handler_t handler, void *userdata);\n\n/**\n * Removes and destroys a subscription created by lcmtypes_environment_t_subscribe()\n */\nint lcmtypes_environment_t_unsubscribe(lcm_t *lcm, lcmtypes_environment_t_subscription_t* hid);\n\n/**\n * Sets the queue capacity for a subscription.\n * Some LCM providers (e.g., the default multicast provider) are implemented\n * using a background receive thread that constantly revceives messages from\n * the network.  As these messages are received, they are buffered on\n * per-subscription queues until dispatched by lcm_handle().  
This function\n * how many messages are queued before dropping messages.\n *\n * @param subs the subscription to modify.\n * @param num_messages The maximum number of messages to queue\n *  on the subscription.\n * @return 0 on success, <0 if an error occured\n */\nint lcmtypes_environment_t_subscription_set_queue_capacity(lcmtypes_environment_t_subscription_t* subs,\n                              int num_messages);\n\n/**\n * Encode a message of type lcmtypes_environment_t into binary form.\n *\n * @param buf The output buffer.\n * @param offset Encoding starts at this byte offset into @p buf.\n * @param maxlen Maximum number of bytes to write.  This should generally\n *               be equal to lcmtypes_environment_t_encoded_size().\n * @param msg The message to encode.\n * @return The number of bytes encoded, or <0 if an error occured.\n */\nint lcmtypes_environment_t_encode(void *buf, int offset, int maxlen, const lcmtypes_environment_t *p);\n\n/**\n * Decode a message of type lcmtypes_environment_t from binary form.\n * When decoding messages containing strings or variable-length arrays, this\n * function may allocate memory.  
When finished with the decoded message,\n * release allocated resources with lcmtypes_environment_t_decode_cleanup().\n *\n * @param buf The buffer containing the encoded message\n * @param offset The byte offset into @p buf where the encoded message starts.\n * @param maxlen The maximum number of bytes to read while decoding.\n * @param msg Output parameter where the decoded message is stored\n * @return The number of bytes decoded, or <0 if an error occured.\n */\nint lcmtypes_environment_t_decode(const void *buf, int offset, int maxlen, lcmtypes_environment_t *msg);\n\n/**\n * Release resources allocated by lcmtypes_environment_t_decode()\n * @return 0\n */\nint lcmtypes_environment_t_decode_cleanup(lcmtypes_environment_t *p);\n\n/**\n * Check how many bytes are required to encode a message of type lcmtypes_environment_t\n */\nint lcmtypes_environment_t_encoded_size(const lcmtypes_environment_t *p);\n\n// LCM support functions. Users should not call these\nint64_t __lcmtypes_environment_t_get_hash(void);\nuint64_t __lcmtypes_environment_t_hash_recursive(const __lcm_hash_ptr *p);\nint     __lcmtypes_environment_t_encode_array(void *buf, int offset, int maxlen, const lcmtypes_environment_t *p, int elements);\nint     __lcmtypes_environment_t_decode_array(const void *buf, int offset, int maxlen, lcmtypes_environment_t *p, int elements);\nint     __lcmtypes_environment_t_decode_array_cleanup(lcmtypes_environment_t *p, int elements);\nint     __lcmtypes_environment_t_encoded_array_size(const lcmtypes_environment_t *p, int elements);\nint     __lcmtypes_environment_t_clone_array(const lcmtypes_environment_t *p, lcmtypes_environment_t *q, int elements);\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif\n"
  },
  {
    "path": "data_generation/lcmtypes/lcmtypes/c/lcmtypes/lcmtypes_graph_t.c",
    "content": "// THIS IS AN AUTOMATICALLY GENERATED FILE.  DO NOT MODIFY\n// BY HAND!!\n//\n// Generated by lcm-gen\n\n#include <string.h>\n#include \"lcmtypes/lcmtypes_graph_t.h\"\n\nstatic int __lcmtypes_graph_t_hash_computed;\nstatic uint64_t __lcmtypes_graph_t_hash;\n\nuint64_t __lcmtypes_graph_t_hash_recursive(const __lcm_hash_ptr *p)\n{\n    const __lcm_hash_ptr *fp;\n    for (fp = p; fp != NULL; fp = fp->parent)\n        if (fp->v == __lcmtypes_graph_t_get_hash)\n            return 0;\n\n    __lcm_hash_ptr cp;\n    cp.parent =  p;\n    cp.v = (void*)__lcmtypes_graph_t_get_hash;\n    (void) cp;\n\n    uint64_t hash = (uint64_t)0x49189ad7b639b453LL\n         + __int32_t_hash_recursive(&cp)\n         + __lcmtypes_vertex_t_hash_recursive(&cp)\n         + __int32_t_hash_recursive(&cp)\n         + __lcmtypes_edge_t_hash_recursive(&cp)\n        ;\n\n    return (hash<<1) + ((hash>>63)&1);\n}\n\nint64_t __lcmtypes_graph_t_get_hash(void)\n{\n    if (!__lcmtypes_graph_t_hash_computed) {\n        __lcmtypes_graph_t_hash = (int64_t)__lcmtypes_graph_t_hash_recursive(NULL);\n        __lcmtypes_graph_t_hash_computed = 1;\n    }\n\n    return __lcmtypes_graph_t_hash;\n}\n\nint __lcmtypes_graph_t_encode_array(void *buf, int offset, int maxlen, const lcmtypes_graph_t *p, int elements)\n{\n    int pos = 0, element;\n    int thislen;\n\n    for (element = 0; element < elements; element++) {\n\n        thislen = __int32_t_encode_array(buf, offset + pos, maxlen - pos, &(p[element].num_vertices), 1);\n        if (thislen < 0) return thislen; else pos += thislen;\n\n        thislen = __lcmtypes_vertex_t_encode_array(buf, offset + pos, maxlen - pos, p[element].vertices, p[element].num_vertices);\n        if (thislen < 0) return thislen; else pos += thislen;\n\n        thislen = __int32_t_encode_array(buf, offset + pos, maxlen - pos, &(p[element].num_edges), 1);\n        if (thislen < 0) return thislen; else pos += thislen;\n\n        thislen = __lcmtypes_edge_t_encode_array(buf, 
offset + pos, maxlen - pos, p[element].edges, p[element].num_edges);\n        if (thislen < 0) return thislen; else pos += thislen;\n\n    }\n    return pos;\n}\n\nint lcmtypes_graph_t_encode(void *buf, int offset, int maxlen, const lcmtypes_graph_t *p)\n{\n    int pos = 0, thislen;\n    int64_t hash = __lcmtypes_graph_t_get_hash();\n\n    thislen = __int64_t_encode_array(buf, offset + pos, maxlen - pos, &hash, 1);\n    if (thislen < 0) return thislen; else pos += thislen;\n\n    thislen = __lcmtypes_graph_t_encode_array(buf, offset + pos, maxlen - pos, p, 1);\n    if (thislen < 0) return thislen; else pos += thislen;\n\n    return pos;\n}\n\nint __lcmtypes_graph_t_encoded_array_size(const lcmtypes_graph_t *p, int elements)\n{\n    int size = 0, element;\n    for (element = 0; element < elements; element++) {\n\n        size += __int32_t_encoded_array_size(&(p[element].num_vertices), 1);\n\n        size += __lcmtypes_vertex_t_encoded_array_size(p[element].vertices, p[element].num_vertices);\n\n        size += __int32_t_encoded_array_size(&(p[element].num_edges), 1);\n\n        size += __lcmtypes_edge_t_encoded_array_size(p[element].edges, p[element].num_edges);\n\n    }\n    return size;\n}\n\nint lcmtypes_graph_t_encoded_size(const lcmtypes_graph_t *p)\n{\n    return 8 + __lcmtypes_graph_t_encoded_array_size(p, 1);\n}\n\nint __lcmtypes_graph_t_decode_array(const void *buf, int offset, int maxlen, lcmtypes_graph_t *p, int elements)\n{\n    int pos = 0, thislen, element;\n\n    for (element = 0; element < elements; element++) {\n\n        thislen = __int32_t_decode_array(buf, offset + pos, maxlen - pos, &(p[element].num_vertices), 1);\n        if (thislen < 0) return thislen; else pos += thislen;\n\n        p[element].vertices = (lcmtypes_vertex_t*) lcm_malloc(sizeof(lcmtypes_vertex_t) * p[element].num_vertices);\n        thislen = __lcmtypes_vertex_t_decode_array(buf, offset + pos, maxlen - pos, p[element].vertices, p[element].num_vertices);\n        if (thislen < 
0) return thislen; else pos += thislen;\n\n        thislen = __int32_t_decode_array(buf, offset + pos, maxlen - pos, &(p[element].num_edges), 1);\n        if (thislen < 0) return thislen; else pos += thislen;\n\n        p[element].edges = (lcmtypes_edge_t*) lcm_malloc(sizeof(lcmtypes_edge_t) * p[element].num_edges);\n        thislen = __lcmtypes_edge_t_decode_array(buf, offset + pos, maxlen - pos, p[element].edges, p[element].num_edges);\n        if (thislen < 0) return thislen; else pos += thislen;\n\n    }\n    return pos;\n}\n\nint __lcmtypes_graph_t_decode_array_cleanup(lcmtypes_graph_t *p, int elements)\n{\n    int element;\n    for (element = 0; element < elements; element++) {\n\n        __int32_t_decode_array_cleanup(&(p[element].num_vertices), 1);\n\n        __lcmtypes_vertex_t_decode_array_cleanup(p[element].vertices, p[element].num_vertices);\n        if (p[element].vertices) free(p[element].vertices);\n\n        __int32_t_decode_array_cleanup(&(p[element].num_edges), 1);\n\n        __lcmtypes_edge_t_decode_array_cleanup(p[element].edges, p[element].num_edges);\n        if (p[element].edges) free(p[element].edges);\n\n    }\n    return 0;\n}\n\nint lcmtypes_graph_t_decode(const void *buf, int offset, int maxlen, lcmtypes_graph_t *p)\n{\n    int pos = 0, thislen;\n    int64_t hash = __lcmtypes_graph_t_get_hash();\n\n    int64_t this_hash;\n    thislen = __int64_t_decode_array(buf, offset + pos, maxlen - pos, &this_hash, 1);\n    if (thislen < 0) return thislen; else pos += thislen;\n    if (this_hash != hash) return -1;\n\n    thislen = __lcmtypes_graph_t_decode_array(buf, offset + pos, maxlen - pos, p, 1);\n    if (thislen < 0) return thislen; else pos += thislen;\n\n    return pos;\n}\n\nint lcmtypes_graph_t_decode_cleanup(lcmtypes_graph_t *p)\n{\n    return __lcmtypes_graph_t_decode_array_cleanup(p, 1);\n}\n\nint __lcmtypes_graph_t_clone_array(const lcmtypes_graph_t *p, lcmtypes_graph_t *q, int elements)\n{\n    int element;\n    for (element = 0; 
element < elements; element++) {\n\n        __int32_t_clone_array(&(p[element].num_vertices), &(q[element].num_vertices), 1);\n\n        q[element].vertices = (lcmtypes_vertex_t*) lcm_malloc(sizeof(lcmtypes_vertex_t) * q[element].num_vertices);\n        __lcmtypes_vertex_t_clone_array(p[element].vertices, q[element].vertices, p[element].num_vertices);\n\n        __int32_t_clone_array(&(p[element].num_edges), &(q[element].num_edges), 1);\n\n        q[element].edges = (lcmtypes_edge_t*) lcm_malloc(sizeof(lcmtypes_edge_t) * q[element].num_edges);\n        __lcmtypes_edge_t_clone_array(p[element].edges, q[element].edges, p[element].num_edges);\n\n    }\n    return 0;\n}\n\nlcmtypes_graph_t *lcmtypes_graph_t_copy(const lcmtypes_graph_t *p)\n{\n    lcmtypes_graph_t *q = (lcmtypes_graph_t*) malloc(sizeof(lcmtypes_graph_t));\n    __lcmtypes_graph_t_clone_array(p, q, 1);\n    return q;\n}\n\nvoid lcmtypes_graph_t_destroy(lcmtypes_graph_t *p)\n{\n    __lcmtypes_graph_t_decode_array_cleanup(p, 1);\n    free(p);\n}\n\nint lcmtypes_graph_t_publish(lcm_t *lc, const char *channel, const lcmtypes_graph_t *p)\n{\n      int max_data_size = lcmtypes_graph_t_encoded_size (p);\n      uint8_t *buf = (uint8_t*) malloc (max_data_size);\n      if (!buf) return -1;\n      int data_size = lcmtypes_graph_t_encode (buf, 0, max_data_size, p);\n      if (data_size < 0) {\n          free (buf);\n          return data_size;\n      }\n      int status = lcm_publish (lc, channel, buf, data_size);\n      free (buf);\n      return status;\n}\n\nstruct _lcmtypes_graph_t_subscription_t {\n    lcmtypes_graph_t_handler_t user_handler;\n    void *userdata;\n    lcm_subscription_t *lc_h;\n};\nstatic\nvoid lcmtypes_graph_t_handler_stub (const lcm_recv_buf_t *rbuf,\n                            const char *channel, void *userdata)\n{\n    int status;\n    lcmtypes_graph_t p;\n    memset(&p, 0, sizeof(lcmtypes_graph_t));\n    status = lcmtypes_graph_t_decode (rbuf->data, 0, rbuf->data_size, &p);\n    if (status 
< 0) {\n        fprintf (stderr, \"error %d decoding lcmtypes_graph_t!!!\\n\", status);\n        return;\n    }\n\n    lcmtypes_graph_t_subscription_t *h = (lcmtypes_graph_t_subscription_t*) userdata;\n    h->user_handler (rbuf, channel, &p, h->userdata);\n\n    lcmtypes_graph_t_decode_cleanup (&p);\n}\n\nlcmtypes_graph_t_subscription_t* lcmtypes_graph_t_subscribe (lcm_t *lcm,\n                    const char *channel,\n                    lcmtypes_graph_t_handler_t f, void *userdata)\n{\n    lcmtypes_graph_t_subscription_t *n = (lcmtypes_graph_t_subscription_t*)\n                       malloc(sizeof(lcmtypes_graph_t_subscription_t));\n    n->user_handler = f;\n    n->userdata = userdata;\n    n->lc_h = lcm_subscribe (lcm, channel,\n                                 lcmtypes_graph_t_handler_stub, n);\n    if (n->lc_h == NULL) {\n        fprintf (stderr,\"couldn't reg lcmtypes_graph_t LCM handler!\\n\");\n        free (n);\n        return NULL;\n    }\n    return n;\n}\n\nint lcmtypes_graph_t_subscription_set_queue_capacity (lcmtypes_graph_t_subscription_t* subs,\n                              int num_messages)\n{\n    return lcm_subscription_set_queue_capacity (subs->lc_h, num_messages);\n}\n\nint lcmtypes_graph_t_unsubscribe(lcm_t *lcm, lcmtypes_graph_t_subscription_t* hid)\n{\n    int status = lcm_unsubscribe (lcm, hid->lc_h);\n    if (0 != status) {\n        fprintf(stderr,\n           \"couldn't unsubscribe lcmtypes_graph_t_handler %p!\\n\", hid);\n        return -1;\n    }\n    free (hid);\n    return 0;\n}\n\n"
  },
  {
    "path": "data_generation/lcmtypes/lcmtypes/c/lcmtypes/lcmtypes_graph_t.h",
    "content": "// THIS IS AN AUTOMATICALLY GENERATED FILE.  DO NOT MODIFY\n// BY HAND!!\n//\n// Generated by lcm-gen\n\n#include <stdint.h>\n#include <stdlib.h>\n#include <lcm/lcm_coretypes.h>\n#include <lcm/lcm.h>\n\n#ifndef _lcmtypes_graph_t_h\n#define _lcmtypes_graph_t_h\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n#include \"lcmtypes/lcmtypes_vertex_t.h\"\n#include \"lcmtypes/lcmtypes_edge_t.h\"\ntypedef struct _lcmtypes_graph_t lcmtypes_graph_t;\nstruct _lcmtypes_graph_t\n{\n    int32_t    num_vertices;\n    lcmtypes_vertex_t *vertices;\n    int32_t    num_edges;\n    lcmtypes_edge_t *edges;\n};\n\n/**\n * Create a deep copy of a lcmtypes_graph_t.\n * When no longer needed, destroy it with lcmtypes_graph_t_destroy()\n */\nlcmtypes_graph_t* lcmtypes_graph_t_copy(const lcmtypes_graph_t* to_copy);\n\n/**\n * Destroy an instance of lcmtypes_graph_t created by lcmtypes_graph_t_copy()\n */\nvoid lcmtypes_graph_t_destroy(lcmtypes_graph_t* to_destroy);\n\n/**\n * Identifies a single subscription.  This is an opaque data type.\n */\ntypedef struct _lcmtypes_graph_t_subscription_t lcmtypes_graph_t_subscription_t;\n\n/**\n * Prototype for a callback function invoked when a message of type\n * lcmtypes_graph_t is received.\n */\ntypedef void(*lcmtypes_graph_t_handler_t)(const lcm_recv_buf_t *rbuf,\n             const char *channel, const lcmtypes_graph_t *msg, void *userdata);\n\n/**\n * Publish a message of type lcmtypes_graph_t using LCM.\n *\n * @param lcm The LCM instance to publish with.\n * @param channel The channel to publish on.\n * @param msg The message to publish.\n * @return 0 on success, <0 on error.  
Success means LCM has transferred\n * responsibility of the message data to the OS.\n */\nint lcmtypes_graph_t_publish(lcm_t *lcm, const char *channel, const lcmtypes_graph_t *msg);\n\n/**\n * Subscribe to messages of type lcmtypes_graph_t using LCM.\n *\n * @param lcm The LCM instance to subscribe with.\n * @param channel The channel to subscribe to.\n * @param handler The callback function invoked by LCM when a message is received.\n *                This function is invoked by LCM during calls to lcm_handle() and\n *                lcm_handle_timeout().\n * @param userdata An opaque pointer passed to @p handler when it is invoked.\n * @return 0 on success, <0 if an error occured\n */\nlcmtypes_graph_t_subscription_t* lcmtypes_graph_t_subscribe(lcm_t *lcm, const char *channel, lcmtypes_graph_t_handler_t handler, void *userdata);\n\n/**\n * Removes and destroys a subscription created by lcmtypes_graph_t_subscribe()\n */\nint lcmtypes_graph_t_unsubscribe(lcm_t *lcm, lcmtypes_graph_t_subscription_t* hid);\n\n/**\n * Sets the queue capacity for a subscription.\n * Some LCM providers (e.g., the default multicast provider) are implemented\n * using a background receive thread that constantly revceives messages from\n * the network.  As these messages are received, they are buffered on\n * per-subscription queues until dispatched by lcm_handle().  
This function\n * how many messages are queued before dropping messages.\n *\n * @param subs the subscription to modify.\n * @param num_messages The maximum number of messages to queue\n *  on the subscription.\n * @return 0 on success, <0 if an error occured\n */\nint lcmtypes_graph_t_subscription_set_queue_capacity(lcmtypes_graph_t_subscription_t* subs,\n                              int num_messages);\n\n/**\n * Encode a message of type lcmtypes_graph_t into binary form.\n *\n * @param buf The output buffer.\n * @param offset Encoding starts at this byte offset into @p buf.\n * @param maxlen Maximum number of bytes to write.  This should generally\n *               be equal to lcmtypes_graph_t_encoded_size().\n * @param msg The message to encode.\n * @return The number of bytes encoded, or <0 if an error occured.\n */\nint lcmtypes_graph_t_encode(void *buf, int offset, int maxlen, const lcmtypes_graph_t *p);\n\n/**\n * Decode a message of type lcmtypes_graph_t from binary form.\n * When decoding messages containing strings or variable-length arrays, this\n * function may allocate memory.  When finished with the decoded message,\n * release allocated resources with lcmtypes_graph_t_decode_cleanup().\n *\n * @param buf The buffer containing the encoded message\n * @param offset The byte offset into @p buf where the encoded message starts.\n * @param maxlen The maximum number of bytes to read while decoding.\n * @param msg Output parameter where the decoded message is stored\n * @return The number of bytes decoded, or <0 if an error occured.\n */\nint lcmtypes_graph_t_decode(const void *buf, int offset, int maxlen, lcmtypes_graph_t *msg);\n\n/**\n * Release resources allocated by lcmtypes_graph_t_decode()\n * @return 0\n */\nint lcmtypes_graph_t_decode_cleanup(lcmtypes_graph_t *p);\n\n/**\n * Check how many bytes are required to encode a message of type lcmtypes_graph_t\n */\nint lcmtypes_graph_t_encoded_size(const lcmtypes_graph_t *p);\n\n// LCM support functions. 
Users should not call these\nint64_t __lcmtypes_graph_t_get_hash(void);\nuint64_t __lcmtypes_graph_t_hash_recursive(const __lcm_hash_ptr *p);\nint     __lcmtypes_graph_t_encode_array(void *buf, int offset, int maxlen, const lcmtypes_graph_t *p, int elements);\nint     __lcmtypes_graph_t_decode_array(const void *buf, int offset, int maxlen, lcmtypes_graph_t *p, int elements);\nint     __lcmtypes_graph_t_decode_array_cleanup(lcmtypes_graph_t *p, int elements);\nint     __lcmtypes_graph_t_encoded_array_size(const lcmtypes_graph_t *p, int elements);\nint     __lcmtypes_graph_t_clone_array(const lcmtypes_graph_t *p, lcmtypes_graph_t *q, int elements);\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif\n"
  },
  {
    "path": "data_generation/lcmtypes/lcmtypes/c/lcmtypes/lcmtypes_region_3d_t.c",
    "content": "// THIS IS AN AUTOMATICALLY GENERATED FILE.  DO NOT MODIFY\n// BY HAND!!\n//\n// Generated by lcm-gen\n\n#include <string.h>\n#include \"lcmtypes/lcmtypes_region_3d_t.h\"\n\nstatic int __lcmtypes_region_3d_t_hash_computed;\nstatic uint64_t __lcmtypes_region_3d_t_hash;\n\nuint64_t __lcmtypes_region_3d_t_hash_recursive(const __lcm_hash_ptr *p)\n{\n    const __lcm_hash_ptr *fp;\n    for (fp = p; fp != NULL; fp = fp->parent)\n        if (fp->v == __lcmtypes_region_3d_t_get_hash)\n            return 0;\n\n    __lcm_hash_ptr cp;\n    cp.parent =  p;\n    cp.v = (void*)__lcmtypes_region_3d_t_get_hash;\n    (void) cp;\n\n    uint64_t hash = (uint64_t)0x94830fc8d7404191LL\n         + __double_hash_recursive(&cp)\n         + __double_hash_recursive(&cp)\n        ;\n\n    return (hash<<1) + ((hash>>63)&1);\n}\n\nint64_t __lcmtypes_region_3d_t_get_hash(void)\n{\n    if (!__lcmtypes_region_3d_t_hash_computed) {\n        __lcmtypes_region_3d_t_hash = (int64_t)__lcmtypes_region_3d_t_hash_recursive(NULL);\n        __lcmtypes_region_3d_t_hash_computed = 1;\n    }\n\n    return __lcmtypes_region_3d_t_hash;\n}\n\nint __lcmtypes_region_3d_t_encode_array(void *buf, int offset, int maxlen, const lcmtypes_region_3d_t *p, int elements)\n{\n    int pos = 0, element;\n    int thislen;\n\n    for (element = 0; element < elements; element++) {\n\n        thislen = __double_encode_array(buf, offset + pos, maxlen - pos, p[element].center, 3);\n        if (thislen < 0) return thislen; else pos += thislen;\n\n        thislen = __double_encode_array(buf, offset + pos, maxlen - pos, p[element].size, 3);\n        if (thislen < 0) return thislen; else pos += thislen;\n\n    }\n    return pos;\n}\n\nint lcmtypes_region_3d_t_encode(void *buf, int offset, int maxlen, const lcmtypes_region_3d_t *p)\n{\n    int pos = 0, thislen;\n    int64_t hash = __lcmtypes_region_3d_t_get_hash();\n\n    thislen = __int64_t_encode_array(buf, offset + pos, maxlen - pos, &hash, 1);\n    if (thislen < 0) 
return thislen; else pos += thislen;\n\n    thislen = __lcmtypes_region_3d_t_encode_array(buf, offset + pos, maxlen - pos, p, 1);\n    if (thislen < 0) return thislen; else pos += thislen;\n\n    return pos;\n}\n\nint __lcmtypes_region_3d_t_encoded_array_size(const lcmtypes_region_3d_t *p, int elements)\n{\n    int size = 0, element;\n    for (element = 0; element < elements; element++) {\n\n        size += __double_encoded_array_size(p[element].center, 3);\n\n        size += __double_encoded_array_size(p[element].size, 3);\n\n    }\n    return size;\n}\n\nint lcmtypes_region_3d_t_encoded_size(const lcmtypes_region_3d_t *p)\n{\n    return 8 + __lcmtypes_region_3d_t_encoded_array_size(p, 1);\n}\n\nint __lcmtypes_region_3d_t_decode_array(const void *buf, int offset, int maxlen, lcmtypes_region_3d_t *p, int elements)\n{\n    int pos = 0, thislen, element;\n\n    for (element = 0; element < elements; element++) {\n\n        thislen = __double_decode_array(buf, offset + pos, maxlen - pos, p[element].center, 3);\n        if (thislen < 0) return thislen; else pos += thislen;\n\n        thislen = __double_decode_array(buf, offset + pos, maxlen - pos, p[element].size, 3);\n        if (thislen < 0) return thislen; else pos += thislen;\n\n    }\n    return pos;\n}\n\nint __lcmtypes_region_3d_t_decode_array_cleanup(lcmtypes_region_3d_t *p, int elements)\n{\n    int element;\n    for (element = 0; element < elements; element++) {\n\n        __double_decode_array_cleanup(p[element].center, 3);\n\n        __double_decode_array_cleanup(p[element].size, 3);\n\n    }\n    return 0;\n}\n\nint lcmtypes_region_3d_t_decode(const void *buf, int offset, int maxlen, lcmtypes_region_3d_t *p)\n{\n    int pos = 0, thislen;\n    int64_t hash = __lcmtypes_region_3d_t_get_hash();\n\n    int64_t this_hash;\n    thislen = __int64_t_decode_array(buf, offset + pos, maxlen - pos, &this_hash, 1);\n    if (thislen < 0) return thislen; else pos += thislen;\n    if (this_hash != hash) return -1;\n\n    
thislen = __lcmtypes_region_3d_t_decode_array(buf, offset + pos, maxlen - pos, p, 1);\n    if (thislen < 0) return thislen; else pos += thislen;\n\n    return pos;\n}\n\nint lcmtypes_region_3d_t_decode_cleanup(lcmtypes_region_3d_t *p)\n{\n    return __lcmtypes_region_3d_t_decode_array_cleanup(p, 1);\n}\n\nint __lcmtypes_region_3d_t_clone_array(const lcmtypes_region_3d_t *p, lcmtypes_region_3d_t *q, int elements)\n{\n    int element;\n    for (element = 0; element < elements; element++) {\n\n        __double_clone_array(p[element].center, q[element].center, 3);\n\n        __double_clone_array(p[element].size, q[element].size, 3);\n\n    }\n    return 0;\n}\n\nlcmtypes_region_3d_t *lcmtypes_region_3d_t_copy(const lcmtypes_region_3d_t *p)\n{\n    lcmtypes_region_3d_t *q = (lcmtypes_region_3d_t*) malloc(sizeof(lcmtypes_region_3d_t));\n    __lcmtypes_region_3d_t_clone_array(p, q, 1);\n    return q;\n}\n\nvoid lcmtypes_region_3d_t_destroy(lcmtypes_region_3d_t *p)\n{\n    __lcmtypes_region_3d_t_decode_array_cleanup(p, 1);\n    free(p);\n}\n\nint lcmtypes_region_3d_t_publish(lcm_t *lc, const char *channel, const lcmtypes_region_3d_t *p)\n{\n      int max_data_size = lcmtypes_region_3d_t_encoded_size (p);\n      uint8_t *buf = (uint8_t*) malloc (max_data_size);\n      if (!buf) return -1;\n      int data_size = lcmtypes_region_3d_t_encode (buf, 0, max_data_size, p);\n      if (data_size < 0) {\n          free (buf);\n          return data_size;\n      }\n      int status = lcm_publish (lc, channel, buf, data_size);\n      free (buf);\n      return status;\n}\n\nstruct _lcmtypes_region_3d_t_subscription_t {\n    lcmtypes_region_3d_t_handler_t user_handler;\n    void *userdata;\n    lcm_subscription_t *lc_h;\n};\nstatic\nvoid lcmtypes_region_3d_t_handler_stub (const lcm_recv_buf_t *rbuf,\n                            const char *channel, void *userdata)\n{\n    int status;\n    lcmtypes_region_3d_t p;\n    memset(&p, 0, sizeof(lcmtypes_region_3d_t));\n    status = 
lcmtypes_region_3d_t_decode (rbuf->data, 0, rbuf->data_size, &p);\n    if (status < 0) {\n        fprintf (stderr, \"error %d decoding lcmtypes_region_3d_t!!!\\n\", status);\n        return;\n    }\n\n    lcmtypes_region_3d_t_subscription_t *h = (lcmtypes_region_3d_t_subscription_t*) userdata;\n    h->user_handler (rbuf, channel, &p, h->userdata);\n\n    lcmtypes_region_3d_t_decode_cleanup (&p);\n}\n\nlcmtypes_region_3d_t_subscription_t* lcmtypes_region_3d_t_subscribe (lcm_t *lcm,\n                    const char *channel,\n                    lcmtypes_region_3d_t_handler_t f, void *userdata)\n{\n    lcmtypes_region_3d_t_subscription_t *n = (lcmtypes_region_3d_t_subscription_t*)\n                       malloc(sizeof(lcmtypes_region_3d_t_subscription_t));\n    n->user_handler = f;\n    n->userdata = userdata;\n    n->lc_h = lcm_subscribe (lcm, channel,\n                                 lcmtypes_region_3d_t_handler_stub, n);\n    if (n->lc_h == NULL) {\n        fprintf (stderr,\"couldn't reg lcmtypes_region_3d_t LCM handler!\\n\");\n        free (n);\n        return NULL;\n    }\n    return n;\n}\n\nint lcmtypes_region_3d_t_subscription_set_queue_capacity (lcmtypes_region_3d_t_subscription_t* subs,\n                              int num_messages)\n{\n    return lcm_subscription_set_queue_capacity (subs->lc_h, num_messages);\n}\n\nint lcmtypes_region_3d_t_unsubscribe(lcm_t *lcm, lcmtypes_region_3d_t_subscription_t* hid)\n{\n    int status = lcm_unsubscribe (lcm, hid->lc_h);\n    if (0 != status) {\n        fprintf(stderr,\n           \"couldn't unsubscribe lcmtypes_region_3d_t_handler %p!\\n\", hid);\n        return -1;\n    }\n    free (hid);\n    return 0;\n}\n\n"
  },
  {
    "path": "data_generation/lcmtypes/lcmtypes/c/lcmtypes/lcmtypes_region_3d_t.h",
    "content": "// THIS IS AN AUTOMATICALLY GENERATED FILE.  DO NOT MODIFY\n// BY HAND!!\n//\n// Generated by lcm-gen\n\n#include <stdint.h>\n#include <stdlib.h>\n#include <lcm/lcm_coretypes.h>\n#include <lcm/lcm.h>\n\n#ifndef _lcmtypes_region_3d_t_h\n#define _lcmtypes_region_3d_t_h\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\ntypedef struct _lcmtypes_region_3d_t lcmtypes_region_3d_t;\nstruct _lcmtypes_region_3d_t\n{\n    double     center[3];\n    double     size[3];\n};\n\n/**\n * Create a deep copy of a lcmtypes_region_3d_t.\n * When no longer needed, destroy it with lcmtypes_region_3d_t_destroy()\n */\nlcmtypes_region_3d_t* lcmtypes_region_3d_t_copy(const lcmtypes_region_3d_t* to_copy);\n\n/**\n * Destroy an instance of lcmtypes_region_3d_t created by lcmtypes_region_3d_t_copy()\n */\nvoid lcmtypes_region_3d_t_destroy(lcmtypes_region_3d_t* to_destroy);\n\n/**\n * Identifies a single subscription.  This is an opaque data type.\n */\ntypedef struct _lcmtypes_region_3d_t_subscription_t lcmtypes_region_3d_t_subscription_t;\n\n/**\n * Prototype for a callback function invoked when a message of type\n * lcmtypes_region_3d_t is received.\n */\ntypedef void(*lcmtypes_region_3d_t_handler_t)(const lcm_recv_buf_t *rbuf,\n             const char *channel, const lcmtypes_region_3d_t *msg, void *userdata);\n\n/**\n * Publish a message of type lcmtypes_region_3d_t using LCM.\n *\n * @param lcm The LCM instance to publish with.\n * @param channel The channel to publish on.\n * @param msg The message to publish.\n * @return 0 on success, <0 on error.  
Success means LCM has transferred\n * responsibility of the message data to the OS.\n */\nint lcmtypes_region_3d_t_publish(lcm_t *lcm, const char *channel, const lcmtypes_region_3d_t *msg);\n\n/**\n * Subscribe to messages of type lcmtypes_region_3d_t using LCM.\n *\n * @param lcm The LCM instance to subscribe with.\n * @param channel The channel to subscribe to.\n * @param handler The callback function invoked by LCM when a message is received.\n *                This function is invoked by LCM during calls to lcm_handle() and\n *                lcm_handle_timeout().\n * @param userdata An opaque pointer passed to @p handler when it is invoked.\n * @return 0 on success, <0 if an error occured\n */\nlcmtypes_region_3d_t_subscription_t* lcmtypes_region_3d_t_subscribe(lcm_t *lcm, const char *channel, lcmtypes_region_3d_t_handler_t handler, void *userdata);\n\n/**\n * Removes and destroys a subscription created by lcmtypes_region_3d_t_subscribe()\n */\nint lcmtypes_region_3d_t_unsubscribe(lcm_t *lcm, lcmtypes_region_3d_t_subscription_t* hid);\n\n/**\n * Sets the queue capacity for a subscription.\n * Some LCM providers (e.g., the default multicast provider) are implemented\n * using a background receive thread that constantly revceives messages from\n * the network.  As these messages are received, they are buffered on\n * per-subscription queues until dispatched by lcm_handle().  
This function\n * how many messages are queued before dropping messages.\n *\n * @param subs the subscription to modify.\n * @param num_messages The maximum number of messages to queue\n *  on the subscription.\n * @return 0 on success, <0 if an error occured\n */\nint lcmtypes_region_3d_t_subscription_set_queue_capacity(lcmtypes_region_3d_t_subscription_t* subs,\n                              int num_messages);\n\n/**\n * Encode a message of type lcmtypes_region_3d_t into binary form.\n *\n * @param buf The output buffer.\n * @param offset Encoding starts at this byte offset into @p buf.\n * @param maxlen Maximum number of bytes to write.  This should generally\n *               be equal to lcmtypes_region_3d_t_encoded_size().\n * @param msg The message to encode.\n * @return The number of bytes encoded, or <0 if an error occured.\n */\nint lcmtypes_region_3d_t_encode(void *buf, int offset, int maxlen, const lcmtypes_region_3d_t *p);\n\n/**\n * Decode a message of type lcmtypes_region_3d_t from binary form.\n * When decoding messages containing strings or variable-length arrays, this\n * function may allocate memory.  
When finished with the decoded message,\n * release allocated resources with lcmtypes_region_3d_t_decode_cleanup().\n *\n * @param buf The buffer containing the encoded message\n * @param offset The byte offset into @p buf where the encoded message starts.\n * @param maxlen The maximum number of bytes to read while decoding.\n * @param msg Output parameter where the decoded message is stored\n * @return The number of bytes decoded, or <0 if an error occured.\n */\nint lcmtypes_region_3d_t_decode(const void *buf, int offset, int maxlen, lcmtypes_region_3d_t *msg);\n\n/**\n * Release resources allocated by lcmtypes_region_3d_t_decode()\n * @return 0\n */\nint lcmtypes_region_3d_t_decode_cleanup(lcmtypes_region_3d_t *p);\n\n/**\n * Check how many bytes are required to encode a message of type lcmtypes_region_3d_t\n */\nint lcmtypes_region_3d_t_encoded_size(const lcmtypes_region_3d_t *p);\n\n// LCM support functions. Users should not call these\nint64_t __lcmtypes_region_3d_t_get_hash(void);\nuint64_t __lcmtypes_region_3d_t_hash_recursive(const __lcm_hash_ptr *p);\nint     __lcmtypes_region_3d_t_encode_array(void *buf, int offset, int maxlen, const lcmtypes_region_3d_t *p, int elements);\nint     __lcmtypes_region_3d_t_decode_array(const void *buf, int offset, int maxlen, lcmtypes_region_3d_t *p, int elements);\nint     __lcmtypes_region_3d_t_decode_array_cleanup(lcmtypes_region_3d_t *p, int elements);\nint     __lcmtypes_region_3d_t_encoded_array_size(const lcmtypes_region_3d_t *p, int elements);\nint     __lcmtypes_region_3d_t_clone_array(const lcmtypes_region_3d_t *p, lcmtypes_region_3d_t *q, int elements);\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif\n"
  },
  {
    "path": "data_generation/lcmtypes/lcmtypes/c/lcmtypes/lcmtypes_state_t.c",
    "content": "// THIS IS AN AUTOMATICALLY GENERATED FILE.  DO NOT MODIFY\n// BY HAND!!\n//\n// Generated by lcm-gen\n\n#include <string.h>\n#include \"lcmtypes/lcmtypes_state_t.h\"\n\nstatic int __lcmtypes_state_t_hash_computed;\nstatic uint64_t __lcmtypes_state_t_hash;\n\nuint64_t __lcmtypes_state_t_hash_recursive(const __lcm_hash_ptr *p)\n{\n    const __lcm_hash_ptr *fp;\n    for (fp = p; fp != NULL; fp = fp->parent)\n        if (fp->v == __lcmtypes_state_t_get_hash)\n            return 0;\n\n    __lcm_hash_ptr cp;\n    cp.parent =  p;\n    cp.v = (void*)__lcmtypes_state_t_get_hash;\n    (void) cp;\n\n    uint64_t hash = (uint64_t)0x573f2fdd2f76508fLL\n         + __double_hash_recursive(&cp)\n         + __double_hash_recursive(&cp)\n         + __double_hash_recursive(&cp)\n        ;\n\n    return (hash<<1) + ((hash>>63)&1);\n}\n\nint64_t __lcmtypes_state_t_get_hash(void)\n{\n    if (!__lcmtypes_state_t_hash_computed) {\n        __lcmtypes_state_t_hash = (int64_t)__lcmtypes_state_t_hash_recursive(NULL);\n        __lcmtypes_state_t_hash_computed = 1;\n    }\n\n    return __lcmtypes_state_t_hash;\n}\n\nint __lcmtypes_state_t_encode_array(void *buf, int offset, int maxlen, const lcmtypes_state_t *p, int elements)\n{\n    int pos = 0, element;\n    int thislen;\n\n    for (element = 0; element < elements; element++) {\n\n        thislen = __double_encode_array(buf, offset + pos, maxlen - pos, &(p[element].x), 1);\n        if (thislen < 0) return thislen; else pos += thislen;\n\n        thislen = __double_encode_array(buf, offset + pos, maxlen - pos, &(p[element].y), 1);\n        if (thislen < 0) return thislen; else pos += thislen;\n\n        thislen = __double_encode_array(buf, offset + pos, maxlen - pos, &(p[element].z), 1);\n        if (thislen < 0) return thislen; else pos += thislen;\n\n    }\n    return pos;\n}\n\nint lcmtypes_state_t_encode(void *buf, int offset, int maxlen, const lcmtypes_state_t *p)\n{\n    int pos = 0, thislen;\n    int64_t hash = 
__lcmtypes_state_t_get_hash();\n\n    thislen = __int64_t_encode_array(buf, offset + pos, maxlen - pos, &hash, 1);\n    if (thislen < 0) return thislen; else pos += thislen;\n\n    thislen = __lcmtypes_state_t_encode_array(buf, offset + pos, maxlen - pos, p, 1);\n    if (thislen < 0) return thislen; else pos += thislen;\n\n    return pos;\n}\n\nint __lcmtypes_state_t_encoded_array_size(const lcmtypes_state_t *p, int elements)\n{\n    int size = 0, element;\n    for (element = 0; element < elements; element++) {\n\n        size += __double_encoded_array_size(&(p[element].x), 1);\n\n        size += __double_encoded_array_size(&(p[element].y), 1);\n\n        size += __double_encoded_array_size(&(p[element].z), 1);\n\n    }\n    return size;\n}\n\nint lcmtypes_state_t_encoded_size(const lcmtypes_state_t *p)\n{\n    return 8 + __lcmtypes_state_t_encoded_array_size(p, 1);\n}\n\nint __lcmtypes_state_t_decode_array(const void *buf, int offset, int maxlen, lcmtypes_state_t *p, int elements)\n{\n    int pos = 0, thislen, element;\n\n    for (element = 0; element < elements; element++) {\n\n        thislen = __double_decode_array(buf, offset + pos, maxlen - pos, &(p[element].x), 1);\n        if (thislen < 0) return thislen; else pos += thislen;\n\n        thislen = __double_decode_array(buf, offset + pos, maxlen - pos, &(p[element].y), 1);\n        if (thislen < 0) return thislen; else pos += thislen;\n\n        thislen = __double_decode_array(buf, offset + pos, maxlen - pos, &(p[element].z), 1);\n        if (thislen < 0) return thislen; else pos += thislen;\n\n    }\n    return pos;\n}\n\nint __lcmtypes_state_t_decode_array_cleanup(lcmtypes_state_t *p, int elements)\n{\n    int element;\n    for (element = 0; element < elements; element++) {\n\n        __double_decode_array_cleanup(&(p[element].x), 1);\n\n        __double_decode_array_cleanup(&(p[element].y), 1);\n\n        __double_decode_array_cleanup(&(p[element].z), 1);\n\n    }\n    return 0;\n}\n\nint 
lcmtypes_state_t_decode(const void *buf, int offset, int maxlen, lcmtypes_state_t *p)\n{\n    int pos = 0, thislen;\n    int64_t hash = __lcmtypes_state_t_get_hash();\n\n    int64_t this_hash;\n    thislen = __int64_t_decode_array(buf, offset + pos, maxlen - pos, &this_hash, 1);\n    if (thislen < 0) return thislen; else pos += thislen;\n    if (this_hash != hash) return -1;\n\n    thislen = __lcmtypes_state_t_decode_array(buf, offset + pos, maxlen - pos, p, 1);\n    if (thislen < 0) return thislen; else pos += thislen;\n\n    return pos;\n}\n\nint lcmtypes_state_t_decode_cleanup(lcmtypes_state_t *p)\n{\n    return __lcmtypes_state_t_decode_array_cleanup(p, 1);\n}\n\nint __lcmtypes_state_t_clone_array(const lcmtypes_state_t *p, lcmtypes_state_t *q, int elements)\n{\n    int element;\n    for (element = 0; element < elements; element++) {\n\n        __double_clone_array(&(p[element].x), &(q[element].x), 1);\n\n        __double_clone_array(&(p[element].y), &(q[element].y), 1);\n\n        __double_clone_array(&(p[element].z), &(q[element].z), 1);\n\n    }\n    return 0;\n}\n\nlcmtypes_state_t *lcmtypes_state_t_copy(const lcmtypes_state_t *p)\n{\n    lcmtypes_state_t *q = (lcmtypes_state_t*) malloc(sizeof(lcmtypes_state_t));\n    __lcmtypes_state_t_clone_array(p, q, 1);\n    return q;\n}\n\nvoid lcmtypes_state_t_destroy(lcmtypes_state_t *p)\n{\n    __lcmtypes_state_t_decode_array_cleanup(p, 1);\n    free(p);\n}\n\nint lcmtypes_state_t_publish(lcm_t *lc, const char *channel, const lcmtypes_state_t *p)\n{\n      int max_data_size = lcmtypes_state_t_encoded_size (p);\n      uint8_t *buf = (uint8_t*) malloc (max_data_size);\n      if (!buf) return -1;\n      int data_size = lcmtypes_state_t_encode (buf, 0, max_data_size, p);\n      if (data_size < 0) {\n          free (buf);\n          return data_size;\n      }\n      int status = lcm_publish (lc, channel, buf, data_size);\n      free (buf);\n      return status;\n}\n\nstruct _lcmtypes_state_t_subscription_t {\n    
lcmtypes_state_t_handler_t user_handler;\n    void *userdata;\n    lcm_subscription_t *lc_h;\n};\nstatic\nvoid lcmtypes_state_t_handler_stub (const lcm_recv_buf_t *rbuf,\n                            const char *channel, void *userdata)\n{\n    int status;\n    lcmtypes_state_t p;\n    memset(&p, 0, sizeof(lcmtypes_state_t));\n    status = lcmtypes_state_t_decode (rbuf->data, 0, rbuf->data_size, &p);\n    if (status < 0) {\n        fprintf (stderr, \"error %d decoding lcmtypes_state_t!!!\\n\", status);\n        return;\n    }\n\n    lcmtypes_state_t_subscription_t *h = (lcmtypes_state_t_subscription_t*) userdata;\n    h->user_handler (rbuf, channel, &p, h->userdata);\n\n    lcmtypes_state_t_decode_cleanup (&p);\n}\n\nlcmtypes_state_t_subscription_t* lcmtypes_state_t_subscribe (lcm_t *lcm,\n                    const char *channel,\n                    lcmtypes_state_t_handler_t f, void *userdata)\n{\n    lcmtypes_state_t_subscription_t *n = (lcmtypes_state_t_subscription_t*)\n                       malloc(sizeof(lcmtypes_state_t_subscription_t));\n    n->user_handler = f;\n    n->userdata = userdata;\n    n->lc_h = lcm_subscribe (lcm, channel,\n                                 lcmtypes_state_t_handler_stub, n);\n    if (n->lc_h == NULL) {\n        fprintf (stderr,\"couldn't reg lcmtypes_state_t LCM handler!\\n\");\n        free (n);\n        return NULL;\n    }\n    return n;\n}\n\nint lcmtypes_state_t_subscription_set_queue_capacity (lcmtypes_state_t_subscription_t* subs,\n                              int num_messages)\n{\n    return lcm_subscription_set_queue_capacity (subs->lc_h, num_messages);\n}\n\nint lcmtypes_state_t_unsubscribe(lcm_t *lcm, lcmtypes_state_t_subscription_t* hid)\n{\n    int status = lcm_unsubscribe (lcm, hid->lc_h);\n    if (0 != status) {\n        fprintf(stderr,\n           \"couldn't unsubscribe lcmtypes_state_t_handler %p!\\n\", hid);\n        return -1;\n    }\n    free (hid);\n    return 0;\n}\n\n"
  },
  {
    "path": "data_generation/lcmtypes/lcmtypes/c/lcmtypes/lcmtypes_state_t.h",
    "content": "// THIS IS AN AUTOMATICALLY GENERATED FILE.  DO NOT MODIFY\n// BY HAND!!\n//\n// Generated by lcm-gen\n\n#include <stdint.h>\n#include <stdlib.h>\n#include <lcm/lcm_coretypes.h>\n#include <lcm/lcm.h>\n\n#ifndef _lcmtypes_state_t_h\n#define _lcmtypes_state_t_h\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\ntypedef struct _lcmtypes_state_t lcmtypes_state_t;\nstruct _lcmtypes_state_t\n{\n    double     x;\n    double     y;\n    double     z;\n};\n\n/**\n * Create a deep copy of a lcmtypes_state_t.\n * When no longer needed, destroy it with lcmtypes_state_t_destroy()\n */\nlcmtypes_state_t* lcmtypes_state_t_copy(const lcmtypes_state_t* to_copy);\n\n/**\n * Destroy an instance of lcmtypes_state_t created by lcmtypes_state_t_copy()\n */\nvoid lcmtypes_state_t_destroy(lcmtypes_state_t* to_destroy);\n\n/**\n * Identifies a single subscription.  This is an opaque data type.\n */\ntypedef struct _lcmtypes_state_t_subscription_t lcmtypes_state_t_subscription_t;\n\n/**\n * Prototype for a callback function invoked when a message of type\n * lcmtypes_state_t is received.\n */\ntypedef void(*lcmtypes_state_t_handler_t)(const lcm_recv_buf_t *rbuf,\n             const char *channel, const lcmtypes_state_t *msg, void *userdata);\n\n/**\n * Publish a message of type lcmtypes_state_t using LCM.\n *\n * @param lcm The LCM instance to publish with.\n * @param channel The channel to publish on.\n * @param msg The message to publish.\n * @return 0 on success, <0 on error.  
Success means LCM has transferred\n * responsibility of the message data to the OS.\n */\nint lcmtypes_state_t_publish(lcm_t *lcm, const char *channel, const lcmtypes_state_t *msg);\n\n/**\n * Subscribe to messages of type lcmtypes_state_t using LCM.\n *\n * @param lcm The LCM instance to subscribe with.\n * @param channel The channel to subscribe to.\n * @param handler The callback function invoked by LCM when a message is received.\n *                This function is invoked by LCM during calls to lcm_handle() and\n *                lcm_handle_timeout().\n * @param userdata An opaque pointer passed to @p handler when it is invoked.\n * @return 0 on success, <0 if an error occured\n */\nlcmtypes_state_t_subscription_t* lcmtypes_state_t_subscribe(lcm_t *lcm, const char *channel, lcmtypes_state_t_handler_t handler, void *userdata);\n\n/**\n * Removes and destroys a subscription created by lcmtypes_state_t_subscribe()\n */\nint lcmtypes_state_t_unsubscribe(lcm_t *lcm, lcmtypes_state_t_subscription_t* hid);\n\n/**\n * Sets the queue capacity for a subscription.\n * Some LCM providers (e.g., the default multicast provider) are implemented\n * using a background receive thread that constantly revceives messages from\n * the network.  As these messages are received, they are buffered on\n * per-subscription queues until dispatched by lcm_handle().  
This function\n * how many messages are queued before dropping messages.\n *\n * @param subs the subscription to modify.\n * @param num_messages The maximum number of messages to queue\n *  on the subscription.\n * @return 0 on success, <0 if an error occured\n */\nint lcmtypes_state_t_subscription_set_queue_capacity(lcmtypes_state_t_subscription_t* subs,\n                              int num_messages);\n\n/**\n * Encode a message of type lcmtypes_state_t into binary form.\n *\n * @param buf The output buffer.\n * @param offset Encoding starts at this byte offset into @p buf.\n * @param maxlen Maximum number of bytes to write.  This should generally\n *               be equal to lcmtypes_state_t_encoded_size().\n * @param msg The message to encode.\n * @return The number of bytes encoded, or <0 if an error occured.\n */\nint lcmtypes_state_t_encode(void *buf, int offset, int maxlen, const lcmtypes_state_t *p);\n\n/**\n * Decode a message of type lcmtypes_state_t from binary form.\n * When decoding messages containing strings or variable-length arrays, this\n * function may allocate memory.  When finished with the decoded message,\n * release allocated resources with lcmtypes_state_t_decode_cleanup().\n *\n * @param buf The buffer containing the encoded message\n * @param offset The byte offset into @p buf where the encoded message starts.\n * @param maxlen The maximum number of bytes to read while decoding.\n * @param msg Output parameter where the decoded message is stored\n * @return The number of bytes decoded, or <0 if an error occured.\n */\nint lcmtypes_state_t_decode(const void *buf, int offset, int maxlen, lcmtypes_state_t *msg);\n\n/**\n * Release resources allocated by lcmtypes_state_t_decode()\n * @return 0\n */\nint lcmtypes_state_t_decode_cleanup(lcmtypes_state_t *p);\n\n/**\n * Check how many bytes are required to encode a message of type lcmtypes_state_t\n */\nint lcmtypes_state_t_encoded_size(const lcmtypes_state_t *p);\n\n// LCM support functions. 
Users should not call these\nint64_t __lcmtypes_state_t_get_hash(void);\nuint64_t __lcmtypes_state_t_hash_recursive(const __lcm_hash_ptr *p);\nint     __lcmtypes_state_t_encode_array(void *buf, int offset, int maxlen, const lcmtypes_state_t *p, int elements);\nint     __lcmtypes_state_t_decode_array(const void *buf, int offset, int maxlen, lcmtypes_state_t *p, int elements);\nint     __lcmtypes_state_t_decode_array_cleanup(lcmtypes_state_t *p, int elements);\nint     __lcmtypes_state_t_encoded_array_size(const lcmtypes_state_t *p, int elements);\nint     __lcmtypes_state_t_clone_array(const lcmtypes_state_t *p, lcmtypes_state_t *q, int elements);\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif\n"
  },
  {
    "path": "data_generation/lcmtypes/lcmtypes/c/lcmtypes/lcmtypes_trajectory_t.c",
    "content": "// THIS IS AN AUTOMATICALLY GENERATED FILE.  DO NOT MODIFY\n// BY HAND!!\n//\n// Generated by lcm-gen\n\n#include <string.h>\n#include \"lcmtypes/lcmtypes_trajectory_t.h\"\n\nstatic int __lcmtypes_trajectory_t_hash_computed;\nstatic uint64_t __lcmtypes_trajectory_t_hash;\n\nuint64_t __lcmtypes_trajectory_t_hash_recursive(const __lcm_hash_ptr *p)\n{\n    const __lcm_hash_ptr *fp;\n    for (fp = p; fp != NULL; fp = fp->parent)\n        if (fp->v == __lcmtypes_trajectory_t_get_hash)\n            return 0;\n\n    __lcm_hash_ptr cp;\n    cp.parent =  p;\n    cp.v = (void*)__lcmtypes_trajectory_t_get_hash;\n    (void) cp;\n\n    uint64_t hash = (uint64_t)0x67039c5ec5ece44fLL\n         + __int32_t_hash_recursive(&cp)\n         + __lcmtypes_state_t_hash_recursive(&cp)\n        ;\n\n    return (hash<<1) + ((hash>>63)&1);\n}\n\nint64_t __lcmtypes_trajectory_t_get_hash(void)\n{\n    if (!__lcmtypes_trajectory_t_hash_computed) {\n        __lcmtypes_trajectory_t_hash = (int64_t)__lcmtypes_trajectory_t_hash_recursive(NULL);\n        __lcmtypes_trajectory_t_hash_computed = 1;\n    }\n\n    return __lcmtypes_trajectory_t_hash;\n}\n\nint __lcmtypes_trajectory_t_encode_array(void *buf, int offset, int maxlen, const lcmtypes_trajectory_t *p, int elements)\n{\n    int pos = 0, element;\n    int thislen;\n\n    for (element = 0; element < elements; element++) {\n\n        thislen = __int32_t_encode_array(buf, offset + pos, maxlen - pos, &(p[element].num_states), 1);\n        if (thislen < 0) return thislen; else pos += thislen;\n\n        thislen = __lcmtypes_state_t_encode_array(buf, offset + pos, maxlen - pos, p[element].states, p[element].num_states);\n        if (thislen < 0) return thislen; else pos += thislen;\n\n    }\n    return pos;\n}\n\nint lcmtypes_trajectory_t_encode(void *buf, int offset, int maxlen, const lcmtypes_trajectory_t *p)\n{\n    int pos = 0, thislen;\n    int64_t hash = __lcmtypes_trajectory_t_get_hash();\n\n    thislen = 
__int64_t_encode_array(buf, offset + pos, maxlen - pos, &hash, 1);\n    if (thislen < 0) return thislen; else pos += thislen;\n\n    thislen = __lcmtypes_trajectory_t_encode_array(buf, offset + pos, maxlen - pos, p, 1);\n    if (thislen < 0) return thislen; else pos += thislen;\n\n    return pos;\n}\n\nint __lcmtypes_trajectory_t_encoded_array_size(const lcmtypes_trajectory_t *p, int elements)\n{\n    int size = 0, element;\n    for (element = 0; element < elements; element++) {\n\n        size += __int32_t_encoded_array_size(&(p[element].num_states), 1);\n\n        size += __lcmtypes_state_t_encoded_array_size(p[element].states, p[element].num_states);\n\n    }\n    return size;\n}\n\nint lcmtypes_trajectory_t_encoded_size(const lcmtypes_trajectory_t *p)\n{\n    return 8 + __lcmtypes_trajectory_t_encoded_array_size(p, 1);\n}\n\nint __lcmtypes_trajectory_t_decode_array(const void *buf, int offset, int maxlen, lcmtypes_trajectory_t *p, int elements)\n{\n    int pos = 0, thislen, element;\n\n    for (element = 0; element < elements; element++) {\n\n        thislen = __int32_t_decode_array(buf, offset + pos, maxlen - pos, &(p[element].num_states), 1);\n        if (thislen < 0) return thislen; else pos += thislen;\n\n        p[element].states = (lcmtypes_state_t*) lcm_malloc(sizeof(lcmtypes_state_t) * p[element].num_states);\n        thislen = __lcmtypes_state_t_decode_array(buf, offset + pos, maxlen - pos, p[element].states, p[element].num_states);\n        if (thislen < 0) return thislen; else pos += thislen;\n\n    }\n    return pos;\n}\n\nint __lcmtypes_trajectory_t_decode_array_cleanup(lcmtypes_trajectory_t *p, int elements)\n{\n    int element;\n    for (element = 0; element < elements; element++) {\n\n        __int32_t_decode_array_cleanup(&(p[element].num_states), 1);\n\n        __lcmtypes_state_t_decode_array_cleanup(p[element].states, p[element].num_states);\n        if (p[element].states) free(p[element].states);\n\n    }\n    return 0;\n}\n\nint 
lcmtypes_trajectory_t_decode(const void *buf, int offset, int maxlen, lcmtypes_trajectory_t *p)\n{\n    int pos = 0, thislen;\n    int64_t hash = __lcmtypes_trajectory_t_get_hash();\n\n    int64_t this_hash;\n    thislen = __int64_t_decode_array(buf, offset + pos, maxlen - pos, &this_hash, 1);\n    if (thislen < 0) return thislen; else pos += thislen;\n    if (this_hash != hash) return -1;\n\n    thislen = __lcmtypes_trajectory_t_decode_array(buf, offset + pos, maxlen - pos, p, 1);\n    if (thislen < 0) return thislen; else pos += thislen;\n\n    return pos;\n}\n\nint lcmtypes_trajectory_t_decode_cleanup(lcmtypes_trajectory_t *p)\n{\n    return __lcmtypes_trajectory_t_decode_array_cleanup(p, 1);\n}\n\nint __lcmtypes_trajectory_t_clone_array(const lcmtypes_trajectory_t *p, lcmtypes_trajectory_t *q, int elements)\n{\n    int element;\n    for (element = 0; element < elements; element++) {\n\n        __int32_t_clone_array(&(p[element].num_states), &(q[element].num_states), 1);\n\n        q[element].states = (lcmtypes_state_t*) lcm_malloc(sizeof(lcmtypes_state_t) * q[element].num_states);\n        __lcmtypes_state_t_clone_array(p[element].states, q[element].states, p[element].num_states);\n\n    }\n    return 0;\n}\n\nlcmtypes_trajectory_t *lcmtypes_trajectory_t_copy(const lcmtypes_trajectory_t *p)\n{\n    lcmtypes_trajectory_t *q = (lcmtypes_trajectory_t*) malloc(sizeof(lcmtypes_trajectory_t));\n    __lcmtypes_trajectory_t_clone_array(p, q, 1);\n    return q;\n}\n\nvoid lcmtypes_trajectory_t_destroy(lcmtypes_trajectory_t *p)\n{\n    __lcmtypes_trajectory_t_decode_array_cleanup(p, 1);\n    free(p);\n}\n\nint lcmtypes_trajectory_t_publish(lcm_t *lc, const char *channel, const lcmtypes_trajectory_t *p)\n{\n      int max_data_size = lcmtypes_trajectory_t_encoded_size (p);\n      uint8_t *buf = (uint8_t*) malloc (max_data_size);\n      if (!buf) return -1;\n      int data_size = lcmtypes_trajectory_t_encode (buf, 0, max_data_size, p);\n      if (data_size < 0) {\n          
free (buf);\n          return data_size;\n      }\n      int status = lcm_publish (lc, channel, buf, data_size);\n      free (buf);\n      return status;\n}\n\nstruct _lcmtypes_trajectory_t_subscription_t {\n    lcmtypes_trajectory_t_handler_t user_handler;\n    void *userdata;\n    lcm_subscription_t *lc_h;\n};\nstatic\nvoid lcmtypes_trajectory_t_handler_stub (const lcm_recv_buf_t *rbuf,\n                            const char *channel, void *userdata)\n{\n    int status;\n    lcmtypes_trajectory_t p;\n    memset(&p, 0, sizeof(lcmtypes_trajectory_t));\n    status = lcmtypes_trajectory_t_decode (rbuf->data, 0, rbuf->data_size, &p);\n    if (status < 0) {\n        fprintf (stderr, \"error %d decoding lcmtypes_trajectory_t!!!\\n\", status);\n        return;\n    }\n\n    lcmtypes_trajectory_t_subscription_t *h = (lcmtypes_trajectory_t_subscription_t*) userdata;\n    h->user_handler (rbuf, channel, &p, h->userdata);\n\n    lcmtypes_trajectory_t_decode_cleanup (&p);\n}\n\nlcmtypes_trajectory_t_subscription_t* lcmtypes_trajectory_t_subscribe (lcm_t *lcm,\n                    const char *channel,\n                    lcmtypes_trajectory_t_handler_t f, void *userdata)\n{\n    lcmtypes_trajectory_t_subscription_t *n = (lcmtypes_trajectory_t_subscription_t*)\n                       malloc(sizeof(lcmtypes_trajectory_t_subscription_t));\n    n->user_handler = f;\n    n->userdata = userdata;\n    n->lc_h = lcm_subscribe (lcm, channel,\n                                 lcmtypes_trajectory_t_handler_stub, n);\n    if (n->lc_h == NULL) {\n        fprintf (stderr,\"couldn't reg lcmtypes_trajectory_t LCM handler!\\n\");\n        free (n);\n        return NULL;\n    }\n    return n;\n}\n\nint lcmtypes_trajectory_t_subscription_set_queue_capacity (lcmtypes_trajectory_t_subscription_t* subs,\n                              int num_messages)\n{\n    return lcm_subscription_set_queue_capacity (subs->lc_h, num_messages);\n}\n\nint lcmtypes_trajectory_t_unsubscribe(lcm_t *lcm, 
lcmtypes_trajectory_t_subscription_t* hid)\n{\n    int status = lcm_unsubscribe (lcm, hid->lc_h);\n    if (0 != status) {\n        fprintf(stderr,\n           \"couldn't unsubscribe lcmtypes_trajectory_t_handler %p!\\n\", hid);\n        return -1;\n    }\n    free (hid);\n    return 0;\n}\n\n"
  },
  {
    "path": "data_generation/lcmtypes/lcmtypes/c/lcmtypes/lcmtypes_trajectory_t.h",
    "content": "// THIS IS AN AUTOMATICALLY GENERATED FILE.  DO NOT MODIFY\n// BY HAND!!\n//\n// Generated by lcm-gen\n\n#include <stdint.h>\n#include <stdlib.h>\n#include <lcm/lcm_coretypes.h>\n#include <lcm/lcm.h>\n\n#ifndef _lcmtypes_trajectory_t_h\n#define _lcmtypes_trajectory_t_h\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n#include \"lcmtypes/lcmtypes_state_t.h\"\ntypedef struct _lcmtypes_trajectory_t lcmtypes_trajectory_t;\nstruct _lcmtypes_trajectory_t\n{\n    int32_t    num_states;\n    lcmtypes_state_t *states;\n};\n\n/**\n * Create a deep copy of a lcmtypes_trajectory_t.\n * When no longer needed, destroy it with lcmtypes_trajectory_t_destroy()\n */\nlcmtypes_trajectory_t* lcmtypes_trajectory_t_copy(const lcmtypes_trajectory_t* to_copy);\n\n/**\n * Destroy an instance of lcmtypes_trajectory_t created by lcmtypes_trajectory_t_copy()\n */\nvoid lcmtypes_trajectory_t_destroy(lcmtypes_trajectory_t* to_destroy);\n\n/**\n * Identifies a single subscription.  This is an opaque data type.\n */\ntypedef struct _lcmtypes_trajectory_t_subscription_t lcmtypes_trajectory_t_subscription_t;\n\n/**\n * Prototype for a callback function invoked when a message of type\n * lcmtypes_trajectory_t is received.\n */\ntypedef void(*lcmtypes_trajectory_t_handler_t)(const lcm_recv_buf_t *rbuf,\n             const char *channel, const lcmtypes_trajectory_t *msg, void *userdata);\n\n/**\n * Publish a message of type lcmtypes_trajectory_t using LCM.\n *\n * @param lcm The LCM instance to publish with.\n * @param channel The channel to publish on.\n * @param msg The message to publish.\n * @return 0 on success, <0 on error.  
Success means LCM has transferred\n * responsibility of the message data to the OS.\n */\nint lcmtypes_trajectory_t_publish(lcm_t *lcm, const char *channel, const lcmtypes_trajectory_t *msg);\n\n/**\n * Subscribe to messages of type lcmtypes_trajectory_t using LCM.\n *\n * @param lcm The LCM instance to subscribe with.\n * @param channel The channel to subscribe to.\n * @param handler The callback function invoked by LCM when a message is received.\n *                This function is invoked by LCM during calls to lcm_handle() and\n *                lcm_handle_timeout().\n * @param userdata An opaque pointer passed to @p handler when it is invoked.\n * @return 0 on success, <0 if an error occured\n */\nlcmtypes_trajectory_t_subscription_t* lcmtypes_trajectory_t_subscribe(lcm_t *lcm, const char *channel, lcmtypes_trajectory_t_handler_t handler, void *userdata);\n\n/**\n * Removes and destroys a subscription created by lcmtypes_trajectory_t_subscribe()\n */\nint lcmtypes_trajectory_t_unsubscribe(lcm_t *lcm, lcmtypes_trajectory_t_subscription_t* hid);\n\n/**\n * Sets the queue capacity for a subscription.\n * Some LCM providers (e.g., the default multicast provider) are implemented\n * using a background receive thread that constantly revceives messages from\n * the network.  As these messages are received, they are buffered on\n * per-subscription queues until dispatched by lcm_handle().  
This function\n * how many messages are queued before dropping messages.\n *\n * @param subs the subscription to modify.\n * @param num_messages The maximum number of messages to queue\n *  on the subscription.\n * @return 0 on success, <0 if an error occured\n */\nint lcmtypes_trajectory_t_subscription_set_queue_capacity(lcmtypes_trajectory_t_subscription_t* subs,\n                              int num_messages);\n\n/**\n * Encode a message of type lcmtypes_trajectory_t into binary form.\n *\n * @param buf The output buffer.\n * @param offset Encoding starts at this byte offset into @p buf.\n * @param maxlen Maximum number of bytes to write.  This should generally\n *               be equal to lcmtypes_trajectory_t_encoded_size().\n * @param msg The message to encode.\n * @return The number of bytes encoded, or <0 if an error occured.\n */\nint lcmtypes_trajectory_t_encode(void *buf, int offset, int maxlen, const lcmtypes_trajectory_t *p);\n\n/**\n * Decode a message of type lcmtypes_trajectory_t from binary form.\n * When decoding messages containing strings or variable-length arrays, this\n * function may allocate memory.  
When finished with the decoded message,\n * release allocated resources with lcmtypes_trajectory_t_decode_cleanup().\n *\n * @param buf The buffer containing the encoded message\n * @param offset The byte offset into @p buf where the encoded message starts.\n * @param maxlen The maximum number of bytes to read while decoding.\n * @param msg Output parameter where the decoded message is stored\n * @return The number of bytes decoded, or <0 if an error occured.\n */\nint lcmtypes_trajectory_t_decode(const void *buf, int offset, int maxlen, lcmtypes_trajectory_t *msg);\n\n/**\n * Release resources allocated by lcmtypes_trajectory_t_decode()\n * @return 0\n */\nint lcmtypes_trajectory_t_decode_cleanup(lcmtypes_trajectory_t *p);\n\n/**\n * Check how many bytes are required to encode a message of type lcmtypes_trajectory_t\n */\nint lcmtypes_trajectory_t_encoded_size(const lcmtypes_trajectory_t *p);\n\n// LCM support functions. Users should not call these\nint64_t __lcmtypes_trajectory_t_get_hash(void);\nuint64_t __lcmtypes_trajectory_t_hash_recursive(const __lcm_hash_ptr *p);\nint     __lcmtypes_trajectory_t_encode_array(void *buf, int offset, int maxlen, const lcmtypes_trajectory_t *p, int elements);\nint     __lcmtypes_trajectory_t_decode_array(const void *buf, int offset, int maxlen, lcmtypes_trajectory_t *p, int elements);\nint     __lcmtypes_trajectory_t_decode_array_cleanup(lcmtypes_trajectory_t *p, int elements);\nint     __lcmtypes_trajectory_t_encoded_array_size(const lcmtypes_trajectory_t *p, int elements);\nint     __lcmtypes_trajectory_t_clone_array(const lcmtypes_trajectory_t *p, lcmtypes_trajectory_t *q, int elements);\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif\n"
  },
  {
    "path": "data_generation/lcmtypes/lcmtypes/c/lcmtypes/lcmtypes_vertex_t.c",
    "content": "// THIS IS AN AUTOMATICALLY GENERATED FILE.  DO NOT MODIFY\n// BY HAND!!\n//\n// Generated by lcm-gen\n\n#include <string.h>\n#include \"lcmtypes/lcmtypes_vertex_t.h\"\n\nstatic int __lcmtypes_vertex_t_hash_computed;\nstatic uint64_t __lcmtypes_vertex_t_hash;\n\nuint64_t __lcmtypes_vertex_t_hash_recursive(const __lcm_hash_ptr *p)\n{\n    const __lcm_hash_ptr *fp;\n    for (fp = p; fp != NULL; fp = fp->parent)\n        if (fp->v == __lcmtypes_vertex_t_get_hash)\n            return 0;\n\n    __lcm_hash_ptr cp;\n    cp.parent =  p;\n    cp.v = (void*)__lcmtypes_vertex_t_get_hash;\n    (void) cp;\n\n    uint64_t hash = (uint64_t)0x780573746198cdacLL\n         + __lcmtypes_state_t_hash_recursive(&cp)\n        ;\n\n    return (hash<<1) + ((hash>>63)&1);\n}\n\nint64_t __lcmtypes_vertex_t_get_hash(void)\n{\n    if (!__lcmtypes_vertex_t_hash_computed) {\n        __lcmtypes_vertex_t_hash = (int64_t)__lcmtypes_vertex_t_hash_recursive(NULL);\n        __lcmtypes_vertex_t_hash_computed = 1;\n    }\n\n    return __lcmtypes_vertex_t_hash;\n}\n\nint __lcmtypes_vertex_t_encode_array(void *buf, int offset, int maxlen, const lcmtypes_vertex_t *p, int elements)\n{\n    int pos = 0, element;\n    int thislen;\n\n    for (element = 0; element < elements; element++) {\n\n        thislen = __lcmtypes_state_t_encode_array(buf, offset + pos, maxlen - pos, &(p[element].state), 1);\n        if (thislen < 0) return thislen; else pos += thislen;\n\n    }\n    return pos;\n}\n\nint lcmtypes_vertex_t_encode(void *buf, int offset, int maxlen, const lcmtypes_vertex_t *p)\n{\n    int pos = 0, thislen;\n    int64_t hash = __lcmtypes_vertex_t_get_hash();\n\n    thislen = __int64_t_encode_array(buf, offset + pos, maxlen - pos, &hash, 1);\n    if (thislen < 0) return thislen; else pos += thislen;\n\n    thislen = __lcmtypes_vertex_t_encode_array(buf, offset + pos, maxlen - pos, p, 1);\n    if (thislen < 0) return thislen; else pos += thislen;\n\n    return pos;\n}\n\nint 
__lcmtypes_vertex_t_encoded_array_size(const lcmtypes_vertex_t *p, int elements)\n{\n    int size = 0, element;\n    for (element = 0; element < elements; element++) {\n\n        size += __lcmtypes_state_t_encoded_array_size(&(p[element].state), 1);\n\n    }\n    return size;\n}\n\nint lcmtypes_vertex_t_encoded_size(const lcmtypes_vertex_t *p)\n{\n    return 8 + __lcmtypes_vertex_t_encoded_array_size(p, 1);\n}\n\nint __lcmtypes_vertex_t_decode_array(const void *buf, int offset, int maxlen, lcmtypes_vertex_t *p, int elements)\n{\n    int pos = 0, thislen, element;\n\n    for (element = 0; element < elements; element++) {\n\n        thislen = __lcmtypes_state_t_decode_array(buf, offset + pos, maxlen - pos, &(p[element].state), 1);\n        if (thislen < 0) return thislen; else pos += thislen;\n\n    }\n    return pos;\n}\n\nint __lcmtypes_vertex_t_decode_array_cleanup(lcmtypes_vertex_t *p, int elements)\n{\n    int element;\n    for (element = 0; element < elements; element++) {\n\n        __lcmtypes_state_t_decode_array_cleanup(&(p[element].state), 1);\n\n    }\n    return 0;\n}\n\nint lcmtypes_vertex_t_decode(const void *buf, int offset, int maxlen, lcmtypes_vertex_t *p)\n{\n    int pos = 0, thislen;\n    int64_t hash = __lcmtypes_vertex_t_get_hash();\n\n    int64_t this_hash;\n    thislen = __int64_t_decode_array(buf, offset + pos, maxlen - pos, &this_hash, 1);\n    if (thislen < 0) return thislen; else pos += thislen;\n    if (this_hash != hash) return -1;\n\n    thislen = __lcmtypes_vertex_t_decode_array(buf, offset + pos, maxlen - pos, p, 1);\n    if (thislen < 0) return thislen; else pos += thislen;\n\n    return pos;\n}\n\nint lcmtypes_vertex_t_decode_cleanup(lcmtypes_vertex_t *p)\n{\n    return __lcmtypes_vertex_t_decode_array_cleanup(p, 1);\n}\n\nint __lcmtypes_vertex_t_clone_array(const lcmtypes_vertex_t *p, lcmtypes_vertex_t *q, int elements)\n{\n    int element;\n    for (element = 0; element < elements; element++) {\n\n        
__lcmtypes_state_t_clone_array(&(p[element].state), &(q[element].state), 1);\n\n    }\n    return 0;\n}\n\nlcmtypes_vertex_t *lcmtypes_vertex_t_copy(const lcmtypes_vertex_t *p)\n{\n    lcmtypes_vertex_t *q = (lcmtypes_vertex_t*) malloc(sizeof(lcmtypes_vertex_t));\n    __lcmtypes_vertex_t_clone_array(p, q, 1);\n    return q;\n}\n\nvoid lcmtypes_vertex_t_destroy(lcmtypes_vertex_t *p)\n{\n    __lcmtypes_vertex_t_decode_array_cleanup(p, 1);\n    free(p);\n}\n\nint lcmtypes_vertex_t_publish(lcm_t *lc, const char *channel, const lcmtypes_vertex_t *p)\n{\n      int max_data_size = lcmtypes_vertex_t_encoded_size (p);\n      uint8_t *buf = (uint8_t*) malloc (max_data_size);\n      if (!buf) return -1;\n      int data_size = lcmtypes_vertex_t_encode (buf, 0, max_data_size, p);\n      if (data_size < 0) {\n          free (buf);\n          return data_size;\n      }\n      int status = lcm_publish (lc, channel, buf, data_size);\n      free (buf);\n      return status;\n}\n\nstruct _lcmtypes_vertex_t_subscription_t {\n    lcmtypes_vertex_t_handler_t user_handler;\n    void *userdata;\n    lcm_subscription_t *lc_h;\n};\nstatic\nvoid lcmtypes_vertex_t_handler_stub (const lcm_recv_buf_t *rbuf,\n                            const char *channel, void *userdata)\n{\n    int status;\n    lcmtypes_vertex_t p;\n    memset(&p, 0, sizeof(lcmtypes_vertex_t));\n    status = lcmtypes_vertex_t_decode (rbuf->data, 0, rbuf->data_size, &p);\n    if (status < 0) {\n        fprintf (stderr, \"error %d decoding lcmtypes_vertex_t!!!\\n\", status);\n        return;\n    }\n\n    lcmtypes_vertex_t_subscription_t *h = (lcmtypes_vertex_t_subscription_t*) userdata;\n    h->user_handler (rbuf, channel, &p, h->userdata);\n\n    lcmtypes_vertex_t_decode_cleanup (&p);\n}\n\nlcmtypes_vertex_t_subscription_t* lcmtypes_vertex_t_subscribe (lcm_t *lcm,\n                    const char *channel,\n                    lcmtypes_vertex_t_handler_t f, void *userdata)\n{\n    lcmtypes_vertex_t_subscription_t *n = 
(lcmtypes_vertex_t_subscription_t*)\n                       malloc(sizeof(lcmtypes_vertex_t_subscription_t));\n    n->user_handler = f;\n    n->userdata = userdata;\n    n->lc_h = lcm_subscribe (lcm, channel,\n                                 lcmtypes_vertex_t_handler_stub, n);\n    if (n->lc_h == NULL) {\n        fprintf (stderr,\"couldn't reg lcmtypes_vertex_t LCM handler!\\n\");\n        free (n);\n        return NULL;\n    }\n    return n;\n}\n\nint lcmtypes_vertex_t_subscription_set_queue_capacity (lcmtypes_vertex_t_subscription_t* subs,\n                              int num_messages)\n{\n    return lcm_subscription_set_queue_capacity (subs->lc_h, num_messages);\n}\n\nint lcmtypes_vertex_t_unsubscribe(lcm_t *lcm, lcmtypes_vertex_t_subscription_t* hid)\n{\n    int status = lcm_unsubscribe (lcm, hid->lc_h);\n    if (0 != status) {\n        fprintf(stderr,\n           \"couldn't unsubscribe lcmtypes_vertex_t_handler %p!\\n\", hid);\n        return -1;\n    }\n    free (hid);\n    return 0;\n}\n\n"
  },
  {
    "path": "data_generation/lcmtypes/lcmtypes/c/lcmtypes/lcmtypes_vertex_t.h",
    "content": "// THIS IS AN AUTOMATICALLY GENERATED FILE.  DO NOT MODIFY\n// BY HAND!!\n//\n// Generated by lcm-gen\n\n#include <stdint.h>\n#include <stdlib.h>\n#include <lcm/lcm_coretypes.h>\n#include <lcm/lcm.h>\n\n#ifndef _lcmtypes_vertex_t_h\n#define _lcmtypes_vertex_t_h\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n#include \"lcmtypes/lcmtypes_state_t.h\"\ntypedef struct _lcmtypes_vertex_t lcmtypes_vertex_t;\nstruct _lcmtypes_vertex_t\n{\n    lcmtypes_state_t state;\n};\n\n/**\n * Create a deep copy of a lcmtypes_vertex_t.\n * When no longer needed, destroy it with lcmtypes_vertex_t_destroy()\n */\nlcmtypes_vertex_t* lcmtypes_vertex_t_copy(const lcmtypes_vertex_t* to_copy);\n\n/**\n * Destroy an instance of lcmtypes_vertex_t created by lcmtypes_vertex_t_copy()\n */\nvoid lcmtypes_vertex_t_destroy(lcmtypes_vertex_t* to_destroy);\n\n/**\n * Identifies a single subscription.  This is an opaque data type.\n */\ntypedef struct _lcmtypes_vertex_t_subscription_t lcmtypes_vertex_t_subscription_t;\n\n/**\n * Prototype for a callback function invoked when a message of type\n * lcmtypes_vertex_t is received.\n */\ntypedef void(*lcmtypes_vertex_t_handler_t)(const lcm_recv_buf_t *rbuf,\n             const char *channel, const lcmtypes_vertex_t *msg, void *userdata);\n\n/**\n * Publish a message of type lcmtypes_vertex_t using LCM.\n *\n * @param lcm The LCM instance to publish with.\n * @param channel The channel to publish on.\n * @param msg The message to publish.\n * @return 0 on success, <0 on error.  
Success means LCM has transferred\n * responsibility of the message data to the OS.\n */\nint lcmtypes_vertex_t_publish(lcm_t *lcm, const char *channel, const lcmtypes_vertex_t *msg);\n\n/**\n * Subscribe to messages of type lcmtypes_vertex_t using LCM.\n *\n * @param lcm The LCM instance to subscribe with.\n * @param channel The channel to subscribe to.\n * @param handler The callback function invoked by LCM when a message is received.\n *                This function is invoked by LCM during calls to lcm_handle() and\n *                lcm_handle_timeout().\n * @param userdata An opaque pointer passed to @p handler when it is invoked.\n * @return 0 on success, <0 if an error occured\n */\nlcmtypes_vertex_t_subscription_t* lcmtypes_vertex_t_subscribe(lcm_t *lcm, const char *channel, lcmtypes_vertex_t_handler_t handler, void *userdata);\n\n/**\n * Removes and destroys a subscription created by lcmtypes_vertex_t_subscribe()\n */\nint lcmtypes_vertex_t_unsubscribe(lcm_t *lcm, lcmtypes_vertex_t_subscription_t* hid);\n\n/**\n * Sets the queue capacity for a subscription.\n * Some LCM providers (e.g., the default multicast provider) are implemented\n * using a background receive thread that constantly revceives messages from\n * the network.  As these messages are received, they are buffered on\n * per-subscription queues until dispatched by lcm_handle().  
This function\n * how many messages are queued before dropping messages.\n *\n * @param subs the subscription to modify.\n * @param num_messages The maximum number of messages to queue\n *  on the subscription.\n * @return 0 on success, <0 if an error occured\n */\nint lcmtypes_vertex_t_subscription_set_queue_capacity(lcmtypes_vertex_t_subscription_t* subs,\n                              int num_messages);\n\n/**\n * Encode a message of type lcmtypes_vertex_t into binary form.\n *\n * @param buf The output buffer.\n * @param offset Encoding starts at this byte offset into @p buf.\n * @param maxlen Maximum number of bytes to write.  This should generally\n *               be equal to lcmtypes_vertex_t_encoded_size().\n * @param msg The message to encode.\n * @return The number of bytes encoded, or <0 if an error occured.\n */\nint lcmtypes_vertex_t_encode(void *buf, int offset, int maxlen, const lcmtypes_vertex_t *p);\n\n/**\n * Decode a message of type lcmtypes_vertex_t from binary form.\n * When decoding messages containing strings or variable-length arrays, this\n * function may allocate memory.  
When finished with the decoded message,\n * release allocated resources with lcmtypes_vertex_t_decode_cleanup().\n *\n * @param buf The buffer containing the encoded message\n * @param offset The byte offset into @p buf where the encoded message starts.\n * @param maxlen The maximum number of bytes to read while decoding.\n * @param msg Output parameter where the decoded message is stored\n * @return The number of bytes decoded, or <0 if an error occured.\n */\nint lcmtypes_vertex_t_decode(const void *buf, int offset, int maxlen, lcmtypes_vertex_t *msg);\n\n/**\n * Release resources allocated by lcmtypes_vertex_t_decode()\n * @return 0\n */\nint lcmtypes_vertex_t_decode_cleanup(lcmtypes_vertex_t *p);\n\n/**\n * Check how many bytes are required to encode a message of type lcmtypes_vertex_t\n */\nint lcmtypes_vertex_t_encoded_size(const lcmtypes_vertex_t *p);\n\n// LCM support functions. Users should not call these\nint64_t __lcmtypes_vertex_t_get_hash(void);\nuint64_t __lcmtypes_vertex_t_hash_recursive(const __lcm_hash_ptr *p);\nint     __lcmtypes_vertex_t_encode_array(void *buf, int offset, int maxlen, const lcmtypes_vertex_t *p, int elements);\nint     __lcmtypes_vertex_t_decode_array(const void *buf, int offset, int maxlen, lcmtypes_vertex_t *p, int elements);\nint     __lcmtypes_vertex_t_decode_array_cleanup(lcmtypes_vertex_t *p, int elements);\nint     __lcmtypes_vertex_t_encoded_array_size(const lcmtypes_vertex_t *p, int elements);\nint     __lcmtypes_vertex_t_clone_array(const lcmtypes_vertex_t *p, lcmtypes_vertex_t *q, int elements);\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif\n"
  },
  {
    "path": "data_generation/lcmtypes/lcmtypes/java/lcmtypes/edge_t.java",
    "content": "/* LCM type definition class file\n * This file was automatically generated by lcm-gen\n * DO NOT MODIFY BY HAND!!!!\n */\n\npackage lcmtypes;\n \nimport java.io.*;\nimport java.util.*;\nimport lcm.lcm.*;\n \npublic final class edge_t implements lcm.lcm.LCMEncodable\n{\n    public lcmtypes.vertex_t vertex_src;\n    public lcmtypes.vertex_t vertex_dst;\n    public lcmtypes.trajectory_t trajectory;\n \n    public edge_t()\n    {\n    }\n \n    public static final long LCM_FINGERPRINT;\n    public static final long LCM_FINGERPRINT_BASE = 0x1fae492d71eedf94L;\n \n    static {\n        LCM_FINGERPRINT = _hashRecursive(new ArrayList<Class<?>>());\n    }\n \n    public static long _hashRecursive(ArrayList<Class<?>> classes)\n    {\n        if (classes.contains(lcmtypes.edge_t.class))\n            return 0L;\n \n        classes.add(lcmtypes.edge_t.class);\n        long hash = LCM_FINGERPRINT_BASE\n             + lcmtypes.vertex_t._hashRecursive(classes)\n             + lcmtypes.vertex_t._hashRecursive(classes)\n             + lcmtypes.trajectory_t._hashRecursive(classes)\n            ;\n        classes.remove(classes.size() - 1);\n        return (hash<<1) + ((hash>>63)&1);\n    }\n \n    public void encode(DataOutput outs) throws IOException\n    {\n        outs.writeLong(LCM_FINGERPRINT);\n        _encodeRecursive(outs);\n    }\n \n    public void _encodeRecursive(DataOutput outs) throws IOException\n    {\n        this.vertex_src._encodeRecursive(outs); \n \n        this.vertex_dst._encodeRecursive(outs); \n \n        this.trajectory._encodeRecursive(outs); \n \n    }\n \n    public edge_t(byte[] data) throws IOException\n    {\n        this(new LCMDataInputStream(data));\n    }\n \n    public edge_t(DataInput ins) throws IOException\n    {\n        if (ins.readLong() != LCM_FINGERPRINT)\n            throw new IOException(\"LCM Decode error: bad fingerprint\");\n \n        _decodeRecursive(ins);\n    }\n \n    public static lcmtypes.edge_t 
_decodeRecursiveFactory(DataInput ins) throws IOException\n    {\n        lcmtypes.edge_t o = new lcmtypes.edge_t();\n        o._decodeRecursive(ins);\n        return o;\n    }\n \n    public void _decodeRecursive(DataInput ins) throws IOException\n    {\n        this.vertex_src = lcmtypes.vertex_t._decodeRecursiveFactory(ins);\n \n        this.vertex_dst = lcmtypes.vertex_t._decodeRecursiveFactory(ins);\n \n        this.trajectory = lcmtypes.trajectory_t._decodeRecursiveFactory(ins);\n \n    }\n \n    public lcmtypes.edge_t copy()\n    {\n        lcmtypes.edge_t outobj = new lcmtypes.edge_t();\n        outobj.vertex_src = this.vertex_src.copy();\n \n        outobj.vertex_dst = this.vertex_dst.copy();\n \n        outobj.trajectory = this.trajectory.copy();\n \n        return outobj;\n    }\n \n}\n\n"
  },
  {
    "path": "data_generation/lcmtypes/lcmtypes/java/lcmtypes/environment_t.java",
    "content": "/* LCM type definition class file\n * This file was automatically generated by lcm-gen\n * DO NOT MODIFY BY HAND!!!!\n */\n\npackage lcmtypes;\n \nimport java.io.*;\nimport java.util.*;\nimport lcm.lcm.*;\n \npublic final class environment_t implements lcm.lcm.LCMEncodable\n{\n    public lcmtypes.region_3d_t operating;\n    public lcmtypes.region_3d_t goal;\n    public int num_obstacles;\n    public lcmtypes.region_3d_t obstacles[];\n \n    public environment_t()\n    {\n    }\n \n    public static final long LCM_FINGERPRINT;\n    public static final long LCM_FINGERPRINT_BASE = 0x8caabc2a2ba0f9c7L;\n \n    static {\n        LCM_FINGERPRINT = _hashRecursive(new ArrayList<Class<?>>());\n    }\n \n    public static long _hashRecursive(ArrayList<Class<?>> classes)\n    {\n        if (classes.contains(lcmtypes.environment_t.class))\n            return 0L;\n \n        classes.add(lcmtypes.environment_t.class);\n        long hash = LCM_FINGERPRINT_BASE\n             + lcmtypes.region_3d_t._hashRecursive(classes)\n             + lcmtypes.region_3d_t._hashRecursive(classes)\n             + lcmtypes.region_3d_t._hashRecursive(classes)\n            ;\n        classes.remove(classes.size() - 1);\n        return (hash<<1) + ((hash>>63)&1);\n    }\n \n    public void encode(DataOutput outs) throws IOException\n    {\n        outs.writeLong(LCM_FINGERPRINT);\n        _encodeRecursive(outs);\n    }\n \n    public void _encodeRecursive(DataOutput outs) throws IOException\n    {\n        this.operating._encodeRecursive(outs); \n \n        this.goal._encodeRecursive(outs); \n \n        outs.writeInt(this.num_obstacles); \n \n        for (int a = 0; a < this.num_obstacles; a++) {\n            this.obstacles[a]._encodeRecursive(outs); \n        }\n \n    }\n \n    public environment_t(byte[] data) throws IOException\n    {\n        this(new LCMDataInputStream(data));\n    }\n \n    public environment_t(DataInput ins) throws IOException\n    {\n        if (ins.readLong() 
!= LCM_FINGERPRINT)\n            throw new IOException(\"LCM Decode error: bad fingerprint\");\n \n        _decodeRecursive(ins);\n    }\n \n    public static lcmtypes.environment_t _decodeRecursiveFactory(DataInput ins) throws IOException\n    {\n        lcmtypes.environment_t o = new lcmtypes.environment_t();\n        o._decodeRecursive(ins);\n        return o;\n    }\n \n    public void _decodeRecursive(DataInput ins) throws IOException\n    {\n        this.operating = lcmtypes.region_3d_t._decodeRecursiveFactory(ins);\n \n        this.goal = lcmtypes.region_3d_t._decodeRecursiveFactory(ins);\n \n        this.num_obstacles = ins.readInt();\n \n        this.obstacles = new lcmtypes.region_3d_t[(int) num_obstacles];\n        for (int a = 0; a < this.num_obstacles; a++) {\n            this.obstacles[a] = lcmtypes.region_3d_t._decodeRecursiveFactory(ins);\n        }\n \n    }\n \n    public lcmtypes.environment_t copy()\n    {\n        lcmtypes.environment_t outobj = new lcmtypes.environment_t();\n        outobj.operating = this.operating.copy();\n \n        outobj.goal = this.goal.copy();\n \n        outobj.num_obstacles = this.num_obstacles;\n \n        outobj.obstacles = new lcmtypes.region_3d_t[(int) num_obstacles];\n        for (int a = 0; a < this.num_obstacles; a++) {\n            outobj.obstacles[a] = this.obstacles[a].copy();\n        }\n \n        return outobj;\n    }\n \n}\n\n"
  },
  {
    "path": "data_generation/lcmtypes/lcmtypes/java/lcmtypes/graph_t.java",
    "content": "/* LCM type definition class file\n * This file was automatically generated by lcm-gen\n * DO NOT MODIFY BY HAND!!!!\n */\n\npackage lcmtypes;\n \nimport java.io.*;\nimport java.util.*;\nimport lcm.lcm.*;\n \npublic final class graph_t implements lcm.lcm.LCMEncodable\n{\n    public int num_vertices;\n    public lcmtypes.vertex_t vertices[];\n    public int num_edges;\n    public lcmtypes.edge_t edges[];\n \n    public graph_t()\n    {\n    }\n \n    public static final long LCM_FINGERPRINT;\n    public static final long LCM_FINGERPRINT_BASE = 0x49189ad7b639b453L;\n \n    static {\n        LCM_FINGERPRINT = _hashRecursive(new ArrayList<Class<?>>());\n    }\n \n    public static long _hashRecursive(ArrayList<Class<?>> classes)\n    {\n        if (classes.contains(lcmtypes.graph_t.class))\n            return 0L;\n \n        classes.add(lcmtypes.graph_t.class);\n        long hash = LCM_FINGERPRINT_BASE\n             + lcmtypes.vertex_t._hashRecursive(classes)\n             + lcmtypes.edge_t._hashRecursive(classes)\n            ;\n        classes.remove(classes.size() - 1);\n        return (hash<<1) + ((hash>>63)&1);\n    }\n \n    public void encode(DataOutput outs) throws IOException\n    {\n        outs.writeLong(LCM_FINGERPRINT);\n        _encodeRecursive(outs);\n    }\n \n    public void _encodeRecursive(DataOutput outs) throws IOException\n    {\n        outs.writeInt(this.num_vertices); \n \n        for (int a = 0; a < this.num_vertices; a++) {\n            this.vertices[a]._encodeRecursive(outs); \n        }\n \n        outs.writeInt(this.num_edges); \n \n        for (int a = 0; a < this.num_edges; a++) {\n            this.edges[a]._encodeRecursive(outs); \n        }\n \n    }\n \n    public graph_t(byte[] data) throws IOException\n    {\n        this(new LCMDataInputStream(data));\n    }\n \n    public graph_t(DataInput ins) throws IOException\n    {\n        if (ins.readLong() != LCM_FINGERPRINT)\n            throw new IOException(\"LCM Decode 
error: bad fingerprint\");\n \n        _decodeRecursive(ins);\n    }\n \n    public static lcmtypes.graph_t _decodeRecursiveFactory(DataInput ins) throws IOException\n    {\n        lcmtypes.graph_t o = new lcmtypes.graph_t();\n        o._decodeRecursive(ins);\n        return o;\n    }\n \n    public void _decodeRecursive(DataInput ins) throws IOException\n    {\n        this.num_vertices = ins.readInt();\n \n        this.vertices = new lcmtypes.vertex_t[(int) num_vertices];\n        for (int a = 0; a < this.num_vertices; a++) {\n            this.vertices[a] = lcmtypes.vertex_t._decodeRecursiveFactory(ins);\n        }\n \n        this.num_edges = ins.readInt();\n \n        this.edges = new lcmtypes.edge_t[(int) num_edges];\n        for (int a = 0; a < this.num_edges; a++) {\n            this.edges[a] = lcmtypes.edge_t._decodeRecursiveFactory(ins);\n        }\n \n    }\n \n    public lcmtypes.graph_t copy()\n    {\n        lcmtypes.graph_t outobj = new lcmtypes.graph_t();\n        outobj.num_vertices = this.num_vertices;\n \n        outobj.vertices = new lcmtypes.vertex_t[(int) num_vertices];\n        for (int a = 0; a < this.num_vertices; a++) {\n            outobj.vertices[a] = this.vertices[a].copy();\n        }\n \n        outobj.num_edges = this.num_edges;\n \n        outobj.edges = new lcmtypes.edge_t[(int) num_edges];\n        for (int a = 0; a < this.num_edges; a++) {\n            outobj.edges[a] = this.edges[a].copy();\n        }\n \n        return outobj;\n    }\n \n}\n\n"
  },
  {
    "path": "data_generation/lcmtypes/lcmtypes/java/lcmtypes/region_3d_t.java",
    "content": "/* LCM type definition class file\n * This file was automatically generated by lcm-gen\n * DO NOT MODIFY BY HAND!!!!\n */\n\npackage lcmtypes;\n \nimport java.io.*;\nimport java.util.*;\nimport lcm.lcm.*;\n \npublic final class region_3d_t implements lcm.lcm.LCMEncodable\n{\n    public double center[];\n    public double size[];\n \n    public region_3d_t()\n    {\n        center = new double[3];\n        size = new double[3];\n    }\n \n    public static final long LCM_FINGERPRINT;\n    public static final long LCM_FINGERPRINT_BASE = 0x94830fc8d7404191L;\n \n    static {\n        LCM_FINGERPRINT = _hashRecursive(new ArrayList<Class<?>>());\n    }\n \n    public static long _hashRecursive(ArrayList<Class<?>> classes)\n    {\n        if (classes.contains(lcmtypes.region_3d_t.class))\n            return 0L;\n \n        classes.add(lcmtypes.region_3d_t.class);\n        long hash = LCM_FINGERPRINT_BASE\n            ;\n        classes.remove(classes.size() - 1);\n        return (hash<<1) + ((hash>>63)&1);\n    }\n \n    public void encode(DataOutput outs) throws IOException\n    {\n        outs.writeLong(LCM_FINGERPRINT);\n        _encodeRecursive(outs);\n    }\n \n    public void _encodeRecursive(DataOutput outs) throws IOException\n    {\n        for (int a = 0; a < 3; a++) {\n            outs.writeDouble(this.center[a]); \n        }\n \n        for (int a = 0; a < 3; a++) {\n            outs.writeDouble(this.size[a]); \n        }\n \n    }\n \n    public region_3d_t(byte[] data) throws IOException\n    {\n        this(new LCMDataInputStream(data));\n    }\n \n    public region_3d_t(DataInput ins) throws IOException\n    {\n        if (ins.readLong() != LCM_FINGERPRINT)\n            throw new IOException(\"LCM Decode error: bad fingerprint\");\n \n        _decodeRecursive(ins);\n    }\n \n    public static lcmtypes.region_3d_t _decodeRecursiveFactory(DataInput ins) throws IOException\n    {\n        lcmtypes.region_3d_t o = new 
lcmtypes.region_3d_t();\n        o._decodeRecursive(ins);\n        return o;\n    }\n \n    public void _decodeRecursive(DataInput ins) throws IOException\n    {\n        this.center = new double[(int) 3];\n        for (int a = 0; a < 3; a++) {\n            this.center[a] = ins.readDouble();\n        }\n \n        this.size = new double[(int) 3];\n        for (int a = 0; a < 3; a++) {\n            this.size[a] = ins.readDouble();\n        }\n \n    }\n \n    public lcmtypes.region_3d_t copy()\n    {\n        lcmtypes.region_3d_t outobj = new lcmtypes.region_3d_t();\n        outobj.center = new double[(int) 3];\n        System.arraycopy(this.center, 0, outobj.center, 0, 3); \n        outobj.size = new double[(int) 3];\n        System.arraycopy(this.size, 0, outobj.size, 0, 3); \n        return outobj;\n    }\n \n}\n\n"
  },
  {
    "path": "data_generation/lcmtypes/lcmtypes/java/lcmtypes/state_t.java",
    "content": "/* LCM type definition class file\n * This file was automatically generated by lcm-gen\n * DO NOT MODIFY BY HAND!!!!\n */\n\npackage lcmtypes;\n \nimport java.io.*;\nimport java.util.*;\nimport lcm.lcm.*;\n \npublic final class state_t implements lcm.lcm.LCMEncodable\n{\n    public double x;\n    public double y;\n    public double z;\n \n    public state_t()\n    {\n    }\n \n    public static final long LCM_FINGERPRINT;\n    public static final long LCM_FINGERPRINT_BASE = 0x573f2fdd2f76508fL;\n \n    static {\n        LCM_FINGERPRINT = _hashRecursive(new ArrayList<Class<?>>());\n    }\n \n    public static long _hashRecursive(ArrayList<Class<?>> classes)\n    {\n        if (classes.contains(lcmtypes.state_t.class))\n            return 0L;\n \n        classes.add(lcmtypes.state_t.class);\n        long hash = LCM_FINGERPRINT_BASE\n            ;\n        classes.remove(classes.size() - 1);\n        return (hash<<1) + ((hash>>63)&1);\n    }\n \n    public void encode(DataOutput outs) throws IOException\n    {\n        outs.writeLong(LCM_FINGERPRINT);\n        _encodeRecursive(outs);\n    }\n \n    public void _encodeRecursive(DataOutput outs) throws IOException\n    {\n        outs.writeDouble(this.x); \n \n        outs.writeDouble(this.y); \n \n        outs.writeDouble(this.z); \n \n    }\n \n    public state_t(byte[] data) throws IOException\n    {\n        this(new LCMDataInputStream(data));\n    }\n \n    public state_t(DataInput ins) throws IOException\n    {\n        if (ins.readLong() != LCM_FINGERPRINT)\n            throw new IOException(\"LCM Decode error: bad fingerprint\");\n \n        _decodeRecursive(ins);\n    }\n \n    public static lcmtypes.state_t _decodeRecursiveFactory(DataInput ins) throws IOException\n    {\n        lcmtypes.state_t o = new lcmtypes.state_t();\n        o._decodeRecursive(ins);\n        return o;\n    }\n \n    public void _decodeRecursive(DataInput ins) throws IOException\n    {\n        this.x = 
ins.readDouble();\n \n        this.y = ins.readDouble();\n \n        this.z = ins.readDouble();\n \n    }\n \n    public lcmtypes.state_t copy()\n    {\n        lcmtypes.state_t outobj = new lcmtypes.state_t();\n        outobj.x = this.x;\n \n        outobj.y = this.y;\n \n        outobj.z = this.z;\n \n        return outobj;\n    }\n \n}\n\n"
  },
  {
    "path": "data_generation/lcmtypes/lcmtypes/java/lcmtypes/trajectory_t.java",
    "content": "/* LCM type definition class file\n * This file was automatically generated by lcm-gen\n * DO NOT MODIFY BY HAND!!!!\n */\n\npackage lcmtypes;\n \nimport java.io.*;\nimport java.util.*;\nimport lcm.lcm.*;\n \npublic final class trajectory_t implements lcm.lcm.LCMEncodable\n{\n    public int num_states;\n    public lcmtypes.state_t states[];\n \n    public trajectory_t()\n    {\n    }\n \n    public static final long LCM_FINGERPRINT;\n    public static final long LCM_FINGERPRINT_BASE = 0x67039c5ec5ece44fL;\n \n    static {\n        LCM_FINGERPRINT = _hashRecursive(new ArrayList<Class<?>>());\n    }\n \n    public static long _hashRecursive(ArrayList<Class<?>> classes)\n    {\n        if (classes.contains(lcmtypes.trajectory_t.class))\n            return 0L;\n \n        classes.add(lcmtypes.trajectory_t.class);\n        long hash = LCM_FINGERPRINT_BASE\n             + lcmtypes.state_t._hashRecursive(classes)\n            ;\n        classes.remove(classes.size() - 1);\n        return (hash<<1) + ((hash>>63)&1);\n    }\n \n    public void encode(DataOutput outs) throws IOException\n    {\n        outs.writeLong(LCM_FINGERPRINT);\n        _encodeRecursive(outs);\n    }\n \n    public void _encodeRecursive(DataOutput outs) throws IOException\n    {\n        outs.writeInt(this.num_states); \n \n        for (int a = 0; a < this.num_states; a++) {\n            this.states[a]._encodeRecursive(outs); \n        }\n \n    }\n \n    public trajectory_t(byte[] data) throws IOException\n    {\n        this(new LCMDataInputStream(data));\n    }\n \n    public trajectory_t(DataInput ins) throws IOException\n    {\n        if (ins.readLong() != LCM_FINGERPRINT)\n            throw new IOException(\"LCM Decode error: bad fingerprint\");\n \n        _decodeRecursive(ins);\n    }\n \n    public static lcmtypes.trajectory_t _decodeRecursiveFactory(DataInput ins) throws IOException\n    {\n        lcmtypes.trajectory_t o = new lcmtypes.trajectory_t();\n        
o._decodeRecursive(ins);\n        return o;\n    }\n \n    public void _decodeRecursive(DataInput ins) throws IOException\n    {\n        this.num_states = ins.readInt();\n \n        this.states = new lcmtypes.state_t[(int) num_states];\n        for (int a = 0; a < this.num_states; a++) {\n            this.states[a] = lcmtypes.state_t._decodeRecursiveFactory(ins);\n        }\n \n    }\n \n    public lcmtypes.trajectory_t copy()\n    {\n        lcmtypes.trajectory_t outobj = new lcmtypes.trajectory_t();\n        outobj.num_states = this.num_states;\n \n        outobj.states = new lcmtypes.state_t[(int) num_states];\n        for (int a = 0; a < this.num_states; a++) {\n            outobj.states[a] = this.states[a].copy();\n        }\n \n        return outobj;\n    }\n \n}\n\n"
  },
  {
    "path": "data_generation/lcmtypes/lcmtypes/java/lcmtypes/vertex_t.java",
    "content": "/* LCM type definition class file\n * This file was automatically generated by lcm-gen\n * DO NOT MODIFY BY HAND!!!!\n */\n\npackage lcmtypes;\n \nimport java.io.*;\nimport java.util.*;\nimport lcm.lcm.*;\n \npublic final class vertex_t implements lcm.lcm.LCMEncodable\n{\n    public lcmtypes.state_t state;\n \n    public vertex_t()\n    {\n    }\n \n    public static final long LCM_FINGERPRINT;\n    public static final long LCM_FINGERPRINT_BASE = 0x780573746198cdacL;\n \n    static {\n        LCM_FINGERPRINT = _hashRecursive(new ArrayList<Class<?>>());\n    }\n \n    public static long _hashRecursive(ArrayList<Class<?>> classes)\n    {\n        if (classes.contains(lcmtypes.vertex_t.class))\n            return 0L;\n \n        classes.add(lcmtypes.vertex_t.class);\n        long hash = LCM_FINGERPRINT_BASE\n             + lcmtypes.state_t._hashRecursive(classes)\n            ;\n        classes.remove(classes.size() - 1);\n        return (hash<<1) + ((hash>>63)&1);\n    }\n \n    public void encode(DataOutput outs) throws IOException\n    {\n        outs.writeLong(LCM_FINGERPRINT);\n        _encodeRecursive(outs);\n    }\n \n    public void _encodeRecursive(DataOutput outs) throws IOException\n    {\n        this.state._encodeRecursive(outs); \n \n    }\n \n    public vertex_t(byte[] data) throws IOException\n    {\n        this(new LCMDataInputStream(data));\n    }\n \n    public vertex_t(DataInput ins) throws IOException\n    {\n        if (ins.readLong() != LCM_FINGERPRINT)\n            throw new IOException(\"LCM Decode error: bad fingerprint\");\n \n        _decodeRecursive(ins);\n    }\n \n    public static lcmtypes.vertex_t _decodeRecursiveFactory(DataInput ins) throws IOException\n    {\n        lcmtypes.vertex_t o = new lcmtypes.vertex_t();\n        o._decodeRecursive(ins);\n        return o;\n    }\n \n    public void _decodeRecursive(DataInput ins) throws IOException\n    {\n        this.state = 
lcmtypes.state_t._decodeRecursiveFactory(ins);\n \n    }\n \n    public lcmtypes.vertex_t copy()\n    {\n        lcmtypes.vertex_t outobj = new lcmtypes.vertex_t();\n        outobj.state = this.state.copy();\n \n        return outobj;\n    }\n \n}\n\n"
  },
  {
    "path": "data_generation/lcmtypes/lcmtypes/lcmtypes_edge_t.lcm",
    "content": "package lcmtypes;\n\nstruct edge_t {\n\n    vertex_t vertex_src;\n    vertex_t vertex_dst;\n    trajectory_t trajectory;\n}   "
  },
  {
    "path": "data_generation/lcmtypes/lcmtypes/lcmtypes_environment_t.lcm",
    "content": "package lcmtypes;\n\nstruct environment_t {\n    region_3d_t operating;\n    region_3d_t goal;\n\n    int32_t num_obstacles;\n    region_3d_t obstacles[num_obstacles];\n}"
  },
  {
    "path": "data_generation/lcmtypes/lcmtypes/lcmtypes_graph_t.lcm",
    "content": "package lcmtypes;\n\nstruct graph_t {\n    \n    int32_t num_vertices;\n    vertex_t vertices[num_vertices];\n\n    int32_t num_edges;\n    edge_t edges[num_edges];\n}"
  },
  {
    "path": "data_generation/lcmtypes/lcmtypes/lcmtypes_region_3d_t.lcm",
    "content": "package lcmtypes;\n\nstruct region_3d_t {\n    double center[3];\n    double size[3];\n}"
  },
  {
    "path": "data_generation/lcmtypes/lcmtypes/lcmtypes_state_t.lcm",
    "content": "package lcmtypes;\n\nstruct state_t {\n\n    double x;\n    double y;\n    double z;\n}"
  },
  {
    "path": "data_generation/lcmtypes/lcmtypes/lcmtypes_trajectory_t.lcm",
    "content": "package lcmtypes;\n\nstruct trajectory_t {\n    \n    int32_t num_states;\n    state_t states[num_states];\n}"
  },
  {
    "path": "data_generation/lcmtypes/lcmtypes/lcmtypes_vertex_t.lcm",
    "content": "package lcmtypes;\n\nstruct vertex_t {\n    \n    state_t state;\n}"
  },
  {
    "path": "data_generation/lcmtypes/lcmtypes/python/lcmtypes/__init__.py",
    "content": "\"\"\"LCM package __init__.py file\nThis file automatically generated by lcm-gen.\nDO NOT MODIFY BY HAND!!!!\n\"\"\"\n\nfrom .environment_t import environment_t\nfrom .region_3d_t import region_3d_t\nfrom .trajectory_t import trajectory_t\nfrom .graph_t import graph_t\nfrom .edge_t import edge_t\nfrom .vertex_t import vertex_t\nfrom .state_t import state_t\n"
  },
  {
    "path": "data_generation/lcmtypes/lcmtypes/python/lcmtypes/edge_t.py",
    "content": "\"\"\"LCM type definitions\nThis file automatically generated by lcm.\nDO NOT MODIFY BY HAND!!!!\n\"\"\"\n\ntry:\n    import cStringIO.StringIO as BytesIO\nexcept ImportError:\n    from io import BytesIO\nimport struct\n\nimport lcmtypes.trajectory_t\n\nimport lcmtypes.vertex_t\n\nclass edge_t(object):\n    __slots__ = [\"vertex_src\", \"vertex_dst\", \"trajectory\"]\n\n    def __init__(self):\n        self.vertex_src = lcmtypes.vertex_t()\n        self.vertex_dst = lcmtypes.vertex_t()\n        self.trajectory = lcmtypes.trajectory_t()\n\n    def encode(self):\n        buf = BytesIO()\n        buf.write(edge_t._get_packed_fingerprint())\n        self._encode_one(buf)\n        return buf.getvalue()\n\n    def _encode_one(self, buf):\n        assert self.vertex_src._get_packed_fingerprint() == lcmtypes.vertex_t._get_packed_fingerprint()\n        self.vertex_src._encode_one(buf)\n        assert self.vertex_dst._get_packed_fingerprint() == lcmtypes.vertex_t._get_packed_fingerprint()\n        self.vertex_dst._encode_one(buf)\n        assert self.trajectory._get_packed_fingerprint() == lcmtypes.trajectory_t._get_packed_fingerprint()\n        self.trajectory._encode_one(buf)\n\n    def decode(data):\n        if hasattr(data, 'read'):\n            buf = data\n        else:\n            buf = BytesIO(data)\n        if buf.read(8) != edge_t._get_packed_fingerprint():\n            raise ValueError(\"Decode error\")\n        return edge_t._decode_one(buf)\n    decode = staticmethod(decode)\n\n    def _decode_one(buf):\n        self = edge_t()\n        self.vertex_src = lcmtypes.vertex_t._decode_one(buf)\n        self.vertex_dst = lcmtypes.vertex_t._decode_one(buf)\n        self.trajectory = lcmtypes.trajectory_t._decode_one(buf)\n        return self\n    _decode_one = staticmethod(_decode_one)\n\n    _hash = None\n    def _get_hash_recursive(parents):\n        if edge_t in parents: return 0\n        newparents = parents + [edge_t]\n        tmphash = 
(0x1fae492d71eedf94+ lcmtypes.vertex_t._get_hash_recursive(newparents)+ lcmtypes.vertex_t._get_hash_recursive(newparents)+ lcmtypes.trajectory_t._get_hash_recursive(newparents)) & 0xffffffffffffffff\n        tmphash  = (((tmphash<<1)&0xffffffffffffffff)  + (tmphash>>63)) & 0xffffffffffffffff\n        return tmphash\n    _get_hash_recursive = staticmethod(_get_hash_recursive)\n    _packed_fingerprint = None\n\n    def _get_packed_fingerprint():\n        if edge_t._packed_fingerprint is None:\n            edge_t._packed_fingerprint = struct.pack(\">Q\", edge_t._get_hash_recursive([]))\n        return edge_t._packed_fingerprint\n    _get_packed_fingerprint = staticmethod(_get_packed_fingerprint)\n\n"
  },
  {
    "path": "data_generation/lcmtypes/lcmtypes/python/lcmtypes/environment_t.py",
    "content": "\"\"\"LCM type definitions\nThis file automatically generated by lcm.\nDO NOT MODIFY BY HAND!!!!\n\"\"\"\n\ntry:\n    import cStringIO.StringIO as BytesIO\nexcept ImportError:\n    from io import BytesIO\nimport struct\n\nimport lcmtypes.region_3d_t\n\nclass environment_t(object):\n    __slots__ = [\"operating\", \"goal\", \"num_obstacles\", \"obstacles\"]\n\n    def __init__(self):\n        self.operating = lcmtypes.region_3d_t()\n        self.goal = lcmtypes.region_3d_t()\n        self.num_obstacles = 0\n        self.obstacles = []\n\n    def encode(self):\n        buf = BytesIO()\n        buf.write(environment_t._get_packed_fingerprint())\n        self._encode_one(buf)\n        return buf.getvalue()\n\n    def _encode_one(self, buf):\n        assert self.operating._get_packed_fingerprint() == lcmtypes.region_3d_t._get_packed_fingerprint()\n        self.operating._encode_one(buf)\n        assert self.goal._get_packed_fingerprint() == lcmtypes.region_3d_t._get_packed_fingerprint()\n        self.goal._encode_one(buf)\n        buf.write(struct.pack(\">i\", self.num_obstacles))\n        for i0 in range(self.num_obstacles):\n            assert self.obstacles[i0]._get_packed_fingerprint() == lcmtypes.region_3d_t._get_packed_fingerprint()\n            self.obstacles[i0]._encode_one(buf)\n\n    def decode(data):\n        if hasattr(data, 'read'):\n            buf = data\n        else:\n            buf = BytesIO(data)\n        if buf.read(8) != environment_t._get_packed_fingerprint():\n            raise ValueError(\"Decode error\")\n        return environment_t._decode_one(buf)\n    decode = staticmethod(decode)\n\n    def _decode_one(buf):\n        self = environment_t()\n        self.operating = lcmtypes.region_3d_t._decode_one(buf)\n        self.goal = lcmtypes.region_3d_t._decode_one(buf)\n        self.num_obstacles = struct.unpack(\">i\", buf.read(4))[0]\n        self.obstacles = []\n        for i0 in range(self.num_obstacles):\n            
self.obstacles.append(lcmtypes.region_3d_t._decode_one(buf))\n        return self\n    _decode_one = staticmethod(_decode_one)\n\n    _hash = None\n    def _get_hash_recursive(parents):\n        if environment_t in parents: return 0\n        newparents = parents + [environment_t]\n        tmphash = (0x8caabc2a2ba0f9c7+ lcmtypes.region_3d_t._get_hash_recursive(newparents)+ lcmtypes.region_3d_t._get_hash_recursive(newparents)+ lcmtypes.region_3d_t._get_hash_recursive(newparents)) & 0xffffffffffffffff\n        tmphash  = (((tmphash<<1)&0xffffffffffffffff)  + (tmphash>>63)) & 0xffffffffffffffff\n        return tmphash\n    _get_hash_recursive = staticmethod(_get_hash_recursive)\n    _packed_fingerprint = None\n\n    def _get_packed_fingerprint():\n        if environment_t._packed_fingerprint is None:\n            environment_t._packed_fingerprint = struct.pack(\">Q\", environment_t._get_hash_recursive([]))\n        return environment_t._packed_fingerprint\n    _get_packed_fingerprint = staticmethod(_get_packed_fingerprint)\n\n"
  },
  {
    "path": "data_generation/lcmtypes/lcmtypes/python/lcmtypes/graph_t.py",
    "content": "\"\"\"LCM type definitions\nThis file automatically generated by lcm.\nDO NOT MODIFY BY HAND!!!!\n\"\"\"\n\ntry:\n    import cStringIO.StringIO as BytesIO\nexcept ImportError:\n    from io import BytesIO\nimport struct\n\nimport lcmtypes.vertex_t\n\nimport lcmtypes.edge_t\n\nclass graph_t(object):\n    __slots__ = [\"num_vertices\", \"vertices\", \"num_edges\", \"edges\"]\n\n    def __init__(self):\n        self.num_vertices = 0\n        self.vertices = []\n        self.num_edges = 0\n        self.edges = []\n\n    def encode(self):\n        buf = BytesIO()\n        buf.write(graph_t._get_packed_fingerprint())\n        self._encode_one(buf)\n        return buf.getvalue()\n\n    def _encode_one(self, buf):\n        buf.write(struct.pack(\">i\", self.num_vertices))\n        for i0 in range(self.num_vertices):\n            assert self.vertices[i0]._get_packed_fingerprint() == lcmtypes.vertex_t._get_packed_fingerprint()\n            self.vertices[i0]._encode_one(buf)\n        buf.write(struct.pack(\">i\", self.num_edges))\n        for i0 in range(self.num_edges):\n            assert self.edges[i0]._get_packed_fingerprint() == lcmtypes.edge_t._get_packed_fingerprint()\n            self.edges[i0]._encode_one(buf)\n\n    def decode(data):\n        if hasattr(data, 'read'):\n            buf = data\n        else:\n            buf = BytesIO(data)\n        if buf.read(8) != graph_t._get_packed_fingerprint():\n            raise ValueError(\"Decode error\")\n        return graph_t._decode_one(buf)\n    decode = staticmethod(decode)\n\n    def _decode_one(buf):\n        self = graph_t()\n        self.num_vertices = struct.unpack(\">i\", buf.read(4))[0]\n        self.vertices = []\n        for i0 in range(self.num_vertices):\n            self.vertices.append(lcmtypes.vertex_t._decode_one(buf))\n        self.num_edges = struct.unpack(\">i\", buf.read(4))[0]\n        self.edges = []\n        for i0 in range(self.num_edges):\n            
self.edges.append(lcmtypes.edge_t._decode_one(buf))\n        return self\n    _decode_one = staticmethod(_decode_one)\n\n    _hash = None\n    def _get_hash_recursive(parents):\n        if graph_t in parents: return 0\n        newparents = parents + [graph_t]\n        tmphash = (0x49189ad7b639b453+ lcmtypes.vertex_t._get_hash_recursive(newparents)+ lcmtypes.edge_t._get_hash_recursive(newparents)) & 0xffffffffffffffff\n        tmphash  = (((tmphash<<1)&0xffffffffffffffff)  + (tmphash>>63)) & 0xffffffffffffffff\n        return tmphash\n    _get_hash_recursive = staticmethod(_get_hash_recursive)\n    _packed_fingerprint = None\n\n    def _get_packed_fingerprint():\n        if graph_t._packed_fingerprint is None:\n            graph_t._packed_fingerprint = struct.pack(\">Q\", graph_t._get_hash_recursive([]))\n        return graph_t._packed_fingerprint\n    _get_packed_fingerprint = staticmethod(_get_packed_fingerprint)\n\n"
  },
  {
    "path": "data_generation/lcmtypes/lcmtypes/python/lcmtypes/region_3d_t.py",
    "content": "\"\"\"LCM type definitions\nThis file automatically generated by lcm.\nDO NOT MODIFY BY HAND!!!!\n\"\"\"\n\ntry:\n    import cStringIO.StringIO as BytesIO\nexcept ImportError:\n    from io import BytesIO\nimport struct\n\nclass region_3d_t(object):\n    __slots__ = [\"center\", \"size\"]\n\n    def __init__(self):\n        self.center = [ 0.0 for dim0 in range(3) ]\n        self.size = [ 0.0 for dim0 in range(3) ]\n\n    def encode(self):\n        buf = BytesIO()\n        buf.write(region_3d_t._get_packed_fingerprint())\n        self._encode_one(buf)\n        return buf.getvalue()\n\n    def _encode_one(self, buf):\n        buf.write(struct.pack('>3d', *self.center[:3]))\n        buf.write(struct.pack('>3d', *self.size[:3]))\n\n    def decode(data):\n        if hasattr(data, 'read'):\n            buf = data\n        else:\n            buf = BytesIO(data)\n        if buf.read(8) != region_3d_t._get_packed_fingerprint():\n            raise ValueError(\"Decode error\")\n        return region_3d_t._decode_one(buf)\n    decode = staticmethod(decode)\n\n    def _decode_one(buf):\n        self = region_3d_t()\n        self.center = struct.unpack('>3d', buf.read(24))\n        self.size = struct.unpack('>3d', buf.read(24))\n        return self\n    _decode_one = staticmethod(_decode_one)\n\n    _hash = None\n    def _get_hash_recursive(parents):\n        if region_3d_t in parents: return 0\n        tmphash = (0x94830fc8d7404191) & 0xffffffffffffffff\n        tmphash  = (((tmphash<<1)&0xffffffffffffffff)  + (tmphash>>63)) & 0xffffffffffffffff\n        return tmphash\n    _get_hash_recursive = staticmethod(_get_hash_recursive)\n    _packed_fingerprint = None\n\n    def _get_packed_fingerprint():\n        if region_3d_t._packed_fingerprint is None:\n            region_3d_t._packed_fingerprint = struct.pack(\">Q\", region_3d_t._get_hash_recursive([]))\n        return region_3d_t._packed_fingerprint\n    _get_packed_fingerprint = 
staticmethod(_get_packed_fingerprint)\n\n"
  },
  {
    "path": "data_generation/lcmtypes/lcmtypes/python/lcmtypes/state_t.py",
    "content": "\"\"\"LCM type definitions\nThis file automatically generated by lcm.\nDO NOT MODIFY BY HAND!!!!\n\"\"\"\n\ntry:\n    import cStringIO.StringIO as BytesIO\nexcept ImportError:\n    from io import BytesIO\nimport struct\n\nclass state_t(object):\n    __slots__ = [\"x\", \"y\", \"z\"]\n\n    def __init__(self):\n        self.x = 0.0\n        self.y = 0.0\n        self.z = 0.0\n\n    def encode(self):\n        buf = BytesIO()\n        buf.write(state_t._get_packed_fingerprint())\n        self._encode_one(buf)\n        return buf.getvalue()\n\n    def _encode_one(self, buf):\n        buf.write(struct.pack(\">ddd\", self.x, self.y, self.z))\n\n    def decode(data):\n        if hasattr(data, 'read'):\n            buf = data\n        else:\n            buf = BytesIO(data)\n        if buf.read(8) != state_t._get_packed_fingerprint():\n            raise ValueError(\"Decode error\")\n        return state_t._decode_one(buf)\n    decode = staticmethod(decode)\n\n    def _decode_one(buf):\n        self = state_t()\n        self.x, self.y, self.z = struct.unpack(\">ddd\", buf.read(24))\n        return self\n    _decode_one = staticmethod(_decode_one)\n\n    _hash = None\n    def _get_hash_recursive(parents):\n        if state_t in parents: return 0\n        tmphash = (0x573f2fdd2f76508f) & 0xffffffffffffffff\n        tmphash  = (((tmphash<<1)&0xffffffffffffffff)  + (tmphash>>63)) & 0xffffffffffffffff\n        return tmphash\n    _get_hash_recursive = staticmethod(_get_hash_recursive)\n    _packed_fingerprint = None\n\n    def _get_packed_fingerprint():\n        if state_t._packed_fingerprint is None:\n            state_t._packed_fingerprint = struct.pack(\">Q\", state_t._get_hash_recursive([]))\n        return state_t._packed_fingerprint\n    _get_packed_fingerprint = staticmethod(_get_packed_fingerprint)\n\n"
  },
  {
    "path": "data_generation/lcmtypes/lcmtypes/python/lcmtypes/trajectory_t.py",
    "content": "\"\"\"LCM type definitions\nThis file automatically generated by lcm.\nDO NOT MODIFY BY HAND!!!!\n\"\"\"\n\ntry:\n    import cStringIO.StringIO as BytesIO\nexcept ImportError:\n    from io import BytesIO\nimport struct\n\nimport lcmtypes.state_t\n\nclass trajectory_t(object):\n    __slots__ = [\"num_states\", \"states\"]\n\n    def __init__(self):\n        self.num_states = 0\n        self.states = []\n\n    def encode(self):\n        buf = BytesIO()\n        buf.write(trajectory_t._get_packed_fingerprint())\n        self._encode_one(buf)\n        return buf.getvalue()\n\n    def _encode_one(self, buf):\n        buf.write(struct.pack(\">i\", self.num_states))\n        for i0 in range(self.num_states):\n            assert self.states[i0]._get_packed_fingerprint() == lcmtypes.state_t._get_packed_fingerprint()\n            self.states[i0]._encode_one(buf)\n\n    def decode(data):\n        if hasattr(data, 'read'):\n            buf = data\n        else:\n            buf = BytesIO(data)\n        if buf.read(8) != trajectory_t._get_packed_fingerprint():\n            raise ValueError(\"Decode error\")\n        return trajectory_t._decode_one(buf)\n    decode = staticmethod(decode)\n\n    def _decode_one(buf):\n        self = trajectory_t()\n        self.num_states = struct.unpack(\">i\", buf.read(4))[0]\n        self.states = []\n        for i0 in range(self.num_states):\n            self.states.append(lcmtypes.state_t._decode_one(buf))\n        return self\n    _decode_one = staticmethod(_decode_one)\n\n    _hash = None\n    def _get_hash_recursive(parents):\n        if trajectory_t in parents: return 0\n        newparents = parents + [trajectory_t]\n        tmphash = (0x67039c5ec5ece44f+ lcmtypes.state_t._get_hash_recursive(newparents)) & 0xffffffffffffffff\n        tmphash  = (((tmphash<<1)&0xffffffffffffffff)  + (tmphash>>63)) & 0xffffffffffffffff\n        return tmphash\n    _get_hash_recursive = staticmethod(_get_hash_recursive)\n    
_packed_fingerprint = None\n\n    def _get_packed_fingerprint():\n        if trajectory_t._packed_fingerprint is None:\n            trajectory_t._packed_fingerprint = struct.pack(\">Q\", trajectory_t._get_hash_recursive([]))\n        return trajectory_t._packed_fingerprint\n    _get_packed_fingerprint = staticmethod(_get_packed_fingerprint)\n\n"
  },
  {
    "path": "data_generation/lcmtypes/lcmtypes/python/lcmtypes/vertex_t.py",
    "content": "\"\"\"LCM type definitions\nThis file automatically generated by lcm.\nDO NOT MODIFY BY HAND!!!!\n\"\"\"\n\ntry:\n    import cStringIO.StringIO as BytesIO\nexcept ImportError:\n    from io import BytesIO\nimport struct\n\nimport lcmtypes.state_t\n\nclass vertex_t(object):\n    __slots__ = [\"state\"]\n\n    def __init__(self):\n        self.state = lcmtypes.state_t()\n\n    def encode(self):\n        buf = BytesIO()\n        buf.write(vertex_t._get_packed_fingerprint())\n        self._encode_one(buf)\n        return buf.getvalue()\n\n    def _encode_one(self, buf):\n        assert self.state._get_packed_fingerprint() == lcmtypes.state_t._get_packed_fingerprint()\n        self.state._encode_one(buf)\n\n    def decode(data):\n        if hasattr(data, 'read'):\n            buf = data\n        else:\n            buf = BytesIO(data)\n        if buf.read(8) != vertex_t._get_packed_fingerprint():\n            raise ValueError(\"Decode error\")\n        return vertex_t._decode_one(buf)\n    decode = staticmethod(decode)\n\n    def _decode_one(buf):\n        self = vertex_t()\n        self.state = lcmtypes.state_t._decode_one(buf)\n        return self\n    _decode_one = staticmethod(_decode_one)\n\n    _hash = None\n    def _get_hash_recursive(parents):\n        if vertex_t in parents: return 0\n        newparents = parents + [vertex_t]\n        tmphash = (0x780573746198cdac+ lcmtypes.state_t._get_hash_recursive(newparents)) & 0xffffffffffffffff\n        tmphash  = (((tmphash<<1)&0xffffffffffffffff)  + (tmphash>>63)) & 0xffffffffffffffff\n        return tmphash\n    _get_hash_recursive = staticmethod(_get_hash_recursive)\n    _packed_fingerprint = None\n\n    def _get_packed_fingerprint():\n        if vertex_t._packed_fingerprint is None:\n            vertex_t._packed_fingerprint = struct.pack(\">Q\", vertex_t._get_hash_recursive([]))\n        return vertex_t._packed_fingerprint\n    _get_packed_fingerprint = staticmethod(_get_packed_fingerprint)\n\n"
  },
  {
    "path": "data_generation/lcmtypes/pod.xml",
    "content": "<pod>\n    <name>racecar_lcmtypes</name>\n\n    <maintainers>\n    </maintainers>\n\n    <summary>\n    </summary>\n\n    <description>\n    </description>\n\n    <requirements>\n    </requirements>\n</pod>\n"
  },
  {
    "path": "data_generation/permute.cpp",
    "content": "// To generate obstacles permutation to generate new environments\n#include <iostream>\n#include <stdio.h>\n#include <fstream>\n#include <algorithm>\nusing namespace std;\nvoid combinationUtil(int arr[], int data[], int start, int end, int index, int r, int &count, int (&node)[77520][7]);\n\n// The main function that prints all combinations of size r\n// in arr[] of size n. This function mainly uses combinationUtil()\n\n\nvoid printCombination(int arr[], int n, int r)\n{\n    // A temporary array to store all combination one by one\n\n\n\n\n\n    int data[r];\n \t int count=0;\n\t int node[77520][7];\n\t //node[0]=0;\n\t //node[1]=0;\t\n    // Print all combination using temprary array 'data[]'\n    combinationUtil(arr, data, 0, n-1, 0, r, count, node);\n\tcout<<\"count: \"<<count<<endl;\n\tfor (int i=0;i<10;i++){\n\t\tfor (int j=0; j<7;j++)\n\t\t\tcout<<node[i][j]<<' ';\n\t\tcout<<endl;\n\t}\n\n\t\n\n\n\trandom_shuffle(&node[0], &node[77520]);\n\tcout<<\"count: \"<<count<<endl;\n\tfor (int i=0;i<10;i++){\n\t\tfor (int j=0; j<7;j++)\n\t\t\tcout<<node[i][j]<<' ';\n\t\tcout<<endl;\n\t}\n\n\n\tofstream out(\"obs_perm2.dat\", ios::out | ios::binary);\n          if(!out) {\n                        cout << \"Cannot open file.\";\n                return;\n                }\n\n          out.write((char *) &node, sizeof node);\n          out.close();\n\n\t\n\n}\n \n/* arr[]  ---> Input Array\n   data[] ---> Temporary array to store current combination\n   start & end ---> Staring and Ending indexes in arr[]\n   index  ---> Current index in data[]\n   r ---> Size of a combination to be printed */\nvoid combinationUtil(int arr[], int data[], int start, int end,\n                     int index, int r, int &count, int (&node)[77520][7])\n{\n    // Current combination is ready to be printed, print it\n    if (index == r)\n    {\t  int j=0;\t\n        for (j=0; j<r; j++){\n            cout<<data[j];\n\t\t\t\tnode[count][j]=data[j];}\n        cout<<endl;\n\n\t\t  
count=count+1;\n        return;\n    }\n \n    // replace index with all possible elements. The condition\n    // \"end-i+1 >= r-index\" makes sure that including one element\n    // at index will make a combination with remaining elements\n    // at remaining positions\n\t int i=0;\n    for (i=start; i<=end && end-i+1 >= r-index; i++)\n    {\n        data[index] = arr[i];\n        combinationUtil(arr, data, i+1, end, index+1, r, count, node);\n\t\t  \n    }\n\t\n}\n\n\n\nint main()\n{\n    int arr[] = {0,1, 2, 3, 4, 5,6,7,8,9,10,11,12,13,14,15,16,17,18,19};\n    int r = 7;\n    int n = sizeof(arr)/sizeof(arr[0]);\n    printCombination(arr, n, r);\n}\n\n\n"
  },
  {
    "path": "data_generation/rrtstar/CMakeLists.txt",
    "content": "cmake_minimum_required(VERSION 2.6.0)\n\n# pull in the pods macros. See cmake/pods.cmake for documentation\nset(POD_NAME rrtstar)\ninclude(cmake/pods.cmake)\n\nfind_package(PkgConfig REQUIRED)\n\npkg_check_modules(LCM REQUIRED lcm)\n\n#tell cmake to build these subdirectories\nadd_subdirectory(src)\n\n"
  },
  {
    "path": "data_generation/rrtstar/Makefile",
    "content": "# Default makefile distributed with pods version: 10.11.18\n\ndefault_target: all\n\n# Default to a less-verbose build.  If you want all the gory compiler output,\n# run \"make VERBOSE=1\"\n$(VERBOSE).SILENT:\n\n# Figure out where to build the software.\n#   Use BUILD_PREFIX if it was passed in.\n#   If not, search up to four parent directories for a 'build' directory.\n#   Otherwise, use ./build.\nifeq \"$(BUILD_PREFIX)\" \"\"\nBUILD_PREFIX=$(shell for pfx in .. ../.. ../../.. ../../../..; do d=`pwd`/$$pfx/build; \\\n               if [ -d $$d ]; then echo $$d; exit 0; fi; done; echo `pwd`/build)\nendif\n\n# Default to a release build.  If you want to enable debugging flags, run\n# \"make BUILD_TYPE=Debug\"\nifeq \"$(BUILD_TYPE)\" \"\"\nBUILD_TYPE=\"Release\"\nendif\n\nall: pod-build/Makefile\n\t$(MAKE) -C pod-build all install\n\npod-build/Makefile:\n\t$(MAKE) configure\n\n.PHONY: configure\nconfigure:\n\t@echo \"\\nBUILD_PREFIX: $(BUILD_PREFIX)\\n\\n\"\n\n\t# create the build directories if necessary\n\t@[ -d $(BUILD_PREFIX) ] || mkdir -p $(BUILD_PREFIX) || exit 1\n\t@[ -d pod-build ] || mkdir pod-build || exit 1\n\t@echo \"$(BUILD_PREFIX)\" > pod-build/build_prefix\n\n\t# run CMake to generate and configure the build scripts\n\t@cd pod-build && cmake -DCMAKE_INSTALL_PREFIX=$(BUILD_PREFIX) \\\n\t                       -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) ..\n\nclean:\n\t-if [ -e pod-build/install_manifest.txt ]; then rm -f `cat pod-build/install_manifest.txt`; fi\n\t-if [ -d pod-build ]; then $(MAKE) -C pod-build clean; rm -rf pod-build; fi\n"
  },
  {
    "path": "data_generation/rrtstar/cmake/pods.cmake",
    "content": "# Macros to simplify compliance with the pods build policies.\n#\n# To enable the macros, add the following lines to CMakeLists.txt:\n#   set(POD_NAME <pod-name>)\n#   include(cmake/pods.cmake)\n#\n# If POD_NAME is not set, then the CMake source directory is used as POD_NAME\n#\n# Next, any of the following macros can be used.  See the individual macro\n# definitions in this file for individual documentation.\n#\n# C/C++\n#   pods_install_headers(...)\n#   pods_install_libraries(...)\n#   pods_install_executables(...)\n#   pods_install_pkg_config_file(...)\n#\n#   pods_use_pkg_config_packages(...)\n#\n# Python\n#   pods_install_python_packages(...)\n#   pods_install_python_script(...)\n#\n# Java\n#   None yet\n#\n# ----\n# File: pods.cmake\n# Distributed with pods version: 10.11.18\n\n# pods_install_headers(<header1.h> ... DESTINATION <subdir_name>)\n# \n# Install a (list) of header files.\n#\n# Header files will all be installed to include/<subdir_name>\n#\n# example:\n#   add_library(perception detector.h sensor.h)\n#   pods_install_headers(detector.h sensor.h DESTINATION perception)\n#\nfunction(pods_install_headers)\n    list(GET ARGV -2 checkword)\n    if(NOT checkword STREQUAL DESTINATION)\n        message(FATAL_ERROR \"pods_install_headers missing DESTINATION parameter\")\n    endif()\n\n    list(GET ARGV -1 dest_dir)\n    list(REMOVE_AT ARGV -1)\n    list(REMOVE_AT ARGV -1)\n    #copy the headers to the INCLUDE_OUTPUT_PATH (pod-build/include)\n    foreach(header ${ARGV})\n        get_filename_component(_header_name ${header} NAME)\n        configure_file(${header} ${INCLUDE_OUTPUT_PATH}/${dest_dir}/${_header_name} COPYONLY)\n\tendforeach(header)\n\t#mark them to be installed\n\tinstall(FILES ${ARGV} DESTINATION include/${dest_dir})\n\n\nendfunction(pods_install_headers)\n\n# pods_install_executables(<executable1> ...)\n#\n# Install a (list) of executables to bin/\nfunction(pods_install_executables)\n    install(TARGETS ${ARGV} RUNTIME 
DESTINATION bin)\nendfunction(pods_install_executables)\n\n# pods_install_libraries(<library1> ...)\n#\n# Install a (list) of libraries to lib/\nfunction(pods_install_libraries)\n    install(TARGETS ${ARGV} LIBRARY DESTINATION lib ARCHIVE DESTINATION lib)\nendfunction(pods_install_libraries)\n\n\n# pods_install_pkg_config_file(<package-name> \n#                              [VERSION <version>]\n#                              [DESCRIPTION <description>]\n#                              [CFLAGS <cflag> ...]\n#                              [LIBS <lflag> ...]\n#                              [REQUIRES <required-package-name> ...])\n# \n# Create and install a pkg-config .pc file.\n#\n# example:\n#    add_library(mylib mylib.c)\n#    pods_install_pkg_config_file(mylib LIBS -lmylib REQUIRES glib-2.0)\nfunction(pods_install_pkg_config_file)\n    list(GET ARGV 0 pc_name)\n    # TODO error check\n\n    set(pc_version 0.0.1)\n    set(pc_description ${pc_name})\n    set(pc_requires \"\")\n    set(pc_libs \"\")\n    set(pc_cflags \"\")\n    set(pc_fname \"${PKG_CONFIG_OUTPUT_PATH}/${pc_name}.pc\")\n    \n    set(modewords LIBS CFLAGS REQUIRES VERSION DESCRIPTION)\n    set(curmode \"\")\n\n    # parse function arguments and populate pkg-config parameters\n    list(REMOVE_AT ARGV 0)\n    foreach(word ${ARGV})\n        list(FIND modewords ${word} mode_index)\n        if(${mode_index} GREATER -1)\n            set(curmode ${word})\n        elseif(curmode STREQUAL LIBS)\n            set(pc_libs \"${pc_libs} ${word}\")\n        elseif(curmode STREQUAL CFLAGS)\n            set(pc_cflags \"${pc_cflags} ${word}\")\n        elseif(curmode STREQUAL REQUIRES)\n            set(pc_requires \"${pc_requires} ${word}\")\n        elseif(curmode STREQUAL VERSION)\n            set(pc_version ${word})\n            set(curmode \"\")\n        elseif(curmode STREQUAL DESCRIPTION)\n            set(pc_description \"${word}\")\n            set(curmode \"\")\n        else(${mode_index} GREATER -1)\n          
  message(\"WARNING incorrect use of pods_add_pkg_config (${word})\")\n            break()\n        endif(${mode_index} GREATER -1)\n    endforeach(word)\n\n    # write the .pc file out\n    file(WRITE ${pc_fname}\n        \"prefix=${CMAKE_INSTALL_PREFIX}\\n\"\n        \"exec_prefix=\\${prefix}\\n\"\n        \"libdir=\\${exec_prefix}/lib\\n\"\n        \"includedir=\\${prefix}/include\\n\"\n        \"\\n\"\n        \"Name: ${pc_name}\\n\"\n        \"Description: ${pc_description}\\n\"\n        \"Requires: ${pc_requires}\\n\"\n        \"Version: ${pc_version}\\n\"\n        \"Libs: -L\\${exec_prefix}/lib ${pc_libs}\\n\"\n        \"Cflags: ${pc_cflags}\\n\")\n\n    # mark the .pc file for installation to the lib/pkgconfig directory\n    install(FILES ${pc_fname} DESTINATION lib/pkgconfig)\n    \n    # find targets that this pkg-config file depends on\n    string(REPLACE \" \" \";\" split_lib ${pc_libs})\n    foreach(lib ${split_lib})\n        string(REGEX REPLACE \"^-l\" \"\" libname ${lib})\n        get_target_property(IS_TARGET ${libname} LOCATION)\n        if (NOT IS_TARGET STREQUAL \"IS_TARGET-NOTFOUND\")\n            set_property(GLOBAL APPEND PROPERTY \"PODS_PKG_CONFIG_TARGETS-${pc_name}\" ${libname})\n        endif() \n    endforeach()\n    \nendfunction(pods_install_pkg_config_file)\n\n\n# pods_install_python_script(<script_name> <python_module>)\n#\n# Create and install a script that invokes the python interpreter with a\n# specified module.\n#\n# A script will be installed to bin/<script_name>.  
The script simply\n# adds <install-prefix>/lib/pythonX.Y/site-packages to the python path, and\n# then invokes `python -m <python_module>`.\n#\n# example:\n#    pods_install_python_script(run-pdb pdb)\nfunction(pods_install_python_script script_name py_module)\n    find_package(PythonInterp REQUIRED)\n\n    # which python version?\n    execute_process(COMMAND \n        ${PYTHON_EXECUTABLE} -c \"import sys; sys.stdout.write(sys.version[:3])\"\n        OUTPUT_VARIABLE pyversion)\n\n    # where do we install .py files to?\n    set(python_install_dir \n        ${CMAKE_INSTALL_PREFIX}/lib/python${pyversion}/site-packages)\n\n    # write the script file\n    file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/${script_name} \"#!/bin/sh\\n\"\n        \"export PYTHONPATH=${python_install_dir}:\\${PYTHONPATH}\\n\"\n        \"exec ${PYTHON_EXECUTABLE} -m ${py_module} $*\\n\")\n\n    # install it...\n    install(PROGRAMS ${CMAKE_CURRENT_BINARY_DIR}/${script_name} DESTINATION bin)\nendfunction()\n\n# pods_install_python_packages(<src_dir>)\n#\n# Install python packages to lib/pythonX.Y/site-packages, where X.Y refers to\n# the current python version (e.g., 2.6)\n#\n# Recursively searches <src_dir> for .py files, byte-compiles them, and\n# installs them\nfunction(pods_install_python_packages py_src_dir)\n    find_package(PythonInterp REQUIRED)\n\n    # which python version?\n    execute_process(COMMAND \n        ${PYTHON_EXECUTABLE} -c \"import sys; sys.stdout.write(sys.version[:3])\"\n        OUTPUT_VARIABLE pyversion)\n\n    # where do we install .py files to?\n    set(python_install_dir \n        ${CMAKE_INSTALL_PREFIX}/lib/python${pyversion}/site-packages)\n\n    if(ARGC GREATER 1)\n        message(FATAL_ERROR \"NYI\")\n    else()\n        # get a list of all .py files\n        file(GLOB_RECURSE py_files RELATIVE ${py_src_dir} ${py_src_dir}/*.py)\n\n        # add rules for byte-compiling .py --> .pyc\n        foreach(py_file ${py_files})\n            get_filename_component(py_dirname 
${py_file} PATH)\n            add_custom_command(OUTPUT \"${py_src_dir}/${py_file}c\" \n                COMMAND ${PYTHON_EXECUTABLE} -m py_compile ${py_src_dir}/${py_file} \n                DEPENDS ${py_src_dir}/${py_file})\n            list(APPEND pyc_files \"${py_src_dir}/${py_file}c\")\n\n            # install python file and byte-compiled file\n            install(FILES ${py_src_dir}/${py_file} ${py_src_dir}/${py_file}c\n                DESTINATION \"${python_install_dir}/${py_dirname}\")\n#            message(\"${py_src_dir}/${py_file} -> ${python_install_dir}/${py_dirname}\")\n        endforeach()\n        string(REGEX REPLACE \"[^a-zA-Z0-9]\" \"_\" san_src_dir \"${py_src_dir}\")\n        add_custom_target(\"pyc_${san_src_dir}\" ALL DEPENDS ${pyc_files})\n    endif()\nendfunction()\n\n\n# pods_use_pkg_config_packages(<target> <package-name> ...)\n#\n# Convenience macro to get compiler and linker flags from pkg-config and apply them\n# to the specified target.\n#\n# Invokes `pkg-config --cflags-only-I <package-name> ...` and adds the result to the\n# include directories.\n#\n# Additionally, invokes `pkg-config --libs <package-name> ...` and adds the result to\n# the target's link flags (via target_link_libraries)\n#\n# example:\n#   add_executable(myprogram main.c)\n#   pods_use_pkg_config_packages(myprogram glib-2.0 opencv)\nmacro(pods_use_pkg_config_packages target)\n    if(${ARGC} LESS 2)\n        message(WARNING \"Useless invocation of pods_use_pkg_config_packages\")\n        return()\n    endif()\n    find_package(PkgConfig REQUIRED)\n    execute_process(COMMAND \n        ${PKG_CONFIG_EXECUTABLE} --cflags-only-I ${ARGN}\n        OUTPUT_VARIABLE _pods_pkg_include_flags)\n    string(STRIP ${_pods_pkg_include_flags} _pods_pkg_include_flags)\n    string(REPLACE \"-I\" \"\" _pods_pkg_include_flags \"${_pods_pkg_include_flags}\")\n\tseparate_arguments(_pods_pkg_include_flags)\n    #    message(\"include: ${_pods_pkg_include_flags}\")\n    
execute_process(COMMAND \n        ${PKG_CONFIG_EXECUTABLE} --libs ${ARGN}\n        OUTPUT_VARIABLE _pods_pkg_ldflags)\n    string(STRIP ${_pods_pkg_ldflags} _pods_pkg_ldflags)\n    #    message(\"ldflags: ${_pods_pkg_ldflags}\")\n    include_directories(${_pods_pkg_include_flags})\n    target_link_libraries(${target} ${_pods_pkg_ldflags})\n   \n    # make the target depend on libraries being installed by this source build\n    foreach(_pkg ${ARGN})\n        get_property(_has_dependencies GLOBAL PROPERTY \"PODS_PKG_CONFIG_TARGETS-${_pkg}\" SET)\n        if(_has_dependencies)\n            get_property(_dependencies GLOBAL PROPERTY \"PODS_PKG_CONFIG_TARGETS-${_pkg}\")\n            add_dependencies(${target} ${_dependencies})\n            #            message(\"Found dependencies for ${_pkg}: ${dependencies}\")\n        endif()\n        unset(_has_dependencies)\n        unset(_dependencies)\n    endforeach()\n\n    unset(_pods_pkg_include_flags)\n    unset(_pods_pkg_ldflags)\nendmacro()\n\n\n# pods_config_search_paths()\n#\n# Setup include, linker, and pkg-config paths according to the pods core\n# policy.  
This macro is automatically invoked, there is no need to do so\n# manually.\nmacro(pods_config_search_paths)\n    if(NOT DEFINED __pods_setup)\n\t\t#set where files should be output locally\n\t    set(LIBRARY_OUTPUT_PATH ${CMAKE_SOURCE_DIR}/pod-build/lib)\n\t    set(EXECUTABLE_OUTPUT_PATH ${CMAKE_SOURCE_DIR}/pod-build/bin)\n\t    set(INCLUDE_OUTPUT_PATH ${CMAKE_SOURCE_DIR}/pod-build/include)\n\t    set(PKG_CONFIG_OUTPUT_PATH ${CMAKE_SOURCE_DIR}/pod-build/lib/pkgconfig)\n\t\t\n\t\t#set where files should be installed to\n\t    set(LIBRARY_INSTALL_PATH ${CMAKE_INSTALL_PREFIX}/lib)\n\t    set(EXECUTABLE_INSTALL_PATH ${CMAKE_INSTALL_PREFIX}/bin)\n\t    set(INCLUDE_INSTALL_PATH ${CMAKE_INSTALL_PREFIX}/include)\n\t    set(PKG_CONFIG_INSTALL_PATH ${CMAKE_INSTALL_PREFIX}/lib/pkgconfig)\n\n\n        # add build/lib/pkgconfig to the pkg-config search path\n        set(ENV{PKG_CONFIG_PATH} ${PKG_CONFIG_INSTALL_PATH}:$ENV{PKG_CONFIG_PATH})\n        set(ENV{PKG_CONFIG_PATH} ${PKG_CONFIG_OUTPUT_PATH}:$ENV{PKG_CONFIG_PATH})\n\n        # add build/include to the compiler include path\n        include_directories(${INCLUDE_INSTALL_PATH})\n        include_directories(${INCLUDE_OUTPUT_PATH})\n\n        # add build/lib to the link path\n        link_directories(${LIBRARY_INSTALL_PATH})\n        link_directories(${LIBRARY_OUTPUT_PATH})\n\n        # abuse RPATH\n        if(${CMAKE_INSTALL_RPATH})\n            set(CMAKE_INSTALL_RPATH ${LIBRARY_INSTALL_PATH}:${CMAKE_INSTALL_RPATH})\n        else(${CMAKE_INSTALL_RPATH})\n            set(CMAKE_INSTALL_RPATH ${LIBRARY_INSTALL_PATH})\n        endif(${CMAKE_INSTALL_RPATH})\n\n        # for osx, which uses \"install name\" path rather than rpath\n        #set(CMAKE_INSTALL_NAME_DIR ${LIBRARY_OUTPUT_PATH})\n        set(CMAKE_INSTALL_NAME_DIR ${CMAKE_INSTALL_RPATH})\n        \n        # hack to force cmake always create install and clean targets \n        install(FILES DESTINATION)\n        add_custom_target(tmp)\n\n        set(__pods_setup 
true)\n    endif(NOT DEFINED __pods_setup)\nendmacro(pods_config_search_paths)\n\nmacro(enforce_out_of_source)\n    if(CMAKE_BINARY_DIR STREQUAL PROJECT_SOURCE_DIR)\n      message(FATAL_ERROR \n      \"\\n\n      Do not run cmake directly in the pod directory. \n      use the supplied Makefile instead!  You now need to\n      remove CMakeCache.txt and the CMakeFiles directory.\n\n      Then to build, simply type: \n       $ make\n      \")\n    endif()\nendmacro(enforce_out_of_source)\n\n#set the variable POD_NAME to the directory path, and set the cmake PROJECT_NAME\nif(NOT POD_NAME)\n    get_filename_component(POD_NAME ${CMAKE_SOURCE_DIR} NAME)\n    message(STATUS \"POD_NAME is not set... Defaulting to directory name: ${POD_NAME}\") \nendif(NOT POD_NAME)\nproject(${POD_NAME})\n\n#make sure we're running an out-of-source build\nenforce_out_of_source()\n\n#call the function to setup paths\npods_config_search_paths()\n"
  },
  {
    "path": "data_generation/rrtstar/doxy/doxy.conf",
    "content": "# Doxyfile 1.7.2\n\n# This file describes the settings to be used by the documentation system\n# doxygen (www.doxygen.org) for a project.\n#\n# All text after a hash (#) is considered a comment and will be ignored.\n# The format is:\n#       TAG = value [value, ...]\n# For lists items can also be appended using:\n#       TAG += value [value, ...]\n# Values that contain spaces should be placed between quotes (\" \").\n\n#---------------------------------------------------------------------------\n# Project related configuration options\n#---------------------------------------------------------------------------\n\n# This tag specifies the encoding used for all characters in the config file\n# that follow. The default is UTF-8 which is also the encoding used for all\n# text before the first occurrence of this tag. Doxygen uses libiconv (or the\n# iconv built into libc) for the transcoding. See\n# http://www.gnu.org/software/libiconv for the list of possible encodings.\n\nDOXYFILE_ENCODING      = UTF-8\n\n# The PROJECT_NAME tag is a single word (or a sequence of words surrounded\n# by quotes) that should identify the project.\n\nPROJECT_NAME           = RRT*\n\n# The PROJECT_NUMBER tag can be used to enter a project or revision number.\n# This could be handy for archiving the generated documentation or\n# if some version control system is used.\n\nPROJECT_NUMBER         =\n\n# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)\n# base path where the generated documentation will be put.\n# If a relative path is entered, it will be relative to the location\n# where doxygen was started. 
If left blank the current directory will be used.\n\nOUTPUT_DIRECTORY       = ./doc\n\n# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create\n# 4096 sub-directories (in 2 levels) under the output directory of each output\n# format and will distribute the generated files over these directories.\n# Enabling this option can be useful when feeding doxygen a huge amount of\n# source files, where putting all generated files in the same directory would\n# otherwise cause performance problems for the file system.\n\nCREATE_SUBDIRS         = NO\n\n# The OUTPUT_LANGUAGE tag is used to specify the language in which all\n# documentation generated by doxygen is written. Doxygen will use this\n# information to generate all constant output in the proper language.\n# The default language is English, other supported languages are:\n# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional,\n# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German,\n# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English\n# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian,\n# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrilic, Slovak,\n# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese.\n\nOUTPUT_LANGUAGE        = English\n\n# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will\n# include brief member descriptions after the members that are listed in\n# the file and class documentation (similar to JavaDoc).\n# Set to NO to disable this.\n\nBRIEF_MEMBER_DESC      = YES\n\n# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend\n# the brief description of a member or function before the detailed description.\n# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the\n# brief descriptions will be completely suppressed.\n\nREPEAT_BRIEF           = YES\n\n# This tag implements a quasi-intelligent brief description abbreviator\n# that is used to 
form the text in various listings. Each string\n# in this list, if found as the leading text of the brief description, will be\n# stripped from the text and the result after processing the whole list, is\n# used as the annotated text. Otherwise, the brief description is used as-is.\n# If left blank, the following values are used (\"$name\" is automatically\n# replaced with the name of the entity): \"The $name class\" \"The $name widget\"\n# \"The $name file\" \"is\" \"provides\" \"specifies\" \"contains\"\n# \"represents\" \"a\" \"an\" \"the\"\n\nABBREVIATE_BRIEF       =\n\n# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then\n# Doxygen will generate a detailed section even if there is only a brief\n# description.\n\nALWAYS_DETAILED_SEC    = NO\n\n# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all\n# inherited members of a class in the documentation of that class as if those\n# members were ordinary class members. Constructors, destructors and assignment\n# operators of the base classes will not be shown.\n\nINLINE_INHERITED_MEMB  = NO\n\n# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full\n# path before files name in the file list and in the header files. If set\n# to NO the shortest path that makes the file name unique will be used.\n\nFULL_PATH_NAMES        = YES\n\n# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag\n# can be used to strip a user-defined part of the path. Stripping is\n# only done if one of the specified strings matches the left-hand part of\n# the path. 
The tag can be used to show relative paths in the file list.\n# If left blank the directory from which doxygen is run is used as the\n# path to strip.\n\nSTRIP_FROM_PATH        =\n\n# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of\n# the path mentioned in the documentation of a class, which tells\n# the reader which header file to include in order to use a class.\n# If left blank only the name of the header file containing the class\n# definition is used. Otherwise one should specify the include paths that\n# are normally passed to the compiler using the -I flag.\n\nSTRIP_FROM_INC_PATH    =\n\n# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter\n# (but less readable) file names. This can be useful if your file system\n# doesn't support long names like on DOS, Mac, or CD-ROM.\n\nSHORT_NAMES            = NO\n\n# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen\n# will interpret the first line (until the first dot) of a JavaDoc-style\n# comment as the brief description. If set to NO, the JavaDoc\n# comments will behave just like regular Qt-style comments\n# (thus requiring an explicit @brief command for a brief description.)\n\nJAVADOC_AUTOBRIEF      = NO\n\n# If the QT_AUTOBRIEF tag is set to YES then Doxygen will\n# interpret the first line (until the first dot) of a Qt-style\n# comment as the brief description. If set to NO, the comments\n# will behave just like regular Qt-style comments (thus requiring\n# an explicit \\brief command for a brief description.)\n\nQT_AUTOBRIEF           = NO\n\n# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen\n# treat a multi-line C++ special comment block (i.e. a block of //! or ///\n# comments) as a brief description. This used to be the default behaviour.\n# The new default is to treat a multi-line C++ comment block as a detailed\n# description. 
Set this tag to YES if you prefer the old behaviour instead.\n\nMULTILINE_CPP_IS_BRIEF = NO\n\n# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented\n# member inherits the documentation from any documented member that it\n# re-implements.\n\nINHERIT_DOCS           = YES\n\n# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce\n# a new page for each member. If set to NO, the documentation of a member will\n# be part of the file/class/namespace that contains it.\n\nSEPARATE_MEMBER_PAGES  = NO\n\n# The TAB_SIZE tag can be used to set the number of spaces in a tab.\n# Doxygen uses this value to replace tabs by spaces in code fragments.\n\nTAB_SIZE               = 8\n\n# This tag can be used to specify a number of aliases that acts\n# as commands in the documentation. An alias has the form \"name=value\".\n# For example adding \"sideeffect=\\par Side Effects:\\n\" will allow you to\n# put the command \\sideeffect (or @sideeffect) in the documentation, which\n# will result in a user-defined paragraph with heading \"Side Effects:\".\n# You can put \\n's in the value part of an alias to insert newlines.\n\nALIASES                =\n\n# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C\n# sources only. Doxygen will then generate output that is more tailored for C.\n# For instance, some of the names that are used will be different. The list\n# of all members will be omitted, etc.\n\nOPTIMIZE_OUTPUT_FOR_C  = NO\n\n# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java\n# sources only. Doxygen will then generate output that is more tailored for\n# Java. For instance, namespaces will be presented as packages, qualified\n# scopes will look different, etc.\n\nOPTIMIZE_OUTPUT_JAVA   = NO\n\n# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran\n# sources only. 
Doxygen will then generate output that is more tailored for\n# Fortran.\n\nOPTIMIZE_FOR_FORTRAN   = NO\n\n# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL\n# sources. Doxygen will then generate output that is tailored for\n# VHDL.\n\nOPTIMIZE_OUTPUT_VHDL   = NO\n\n# Doxygen selects the parser to use depending on the extension of the files it\n# parses. With this tag you can assign which parser to use for a given extension.\n# Doxygen has a built-in mapping, but you can override or extend it using this\n# tag. The format is ext=language, where ext is a file extension, and language\n# is one of the parsers supported by doxygen: IDL, Java, Javascript, CSharp, C,\n# C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, C++. For instance to make\n# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C\n# (default is Fortran), use: inc=Fortran f=C. Note that for custom extensions\n# you also need to set FILE_PATTERNS otherwise the files are not read by doxygen.\n\nEXTENSION_MAPPING      =\n\n# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want\n# to include (a tag file for) the STL sources as input, then you should\n# set this tag to YES in order to let doxygen match functions declarations and\n# definitions whose arguments contain STL classes (e.g. func(std::string); v.s.\n# func(std::string) {}). 
This also makes the inheritance and collaboration\n# diagrams that involve STL classes more complete and accurate.\n\nBUILTIN_STL_SUPPORT    = NO\n\n# If you use Microsoft's C++/CLI language, you should set this option to YES to\n# enable parsing support.\n\nCPP_CLI_SUPPORT        = NO\n\n# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only.\n# Doxygen will parse them like normal C++ but will assume all classes use public\n# instead of private inheritance when no explicit protection keyword is present.\n\nSIP_SUPPORT            = NO\n\n# For Microsoft's IDL there are propget and propput attributes to indicate getter\n# and setter methods for a property. Setting this option to YES (the default)\n# will make doxygen replace the get and set methods by a property in the\n# documentation. This will only work if the methods are indeed getting or\n# setting a simple type. If this is not the case, or you want to show the\n# methods anyway, you should set this option to NO.\n\nIDL_PROPERTY_SUPPORT   = YES\n\n# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC\n# tag is set to YES, then doxygen will reuse the documentation of the first\n# member in the group (if any) for the other members of the group. By default\n# all members of a group must be documented explicitly.\n\nDISTRIBUTE_GROUP_DOC   = NO\n\n# Set the SUBGROUPING tag to YES (the default) to allow class member groups of\n# the same type (for instance a group of public functions) to be put as a\n# subgroup of that type (e.g. under the Public Functions section). Set it to\n# NO to prevent subgrouping. Alternatively, this can be done per class using\n# the \\nosubgrouping command.\n\nSUBGROUPING            = YES\n\n# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum\n# is documented as struct, union, or enum with the name of the typedef. So\n# typedef struct TypeS {} TypeT, will appear in the documentation as a struct\n# with name TypeT. 
When disabled the typedef will appear as a member of a file,\n# namespace, or class. And the struct will be named TypeS. This can typically\n# be useful for C code in case the coding convention dictates that all compound\n# types are typedef'ed and only the typedef is referenced, never the tag name.\n\nTYPEDEF_HIDES_STRUCT   = NO\n\n# The SYMBOL_CACHE_SIZE determines the size of the internal cache used to\n# determine which symbols to keep in memory and which to flush to disk.\n# When the cache is full, less often used symbols will be written to disk.\n# For small to medium size projects (<1000 input files) the default value is\n# probably good enough. For larger projects a too small cache size can cause\n# doxygen to be busy swapping symbols to and from disk most of the time\n# causing a significant performance penalty.\n# If the system has enough physical memory increasing the cache will improve the\n# performance by keeping more symbols in memory. Note that the value works on\n# a logarithmic scale so increasing the size by one will roughly double the\n# memory usage. The cache size is given by this formula:\n# 2^(16+SYMBOL_CACHE_SIZE). 
The valid range is 0..9, the default is 0,\n# corresponding to a cache size of 2^16 = 65536 symbols\n\nSYMBOL_CACHE_SIZE      = 0\n\n#---------------------------------------------------------------------------\n# Build related configuration options\n#---------------------------------------------------------------------------\n\n# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in\n# documentation are documented, even if no documentation was available.\n# Private class members and static file members will be hidden unless\n# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES\n\nEXTRACT_ALL            = NO\n\n# If the EXTRACT_PRIVATE tag is set to YES all private members of a class\n# will be included in the documentation.\n\nEXTRACT_PRIVATE        = NO\n\n# If the EXTRACT_STATIC tag is set to YES all static members of a file\n# will be included in the documentation.\n\nEXTRACT_STATIC         = NO\n\n# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)\n# defined locally in source files will be included in the documentation.\n# If set to NO only classes defined in header files are included.\n\nEXTRACT_LOCAL_CLASSES  = YES\n\n# This flag is only useful for Objective-C code. When set to YES local\n# methods, which are defined in the implementation section but not in\n# the interface are included in the documentation.\n# If set to NO (the default) only methods in the interface are included.\n\nEXTRACT_LOCAL_METHODS  = NO\n\n# If this flag is set to YES, the members of anonymous namespaces will be\n# extracted and appear in the documentation as a namespace called\n# 'anonymous_namespace{file}', where file will be replaced with the base\n# name of the file that contains the anonymous namespace. 
By default\n# anonymous namespaces are hidden.\n\nEXTRACT_ANON_NSPACES   = NO\n\n# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all\n# undocumented members of documented classes, files or namespaces.\n# If set to NO (the default) these members will be included in the\n# various overviews, but no documentation section is generated.\n# This option has no effect if EXTRACT_ALL is enabled.\n\nHIDE_UNDOC_MEMBERS     = NO\n\n# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all\n# undocumented classes that are normally visible in the class hierarchy.\n# If set to NO (the default) these classes will be included in the various\n# overviews. This option has no effect if EXTRACT_ALL is enabled.\n\nHIDE_UNDOC_CLASSES     = NO\n\n# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all\n# friend (class|struct|union) declarations.\n# If set to NO (the default) these declarations will be included in the\n# documentation.\n\nHIDE_FRIEND_COMPOUNDS  = NO\n\n# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any\n# documentation blocks found inside the body of a function.\n# If set to NO (the default) these blocks will be appended to the\n# function's detailed documentation block.\n\nHIDE_IN_BODY_DOCS      = NO\n\n# The INTERNAL_DOCS tag determines if documentation\n# that is typed after a \\internal command is included. If the tag is set\n# to NO (the default) then the documentation will be excluded.\n# Set it to YES to include the internal documentation.\n\nINTERNAL_DOCS          = NO\n\n# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate\n# file names in lower-case letters. If set to YES upper-case letters are also\n# allowed. This is useful if you have classes or files whose names only differ\n# in case and if your file system supports case sensitive file names. 
Windows\n# and Mac users are advised to set this option to NO.\n\nCASE_SENSE_NAMES       = NO\n\n# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen\n# will show members with their full class and namespace scopes in the\n# documentation. If set to YES the scope will be hidden.\n\nHIDE_SCOPE_NAMES       = NO\n\n# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen\n# will put a list of the files that are included by a file in the documentation\n# of that file.\n\nSHOW_INCLUDE_FILES     = YES\n\n# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen\n# will list include files with double quotes in the documentation\n# rather than with sharp brackets.\n\nFORCE_LOCAL_INCLUDES   = NO\n\n# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]\n# is inserted in the documentation for inline members.\n\nINLINE_INFO            = YES\n\n# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen\n# will sort the (detailed) documentation of file and class members\n# alphabetically by member name. If set to NO the members will appear in\n# declaration order.\n\nSORT_MEMBER_DOCS       = YES\n\n# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the\n# brief documentation of file, namespace and class members alphabetically\n# by member name. If set to NO (the default) the members will appear in\n# declaration order.\n\nSORT_BRIEF_DOCS        = NO\n\n# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen\n# will sort the (brief and detailed) documentation of class members so that\n# constructors and destructors are listed first. 
If set to NO (the default)\n# the constructors will appear in the respective orders defined by\n# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS.\n# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO\n# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO.\n\nSORT_MEMBERS_CTORS_1ST = NO\n\n# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the\n# hierarchy of group names into alphabetical order. If set to NO (the default)\n# the group names will appear in their defined order.\n\nSORT_GROUP_NAMES       = NO\n\n# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be\n# sorted by fully-qualified names, including namespaces. If set to\n# NO (the default), the class list will be sorted only by class name,\n# not including the namespace part.\n# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.\n# Note: This option applies only to the class list, not to the\n# alphabetical list.\n\nSORT_BY_SCOPE_NAME     = NO\n\n# The GENERATE_TODOLIST tag can be used to enable (YES) or\n# disable (NO) the todo list. This list is created by putting \\todo\n# commands in the documentation.\n\nGENERATE_TODOLIST      = YES\n\n# The GENERATE_TESTLIST tag can be used to enable (YES) or\n# disable (NO) the test list. This list is created by putting \\test\n# commands in the documentation.\n\nGENERATE_TESTLIST      = YES\n\n# The GENERATE_BUGLIST tag can be used to enable (YES) or\n# disable (NO) the bug list. This list is created by putting \\bug\n# commands in the documentation.\n\nGENERATE_BUGLIST       = YES\n\n# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or\n# disable (NO) the deprecated list. This list is created by putting\n# \\deprecated commands in the documentation.\n\nGENERATE_DEPRECATEDLIST= YES\n\n# The ENABLED_SECTIONS tag can be used to enable conditional\n# documentation sections, marked by \\if sectionname ... 
\\endif.\n\nENABLED_SECTIONS       =\n\n# The MAX_INITIALIZER_LINES tag determines the maximum number of lines\n# the initial value of a variable or macro consists of for it to appear in\n# the documentation. If the initializer consists of more lines than specified\n# here it will be hidden. Use a value of 0 to hide initializers completely.\n# The appearance of the initializer of individual variables and macros in the\n# documentation can be controlled using \\showinitializer or \\hideinitializer\n# command in the documentation regardless of this setting.\n\nMAX_INITIALIZER_LINES  = 30\n\n# Set the SHOW_USED_FILES tag to NO to disable the list of files generated\n# at the bottom of the documentation of classes and structs. If set to YES the\n# list will mention the files that were used to generate the documentation.\n\nSHOW_USED_FILES        = YES\n\n# If the sources in your project are distributed over multiple directories\n# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy\n# in the documentation. The default is NO.\n\nSHOW_DIRECTORIES       = NO\n\n# Set the SHOW_FILES tag to NO to disable the generation of the Files page.\n# This will remove the Files entry from the Quick Index and from the\n# Folder Tree View (if specified). The default is YES.\n\nSHOW_FILES             = YES\n\n# Set the SHOW_NAMESPACES tag to NO to disable the generation of the\n# Namespaces page.\n# This will remove the Namespaces entry from the Quick Index\n# and from the Folder Tree View (if specified). The default is YES.\n\nSHOW_NAMESPACES        = YES\n\n# The FILE_VERSION_FILTER tag can be used to specify a program or script that\n# doxygen should invoke to get the current version for each file (typically from\n# the version control system). 
Doxygen will invoke the program by executing (via\n# popen()) the command <command> <input-file>, where <command> is the value of\n# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file\n# provided by doxygen. Whatever the program writes to standard output\n# is used as the file version. See the manual for examples.\n\nFILE_VERSION_FILTER    =\n\n# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed\n# by doxygen. The layout file controls the global structure of the generated\n# output files in an output format independent way. To create the layout file\n# that represents doxygen's defaults, run doxygen with the -l option.\n# You can optionally specify a file name after the option, if omitted\n# DoxygenLayout.xml will be used as the name of the layout file.\n\nLAYOUT_FILE            =\n\n#---------------------------------------------------------------------------\n# configuration options related to warning and progress messages\n#---------------------------------------------------------------------------\n\n# The QUIET tag can be used to turn on/off the messages that are generated\n# by doxygen. Possible values are YES and NO. If left blank NO is used.\n\nQUIET                  = NO\n\n# The WARNINGS tag can be used to turn on/off the warning messages that are\n# generated by doxygen. Possible values are YES and NO. If left blank\n# NO is used.\n\nWARNINGS               = YES\n\n# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings\n# for undocumented members. 
If EXTRACT_ALL is set to YES then this flag will\n# automatically be disabled.\n\nWARN_IF_UNDOCUMENTED   = YES\n\n# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for\n# potential errors in the documentation, such as not documenting some\n# parameters in a documented function, or documenting parameters that\n# don't exist or using markup commands wrongly.\n\nWARN_IF_DOC_ERROR      = YES\n\n# The WARN_NO_PARAMDOC option can be enabled to get warnings for\n# functions that are documented, but have no documentation for their parameters\n# or return value. If set to NO (the default) doxygen will only warn about\n# wrong or incomplete parameter documentation, but not about the absence of\n# documentation.\n\nWARN_NO_PARAMDOC       = NO\n\n# The WARN_FORMAT tag determines the format of the warning messages that\n# doxygen can produce. The string should contain the $file, $line, and $text\n# tags, which will be replaced by the file and line number from which the\n# warning originated and the warning text. Optionally the format may contain\n# $version, which will be replaced by the version of the file (if it could\n# be obtained via FILE_VERSION_FILTER)\n\nWARN_FORMAT            = \"$file:$line: $text\"\n\n# The WARN_LOGFILE tag can be used to specify a file to which warning\n# and error messages should be written. If left blank the output is written\n# to stderr.\n\nWARN_LOGFILE           =\n\n#---------------------------------------------------------------------------\n# configuration options related to the input files\n#---------------------------------------------------------------------------\n\n# The INPUT tag can be used to specify the files and/or directories that contain\n# documented source files. You may enter file names like \"myfile.cpp\" or\n# directories like \"/usr/src/myproject\". 
Separate the files or directories\n# with spaces.\n\nINPUT                  = ./src\n\n# This tag can be used to specify the character encoding of the source files\n# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is\n# also the default input encoding. Doxygen uses libiconv (or the iconv built\n# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for\n# the list of possible encodings.\n\nINPUT_ENCODING         = UTF-8\n\n# If the value of the INPUT tag contains directories, you can use the\n# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp\n# and *.h) to filter out the source-files in the directories. If left\n# blank the following patterns are tested:\n# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh\n# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py\n# *.f90 *.f *.vhd *.vhdl\n\nFILE_PATTERNS          =\n\n# The RECURSIVE tag can be used to specify whether or not subdirectories\n# should be searched for input files as well. Possible values are YES and NO.\n# If left blank NO is used.\n\nRECURSIVE              = NO\n\n# The EXCLUDE tag can be used to specify files and/or directories that should be\n# excluded from the INPUT source files. This way you can easily exclude a\n# subdirectory from a directory tree whose root is specified with the INPUT tag.\n\nEXCLUDE                = ./src/kdtree.c ./src/kdtree.h\n\n# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or\n# directories that are symbolic links (a Unix filesystem feature) are excluded\n# from the input.\n\nEXCLUDE_SYMLINKS       = NO\n\n# If the value of the INPUT tag contains directories, you can use the\n# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude\n# certain files from those directories. 
Note that the wildcards are matched\n# against the file with absolute path, so to exclude all test directories\n# for example use the pattern */test/*\n\nEXCLUDE_PATTERNS       =\n\n# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names\n# (namespaces, classes, functions, etc.) that should be excluded from the\n# output. The symbol name can be a fully qualified name, a word, or if the\n# wildcard * is used, a substring. Examples: ANamespace, AClass,\n# AClass::ANamespace, ANamespace::*Test\n\nEXCLUDE_SYMBOLS        =\n\n# The EXAMPLE_PATH tag can be used to specify one or more files or\n# directories that contain example code fragments that are included (see\n# the \\include command).\n\nEXAMPLE_PATH           =\n\n# If the value of the EXAMPLE_PATH tag contains directories, you can use the\n# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp\n# and *.h) to filter out the source-files in the directories. If left\n# blank all files are included.\n\nEXAMPLE_PATTERNS       =\n\n# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be\n# searched for input files to be used with the \\include or \\dontinclude\n# commands irrespective of the value of the RECURSIVE tag.\n# Possible values are YES and NO. If left blank NO is used.\n\nEXAMPLE_RECURSIVE      = NO\n\n# The IMAGE_PATH tag can be used to specify one or more files or\n# directories that contain image that are included in the documentation (see\n# the \\image command).\n\nIMAGE_PATH             =\n\n# The INPUT_FILTER tag can be used to specify a program that doxygen should\n# invoke to filter for each input file. Doxygen will invoke the filter program\n# by executing (via popen()) the command <filter> <input-file>, where <filter>\n# is the value of the INPUT_FILTER tag, and <input-file> is the name of an\n# input file. 
Doxygen will then use the output that the filter program writes\n# to standard output.\n# If FILTER_PATTERNS is specified, this tag will be\n# ignored.\n\nINPUT_FILTER           =\n\n# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern\n# basis.\n# Doxygen will compare the file name with each pattern and apply the\n# filter if there is a match.\n# The filters are a list of the form:\n# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further\n# info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER\n# is applied to all files.\n\nFILTER_PATTERNS        =\n\n# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using\n# INPUT_FILTER) will be used to filter the input files when producing source\n# files to browse (i.e. when SOURCE_BROWSER is set to YES).\n\nFILTER_SOURCE_FILES    = NO\n\n#---------------------------------------------------------------------------\n# configuration options related to source browsing\n#---------------------------------------------------------------------------\n\n# If the SOURCE_BROWSER tag is set to YES then a list of source files will\n# be generated. Documented entities will be cross-referenced with these sources.\n# Note: To get rid of all source code in the generated output, make sure also\n# VERBATIM_HEADERS is set to NO.\n\nSOURCE_BROWSER         = NO\n\n# Setting the INLINE_SOURCES tag to YES will include the body\n# of functions and classes directly in the documentation.\n\nINLINE_SOURCES         = NO\n\n# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct\n# doxygen to hide any special comment blocks from generated source code\n# fragments. 
Normal C and C++ comments will always remain visible.\n\nSTRIP_CODE_COMMENTS    = YES\n\n# If the REFERENCED_BY_RELATION tag is set to YES\n# then for each documented function all documented\n# functions referencing it will be listed.\n\nREFERENCED_BY_RELATION = NO\n\n# If the REFERENCES_RELATION tag is set to YES\n# then for each documented function all documented entities\n# called/used by that function will be listed.\n\nREFERENCES_RELATION    = NO\n\n# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)\n# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from\n# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will\n# link to the source code.\n# Otherwise they will link to the documentation.\n\nREFERENCES_LINK_SOURCE = YES\n\n# If the USE_HTAGS tag is set to YES then the references to source code\n# will point to the HTML generated by the htags(1) tool instead of doxygen\n# built-in source browser. The htags tool is part of GNU's global source\n# tagging system (see http://www.gnu.org/software/global/global.html). You\n# will need version 4.8.6 or higher.\n\nUSE_HTAGS              = NO\n\n# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen\n# will generate a verbatim copy of the header file for each class for\n# which an include is specified. Set to NO to disable this.\n\nVERBATIM_HEADERS       = YES\n\n#---------------------------------------------------------------------------\n# configuration options related to the alphabetical class index\n#---------------------------------------------------------------------------\n\n# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index\n# of all compounds will be generated. 
Enable this if the project\n# contains a lot of classes, structs, unions or interfaces.\n\nALPHABETICAL_INDEX     = YES\n\n# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then\n# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns\n# in which this list will be split (can be a number in the range [1..20])\n\nCOLS_IN_ALPHA_INDEX    = 5\n\n# In case all classes in a project start with a common prefix, all\n# classes will be put under the same header in the alphabetical index.\n# The IGNORE_PREFIX tag can be used to specify one or more prefixes that\n# should be ignored while generating the index headers.\n\nIGNORE_PREFIX          =\n\n#---------------------------------------------------------------------------\n# configuration options related to the HTML output\n#---------------------------------------------------------------------------\n\n# If the GENERATE_HTML tag is set to YES (the default) Doxygen will\n# generate HTML output.\n\nGENERATE_HTML          = YES\n\n# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.\n# If a relative path is entered the value of OUTPUT_DIRECTORY will be\n# put in front of it. If left blank `html' will be used as the default path.\n\nHTML_OUTPUT            = html\n\n# The HTML_FILE_EXTENSION tag can be used to specify the file extension for\n# each generated HTML page (for example: .htm,.php,.asp). If it is left blank\n# doxygen will generate files with .html extension.\n\nHTML_FILE_EXTENSION    = .html\n\n# The HTML_HEADER tag can be used to specify a personal HTML header for\n# each generated HTML page. If it is left blank doxygen will generate a\n# standard header.\n\nHTML_HEADER            =\n\n# The HTML_FOOTER tag can be used to specify a personal HTML footer for\n# each generated HTML page. 
If it is left blank doxygen will generate a\n# standard footer.\n\nHTML_FOOTER            =\n\n# The HTML_STYLESHEET tag can be used to specify a user-defined cascading\n# style sheet that is used by each HTML page. It can be used to\n# fine-tune the look of the HTML output. If the tag is left blank doxygen\n# will generate a default style sheet. Note that doxygen will try to copy\n# the style sheet file to the HTML output directory, so don't put your own\n# stylesheet in the HTML output directory as well, or it will be erased!\n\nHTML_STYLESHEET        =\n\n# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output.\n# Doxygen will adjust the colors in the stylesheet and background images\n# according to this color. Hue is specified as an angle on a colorwheel,\n# see http://en.wikipedia.org/wiki/Hue for more information.\n# For instance the value 0 represents red, 60 is yellow, 120 is green,\n# 180 is cyan, 240 is blue, 300 purple, and 360 is red again.\n# The allowed range is 0 to 359.\n\nHTML_COLORSTYLE_HUE    = 220\n\n# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of\n# the colors in the HTML output. For a value of 0 the output will use\n# grayscales only. A value of 255 will produce the most vivid colors.\n\nHTML_COLORSTYLE_SAT    = 100\n\n# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to\n# the luminance component of the colors in the HTML output. Values below\n# 100 gradually make the output lighter, whereas values above 100 make\n# the output darker. The value divided by 100 is the actual gamma applied,\n# so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2,\n# and 100 does not change the gamma.\n\nHTML_COLORSTYLE_GAMMA  = 80\n\n# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML\n# page will contain the date and time when the page was generated. 
Setting\n# this to NO can help when comparing the output of multiple runs.\n\nHTML_TIMESTAMP         = YES\n\n# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes,\n# files or namespaces will be aligned in HTML using tables. If set to\n# NO a bullet list will be used.\n\nHTML_ALIGN_MEMBERS     = YES\n\n# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML\n# documentation will contain sections that can be hidden and shown after the\n# page has loaded. For this to work a browser that supports\n# JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox\n# Netscape 6.0+, Internet explorer 5.0+, Konqueror, or Safari).\n\nHTML_DYNAMIC_SECTIONS  = NO\n\n# If the GENERATE_DOCSET tag is set to YES, additional index files\n# will be generated that can be used as input for Apple's Xcode 3\n# integrated development environment, introduced with OSX 10.5 (Leopard).\n# To create a documentation set, doxygen will generate a Makefile in the\n# HTML output directory. Running make will produce the docset in that\n# directory and running \"make install\" will install the docset in\n# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find\n# it at startup.\n# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html\n# for more information.\n\nGENERATE_DOCSET        = NO\n\n# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the\n# feed. A documentation feed provides an umbrella under which multiple\n# documentation sets from a single provider (such as a company or product suite)\n# can be grouped.\n\nDOCSET_FEEDNAME        = \"Doxygen generated docs\"\n\n# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that\n# should uniquely identify the documentation set bundle. This should be a\n# reverse domain-name style string, e.g. com.mycompany.MyDocSet. 
Doxygen\n# will append .docset to the name.\n\nDOCSET_BUNDLE_ID       = org.doxygen.Project\n\n# When GENERATE_PUBLISHER_ID tag specifies a string that should uniquely identify\n# the documentation publisher. This should be a reverse domain-name style\n# string, e.g. com.mycompany.MyDocSet.documentation.\n\nDOCSET_PUBLISHER_ID    = org.doxygen.Publisher\n\n# The GENERATE_PUBLISHER_NAME tag identifies the documentation publisher.\n\nDOCSET_PUBLISHER_NAME  = Publisher\n\n# If the GENERATE_HTMLHELP tag is set to YES, additional index files\n# will be generated that can be used as input for tools like the\n# Microsoft HTML help workshop to generate a compiled HTML help file (.chm)\n# of the generated HTML documentation.\n\nGENERATE_HTMLHELP      = NO\n\n# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can\n# be used to specify the file name of the resulting .chm file. You\n# can add a path in front of the file if the result should not be\n# written to the html output directory.\n\nCHM_FILE               =\n\n# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can\n# be used to specify the location (absolute path including file name) of\n# the HTML help compiler (hhc.exe). 
If non-empty doxygen will try to run\n# the HTML help compiler on the generated index.hhp.\n\nHHC_LOCATION           =\n\n# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag\n# controls if a separate .chi index file is generated (YES) or that\n# it should be included in the master .chm file (NO).\n\nGENERATE_CHI           = NO\n\n# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING\n# is used to encode HtmlHelp index (hhk), content (hhc) and project file\n# content.\n\nCHM_INDEX_ENCODING     =\n\n# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag\n# controls whether a binary table of contents is generated (YES) or a\n# normal table of contents (NO) in the .chm file.\n\nBINARY_TOC             = NO\n\n# The TOC_EXPAND flag can be set to YES to add extra items for group members\n# to the contents of the HTML help documentation and to the tree view.\n\nTOC_EXPAND             = NO\n\n# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and\n# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated\n# that can be used as input for Qt's qhelpgenerator to generate a\n# Qt Compressed Help (.qch) of the generated HTML documentation.\n\nGENERATE_QHP           = NO\n\n# If the QHG_LOCATION tag is specified, the QCH_FILE tag can\n# be used to specify the file name of the resulting .qch file.\n# The path specified is relative to the HTML output folder.\n\nQCH_FILE               =\n\n# The QHP_NAMESPACE tag specifies the namespace to use when generating\n# Qt Help Project output. For more information please see\n# http://doc.trolltech.com/qthelpproject.html#namespace\n\nQHP_NAMESPACE          = org.doxygen.Project\n\n# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating\n# Qt Help Project output. 
For more information please see\n# http://doc.trolltech.com/qthelpproject.html#virtual-folders\n\nQHP_VIRTUAL_FOLDER     = doc\n\n# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to\n# add. For more information please see\n# http://doc.trolltech.com/qthelpproject.html#custom-filters\n\nQHP_CUST_FILTER_NAME   =\n\n# The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the\n# custom filter to add. For more information please see\n# <a href=\"http://doc.trolltech.com/qthelpproject.html#custom-filters\">\n# Qt Help Project / Custom Filters</a>.\n\nQHP_CUST_FILTER_ATTRS  =\n\n# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this\n# project's\n# filter section matches.\n# <a href=\"http://doc.trolltech.com/qthelpproject.html#filter-attributes\">\n# Qt Help Project / Filter Attributes</a>.\n\nQHP_SECT_FILTER_ATTRS  =\n\n# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can\n# be used to specify the location of Qt's qhelpgenerator.\n# If non-empty doxygen will try to run qhelpgenerator on the generated\n# .qhp file.\n\nQHG_LOCATION           =\n\n# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files\n#  will be generated, which together with the HTML files, form an Eclipse help\n# plugin. To install this plugin and make it available under the help contents\n# menu in Eclipse, the contents of the directory containing the HTML and XML\n# files needs to be copied into the plugins directory of eclipse. The name of\n# the directory within the plugins directory should be the same as\n# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before\n# the help appears.\n\nGENERATE_ECLIPSEHELP   = NO\n\n# A unique identifier for the eclipse help plugin. 
When installing the plugin\n# the directory name containing the HTML and XML files should also have\n# this name.\n\nECLIPSE_DOC_ID         = org.doxygen.Project\n\n# The DISABLE_INDEX tag can be used to turn on/off the condensed index at\n# top of each HTML page. The value NO (the default) enables the index and\n# the value YES disables it.\n\nDISABLE_INDEX          = NO\n\n# This tag can be used to set the number of enum values (range [0,1..20])\n# that doxygen will group on one line in the generated HTML documentation.\n# Note that a value of 0 will completely suppress the enum values from appearing in the overview section.\n\nENUM_VALUES_PER_LINE   = 4\n\n# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index\n# structure should be generated to display hierarchical information.\n# If the tag value is set to YES, a side panel will be generated\n# containing a tree-like index structure (just like the one that\n# is generated for HTML Help). For this to work a browser that supports\n# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser).\n# Windows users are probably better off using the HTML help feature.\n\nGENERATE_TREEVIEW      = NO\n\n# By enabling USE_INLINE_TREES, doxygen will generate the Groups, Directories,\n# and Class Hierarchy pages using a tree view instead of an ordered list.\n\nUSE_INLINE_TREES       = NO\n\n# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be\n# used to set the initial width (in pixels) of the frame in which the tree\n# is shown.\n\nTREEVIEW_WIDTH         = 250\n\n# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open\n# links to external symbols imported via tag files in a separate window.\n\nEXT_LINKS_IN_WINDOW    = NO\n\n# Use this tag to change the font size of Latex formulas included\n# as images in the HTML documentation. The default is 10. 
Note that\n# when you change the font size after a successful doxygen run you need\n# to manually remove any form_*.png images from the HTML output directory\n# to force them to be regenerated.\n\nFORMULA_FONTSIZE       = 10\n\n# Use the FORMULA_TRANPARENT tag to determine whether or not the images\n# generated for formulas are transparent PNGs. Transparent PNGs are\n# not supported properly for IE 6.0, but are supported on all modern browsers.\n# Note that when changing this option you need to delete any form_*.png files\n# in the HTML output before the changes have effect.\n\nFORMULA_TRANSPARENT    = YES\n\n# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax\n# (see http://www.mathjax.org) which uses client side Javascript for the\n# rendering instead of using prerendered bitmaps. Use this if you do not\n# have LaTeX installed or if you want to formulas look prettier in the HTML\n# output. When enabled you also need to install MathJax separately and\n# configure the path to it using the MATHJAX_RELPATH option.\n\nUSE_MATHJAX            = NO\n\n# When MathJax is enabled you need to specify the location relative to the\n# HTML output directory using the MATHJAX_RELPATH option. The destination\n# directory should contain the MathJax.js script. For instance, if the mathjax\n# directory is located at the same level as the HTML output directory, then\n# MATHJAX_RELPATH should be ../mathjax. The default value points to the mathjax.org site, so you can quickly see the result without installing\n# MathJax, but it is strongly recommended to install a local copy of MathJax\n# before deployment.\n\nMATHJAX_RELPATH        = http://www.mathjax.org/mathjax\n\n# When the SEARCHENGINE tag is enabled doxygen will generate a search box\n# for the HTML output. The underlying search engine uses javascript\n# and DHTML and should work on any modern browser. 
Note that when using\n# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets\n# (GENERATE_DOCSET) there is already a search function so this one should\n# typically be disabled. For large projects the javascript based search engine\n# can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution.\n\nSEARCHENGINE           = YES\n\n# When the SERVER_BASED_SEARCH tag is enabled the search engine will be\n# implemented using a PHP enabled web server instead of at the web client\n# using Javascript. Doxygen will generate the search PHP script and index\n# file to put on the web server. The advantage of the server\n# based approach is that it scales better to large projects and allows\n# full text search. The disadvantages are that it is more difficult to setup\n# and does not have live searching capabilities.\n\nSERVER_BASED_SEARCH    = NO\n\n#---------------------------------------------------------------------------\n# configuration options related to the LaTeX output\n#---------------------------------------------------------------------------\n\n# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will\n# generate Latex output.\n\nGENERATE_LATEX         = YES\n\n# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.\n# If a relative path is entered the value of OUTPUT_DIRECTORY will be\n# put in front of it. If left blank `latex' will be used as the default path.\n\nLATEX_OUTPUT           = latex\n\n# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be\n# invoked. If left blank `latex' will be used as the default command name.\n# Note that when enabling USE_PDFLATEX this option is only used for\n# generating bitmaps for formulas in the HTML output, but not in the\n# Makefile that is written to the output directory.\n\nLATEX_CMD_NAME         = latex\n\n# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to\n# generate index for LaTeX. 
If left blank `makeindex' will be used as the\n# default command name.\n\nMAKEINDEX_CMD_NAME     = makeindex\n\n# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact\n# LaTeX documents. This may be useful for small projects and may help to\n# save some trees in general.\n\nCOMPACT_LATEX          = NO\n\n# The PAPER_TYPE tag can be used to set the paper type that is used\n# by the printer. Possible values are: a4, letter, legal and\n# executive. If left blank a4wide will be used.\n\nPAPER_TYPE             = a4\n\n# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX\n# packages that should be included in the LaTeX output.\n\nEXTRA_PACKAGES         =\n\n# The LATEX_HEADER tag can be used to specify a personal LaTeX header for\n# the generated latex document. The header should contain everything until\n# the first chapter. If it is left blank doxygen will generate a\n# standard header. Notice: only use this tag if you know what you are doing!\n\nLATEX_HEADER           =\n\n# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated\n# is prepared for conversion to pdf (using ps2pdf). The pdf file will\n# contain links (just like the HTML output) instead of page references\n# This makes the output suitable for online browsing using a pdf viewer.\n\nPDF_HYPERLINKS         = YES\n\n# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of\n# plain latex in the generated Makefile. Set this option to YES to get a\n# higher quality PDF documentation.\n\nUSE_PDFLATEX           = YES\n\n# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\\\batchmode.\n# command to the generated LaTeX files. 
This will instruct LaTeX to keep\n# running if errors occur, instead of asking the user for help.\n# This option is also used when generating formulas in HTML.\n\nLATEX_BATCHMODE        = NO\n\n# If LATEX_HIDE_INDICES is set to YES then doxygen will not\n# include the index chapters (such as File Index, Compound Index, etc.)\n# in the output.\n\nLATEX_HIDE_INDICES     = NO\n\n# If LATEX_SOURCE_CODE is set to YES then doxygen will include\n# source code with syntax highlighting in the LaTeX output.\n# Note that which sources are shown also depends on other settings\n# such as SOURCE_BROWSER.\n\nLATEX_SOURCE_CODE      = NO\n\n#---------------------------------------------------------------------------\n# configuration options related to the RTF output\n#---------------------------------------------------------------------------\n\n# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output\n# The RTF output is optimized for Word 97 and may not look very pretty with\n# other RTF readers or editors.\n\nGENERATE_RTF           = NO\n\n# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.\n# If a relative path is entered the value of OUTPUT_DIRECTORY will be\n# put in front of it. If left blank `rtf' will be used as the default path.\n\nRTF_OUTPUT             = rtf\n\n# If the COMPACT_RTF tag is set to YES Doxygen generates more compact\n# RTF documents. This may be useful for small projects and may help to\n# save some trees in general.\n\nCOMPACT_RTF            = NO\n\n# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated\n# will contain hyperlink fields. The RTF file will\n# contain links (just like the HTML output) instead of page references.\n# This makes the output suitable for online browsing using WORD or other\n# programs which support those fields.\n# Note: wordpad (write) and others do not support links.\n\nRTF_HYPERLINKS         = NO\n\n# Load stylesheet definitions from file. 
Syntax is similar to doxygen's\n# config file, i.e. a series of assignments. You only have to provide\n# replacements, missing definitions are set to their default value.\n\nRTF_STYLESHEET_FILE    =\n\n# Set optional variables used in the generation of an rtf document.\n# Syntax is similar to doxygen's config file.\n\nRTF_EXTENSIONS_FILE    =\n\n#---------------------------------------------------------------------------\n# configuration options related to the man page output\n#---------------------------------------------------------------------------\n\n# If the GENERATE_MAN tag is set to YES (the default) Doxygen will\n# generate man pages\n\nGENERATE_MAN           = NO\n\n# The MAN_OUTPUT tag is used to specify where the man pages will be put.\n# If a relative path is entered the value of OUTPUT_DIRECTORY will be\n# put in front of it. If left blank `man' will be used as the default path.\n\nMAN_OUTPUT             = man\n\n# The MAN_EXTENSION tag determines the extension that is added to\n# the generated man pages (default is the subroutine's section .3)\n\nMAN_EXTENSION          = .3\n\n# If the MAN_LINKS tag is set to YES and Doxygen generates man output,\n# then it will generate one additional man file for each entity\n# documented in the real man page(s). These additional files\n# only source the real man page, but without them the man command\n# would be unable to find the correct page. 
The default is NO.\n\nMAN_LINKS              = NO\n\n#---------------------------------------------------------------------------\n# configuration options related to the XML output\n#---------------------------------------------------------------------------\n\n# If the GENERATE_XML tag is set to YES Doxygen will\n# generate an XML file that captures the structure of\n# the code including all documentation.\n\nGENERATE_XML           = NO\n\n# The XML_OUTPUT tag is used to specify where the XML pages will be put.\n# If a relative path is entered the value of OUTPUT_DIRECTORY will be\n# put in front of it. If left blank `xml' will be used as the default path.\n\nXML_OUTPUT             = xml\n\n# The XML_SCHEMA tag can be used to specify an XML schema,\n# which can be used by a validating XML parser to check the\n# syntax of the XML files.\n\nXML_SCHEMA             =\n\n# The XML_DTD tag can be used to specify an XML DTD,\n# which can be used by a validating XML parser to check the\n# syntax of the XML files.\n\nXML_DTD                =\n\n# If the XML_PROGRAMLISTING tag is set to YES Doxygen will\n# dump the program listings (including syntax highlighting\n# and cross-referencing information) to the XML output. Note that\n# enabling this will significantly increase the size of the XML output.\n\nXML_PROGRAMLISTING     = YES\n\n#---------------------------------------------------------------------------\n# configuration options for the AutoGen Definitions output\n#---------------------------------------------------------------------------\n\n# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will\n# generate an AutoGen Definitions (see autogen.sf.net) file\n# that captures the structure of the code including all\n# documentation. 
Note that this feature is still experimental\n# and incomplete at the moment.\n\nGENERATE_AUTOGEN_DEF   = NO\n\n#---------------------------------------------------------------------------\n# configuration options related to the Perl module output\n#---------------------------------------------------------------------------\n\n# If the GENERATE_PERLMOD tag is set to YES Doxygen will\n# generate a Perl module file that captures the structure of\n# the code including all documentation. Note that this\n# feature is still experimental and incomplete at the\n# moment.\n\nGENERATE_PERLMOD       = NO\n\n# If the PERLMOD_LATEX tag is set to YES Doxygen will generate\n# the necessary Makefile rules, Perl scripts and LaTeX code to be able\n# to generate PDF and DVI output from the Perl module output.\n\nPERLMOD_LATEX          = NO\n\n# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be\n# nicely formatted so it can be parsed by a human reader.\n# This is useful\n# if you want to understand what is going on.\n# On the other hand, if this\n# tag is set to NO the size of the Perl module output will be much smaller\n# and Perl will parse it just the same.\n\nPERLMOD_PRETTY         = YES\n\n# The names of the make variables in the generated doxyrules.make file\n# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.\n# This is useful so different doxyrules.make files included by the same\n# Makefile don't overwrite each other's variables.\n\nPERLMOD_MAKEVAR_PREFIX =\n\n#---------------------------------------------------------------------------\n# Configuration options related to the preprocessor\n#---------------------------------------------------------------------------\n\n# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will\n# evaluate all C-preprocessor directives found in the sources and include\n# files.\n\nENABLE_PREPROCESSING   = YES\n\n# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro\n# names 
in the source code. If set to NO (the default) only conditional\n# compilation will be performed. Macro expansion can be done in a controlled\n# way by setting EXPAND_ONLY_PREDEF to YES.\n\nMACRO_EXPANSION        = NO\n\n# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES\n# then the macro expansion is limited to the macros specified with the\n# PREDEFINED and EXPAND_AS_DEFINED tags.\n\nEXPAND_ONLY_PREDEF     = NO\n\n# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files\n# in the INCLUDE_PATH (see below) will be search if a #include is found.\n\nSEARCH_INCLUDES        = YES\n\n# The INCLUDE_PATH tag can be used to specify one or more directories that\n# contain include files that are not input files but should be processed by\n# the preprocessor.\n\nINCLUDE_PATH           =\n\n# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard\n# patterns (like *.h and *.hpp) to filter out the header-files in the\n# directories. If left blank, the patterns specified with FILE_PATTERNS will\n# be used.\n\nINCLUDE_FILE_PATTERNS  =\n\n# The PREDEFINED tag can be used to specify one or more macro names that\n# are defined before the preprocessor is started (similar to the -D option of\n# gcc). The argument of the tag is a list of macros of the form: name\n# or name=definition (no spaces). If the definition and the = are\n# omitted =1 is assumed. 
To prevent a macro definition from being\n# undefined via #undef or recursively expanded use the := operator\n# instead of the = operator.\n\nPREDEFINED             =\n\n# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then\n# this tag can be used to specify a list of macro names that should be expanded.\n# The macro definition that is found in the sources will be used.\n# Use the PREDEFINED tag if you want to use a different macro definition.\n\nEXPAND_AS_DEFINED      =\n\n# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then\n# doxygen's preprocessor will remove all function-like macros that are alone\n# on a line, have an all uppercase name, and do not end with a semicolon. Such\n# function macros are typically used for boiler-plate code, and will confuse\n# the parser if not removed.\n\nSKIP_FUNCTION_MACROS   = YES\n\n#---------------------------------------------------------------------------\n# Configuration::additions related to external references\n#---------------------------------------------------------------------------\n\n# The TAGFILES option can be used to specify one or more tagfiles.\n# Optionally an initial location of the external documentation\n# can be added for each tagfile. The format of a tag file without\n# this location is as follows:\n#\n# TAGFILES = file1 file2 ...\n# Adding location for the tag files is done as follows:\n#\n# TAGFILES = file1=loc1 \"file2 = loc2\" ...\n# where \"loc1\" and \"loc2\" can be relative or absolute paths or\n# URLs. 
If a location is present for each tag, the installdox tool\n# does not have to be run to correct the links.\n# Note that each tag file must have a unique name\n# (where the name does NOT include the path)\n# If a tag file is not located in the directory in which doxygen\n# is run, you must also specify the path to the tagfile here.\n\nTAGFILES               =\n\n# When a file name is specified after GENERATE_TAGFILE, doxygen will create\n# a tag file that is based on the input files it reads.\n\nGENERATE_TAGFILE       =\n\n# If the ALLEXTERNALS tag is set to YES all external classes will be listed\n# in the class index. If set to NO only the inherited external classes\n# will be listed.\n\nALLEXTERNALS           = NO\n\n# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed\n# in the modules index. If set to NO, only the current project's groups will\n# be listed.\n\nEXTERNAL_GROUPS        = YES\n\n# The PERL_PATH should be the absolute path and name of the perl script\n# interpreter (i.e. the result of `which perl').\n\nPERL_PATH              = /usr/bin/perl\n\n#---------------------------------------------------------------------------\n# Configuration options related to the dot tool\n#---------------------------------------------------------------------------\n\n# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will\n# generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base\n# or super classes. Setting the tag to NO turns the diagrams off. Note that\n# this option also works with HAVE_DOT disabled, but it is recommended to\n# install and use dot, since it yields more powerful graphs.\n\nCLASS_DIAGRAMS         = YES\n\n# You can define message sequence charts within doxygen comments using the \\msc\n# command. Doxygen will then run the mscgen tool (see\n# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the\n# documentation. 
The MSCGEN_PATH tag allows you to specify the directory where\n# the mscgen tool resides. If left empty the tool is assumed to be found in the\n# default search path.\n\nMSCGEN_PATH            =\n\n# If set to YES, the inheritance and collaboration graphs will hide\n# inheritance and usage relations if the target is undocumented\n# or is not a class.\n\nHIDE_UNDOC_RELATIONS   = YES\n\n# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is\n# available from the path. This tool is part of Graphviz, a graph visualization\n# toolkit from AT&T and Lucent Bell Labs. The other options in this section\n# have no effect if this option is set to NO (the default)\n\nHAVE_DOT               = NO\n\n# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is\n# allowed to run in parallel. When set to 0 (the default) doxygen will\n# base this on the number of processors available in the system. You can set it\n# explicitly to a value larger than 0 to get control over the balance\n# between CPU load and processing speed.\n\nDOT_NUM_THREADS        = 0\n\n# By default doxygen will write a font called FreeSans.ttf to the output\n# directory and reference it in all dot files that doxygen generates. This\n# font does not include all possible unicode characters however, so when you need\n# these (or just want a differently looking font) you can specify the font name\n# using DOT_FONTNAME. You need need to make sure dot is able to find the font,\n# which can be done by putting it in a standard location or by setting the\n# DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory\n# containing the font.\n\nDOT_FONTNAME           = FreeSans.ttf\n\n# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.\n# The default size is 10pt.\n\nDOT_FONTSIZE           = 10\n\n# By default doxygen will tell dot to use the output directory to look for the\n# FreeSans.ttf font (which doxygen will put there itself). 
If you specify a\n# different font using DOT_FONTNAME you can set the path where dot\n# can find it using this tag.\n\nDOT_FONTPATH           =\n\n# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen\n# will generate a graph for each documented class showing the direct and\n# indirect inheritance relations. Setting this tag to YES will force the\n# the CLASS_DIAGRAMS tag to NO.\n\nCLASS_GRAPH            = YES\n\n# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen\n# will generate a graph for each documented class showing the direct and\n# indirect implementation dependencies (inheritance, containment, and\n# class references variables) of the class with other documented classes.\n\nCOLLABORATION_GRAPH    = YES\n\n# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen\n# will generate a graph for groups, showing the direct groups dependencies\n\nGROUP_GRAPHS           = YES\n\n# If the UML_LOOK tag is set to YES doxygen will generate inheritance and\n# collaboration diagrams in a style similar to the OMG's Unified Modeling\n# Language.\n\nUML_LOOK               = NO\n\n# If set to YES, the inheritance and collaboration graphs will show the\n# relations between templates and their instances.\n\nTEMPLATE_RELATIONS     = NO\n\n# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT\n# tags are set to YES then doxygen will generate a graph for each documented\n# file showing the direct and indirect include dependencies of the file with\n# other documented files.\n\nINCLUDE_GRAPH          = YES\n\n# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and\n# HAVE_DOT tags are set to YES then doxygen will generate a graph for each\n# documented header file showing the documented files that directly or\n# indirectly include this file.\n\nINCLUDED_BY_GRAPH      = YES\n\n# If the CALL_GRAPH and HAVE_DOT options are set to YES then\n# doxygen will generate a call dependency graph for every 
global function\n# or class method. Note that enabling this option will significantly increase\n# the time of a run. So in most cases it will be better to enable call graphs\n# for selected functions only using the \\callgraph command.\n\nCALL_GRAPH             = NO\n\n# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then\n# doxygen will generate a caller dependency graph for every global function\n# or class method. Note that enabling this option will significantly increase\n# the time of a run. So in most cases it will be better to enable caller\n# graphs for selected functions only using the \\callergraph command.\n\nCALLER_GRAPH           = NO\n\n# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen\n# will generate a graphical hierarchy of all classes instead of a textual one.\n\nGRAPHICAL_HIERARCHY    = YES\n\n# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES\n# then doxygen will show the dependencies a directory has on other directories\n# in a graphical way. The dependency relations are determined by the #include\n# relations between the files in the directories.\n\nDIRECTORY_GRAPH        = YES\n\n# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images\n# generated by dot. Possible values are png, jpg, or gif.\n# If left blank png will be used.\n\nDOT_IMAGE_FORMAT       = png\n\n# The tag DOT_PATH can be used to specify the path where the dot tool can be\n# found. 
If left blank, it is assumed the dot tool can be found in the path.\n\nDOT_PATH               =\n\n# The DOTFILE_DIRS tag can be used to specify one or more directories that\n# contain dot files that are included in the documentation (see the\n# \\dotfile command).\n\nDOTFILE_DIRS           =\n\n# The MSCFILE_DIRS tag can be used to specify one or more directories that\n# contain msc files that are included in the documentation (see the\n# \\mscfile command).\n\nMSCFILE_DIRS           =\n\n# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of\n# nodes that will be shown in the graph. If the number of nodes in a graph\n# becomes larger than this value, doxygen will truncate the graph, which is\n# visualized by representing a node as a red box. Note that doxygen if the\n# number of direct children of the root node in a graph is already larger than\n# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note\n# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.\n\nDOT_GRAPH_MAX_NODES    = 50\n\n# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the\n# graphs generated by dot. A depth value of 3 means that only nodes reachable\n# from the root by following a path via at most 3 edges will be shown. Nodes\n# that lay further from the root node will be omitted. Note that setting this\n# option to 1 or 2 may greatly reduce the computation time needed for large\n# code bases. Also note that the size of a graph can be further restricted by\n# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.\n\nMAX_DOT_GRAPH_DEPTH    = 0\n\n# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent\n# background. This is disabled by default, because dot on Windows does not\n# seem to support this out of the box. Warning: Depending on the platform used,\n# enabling this option may lead to badly anti-aliased labels on the edges of\n# a graph (i.e. 
they become hard to read).\n\nDOT_TRANSPARENT        = NO\n\n# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output\n# files in one run (i.e. multiple -o and -T options on the command line). This\n# makes dot run faster, but since only newer versions of dot (>1.8.10)\n# support this, this feature is disabled by default.\n\nDOT_MULTI_TARGETS      = NO\n\n# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will\n# generate a legend page explaining the meaning of the various boxes and\n# arrows in the dot generated graphs.\n\nGENERATE_LEGEND        = YES\n\n# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will\n# remove the intermediate dot files that are used to generate\n# the various graphs.\n\nDOT_CLEANUP            = YES\n"
  },
  {
    "path": "data_generation/rrtstar/src/CMakeLists.txt",
    "content": "SET(ENV{PKG_CONFIG_PATH} \"$ENV{PKG_CONFIG_PATH}:/usr/local/lib/pkgconfig:/opt/local/lib/pkgconfig:/usr/local/share/pkgconfig\")\n\npods_install_pkg_config_file(rrtstar\n    CFLAGS\n    LIBS  -lrrtstar\n    REQUIRES lcmtypes\n    VERSION 0.0.1)\n\ninclude_directories(\n    ${LCM_INCLUDE_DIRS})\n\nadd_executable(rrtstar rrts_main.cpp system_single_integrator.cpp kdtree.c)\n\npods_use_pkg_config_packages(rrtstar\n    bot2-core\n    lcmtypes)\n\npods_install_executables(rrtstar)\n\ntarget_link_libraries(rrtstar -llcm)\n"
  },
  {
    "path": "data_generation/rrtstar/src/CMakeLists.txt~",
    "content": "SET(ENV{PKG_CONFIG_PATH} \"$ENV{PKG_CONFIG_PATH}:/usr/local/lib/pkgconfig:/opt/local/lib/pkgconfig:/usr/local/share/pkgconfig\")\n\npods_install_pkg_config_file(rrtstar\n    CFLAGS\n    LIBS  -lrrtstar\n    REQUIRES lcmtypes\n    VERSION 0.0.1)\n\ninclude_directories(\n    ${LCM_INCLUDE_DIRS})\n\nadd_executable(rrtstar rrts_main.cpp system_single_integrator.cpp kdtree.c)\n\npods_use_pkg_config_packages(rrtstar\n    bot2-core\n    lcmtypes)\n\npods_install_executables(rrtstar)"
  },
  {
    "path": "data_generation/rrtstar/src/kdtree.c",
    "content": "/*\nThis file is part of ``kdtree'', a library for working with kd-trees.\nCopyright (C) 2007-2009 John Tsiombikas <nuclear@siggraph.org>\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n2. Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n3. The name of the author may not be used to endorse or promote products\n   derived from this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED\nWARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF\nMERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO\nEVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\nEXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT\nOF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\nINTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\nCONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING\nIN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY\nOF SUCH DAMAGE.\n*/\n/* single nearest neighbor search written by Tamas Nepusz <tamas@cs.rhul.ac.uk> */\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <math.h>\n#include \"kdtree.h\"\n\n#if defined(WIN32) || defined(__WIN32__)\n#include <malloc.h>\n#endif\n\n#ifdef USE_LIST_NODE_ALLOCATOR\n\n#ifndef NO_PTHREADS\n#include <pthread.h>\n#else\n\n#ifndef I_WANT_THREAD_BUGS\n#error \"You are compiling with the fast list node allocator, with pthreads disabled! 
This WILL break if used from multiple threads.\"\n#endif\t/* I want thread bugs */\n\n#endif\t/* pthread support */\n#endif\t/* use list node allocator */\n\nstruct kdhyperrect {\n\tint dim;\n\tdouble *min, *max;              /* minimum/maximum coords */\n};\n\nstruct kdnode {\n\tdouble *pos;\n\tint dir;\n\tvoid *data;\n\n\tstruct kdnode *left, *right;\t/* negative/positive side */\n};\n\nstruct res_node {\n\tstruct kdnode *item;\n\tdouble dist_sq;\n\tstruct res_node *next;\n};\n\nstruct kdtree {\n\tint dim;\n\tstruct kdnode *root;\n\tstruct kdhyperrect *rect;\n\tvoid (*destr)(void*);\n};\n\nstruct kdres {\n\tstruct kdtree *tree;\n\tstruct res_node *rlist, *riter;\n\tint size;\n};\n\n#define SQ(x)\t\t\t((x) * (x))\n\n\nstatic void clear_rec(struct kdnode *node, void (*destr)(void*));\nstatic int insert_rec(struct kdnode **node, const double *pos, void *data, int dir, int dim);\nstatic int rlist_insert(struct res_node *list, struct kdnode *item, double dist_sq);\nstatic void clear_results(struct kdres *set);\n\nstatic struct kdhyperrect* hyperrect_create(int dim, const double *min, const double *max);\nstatic void hyperrect_free(struct kdhyperrect *rect);\nstatic struct kdhyperrect* hyperrect_duplicate(const struct kdhyperrect *rect);\nstatic void hyperrect_extend(struct kdhyperrect *rect, const double *pos);\nstatic double hyperrect_dist_sq(struct kdhyperrect *rect, const double *pos);\n\n#ifdef USE_LIST_NODE_ALLOCATOR\nstatic struct res_node *alloc_resnode(void);\nstatic void free_resnode(struct res_node*);\n#else\n#define alloc_resnode()\t\tmalloc(sizeof(struct res_node))\n#define free_resnode(n)\t\tfree(n)\n#endif\n\n\n\nstruct kdtree *kd_create(int k)\n{\n\tstruct kdtree *tree;\n\n\tif(!(tree = malloc(sizeof *tree))) {\n\t\treturn 0;\n\t}\n\n\ttree->dim = k;\n\ttree->root = 0;\n\ttree->destr = 0;\n\ttree->rect = 0;\n\n\treturn tree;\n}\n\nvoid kd_free(struct kdtree *tree)\n{\n\tif(tree) {\n\t\tkd_clear(tree);\n\t\tfree(tree);\n\t}\n}\n\nstatic void 
clear_rec(struct kdnode *node, void (*destr)(void*))\n{\n\tif(!node) return;\n\n\tclear_rec(node->left, destr);\n\tclear_rec(node->right, destr);\n\t\n\tif(destr) {\n\t\tdestr(node->data);\n\t}\n\tfree(node->pos);\n\tfree(node);\n}\n\nvoid kd_clear(struct kdtree *tree)\n{\n\tclear_rec(tree->root, tree->destr);\n\ttree->root = 0;\n\n\tif (tree->rect) {\n\t\thyperrect_free(tree->rect);\n\t\ttree->rect = 0;\n\t}\n}\n\nvoid kd_data_destructor(struct kdtree *tree, void (*destr)(void*))\n{\n\ttree->destr = destr;\n}\n\n\nstatic int insert_rec(struct kdnode **nptr, const double *pos, void *data, int dir, int dim)\n{\n\tint new_dir;\n\tstruct kdnode *node;\n\n\tif(!*nptr) {\n\t\tif(!(node = malloc(sizeof *node))) {\n\t\t\treturn -1;\n\t\t}\n\t\tif(!(node->pos = malloc(dim * sizeof *node->pos))) {\n\t\t\tfree(node);\n\t\t\treturn -1;\n\t\t}\n\t\tmemcpy(node->pos, pos, dim * sizeof *node->pos);\n\t\tnode->data = data;\n\t\tnode->dir = dir;\n\t\tnode->left = node->right = 0;\n\t\t*nptr = node;\n\t\treturn 0;\n\t}\n\n\tnode = *nptr;\n\tnew_dir = (node->dir + 1) % dim;\n\tif(pos[node->dir] < node->pos[node->dir]) {\n\t\treturn insert_rec(&(*nptr)->left, pos, data, new_dir, dim);\n\t}\n\treturn insert_rec(&(*nptr)->right, pos, data, new_dir, dim);\n}\n\nint kd_insert(struct kdtree *tree, const double *pos, void *data)\n{\n\tif (insert_rec(&tree->root, pos, data, 0, tree->dim)) {\n\t\treturn -1;\n\t}\n\n\tif (tree->rect == 0) {\n\t\ttree->rect = hyperrect_create(tree->dim, pos, pos);\n\t} else {\n\t\thyperrect_extend(tree->rect, pos);\n\t}\n\n\treturn 0;\n}\n\nint kd_insertf(struct kdtree *tree, const float *pos, void *data)\n{\n\tstatic double sbuf[16];\n\tdouble *bptr, *buf = 0;\n\tint res, dim = tree->dim;\n\n\tif(dim > 16) {\n#ifndef NO_ALLOCA\n\t\tif(dim <= 256)\n\t\t\tbptr = buf = alloca(dim * sizeof *bptr);\n\t\telse\n#endif\n\t\t\tif(!(bptr = buf = malloc(dim * sizeof *bptr))) {\n\t\t\t\treturn -1;\n\t\t\t}\n\t} else {\n\t\tbptr = sbuf;\n\t}\n\n\twhile(dim-- > 0) 
{\n\t\t*bptr++ = *pos++;\n\t}\n\n\tres = kd_insert(tree, buf, data);\n#ifndef NO_ALLOCA\n\tif(tree->dim > 256)\n#else\n\tif(tree->dim > 16)\n#endif\n\t\tfree(buf);\n\treturn res;\n}\n\nint kd_insert3(struct kdtree *tree, double x, double y, double z, void *data)\n{\n\tdouble buf[3];\n\tbuf[0] = x;\n\tbuf[1] = y;\n\tbuf[2] = z;\n\treturn kd_insert(tree, buf, data);\n}\n\nint kd_insert3f(struct kdtree *tree, float x, float y, float z, void *data)\n{\n\tdouble buf[3];\n\tbuf[0] = x;\n\tbuf[1] = y;\n\tbuf[2] = z;\n\treturn kd_insert(tree, buf, data);\n}\n\nstatic int find_nearest(struct kdnode *node, const double *pos, double range, struct res_node *list, int ordered, int dim)\n{\n\tdouble dist_sq, dx;\n\tint i, ret, added_res = 0;\n\n\tif(!node) return 0;\n\n\tdist_sq = 0;\n\tfor(i=0; i<dim; i++) {\n\t\tdist_sq += SQ(node->pos[i] - pos[i]);\n\t}\n\tif(dist_sq <= SQ(range)) {\n\t\tif(rlist_insert(list, node, ordered ? dist_sq : -1.0) == -1) {\n\t\t\treturn -1;\n\t\t}\n\t\tadded_res = 1;\n\t}\n\n\tdx = pos[node->dir] - node->pos[node->dir];\n\n\tret = find_nearest(dx <= 0.0 ? node->left : node->right, pos, range, list, ordered, dim);\n\tif(ret >= 0 && fabs(dx) < range) {\n\t\tadded_res += ret;\n\t\tret = find_nearest(dx <= 0.0 ? 
node->right : node->left, pos, range, list, ordered, dim);\n\t}\n\tif(ret == -1) {\n\t\treturn -1;\n\t}\n\tadded_res += ret;\n\n\treturn added_res;\n}\n\nstatic void kd_nearest_i(struct kdnode *node, const double *pos, struct kdnode **result, double *result_dist_sq, struct kdhyperrect* rect)\n{\n\tint dir = node->dir;\n\tint i, side;\n\tdouble dummy, dist_sq;\n\tstruct kdnode *nearer_subtree, *farther_subtree;\n\tdouble *nearer_hyperrect_coord, *farther_hyperrect_coord;\n\n\t/* Decide whether to go left or right in the tree */\n\tdummy = pos[dir] - node->pos[dir];\n\tif (dummy <= 0) {\n\t\tnearer_subtree = node->left;\n\t\tfarther_subtree = node->right;\n\t\tnearer_hyperrect_coord = rect->max + dir;\n\t\tfarther_hyperrect_coord = rect->min + dir;\n\t\tside = 0;\n\t} else {\n\t\tnearer_subtree = node->right;\n\t\tfarther_subtree = node->left;\n\t\tnearer_hyperrect_coord = rect->min + dir;\n\t\tfarther_hyperrect_coord = rect->max + dir;\n\t\tside = 1;\n\t}\n\n\tif (nearer_subtree) {\n\t\t/* Slice the hyperrect to get the hyperrect of the nearer subtree */\n\t\tdummy = *nearer_hyperrect_coord;\n\t\t*nearer_hyperrect_coord = node->pos[dir];\n\t\t/* Recurse down into nearer subtree */\n\t\tkd_nearest_i(nearer_subtree, pos, result, result_dist_sq, rect);\n\t\t/* Undo the slice */\n\t\t*nearer_hyperrect_coord = dummy;\n\t}\n\n\t/* Check the distance of the point at the current node, compare it\n\t * with our best so far */\n\tdist_sq = 0;\n\tfor(i=0; i < rect->dim; i++) {\n\t\tdist_sq += SQ(node->pos[i] - pos[i]);\n\t}\n\tif (dist_sq < *result_dist_sq) {\n\t\t*result = node;\n\t\t*result_dist_sq = dist_sq;\n\t}\n\n\tif (farther_subtree) {\n\t\t/* Get the hyperrect of the farther subtree */\n\t\tdummy = *farther_hyperrect_coord;\n\t\t*farther_hyperrect_coord = node->pos[dir];\n\t\t/* Check if we have to recurse down by calculating the closest\n\t\t * point of the hyperrect and see if it's closer than our\n\t\t * minimum distance in result_dist_sq. 
*/\n\t\tif (hyperrect_dist_sq(rect, pos) < *result_dist_sq) {\n\t\t\t/* Recurse down into farther subtree */\n\t\t\tkd_nearest_i(farther_subtree, pos, result, result_dist_sq, rect);\n\t\t}\n\t\t/* Undo the slice on the hyperrect */\n\t\t*farther_hyperrect_coord = dummy;\n\t}\n}\n\nstruct kdres *kd_nearest(struct kdtree *kd, const double *pos)\n{\n\tstruct kdhyperrect *rect;\n\tstruct kdnode *result;\n\tstruct kdres *rset;\n\tdouble dist_sq;\n\tint i;\n\n\tif (!kd) return 0;\n\tif (!kd->rect) return 0;\n\n\t/* Allocate result set */\n\tif(!(rset = malloc(sizeof *rset))) {\n\t\treturn 0;\n\t}\n\tif(!(rset->rlist = alloc_resnode())) {\n\t\tfree(rset);\n\t\treturn 0;\n\t}\n\trset->rlist->next = 0;\n\trset->tree = kd;\n\n\t/* Duplicate the bounding hyperrectangle, we will work on the copy */\n\tif (!(rect = hyperrect_duplicate(kd->rect))) {\n\t\tkd_res_free(rset);\n\t\treturn 0;\n\t}\n\n\t/* Our first guesstimate is the root node */\n\tresult = kd->root;\n\tdist_sq = 0;\n\tfor (i = 0; i < kd->dim; i++)\n\t\tdist_sq += SQ(result->pos[i] - pos[i]);\n\n\t/* Search for the nearest neighbour recursively */\n\tkd_nearest_i(kd->root, pos, &result, &dist_sq, rect);\n\n\t/* Free the copy of the hyperrect */\n\thyperrect_free(rect);\n\n\t/* Store the result */\n\tif (result) {\n\t\tif (rlist_insert(rset->rlist, result, -1.0) == -1) {\n\t\t\tkd_res_free(rset);\n\t\t\treturn 0;\n\t\t}\n\t\trset->size = 1;\n\t\tkd_res_rewind(rset);\n\t\treturn rset;\n\t} else {\n\t\tkd_res_free(rset);\n\t\treturn 0;\n\t}\n}\n\nstruct kdres *kd_nearestf(struct kdtree *tree, const float *pos)\n{\n\tstatic double sbuf[16];\n\tdouble *bptr, *buf = 0;\n\tint dim = tree->dim;\n\tstruct kdres *res;\n\n\tif(dim > 16) {\n#ifndef NO_ALLOCA\n\t\tif(dim <= 256)\n\t\t\tbptr = buf = alloca(dim * sizeof *bptr);\n\t\telse\n#endif\n\t\t\tif(!(bptr = buf = malloc(dim * sizeof *bptr))) {\n\t\t\t\treturn 0;\n\t\t\t}\n\t} else {\n\t\tbptr = sbuf;\n\t}\n\n\twhile(dim-- > 0) {\n\t\t*bptr++ = *pos++;\n\t}\n\n\tres = 
kd_nearest(tree, buf);\n#ifndef NO_ALLOCA\n\tif(tree->dim > 256)\n#else\n\tif(tree->dim > 16)\n#endif\n\t\tfree(buf);\n\treturn res;\n}\n\nstruct kdres *kd_nearest3(struct kdtree *tree, double x, double y, double z)\n{\n\tdouble pos[3];\n\tpos[0] = x;\n\tpos[1] = y;\n\tpos[2] = z;\n\treturn kd_nearest(tree, pos);\n}\n\nstruct kdres *kd_nearest3f(struct kdtree *tree, float x, float y, float z)\n{\n\tdouble pos[3];\n\tpos[0] = x;\n\tpos[1] = y;\n\tpos[2] = z;\n\treturn kd_nearest(tree, pos);\n}\n\nstruct kdres *kd_nearest_range(struct kdtree *kd, const double *pos, double range)\n{\n\tint ret;\n\tstruct kdres *rset;\n\n\tif(!(rset = malloc(sizeof *rset))) {\n\t\treturn 0;\n\t}\n\tif(!(rset->rlist = alloc_resnode())) {\n\t\tfree(rset);\n\t\treturn 0;\n\t}\n\trset->rlist->next = 0;\n\trset->tree = kd;\n\n\tif((ret = find_nearest(kd->root, pos, range, rset->rlist, 0, kd->dim)) == -1) {\n\t\tkd_res_free(rset);\n\t\treturn 0;\n\t}\n\trset->size = ret;\n\tkd_res_rewind(rset);\n\treturn rset;\n}\n\nstruct kdres *kd_nearest_rangef(struct kdtree *kd, const float *pos, float range)\n{\n\tstatic double sbuf[16];\n\tdouble *bptr, *buf = 0;\n\tint dim = kd->dim;\n\tstruct kdres *res;\n\n\tif(dim > 16) {\n#ifndef NO_ALLOCA\n\t\tif(dim <= 256)\n\t\t\tbptr = buf = alloca(dim * sizeof *bptr);\n\t\telse\n#endif\n\t\t\tif(!(bptr = buf = malloc(dim * sizeof *bptr))) {\n\t\t\t\treturn 0;\n\t\t\t}\n\t} else {\n\t\tbptr = sbuf;\n\t}\n\n\twhile(dim-- > 0) {\n\t\t*bptr++ = *pos++;\n\t}\n\n\tres = kd_nearest_range(kd, buf, range);\n#ifndef NO_ALLOCA\n\tif(kd->dim > 256)\n#else\n\tif(kd->dim > 16)\n#endif\n\t\tfree(buf);\n\treturn res;\n}\n\nstruct kdres *kd_nearest_range3(struct kdtree *tree, double x, double y, double z, double range)\n{\n\tdouble buf[3];\n\tbuf[0] = x;\n\tbuf[1] = y;\n\tbuf[2] = z;\n\treturn kd_nearest_range(tree, buf, range);\n}\n\nstruct kdres *kd_nearest_range3f(struct kdtree *tree, float x, float y, float z, float range)\n{\n\tdouble buf[3];\n\tbuf[0] = x;\n\tbuf[1] = 
y;\n\tbuf[2] = z;\n\treturn kd_nearest_range(tree, buf, range);\n}\n\nvoid kd_res_free(struct kdres *rset)\n{\n\tclear_results(rset);\n\tfree_resnode(rset->rlist);\n\tfree(rset);\n}\n\nint kd_res_size(struct kdres *set)\n{\n\treturn (set->size);\n}\n\nvoid kd_res_rewind(struct kdres *rset)\n{\n\trset->riter = rset->rlist->next;\n}\n\nint kd_res_end(struct kdres *rset)\n{\n\treturn rset->riter == 0;\n}\n\nint kd_res_next(struct kdres *rset)\n{\n\trset->riter = rset->riter->next;\n\treturn rset->riter != 0;\n}\n\nvoid *kd_res_item(struct kdres *rset, double *pos)\n{\n\tif(rset->riter) {\n\t\tif(pos) {\n\t\t\tmemcpy(pos, rset->riter->item->pos, rset->tree->dim * sizeof *pos);\n\t\t}\n\t\treturn rset->riter->item->data;\n\t}\n\treturn 0;\n}\n\nvoid *kd_res_itemf(struct kdres *rset, float *pos)\n{\n\tif(rset->riter) {\n\t\tif(pos) {\n\t\t\tint i;\n\t\t\tfor(i=0; i<rset->tree->dim; i++) {\n\t\t\t\tpos[i] = rset->riter->item->pos[i];\n\t\t\t}\n\t\t}\n\t\treturn rset->riter->item->data;\n\t}\n\treturn 0;\n}\n\nvoid *kd_res_item3(struct kdres *rset, double *x, double *y, double *z)\n{\n\tif(rset->riter) {\n\t\tif(*x) *x = rset->riter->item->pos[0];\n\t\tif(*y) *y = rset->riter->item->pos[1];\n\t\tif(*z) *z = rset->riter->item->pos[2];\n\t}\n\treturn 0;\n}\n\nvoid *kd_res_item3f(struct kdres *rset, float *x, float *y, float *z)\n{\n\tif(rset->riter) {\n\t\tif(*x) *x = rset->riter->item->pos[0];\n\t\tif(*y) *y = rset->riter->item->pos[1];\n\t\tif(*z) *z = rset->riter->item->pos[2];\n\t}\n\treturn 0;\n}\n\nvoid *kd_res_item_data(struct kdres *set)\n{\n\treturn kd_res_item(set, 0);\n}\n\n/* ---- hyperrectangle helpers ---- */\nstatic struct kdhyperrect* hyperrect_create(int dim, const double *min, const double *max)\n{\n\tsize_t size = dim * sizeof(double);\n\tstruct kdhyperrect* rect = 0;\n\n\tif (!(rect = malloc(sizeof(struct kdhyperrect)))) {\n\t\treturn 0;\n\t}\n\n\trect->dim = dim;\n\tif (!(rect->min = malloc(size))) {\n\t\tfree(rect);\n\t\treturn 0;\n\t}\n\tif 
(!(rect->max = malloc(size))) {\n\t\tfree(rect->min);\n\t\tfree(rect);\n\t\treturn 0;\n\t}\n\tmemcpy(rect->min, min, size);\n\tmemcpy(rect->max, max, size);\n\n\treturn rect;\n}\n\nstatic void hyperrect_free(struct kdhyperrect *rect)\n{\n\tfree(rect->min);\n\tfree(rect->max);\n\tfree(rect);\n}\n\nstatic struct kdhyperrect* hyperrect_duplicate(const struct kdhyperrect *rect)\n{\n\treturn hyperrect_create(rect->dim, rect->min, rect->max);\n}\n\nstatic void hyperrect_extend(struct kdhyperrect *rect, const double *pos)\n{\n\tint i;\n\n\tfor (i=0; i < rect->dim; i++) {\n\t\tif (pos[i] < rect->min[i]) {\n\t\t\trect->min[i] = pos[i];\n\t\t}\n\t\tif (pos[i] > rect->max[i]) {\n\t\t\trect->max[i] = pos[i];\n\t\t}\n\t}\n}\n\nstatic double hyperrect_dist_sq(struct kdhyperrect *rect, const double *pos)\n{\n\tint i;\n\tdouble result = 0;\n\n\tfor (i=0; i < rect->dim; i++) {\n\t\tif (pos[i] < rect->min[i]) {\n\t\t\tresult += SQ(rect->min[i] - pos[i]);\n\t\t} else if (pos[i] > rect->max[i]) {\n\t\t\tresult += SQ(rect->max[i] - pos[i]);\n\t\t}\n\t}\n\n\treturn result;\n}\n\n/* ---- static helpers ---- */\n\n#ifdef USE_LIST_NODE_ALLOCATOR\n/* special list node allocators. 
*/\nstatic struct res_node *free_nodes;\n\n#ifndef NO_PTHREADS\nstatic pthread_mutex_t alloc_mutex = PTHREAD_MUTEX_INITIALIZER;\n#endif\n\nstatic struct res_node *alloc_resnode(void)\n{\n\tstruct res_node *node;\n\n#ifndef NO_PTHREADS\n\tpthread_mutex_lock(&alloc_mutex);\n#endif\n\n\tif(!free_nodes) {\n\t\tnode = malloc(sizeof *node);\n\t} else {\n\t\tnode = free_nodes;\n\t\tfree_nodes = free_nodes->next;\n\t\tnode->next = 0;\n\t}\n\n#ifndef NO_PTHREADS\n\tpthread_mutex_unlock(&alloc_mutex);\n#endif\n\n\treturn node;\n}\n\nstatic void free_resnode(struct res_node *node)\n{\n#ifndef NO_PTHREADS\n\tpthread_mutex_lock(&alloc_mutex);\n#endif\n\n\tnode->next = free_nodes;\n\tfree_nodes = node;\n\n#ifndef NO_PTHREADS\n\tpthread_mutex_unlock(&alloc_mutex);\n#endif\n}\n#endif\t/* list node allocator or not */\n\n\n/* inserts the item. if dist_sq is >= 0, then do an ordered insert */\nstatic int rlist_insert(struct res_node *list, struct kdnode *item, double dist_sq)\n{\n\tstruct res_node *rnode;\n\n\tif(!(rnode = alloc_resnode())) {\n\t\treturn -1;\n\t}\n\trnode->item = item;\n\trnode->dist_sq = dist_sq;\n\n\tif(dist_sq >= 0.0) {\n\t\twhile(list->next && list->next->dist_sq < dist_sq) {\n\t\t\tlist = list->next;\n\t\t}\n\t}\n\trnode->next = list->next;\n\tlist->next = rnode;\n\treturn 0;\n}\n\nstatic void clear_results(struct kdres *rset)\n{\n\tstruct res_node *tmp, *node = rset->rlist->next;\n\n\twhile(node) {\n\t\ttmp = node;\n\t\tnode = node->next;\n\t\tfree_resnode(tmp);\n\t}\n\n\trset->rlist->next = 0;\n}\n"
  },
  {
    "path": "data_generation/rrtstar/src/kdtree.h",
    "content": "/*\nThis file is part of ``kdtree'', a library for working with kd-trees.\nCopyright (C) 2007-2009 John Tsiombikas <nuclear@siggraph.org>\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n2. Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n3. The name of the author may not be used to endorse or promote products\n   derived from this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED\nWARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF\nMERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO\nEVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\nEXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT\nOF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\nINTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\nCONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING\nIN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY\nOF SUCH DAMAGE.\n*/\n#ifndef _KDTREE_H_\n#define _KDTREE_H_\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\nstruct kdtree;\nstruct kdres;\n\n\n/* create a kd-tree for \"k\"-dimensional data */\nstruct kdtree *kd_create(int k);\n\n/* free the struct kdtree */\nvoid kd_free(struct kdtree *tree);\n\n/* remove all the elements from the tree */\nvoid kd_clear(struct kdtree *tree);\n\n/* if called with non-null 2nd argument, the function provided\n * will be called on data pointers (see kd_insert) when nodes\n * are to be removed from the tree.\n */\nvoid kd_data_destructor(struct kdtree *tree, void (*destr)(void*));\n\n/* insert a node, specifying its position, and optional data */\nint kd_insert(struct kdtree *tree, const double *pos, void *data);\nint kd_insertf(struct kdtree *tree, const float *pos, void *data);\nint kd_insert3(struct kdtree *tree, double x, double y, double z, void *data);\nint kd_insert3f(struct kdtree *tree, float x, float y, float z, void *data);\n\n/* Find one of the nearest nodes from the specified point.\n *\n * This function returns a pointer to a result set with at most one element.\n */\nstruct kdres *kd_nearest(struct kdtree *tree, const double *pos);\nstruct kdres *kd_nearestf(struct kdtree *tree, const float *pos);\nstruct kdres *kd_nearest3(struct kdtree *tree, double x, double y, double z);\nstruct kdres *kd_nearest3f(struct kdtree *tree, float x, float y, float z);\n\n/* Find any nearest nodes from the specified point within a range.\n *\n * This function returns 
a pointer to a result set, which can be manipulated\n * by the kd_res_* functions.\n * The returned pointer can be null as an indication of an error. Otherwise\n * a valid result set is always returned which may contain 0 or more elements.\n * The result set must be deallocated with kd_res_free, after use.\n */\nstruct kdres *kd_nearest_range(struct kdtree *tree, const double *pos, double range);\nstruct kdres *kd_nearest_rangef(struct kdtree *tree, const float *pos, float range);\nstruct kdres *kd_nearest_range3(struct kdtree *tree, double x, double y, double z, double range);\nstruct kdres *kd_nearest_range3f(struct kdtree *tree, float x, float y, float z, float range);\n\n/* frees a result set returned by kd_nearest_range() */\nvoid kd_res_free(struct kdres *set);\n\n/* returns the size of the result set (in elements) */\nint kd_res_size(struct kdres *set);\n\n/* rewinds the result set iterator */\nvoid kd_res_rewind(struct kdres *set);\n\n/* returns non-zero if the set iterator reached the end after the last element */\nint kd_res_end(struct kdres *set);\n\n/* advances the result set iterator, returns non-zero on success, zero if\n * there are no more elements in the result set.\n */\nint kd_res_next(struct kdres *set);\n\n/* returns the data pointer (can be null) of the current result set item\n * and optionally sets its position to the pointers(s) if not null.\n */\nvoid *kd_res_item(struct kdres *set, double *pos);\nvoid *kd_res_itemf(struct kdres *set, float *pos);\nvoid *kd_res_item3(struct kdres *set, double *x, double *y, double *z);\nvoid *kd_res_item3f(struct kdres *set, float *x, float *y, float *z);\n\n/* equivalent to kd_res_item(set, 0) */\nvoid *kd_res_item_data(struct kdres *set);\n\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif\t/* _KDTREE_H_ */\n"
  },
  {
    "path": "data_generation/rrtstar/src/rrts.h",
    "content": "/*! \n * \\file rrts.h \n */ \n\n#ifndef __RRTS_H_\n#define __RRTS_H_\n\n\n#include \"kdtree.h\"\n\n#include <list>\n#include <set>\n#include <vector>\n\n\n\nnamespace RRTstar {\n\n\n    template<class State, class Trajectory, class System>\n    class Planner;\n\n\n    /*!\n     * \\brief RRT* Vertex class\n     *\n     * More elaborate description\n     */\n    template<class State, class Trajectory, class System>\n    class Vertex {\n            public:\n        Vertex *parent;\n        State *state;\n        std::set<Vertex*> children;\n        double costFromParent;\n        double costFromRoot;\n        Trajectory *trajFromParent;\n    \n\n    public:\n    \n        /*!\n         * \\brief Vertex constructor\n         *\n         * More elaborate description\n         */\n        Vertex ();\n\n        /*!\n         * \\brief Vertex destructor\n         *\n         * More elaborate description\n         */\n        ~Vertex ();    \n        \n        /*!\n         * \\brief Vertex copy constructor\n         *\n         * More elaborate description\n         * \n         * \\param vertexIn A reference to the vertex to be copied.\n         *\n         */\n        Vertex (const Vertex &vertexIn);\n\n        /*!\n         * \\brief Returns a reference to the state\n         *\n         * More elaborate description\n         */\n        State& getState () {return *state;}\n        \n        /*!\n         * \\brief Returns a reference to the state (constant)\n         *\n         * More elaborate description\n         */\n        State& getState () const {return *state;}\n        \n        /*!\n         * \\brief Returns a reference to the parent vertex\n         *\n         * More elaborate description\n         */\n        Vertex& getParent () {return *parent;}\n        \n        /*!\n         * \\brief Returns the accumulated cost at this vertex\n         *\n         * More elaborate description\n         */\n        double getCost () {return 
costFromRoot;}\n    \n        friend class Planner<State,Trajectory,System>;\n    };\n\n    \n    /*!\n     * \\brief RRT* Planner class\n     *\n     * More elaborate description\n     */\n    template<class State, class Trajectory, class System>\n    class Planner {\n\n\n        typedef struct kdtree KdTree;\n        typedef struct kdres KdRes;\n        typedef Vertex<State,Trajectory,System> vertex_t;\n    \n        int numDimensions;\n        \n        \n        double gamma;\n\n        \n        double lowerBoundCost;\n        vertex_t *lowerBoundVertex;\n        KdTree *kdtree;\n        \n        vertex_t *root;\n        \n        int insertIntoKdtree (vertex_t &vertexIn);\n        \n        int getNearestVertex (State& stateIn, vertex_t*& vertexPointerOut);    \n        int getNearVertices (State& stateIn, std::vector<vertex_t*>& vectorNearVerticesOut);\n        \n        int checkUpdateBestVertex (vertex_t& vertexIn);\n        \n        vertex_t* insertTrajectory (vertex_t& vertexStartIn, Trajectory& trajectoryIn);    \n        int insertTrajectory (vertex_t& vertexStartIn, Trajectory& trajectoryIn, vertex_t& vertexEndIn);\n        \n        int findBestParent (State& stateIn, std::vector<vertex_t*>& vectorNearVerticesIn,\n                            vertex_t*& vertexBestOut, Trajectory& trajectoryOut, bool& exactConnection);\n    \n        int updateBranchCost (vertex_t& vertexIn, int depth);    \n        int rewireVertices (vertex_t& vertexNew, std::vector<vertex_t*>& vectorNearVertices); \n\n    \n    public:\n    \n        /*!\n         * \\brief A list of all the vertices\n         *\n         * More elaborate description\n         */\n        std::list<vertex_t*> listVertices;\n      \n        \n        /*!\n         * \\brief Number of vertices in the list\n         *\n         * More elaborate description\n         */\n        int numVertices;\n    \n        \n        /*!\n         * \\brief A pointer to the system class\n         *\n         * More 
elaborate description\n         */\n        System *system;\n    \n        \n        /*!\n         * \\brief Planner constructor\n         *\n         * More elaborate description\n         */\n        Planner ();\n        \n        \n        /*!\n         * \\brief Planner destructor\n         *\n         * More elaborate description\n         */\n        ~Planner ();\n\n        \n        /*!\n         * \\brief Sets the gamma constant of the RRT*\n         *\n         * More elaborate description\n         *\n         * \\param gammaIn The new value of the gamma parameter \n         *\n         */\n        int setGamma (double gammaIn);\n        \n        \n        /*!\n         * \\brief Sets the dynamical system used in the RRT* trajectory generation\n         *\n         * More elaborate description\n         *\n         * \\param system A reference to the new dynamical system\n         *\n         */\n        int setSystem (System& system);\n        \n        \n        /*!\n         * \\brief Returns a reference to the root vertex\n         *\n         * More elaborate description\n         */\n        vertex_t& getRootVertex ();\n        \n        \n        /*!\n         * \\brief Initializes the RRT* algorithm\n         *\n         * More elaborate description\n         */\n        int initialize ();\n\n        /*!\n         * \\brief Executes one iteration of the RRT* algorithm\n         *\n         * More elaborate description\n         */\n        int iteration (double (&node)[2],double px, double py);\n\n        /*!\n         * \\brief Returns the cost of the best vertex in the RRT*\n         *\n         * More elaborate description\n         */\n        double getBestVertexCost () {return lowerBoundCost;}\n        \n        /*!\n         * \\brief Returns a reference to the best vertex in the RRT*\n         *\n         * More elaborate description\n         */\n        vertex_t& getBestVertex () {return *lowerBoundVertex;}\n        \n        /*!\n      
   * \\brief Returns the best trajectory as a list of double arrays\n         *\n         * More elaborate description\n         *\n         * \\param trajectory The trajectory that contains the best trajectory as a \n         *                   list of double arrays of dimension system->getNumDimensions()\n         *                   \n         *\n         */\n        int getBestTrajectory (std::list<double*>& trajectory);\n    };\n\n}\n\n#endif\n"
  },
  {
    "path": "data_generation/rrtstar/src/rrts.hpp",
    "content": "/*! \n * \\file rrts.hpp \n */ \n\n#ifndef __RRTS_HPP_\n#define __RRTS_HPP_\n\n#include <iostream>\n#include <cfloat>\n#include <cmath>\n#include <algorithm>\n\n\n#include \"rrts.h\"\n\nusing namespace std;\n\ntemplate<class State, class Trajectory, class System>\nRRTstar::Vertex<State, Trajectory, System>\n::Vertex () {\n    \n    state = NULL;\n    parent = NULL;\n    trajFromParent = NULL;\n    \n    costFromParent = 0.0;\n    costFromRoot = 0.0;\n}\n\n\ntemplate<class State, class Trajectory, class System>\nRRTstar::Vertex<State, Trajectory, System>\n::~Vertex () {\n    \n    if (state)\n        delete state;\n    parent = NULL;\n    if (trajFromParent)\n        delete trajFromParent;\n    children.clear();\n}\n\n\ntemplate<class State, class Trajectory, class System>\nRRTstar::Vertex<State, Trajectory, System>\n::Vertex(const Vertex<State, Trajectory, System>& vertexIn) {\n    \n    if (vertexIn.state)\n        state = new State (vertexIn.getState());\n    else \n        state = NULL;\n    parent = vertexIn.parent;\n    for (typename std::set< Vertex<State,Trajectory,System> * >::const_iterator iter = vertexIn.children.begin(); iter != vertexIn.children.end(); iter++)\n        children.insert (*iter);\n    costFromParent = vertexIn.costFromParent;\n    costFromRoot = vertexIn.costFromRoot;\n    if (vertexIn.trajFromParent)\n        trajFromParent = new Trajectory (*(vertexIn.trajFromParent));\n    else \n        trajFromParent = NULL;\n}\n\n\n// int Vertex::setState (const State &stateIn) {\n//   *state = stateIn;\n//   return 1;\n// }\n\n\ntemplate<class State, class Trajectory, class System>\nRRTstar::Planner<State, Trajectory, System>\n::Planner () {\n    \n    gamma = 1.0;\n    \n    lowerBoundCost = DBL_MAX;\n    lowerBoundVertex = NULL;\n    \n    kdtree = NULL; \n    \n    root = NULL;\n    \n    numVertices = 0;\n    \n    system = NULL;\n}\n\n\ntemplate<class State, class Trajectory, class System>\nRRTstar::Planner<State, Trajectory, 
System>\n::~Planner () {\n    \n    // Delete the kdtree structure\n    if (kdtree) {\n        kd_clear (kdtree);\n        kd_free (kdtree);\n    }\n    \n    // Delete all the vertices\n    for (typename std::list<Vertex <State,Trajectory,System> * >::iterator iter = listVertices.begin(); iter != listVertices.end(); iter++) \n        delete *iter;\n    \n}\n\n\ntemplate<class State, class Trajectory, class System>\nint \nRRTstar::Planner<State, Trajectory, System>\n::insertIntoKdtree (Vertex<State,Trajectory,System>& vertexIn) {\n    \n    double *stateKey = new double[numDimensions];\n    system->getStateKey ( *(vertexIn.state), stateKey);\n    kd_insert (kdtree, stateKey, &vertexIn);\n    delete [] stateKey;\n    \n    return 1;\n}\n\n\ntemplate<class State, class Trajectory, class System>\nint \nRRTstar::Planner<State,Trajectory,System>\n::getNearestVertex (State& stateIn, Vertex<State,Trajectory,System>*& vertexPointerOut) {\n    \n    // Get the state key for the query state\n    double *stateKey = new double[numDimensions];\n    system->getStateKey (stateIn, stateKey);\n    \n    // Search the kdtree for the nearest vertex\n    KdRes *kdres = kd_nearest (kdtree, stateKey);\n    if (kd_res_end (kdres))  \n        vertexPointerOut = NULL;\n    vertexPointerOut = (Vertex<State,Trajectory,System>*) kd_res_item_data (kdres);\n    \n    // Clear up the memory\n    delete [] stateKey;\n    kd_res_free (kdres);\n    \n    // Return a non-positive number if any errors\n    if (vertexPointerOut == NULL)\n        return 0;\n    \n    return 1;\n}\n\n\ntemplate<class State, class Trajectory, class System>\nint \nRRTstar::Planner<State, Trajectory, System>\n::getNearVertices (State& stateIn, std::vector< Vertex<State,Trajectory,System>* >& vectorNearVerticesOut) {\n    \n    // Get the state key for the query state\n    double *stateKey = new double[numDimensions];\n    system->getStateKey (stateIn, stateKey);\n\n    // Compute the ball radius\n    double ballRadius = 
gamma * pow( log((double)(numVertices + 1.0))/((double)(numVertices + 1.0)), 1.0/((double)numDimensions) );\n   // cout<<\"Ball Radius\"<<ballRadius<<endl;// numVertices= points found uptil now\n    // Search kdtree for the set of near vertices\n    KdRes *kdres = kd_nearest_range (kdtree, stateKey, ballRadius);\n    delete [] stateKey;\n    \n    // Create the vector data structure for storing the results\n    int numNearVertices = kd_res_size (kdres);\n\n    if (numNearVertices == 0) {\n        vectorNearVerticesOut.clear();\n        return 1;\n    }\n    vectorNearVerticesOut.resize(numNearVertices);\n    \n    // Place pointers to the near vertices into the vector \n    int i = 0;\n    kd_res_rewind (kdres);\n    while (!kd_res_end(kdres)) {\n        Vertex<State,Trajectory,System> *vertexCurr = (Vertex<State,Trajectory,System> *) kd_res_item_data (kdres);\n        vectorNearVerticesOut[i] = vertexCurr;\n        kd_res_next (kdres);\n        i++;\n    }\n    \n    // Free temporary memory\n    kd_res_free (kdres);\n\n    return 1;\n}\n\n\ntemplate<class State, class Trajectory, class System>\nint\nRRTstar::Planner<State, Trajectory, System>\n::checkUpdateBestVertex (Vertex<State,Trajectory,System>& vertexIn) {\n    \n    if (system->isReachingTarget(vertexIn.getState())){\n        \n        \n        double costCurr = vertexIn.getCost();\n        if ( (lowerBoundVertex == NULL) || ( (lowerBoundVertex != NULL) && (costCurr < lowerBoundCost)) ) {\n\n            lowerBoundVertex = &vertexIn;\n            lowerBoundCost = costCurr;\n        }\n    }\n    \n    return 1;\n}\n\n\ntemplate<class State, class Trajectory, class System>\nRRTstar::Vertex<State,Trajectory,System>*\nRRTstar::Planner<State, Trajectory, System>\n::insertTrajectory (Vertex<State,Trajectory,System>& vertexStartIn, Trajectory& trajectoryIn) {\n    \n    // Check for admissible cost-to-go\n    if (lowerBoundVertex != NULL) {\n        double costToGo = system->evaluateCostToGo 
(trajectoryIn.getEndState());\n        if (costToGo >= 0.0) \n            if (lowerBoundCost < vertexStartIn.getCost() + costToGo) \n                return NULL;\n    }\n    \n    // Create a new end vertex\n    Vertex<State,Trajectory,System>* vertexNew = new Vertex<State,Trajectory,System>;\n    vertexNew->state = new State;\n    vertexNew->parent = NULL;\n    vertexNew->getState() = trajectoryIn.getEndState();\n    insertIntoKdtree (*vertexNew);  \n    this->listVertices.push_front (vertexNew);\n    this->numVertices++;\n    \n    // Insert the trajectory between the start and end vertices\n    insertTrajectory (vertexStartIn, trajectoryIn, *vertexNew);\n    \n    return vertexNew;\n}\n\n\ntemplate<class State, class Trajectory, class System>\nint \nRRTstar::Planner<State, Trajectory, System>\n::insertTrajectory (Vertex<State,Trajectory,System>& vertexStartIn, Trajectory& trajectoryIn, Vertex<State,Trajectory,System>& vertexEndIn) {\n\n    // Update the costs\n    vertexEndIn.costFromParent = trajectoryIn.evaluateCost();\n    vertexEndIn.costFromRoot = vertexStartIn.costFromRoot + vertexEndIn.costFromParent;\n    checkUpdateBestVertex (vertexEndIn);\n    \n    // Update the trajectory between the two vertices\n    if (vertexEndIn.trajFromParent)\n        delete vertexEndIn.trajFromParent;\n    vertexEndIn.trajFromParent = new Trajectory (trajectoryIn);\n    // Update the parent to the end vertex\n    if (vertexEndIn.parent)\n        vertexEndIn.parent->children.erase (&vertexEndIn);\n    vertexEndIn.parent = &vertexStartIn;\n    \n    // Add the end vertex to the set of chilren\n    vertexStartIn.children.insert (&vertexEndIn);\n    \n    return 1;\n}\n\n\ntemplate<class State, class Trajectory, class System>\nint \nRRTstar::Planner<State, Trajectory, System>\n::setSystem (System& systemIn) {\n    \n    if (system)\n        delete system;\n    \n    system = &systemIn;\n    \n    numDimensions = system->getNumDimensions ();\n    \n    // Delete all the 
vertices\n    for (typename std::list< Vertex<State,Trajectory,System>* >::iterator iter = listVertices.begin(); iter != listVertices.end(); iter++)\n        delete *iter;\n    numVertices = 0;\n    lowerBoundCost = DBL_MAX;\n    lowerBoundVertex = NULL;\n    \n    // Clear the kdtree\n    if (kdtree) {\n        kd_clear (kdtree);\n        kd_free (kdtree);\n    }\n    kdtree = kd_create (numDimensions);\n    \n    // Initialize the root vertex\n    root = new Vertex<State,Trajectory,System>;\n    root->state = new State (system->getRootState());\n    root->costFromParent = 0.0;\n    root->costFromRoot = 0.0;\n    root->trajFromParent = NULL;\n    \n    return 1;\n}\n\n\n\ntemplate<class State, class Trajectory, class System>\nRRTstar::Vertex<State, Trajectory, System>& \nRRTstar::Planner<State, Trajectory, System>\n::getRootVertex () {\n    \n    return *root;\n}\n\n\n\ntemplate<class State, class Trajectory, class System>\nint \nRRTstar::Planner<State, Trajectory, System>\n::initialize () {\n    \n    // If there is no system, then return failure\n    if (!system)\n        return 0;\n    \n    // Backup the root\n    Vertex<State,Trajectory,System> *rootBackup = NULL;\n    if (root)\n        rootBackup = new Vertex<State,Trajectory,System> (*root);\n    cout<<sizeof(Vertex<State,Trajectory,System>)<<endl;\n    // Delete all the vertices\n    for (typename std::list< Vertex<State,Trajectory,System>* >::iterator iter = listVertices.begin(); iter != listVertices.end(); iter++)\n        delete *iter;\n    listVertices.clear();\n    numVertices = 0;\n    lowerBoundCost = DBL_MAX;\n    lowerBoundVertex = NULL;\n    \n    // Clear the kdtree\n    if (kdtree) {\n        kd_clear (kdtree);\n        kd_free (kdtree);\n    }\n    kdtree = kd_create (system->getNumDimensions());\n    \n    // Initialize the variables\n    numDimensions = system->getNumDimensions();\n    root = rootBackup;\n    if (root){\n        listVertices.push_back(root);\n        insertIntoKdtree 
(*root);\n        numVertices++;\n    }\n    lowerBoundCost = DBL_MAX;\n    lowerBoundVertex = NULL;\n    \n    return 1;\n}\n\n\ntemplate<class State, class Trajectory, class System>\nint \nRRTstar::Planner<State, Trajectory, System>\n::setGamma (double gammaIn) {\n    \n    if (gammaIn < 0.0)\n        return 0;\n    \n    gamma = gammaIn;\n    \n    return 1;\n}\n\n\n\n\ntemplate <class State,class Trajectory, class System>\nint compareVertexCostPairs (std::pair<RRTstar::Vertex<State,Trajectory,System>*,double> i, std::pair<RRTstar::Vertex<State,Trajectory,System>*,double> j) {\n    \n    return (i.second < j.second);\n}\n\n\ntemplate<class State, class Trajectory, class System>\nint \nRRTstar::Planner<State, Trajectory, System>\n::findBestParent (State& stateIn, std::vector< Vertex<State,Trajectory,System>* >& vectorNearVerticesIn, Vertex<State,Trajectory,System>*& vertexBest, Trajectory& trajectoryOut, bool& exactConnection) {\n    \n    \n    // Compute the cost of extension for each near vertex\n    int numNearVertices = vectorNearVerticesIn.size();\n\n    std::vector< std::pair<Vertex<State,Trajectory,System>*,double> > vectorVertexCostPairs(numNearVertices);\n    \n    int i = 0;\n    for (typename std::vector< Vertex<State,Trajectory,System>* >::iterator iter = vectorNearVerticesIn.begin(); iter != vectorNearVerticesIn.end(); iter++) {\n        \n        vectorVertexCostPairs[i].first = *iter;\n        exactConnection = false;\n        double trajCost = system->evaluateExtensionCost ( *((*iter)->state), stateIn, exactConnection);\n        //if(trajCost>=0)\n        vectorVertexCostPairs[i].second = (*iter)->costFromRoot + trajCost;\n        i++;\n    }\n\n    // Sort vertices according to cost\n    std::sort (vectorVertexCostPairs.begin(), vectorVertexCostPairs.end(), compareVertexCostPairs<State,Trajectory,System>);\n    \n    // Try out each extension according to increasing cost\n    i = 0;\n    bool connectionEstablished = false;\n    for (typename 
std::vector< std::pair<Vertex<State,Trajectory,System>*,double> >::iterator iter = vectorVertexCostPairs.begin(); \n         iter != vectorVertexCostPairs.end(); iter++) {\n        \n        Vertex<State,Trajectory,System>* vertexCurr = iter->first;\n\n        // Extend the current vertex towards stateIn (and this time check for collision with obstacles)\n        exactConnection = false;\n        if (system->extendTo(*(vertexCurr->state), stateIn, trajectoryOut, exactConnection) > 0) {\n            vertexBest = vertexCurr;\n            connectionEstablished = true;\n            break;\n\n        }\n\n    }\n\n    // Return success if a connection was established\n    if (connectionEstablished)\n        return 1;\n\n    // If the connection could not be established then return zero\n    return 0;\n}\n\n\ntemplate<class State, class Trajectory, class System>\nint \nRRTstar::Planner<State, Trajectory, System>\n::updateBranchCost (Vertex<State,Trajectory,System>& vertexIn, int depth) {\n    \n    \n    // Update the cost for each children\n    for (typename std::set< Vertex<State,Trajectory,System>* >::iterator iter = vertexIn.children.begin(); iter != vertexIn.children.end(); iter++) {\n        \n        Vertex<State,Trajectory,System>& vertex = **iter;\n        \n        vertex.costFromRoot = vertexIn.costFromRoot + vertex.costFromParent;\n\n        checkUpdateBestVertex (vertex);\n        \n        updateBranchCost (vertex, depth + 1);\n    }\n    \n    \n    \n    return 1;\n}\n\n\ntemplate<class State, class Trajectory, class System>\nint \nRRTstar::Planner<State, Trajectory, System>\n::rewireVertices (Vertex<State,Trajectory,System>& vertexNew, std::vector< Vertex<State,Trajectory,System>* >& vectorNearVertices) {\n    \n\n    // Repeat for all vertices in the set of near vertices\n    for (typename std::vector< Vertex<State,Trajectory,System>* >::iterator iter = vectorNearVertices.begin(); iter != vectorNearVertices.end(); iter++) {\n        \n        
Vertex<State,Trajectory,System>& vertexCurr = **iter; \n        \n        // Check whether the extension results in an exact connection\n        bool exactConnection = false;\n        double costCurr = system->evaluateExtensionCost (*(vertexNew.state), *(vertexCurr.state), exactConnection);\n        if ( (exactConnection == false) || (costCurr < 0) )\n            continue;\n        \n        // Check whether the cost of the extension is smaller than current cost\n        double totalCost = vertexNew.costFromRoot + costCurr;\n\n        if (totalCost < vertexCurr.costFromRoot - 0.001) {\n            \n            // Compute the extension (checking for collision)\n            Trajectory trajectory;\n\n            if (system->extendTo (*(vertexNew.state), *(vertexCurr.state), trajectory, exactConnection) <= 0 ) \n                continue;\n\n            // Insert the new trajectory to the tree by rewiring\n            insertTrajectory (vertexNew, trajectory, vertexCurr);\n            \n            // Update the cost of all vertices in the rewired branch\n            updateBranchCost (vertexCurr, 0);\n        }\n    }\n    \n    return 1;\n}\n\n\ntemplate<class State, class Trajectory, class System>\nint \nRRTstar::Planner<State, Trajectory, System>\n::iteration (double (&node)[2],double px, double py) {\n    \nint chexk;\nint s=0;  \n//double node[2];\n    // 1. Sample a new state\n\n    State stateRandom;\n    system->sampleState (stateRandom, node,px,py);\n\n   \n    \n    \t// 2. Compute the set of all near vertices\n    std::vector< Vertex<State,Trajectory,System>* > vectorNearVertices;\n    getNearVertices (stateRandom, vectorNearVertices);\n\n\n    // 3. 
Find the best parent and extend from that parent\n    Vertex<State,Trajectory,System>* vertexParent = NULL;\n    Trajectory trajectory,trajectory1;\n\n    bool exactConnection = false;\n    \n    if (vectorNearVertices.size() == 0) {\n\n        // 3.a Extend the nearest\n        if (getNearestVertex (stateRandom, vertexParent) <= 0){\n\n\n        \treturn 0;\n        }\n        if (system->extendTo(vertexParent->getState(), stateRandom, trajectory, exactConnection) <= 0)\n        {\n\n        \treturn  0;\n        }\n    }\n    else\n    {\n        // 3.b Extend the best parent within the near vertices\n        if (findBestParent (stateRandom, vectorNearVertices, vertexParent, trajectory, exactConnection) <= 0) \n            return 0;\n\n    }\n    \n\n    // 3.c add the trajectory from the best parent to the tree\n    Vertex<State,Trajectory,System>* vertexNew = insertTrajectory (*vertexParent, trajectory);\n    if (vertexNew == NULL) \n        return 0;\n    \n\n    // 4. Rewire the tree  \n    if (vectorNearVertices.size() > 0) {\n        rewireVertices (*vertexNew, vectorNearVertices);\n\n    }\n\n}\n\n\ntemplate<class State, class Trajectory, class System>\nint \nRRTstar::Planner<State, Trajectory, System>\n::getBestTrajectory (std::list<double*>& trajectoryOut) {\n    \n    if (lowerBoundVertex == NULL)\n        return 0;\n    \n    Vertex<State,Trajectory,System>* vertexCurr = lowerBoundVertex;\n    \n    \n    while (vertexCurr) {\n        \n        State& stateCurr = vertexCurr->getState();\n        \n        double *stateArrCurr = new double[2]; \n        stateArrCurr[0] = stateCurr[0];\n        stateArrCurr[1] = stateCurr[1];\n        //stateArrCurr[2] = stateCurr[2];\n        \n        trajectoryOut.push_front (stateArrCurr);\n        \n        Vertex<State,Trajectory,System>& vertexParent = vertexCurr->getParent(); \n        \n        if (&vertexParent != NULL) {\n            \n            State& stateParent = vertexParent.getState();\n            \n   
         std::list<double*> trajectory;\n            system->getTrajectory (stateParent, stateCurr, trajectory);\n\n            trajectory.reverse ();\n            for (std::list<double*>::iterator iter = trajectory.begin(); iter != trajectory.end(); iter++) {\n                \n                double *stateArrFromParentCurr = *iter;\n                \n                stateArrCurr = new double[2];\n                stateArrCurr[0] = stateArrFromParentCurr[0];\n                stateArrCurr[1] = stateArrFromParentCurr[1];\n               //stateArrCurr[2] = stateArrFromParentCurr[2];\n                \n                trajectoryOut.push_front (stateArrCurr);\n                \n                delete [] stateArrFromParentCurr;\n            }\n        }\n        \n        vertexCurr = &vertexParent;\n    }\n\n\n    return 1;\n}\n\n\n#endif\n"
  },
  {
    "path": "data_generation/rrtstar/src/rrts_main.cpp",
    "content": "#define LIBBOT_PRESENT 0\n\n#include <iostream>\n#include <fstream>\n#include <ctime>\n\n#include <bot_core/bot_core.h>\n\n#include <lcm/lcm.h>\n\n#include <lcmtypes/lcmtypes.h>\n\n#include \"rrts.hpp\"\n#include \"system_single_integrator.h\"\n#include<list>\n#include <time.h>\n#include <string>\n#include <sstream>\n#include <sys/stat.h>\n\nusing namespace RRTstar;\nusing namespace SingleIntegrator;\n\nusing namespace std;\n\n\n//int sw=1;\n//int algo=0;\ntypedef Planner<State,Trajectory,System> planner_t;\ntypedef Vertex<State,Trajectory,System> vertex_t;\nclass obst\n{\npublic:\n\tdouble center[3];\n\tdouble size[3];\n\tdouble radius;\n};\nbool check (double* first, double* second)\n{\n    if(first[0]==second[0] && first[1]==second[1])\n    return true;\n    else\n        return false;\n}\nint size=50000;\nint publishTree (lcm_t *lcm, planner_t& planner, System& system);\nint publishPC (lcm_t *lcm, double nodes[8000][2], int sze, System& system);\nint publishTraj (lcm_t *lcm, planner_t& planner, System& system, int num, string fod);\n//lcm_t *lcm, region& regionOperating, region& regionGoal,list<region*>& obstacles\n//int publishEnvironment (lcm_t *lcm);\n\nint publishEnvironment(lcm_t *lcm, region& regionOperating, region& regionGoal, list<region*>& obstacles);\n//ofstream out(\"nodes1\", ios::out | ios::binary);\n// double nodes[50000][2];\nstring env_path=\"env\";\nmkdir(env_path.c_str(),ACCESSPERMS); // create folder with env label to store generated trajectories\n\nint main () {\n    \n    double nodes[size][2]; // nodes from obstacle-free space that will become random start-goal pairs\n\n    srand (time(0));\n    \n  \n    /*\n\t//-In order to generate random environments, we randomly sample 20 obstacles locations in the workspace, as follow:\n\t//-Orignal workspace is 40X40 but we sample locations from 30X30 space in order to avoid obstacles going out of workspace boundry.\n\t////////////////////////////////////////////////////// 
\n\tdouble obst[20][2];\n    for (int i=0;i<20;i++)\n    for (int j = 0; j < 2; j++)\n\t     obst[i][j] = (double)rand()/(RAND_MAX + 1.0)*30.0 \n        - 15.0 + 0.0;\n         \n    ofstream out(\"obs.dat\", ios::out | ios::binary);\n          if(!out) {\n                        cout << \"Cannot open file.\";\n                return 1;\n                }\n\n          out.write((char *) &obst, sizeof nodes);\n          out.close();\n\t//////////////////////////////////////////////////\n    */\n    // load obstacle locations \n    double fnum[20][2];\n    ifstream in(\"obs.dat\", ios::in | ios::binary);\n    in.read((char *) &fnum, sizeof fnum);\n    //We drop 7 obstacle blocks in the workspace to generate random environments using 20P7=77520 permutations. Note that we can have now 77520 different environments but we use 110 envs only\n    int perm[77520][7];\n    ifstream in2(\"obs_perm2.dat\", ios::in | ios::binary);\n    in2.read((char *) &perm, sizeof perm);\n    \n    //start and goal region\n   \n    \n   \n   int i=0;\n   for (i=0;i<1;i++){\n    \tstring env_no;          // string which will contain the result\n    \tostringstream convert2;   // stream used for the conversion\n    \tconvert2 << i;      // insert the textual representation of 'Number' in the characters in the stream\n    \tenv_no =convert2.str();\n    \tstring path=\"env/e\"+env_no;\n    \tmkdir(path.c_str(),ACCESSPERMS); // create folder with env label to store generated trajectories\n\t\t/*\n\t\tWe also generted a random set of nodes from obstacle-free space, denoted as graph. 
These nodes are used as start and goal pairs \n\t\t*/\n\t\tdouble fnum2[50000][2];\n\t\tpath=\"graph/graph\"+env_no+\".dat\";\n\t    ifstream in3(path.c_str(), ios::in | ios::binary);\n\t    in3.read((char *) &fnum2, sizeof fnum2);\n\t    int t=0;\n  \t\tfor (int t=0;t<100;t++){  \n\t\t\tcout<<\"t\"<<t<<endl;  \n\t\t\t \n\t\t\tplanner_t rrts;\n\t\t\n\t\t\tcout << \"RRTstar is alive\" << endl;\n\t\t\n\t\t\n\t\t\t// Get lcm\n\t\t\tlcm_t *lcm = bot_lcm_get_global (NULL);\n\t\t\t\t\n\t\t\n\t\t\t// Create the dynamical system\n\t\t\tSystem system;\n\t\t\n\t\t\t// Three dimensional configuration space\n\t\t\tsystem.setNumDimensions (2);\n\t\t\t// Define the operating region\n\t\t\tsystem.regionOperating.setNumDimensions(2);\n\t\t\tsystem.regionOperating.center[0] = 0.0;\n\t\t\tsystem.regionOperating.center[1] = 0.0;\n\t\t\tsystem.regionOperating.center[2] = 0.0;\n\t\t\tsystem.regionOperating.size[0] = 40.0;\n\t\t\tsystem.regionOperating.size[1] = 40.0;\n\t\t\tsystem.regionOperating.size[2] = 0.0;\n\t\t\t// Define the goal region\t\t\n\t\t\tsystem.regionGoal.setNumDimensions(2);\n\t\t\tsystem.regionGoal.center[0] =fnum2[t][0];\n\t\t\tsystem.regionGoal.center[1] =fnum2[t][1];\n\t\t\tsystem.regionGoal.center[2] = 0.0;// fnum2[t][2] //if 3D\n\t\t\tsystem.regionGoal.size[0] = 1.0;\n\t\t\tsystem.regionGoal.size[1] = 1.0;\n\t\t\tsystem.regionGoal.size[2] = 0.0;\n\t\t    region *obstacle,*obstacle1,*obstacle2,*obstacle3,*obstacle4,*obstacle5,*obstacle6;\n\t\t    obstacle = new region;\n\t\t    obstacle1 = new region;  \n\t\t    obstacle2 = new region;  \n\t\t    obstacle3=new region;\n\t\t    obstacle4=new region;\n\t\t    obstacle5=new region;\n\t\t    obstacle6=new region;\n\n \n\t\t\tobstacle->setNumDimensions(2);\n\t\t\tobstacle->center[0] =fnum[perm[i][0]][0];\n\t\t\tobstacle->center[1] = fnum[perm[i][0]][1];\n\t\t\tobstacle->center[2] = 0.0;\n\t\t\tobstacle->size[0] = 5.0;\n\t\t\tobstacle->size[1] = 5.0;\n\t\t\tobstacle->size[2] = 0.0;\n\t\t 
\n\t\t\tobstacle1->setNumDimensions(2);\n\t\t\tobstacle1->center[0] = fnum[perm[i][1]][0];\n\t\t\tobstacle1->center[1] = fnum[perm[i][1]][1];\n\t\t\tobstacle1->center[2] = 0.0;\n\t\t\tobstacle1->size[0] = 5.0;\n\t\t\tobstacle1->size[1] = 5.0;\n\t\t\tobstacle1->size[2] = 0.0;\n\n\t\t\tobstacle2->setNumDimensions(2);\n\t\t\tobstacle2->center[0] = fnum[perm[i][2]][0];\n\t\t\tobstacle2->center[1] = fnum[perm[i][2]][1];\n\t\t\tobstacle2->center[2] = 0.0;\n\t\t\tobstacle2->size[0] = 5.0;\n\t\t\tobstacle2->size[1] = 5.0;\n\t\t\tobstacle2->size[2] = 0.0;\n\t\t\n\t\t\tobstacle3->setNumDimensions(2);\n\t\t\tobstacle3->center[0] = fnum[perm[i][3]][0];\n\t\t\tobstacle3->center[1] = fnum[perm[i][3]][1];\n\t\t\tobstacle3->center[2] = 0.0;\n\t\t\tobstacle3->size[0] = 5.0;\n\t\t\tobstacle3->size[1] = 5.0;\n\t\t\tobstacle3->size[2] = 0.0;\n\t\t\n\t\t\tobstacle4->setNumDimensions(2);\n\t\t\tobstacle4->center[0] = fnum[perm[i][4]][0];\n\t\t\tobstacle4->center[1] = fnum[perm[i][4]][1];\n\t\t\tobstacle4->center[2] = 0.0;\n\t\t\tobstacle4->size[0] = 5.0;\n\t\t\tobstacle4->size[1] = 5.0;\n\t\t\tobstacle4->size[2] = 0.0;\n\n\t\t\tobstacle5->setNumDimensions(2);\n\t\t\tobstacle5->center[0] = fnum[perm[i][5]][0];\n\t\t\tobstacle5->center[1] = fnum[perm[i][5]][1];\n\t\t\tobstacle5->center[2] = 0.0;\n\t\t\tobstacle5->size[0] = 5.0;\n\t\t\tobstacle5->size[1] = 5.0;\n\t\t\tobstacle5->size[2] = 0.0;\n\t\t\n\t\t\n\t\t\tobstacle6->setNumDimensions(2);\n\t\t\tobstacle6->center[0] = fnum[perm[i][6]][0];\n\t\t\tobstacle6->center[1] = fnum[perm[i][6]][1];\n\t\t\tobstacle6->center[2] = 0.0;\n\t\t\tobstacle6->size[0] = 5.0;\n\t\t\tobstacle6->size[1] = 5.0;\n\t\t\tobstacle6->size[2] = 0.0;\n    \n\n                                        \n\t\t\tsystem.obstacles.push_front(obstacle);  // Add the obstacle to the list\n\t\t\tsystem.obstacles.push_front(obstacle1);  // Add the obstacle to the list\n\t\t\tsystem.obstacles.push_front(obstacle2);  // Add the obstacle to the 
list\n\t\t\tsystem.obstacles.push_front(obstacle3);  // Add the obstacle to the list\n\t\t\tsystem.obstacles.push_front(obstacle4);  // Add the obstacle to the list\n\t\t\tsystem.obstacles.push_front(obstacle5);\n\t\t\tsystem.obstacles.push_front(obstacle6);\n\n\t\t\t// publishEnvironment(lcm, system.regionOperating, system.regionGoal, system.obstacles);\n\t\t\t// Add the system to the planner\n\t\t\trrts.setSystem (system);\n\t\t\t//publishEnvironment (lcm);\n\t\t\t// Set up the root vertex\n\t\t\tvertex_t &root = rrts.getRootVertex();  \n\t\t\tState &rootState = root.getState();\n\t\t\t\n\t\t\t// Define start state\t\t\t\n\t\t\trootState[0] =fnum2[t+1][0];\n\t\t\trootState[1] =fnum2[t+1][1];\n\t\t\trootState[2] = 0.0;\n \n \n\t\t\t// Initialize the planner\n\t\t\trrts.initialize ();\n\n\t\t\t// This parameter should be larger than 1.5 for asymptotic \n\t\t\t//   optimality. Larger values will weigh on optimization \n\t\t\t//   rather than exploration in the RRT* algorithm. Lower \n\t\t\t//   values, such as 0.1, should recover the RRT.\n\t\t\trrts.setGamma (1.5);\n\n    \n    \t\tclock_t start = clock();\n    \t\tint j=0;\n\t\t\tdouble node[2];\n\n\n\t\t\t// random obstacle-free nodes generation. 
These nodes were generated to form random start and goal pairs.\n\t\t\t/*\n\t\t\t  int s=0;\n\t\t\twhile(j<80000)\n\t\t\t\t{\n\t\t\t  \n\t\t\t\t\t\t     rrts.iteration(node);\n\t\t\t\t\t\t         j++;\n\t\t\t\t\t\t      if (node[0]!=0 && node[1]!=0)\n\t\t\t\t\t\t      {\n\t\t\t\t\t\t         if(s<size){ \n\t\t\t\t\t\t          nodes[s][0]=node[0];\n\t\t\t\t\t\t          nodes[s][1]=node[1];\n\t\t\t\t\t\t          s++;}\n\t\t\t\t\t\t      } \n\t\t\t\t\t\t         \n\t\t\t}    \n\n\t\t\t \n\t\t\tcout<<\"s:\"<<s<<endl;\n\t\t\tstring env_no;          // string which will contain the result\n\t\t\tostringstream convert;   // stream used for the conversion\n\t\t\tconvert << i;      // insert the textual representation of 'Number' in the characters in the stream\n\t\t\tResult =convert.str();\n\t\t\t ofstream out((\"graph/graph\"+env_no+\".dat\").c_str(), ios::out | ios::binary);\n\t\t\t\t\t  if(!out) {\n\t\t\t\t\t\t            cout << \"Cannot open file.\";\n\t\t\t\t\t\t    return 1;\n\t\t\t\t\t\t    }\n\n\t\t\t\t\t  out.write((char *) &nodes, sizeof nodes);\n\t\t\t\t\t  out.close();\n\t\t\t*/\n\n\n\t\t \n\t\t // p-rrt* path generation\n\t\tdouble cost=1000;\n\t\tint k=0;\n\t\tint c=0, cp=0;\n\t\tfor (int j=0;j<=100000;j+=2000){\n\t\t\n\t\t\tint limit= 5000+j;\n\t\t\t//cout<<limit<<endl;\n\t\t\twhile(k<limit)\n\t\t\t{\n\t\t\t\t\n\t\t\t\trrts.iteration(node,-1,-1);\n\t\t\t    k++;\n\t\t\t}\n\t\t\tvertex_t & vertexBest=rrts.getBestVertex ();\n\t\t\tif(& vertexBest!=NULL)\n\t\t\t\tif (vertexBest.costFromRoot< cost){\n\t\t\t\t       cost=vertexBest.costFromRoot;\n\t\t\t\t       c++;\n\t\t\t\t    }\n\t\t\tif(cp!=c)\n\t\t\t\tcp=c;\n\t\t\telse\n\t\t\t\tbreak;\n\t\t\t\t  \n\t\t\n\t\t}\n\n\t\tcout<<\"iterations:\"<<k<<endl;\n\n  \t\t // Run the algorithm for 10000 iteartions\n\n\n\t\t// Generate obstacle point cloud for each environment to train auto encoder.\n\t\t/*\n\t\tint s=0;\n\t\t double obcloud[1400][2]; \n\t\t\tfor (list<region*>::iterator iter = system.obstacles.begin(); 
iter != system.obstacles.end(); iter++){\n\t\t\t\t\n\t\t\t\tregion* obstacleCurr = *iter;\n\t\t\t\t\n\t\t\t\tfor (int i=s; i< (200+s); i++){\n\t\t\t\t    for (int j = 0; j < system.getNumDimensions(); j++)\n\t\t\t\t\t    obcloud[i][j] = (double)rand()/(RAND_MAX + 1.0)*obstacleCurr->size[j] \n\t\t\t\t- obstacleCurr->size[j]/2.0 + obstacleCurr->center[j];\n\n\t\t\t\t}\n\t\t\t\tif (s< 1400)\n\t\t\t\t    s=s+200;\n\t\t\t\telse\n\t\t\t\t    break;\n\t\t\t}       \n\t\t string Result;          // string which will contain the result\n\t\tostringstream convert;   // stream used for the conversion\n\t\tconvert << i;      // insert the textual representation of 'Number' in the characters in the stream\n\t\tResult =convert.str();\n\t\t ofstream out((\"obs_cloud/obc\"+Result+\".dat\").c_str(), ios::out | ios::binary);\n\t\t\t\t  if(!out) {\n\t\t\t\t                cout << \"Cannot open file.\";\n\t\t\t\t        return 1;\n\t\t\t\t        }\n\n\t\t\t\t  out.write((char *) &obcloud, sizeof obcloud);\n\t\t\t\t  out.close();  \n\t\t */\n\n    \tclock_t finish = clock();\n    \tcout << \"Time : \" << ((double)(finish-start))/CLOCKS_PER_SEC << endl;\n\n\n    \t//publishTree (lcm, rrts, system);\n    \t// stores path in the folder env_no\n    \tpublishTraj (lcm, rrts, system,t, env_no );\n    \n   \n\n   } } \n      \n    return 1;\n}\n\n\nint publishEnvironment (lcm_t *lcm, region& regionOperating, region& regionGoal, list<region*>& obstacles) {\n    \n    // Publish the environment\n    lcmtypes_environment_t *environment = (lcmtypes_environment_t*) malloc (sizeof(lcmtypes_environment_t));\n    \n    environment->operating.center[0] = regionOperating.center[0];\n    environment->operating.center[1] = regionOperating.center[1];\n    environment->operating.center[2] = regionOperating.center[2];\n    environment->operating.size[0] = regionOperating.size[0];\n    environment->operating.size[1] = regionOperating.size[1];\n    environment->operating.size[2] = 
regionOperating.size[2];\n\n    environment->goal.center[0] = regionGoal.center[0];\n    environment->goal.center[1] = regionGoal.center[1];\n    environment->goal.center[2] = regionGoal.center[2];\n    environment->goal.size[0] = regionGoal.size[0];\n    environment->goal.size[1] = regionGoal.size[1];\n    environment->goal.size[2] = regionGoal.size[2];\n    \n    \n    environment->num_obstacles = obstacles.size();\n    \n    if (environment->num_obstacles > 0) \n        environment->obstacles = (lcmtypes_region_3d_t *) malloc (sizeof(lcmtypes_region_3d_t));\n    \n    int idx_obstacles = 0;\n    for (list<region*>::iterator iter = obstacles.begin(); iter != obstacles.end(); iter++){\n        \n        region* obstacleCurr = *iter;\n        \n        environment->obstacles[idx_obstacles].center[0] = obstacleCurr->center[0];\n        environment->obstacles[idx_obstacles].center[1] = obstacleCurr->center[1];\n        environment->obstacles[idx_obstacles].center[2] = obstacleCurr->center[2];\n        environment->obstacles[idx_obstacles].size[0] = obstacleCurr->size[0];\n        environment->obstacles[idx_obstacles].size[1] = obstacleCurr->size[1];\n        environment->obstacles[idx_obstacles].size[2] = obstacleCurr->size[2];\n        \n        idx_obstacles++;\n    }\n    \n    \n    lcmtypes_environment_t_publish (lcm, \"ENVIRONMENT\", environment);\n    lcmtypes_environment_t_destroy (environment);\n    \n    return 1;\n}\n\nint publishTraj (lcm_t *lcm, planner_t& planner, System& system, int num, string fod) {\n    \n    \n    cout << \"Publishing trajectory -- start\" << endl;\n\n    \n    vertex_t& vertexBest = planner.getBestVertex ();\n    \n    if (&vertexBest == NULL) {\n        cout << \"No best vertex\" << endl;\n        double path[1][2];\n        path[0][0]=0;\n        path[0][1]=0;\n        string Result;          // string which will contain the result\n    \tostringstream convert;   // stream used for the conversion\n    \tconvert << num;      // 
insert the textual representation of 'Number' in the characters in the stream\n    \tResult =convert.str(); // set 'Result' to the contents of the stream\n\n\n\n  \t\tofstream out((\"e\"+fod+\"/path\"+Result+\".dat\").c_str(), ios::out | ios::binary);\n  \t\tif(!out) {\n      \t\tcout << \"Cannot open file.\";\n                return 1;\n        }\n\n  \t\tout.write((char *) &path, sizeof path);\n  \t\tout.close();\n        return 0;\n    }\n\n    cout<<\"Cost From root \"<<vertexBest.costFromRoot;\n    list<double*> stateList;\n    planner.getBestTrajectory (stateList);\n    lcmtypes_trajectory_t *opttraj = (lcmtypes_trajectory_t *) malloc (sizeof (lcmtypes_trajectory_t));\n    opttraj->num_states = stateList.size();\n    opttraj->states = (lcmtypes_state_t *) malloc (opttraj->num_states * sizeof (lcmtypes_state_t));\n    int psize=(stateList.size()-1)/2+1;\n    int pindex=0;\n    double path[psize][2];\n    int stateIndex = 0;\n\n       for (list<double*>::iterator iter = stateList.begin(); iter != stateList.end(); iter++) {\n        \n        double* stateRef = *iter;\n        opttraj->states[stateIndex].x = stateRef[0];\n        opttraj->states[stateIndex].y = stateRef[1];\n        if(pindex>0){\n\n            if(path[pindex-1][0]!=stateRef[0]){\n            path[pindex][0]=stateRef[0];\n            path[pindex][1]=stateRef[1];\n            pindex++;\n            }\n        }\n        else{\n        path[pindex][0]=stateRef[0];\n        path[pindex][1]=stateRef[1];\n        pindex++;\n        }\n\n        if (system.getNumDimensions() > 2)\n            opttraj->states[stateIndex].z = stateRef[2];\n        else\n            opttraj->states[stateIndex].z = 0.0;\n        \n\n        delete [] stateRef;\n        \n        stateIndex++;\n    }\n  \tstring Result;          // string which will contain the result\n    ostringstream convert;   // stream used for the conversion\n    convert << num;      // insert the textual representation of 'Number' in the characters 
in the stream\n    Result =convert.str(); // set 'Result' to the contents of the stream\n\n  \tofstream out((\"env/e\"+fod+\"/path\"+Result+\".dat\").c_str(), ios::out | ios::binary);\n \tif(!out) {\n      cout << \"Cannot open file.\";\n                return 1;\n               }\n\n    out.write((char *) &path, sizeof path);\n  \tout.close();\n    \n    lcmtypes_trajectory_t_publish (lcm, \"TRAJECTORY\", opttraj);\n    \n    lcmtypes_trajectory_t_destroy (opttraj);\n    \n    cout << \"Publishing trajectory -- end\" << endl;\n    \n    \n    \n    return 1;\n}\n\n\nint publishTree (lcm_t *lcm, planner_t& planner, System& system) {\n    \n    \n    cout << \"Publishing the tree -- start\" << endl;\n    \n    bool plot3d = (system.getNumDimensions() > 2);\n    \n    lcmtypes_graph_t *graph = (lcmtypes_graph_t *) malloc (sizeof (lcmtypes_graph_t));\n    graph->num_vertices = planner.numVertices; \n    cout<<\"num_Vertices: \"<< graph->num_vertices<< endl;\n    \n    if (graph->num_vertices > 0) {    \n        \n        graph->vertices = (lcmtypes_vertex_t *) malloc (graph->num_vertices * sizeof(lcmtypes_vertex_t));\n        \n        int vertexIndex = 0;\n        for (list<vertex_t*>::iterator iter = planner.listVertices.begin(); iter != planner.listVertices.end(); iter++) {\n            \n            \n            vertex_t &vertexCurr = **iter;\n            State &stateCurr = vertexCurr.getState ();\n            \n            graph->vertices[vertexIndex].state.x = stateCurr[0];\n            graph->vertices[vertexIndex].state.y = stateCurr[1];\n            if (plot3d){ \n                graph->vertices[vertexIndex].state.z = stateCurr[2];\n            }\n            else \n                graph->vertices[vertexIndex].state.z = 0.0;\n            \n            vertexIndex++;\n            \n        }\n    }\n    else {\n        graph->vertices = NULL;\n    }\n    \n    if (graph->num_vertices > 1) {\n        \n        graph->num_edges = graph->num_vertices - 1;\n       
 graph->edges = (lcmtypes_edge_t *) malloc (graph->num_edges * sizeof(lcmtypes_edge_t));\n        \n        \n        int edgeIndex = 0;\n        for (list<vertex_t*>::iterator iter = planner.listVertices.begin(); iter != planner.listVertices.end(); iter++) {\n            \n            vertex_t &vertexCurr = **iter;\n            \n            vertex_t &vertexParent = vertexCurr.getParent();\n            \n            if ( &vertexParent == NULL ) \n                continue;\n            \n            State &stateCurr = vertexCurr.getState ();\n            State &stateParent = vertexParent.getState();\n            \n            \n            graph->edges[edgeIndex].vertex_src.state.x = stateParent[0];\n            graph->edges[edgeIndex].vertex_src.state.y = stateParent[1];\n            if (plot3d)\n                graph->edges[edgeIndex].vertex_src.state.z = stateParent[2];\n            else \n                graph->edges[edgeIndex].vertex_src.state.z = 0.0;\n            \n            \n            graph->edges[edgeIndex].vertex_dst.state.x = stateCurr[0];\n            graph->edges[edgeIndex].vertex_dst.state.y = stateCurr[1];\n            if (plot3d)\n                graph->edges[edgeIndex].vertex_dst.state.z = stateCurr[2];\n            else \n                graph->edges[edgeIndex].vertex_dst.state.z = 0.0;\n            \n            graph->edges[edgeIndex].trajectory.num_states = 0;\n            graph->edges[edgeIndex].trajectory.states = NULL;\n            \n            edgeIndex++;\n        }\n        \n    }\n    else {\n        graph->num_edges = 0;\n        graph->edges = NULL;\n    }\n    \n    lcmtypes_graph_t_publish (lcm, \"GRAPH\", graph);\n    \n    lcmtypes_graph_t_destroy (graph);\n    \n    cout << \"Publishing the tree -- end\" << endl;\n    \n    return 1;\n}\n\n\n\n"
  },
  {
    "path": "data_generation/rrtstar/src/system.h",
    "content": "/*! \n * \\file system.h \n *\n * This serves as a template to start a new system file. \n * It should not be included as is. \n */ \n\n#ifndef __RRTS_SYSTEM_H_\n#define __RRTS_SYSTEM_H_\n\n#include  <list>\n\n\n\n/*!\n * \\brief State Class.\n *\n * A more elaborate description of the State class\n */\nclass State {\n    \npublic:\n    \n    \n    /*!\n     * \\brief State assingment operator.\n     *\n     * A more elaborate description of the State assignment operator\n     */\n    State& operator= (const State &stateIn);\n    \n    /*!\n     * \\brief State bracket operator.\n     *\n     * A more elaborate description of the State bracket operator\n     */\n    double& operator[] (const int i);\n};\n\n\n/*!\n * \\brief Trajectory Class.\n *\n * A more elaborate description of the Trajectory class\n */\nclass Trajectory {\n    \npublic:\n    \n    \n    /*!\n     * \\brief Trajecotory assignment operator.\n     *\n     * A more elaborate description.\n     */\n    Trajectory& operator= (const Trajectory &trajectoryIn);\n    \n    /*!\n     * \\brief Returns a reference to the end state of this trajectory.\n     *\n     * A more elaborate description.\n     */\n    State& getEndState ();\n    \n    /*!\n     * \\brief Returns a reference to the end state of this trajectory (constant).\n     *\n     * A more elaborate description.\n     */\n    State& getEndState () const;\n    \n    \n    /*!\n     * \\brief Returns the cost of this trajectory.\n     *\n     * A more elaborate description.\n     */\n    double evaluateCost ();\n};\n\n\n/*!\n * \\brief System Class.\n *\n * A more elaborate description of the System class\n */\nclass System {\n    \npublic:\n    \n    /*!\n     * \\brief Returns the dimensionality of the Euclidean space.\n     *\n     * A more elaborate description.\n     */\n    int getNumDimensions ();\n    \n    /*!\n     * \\brief Returns a reference to the root state.\n     *\n     * A more elaborate description.\n     */\n   
 State & getRootState ();\n    \n    /*!\n     * \\brief Returns the statekey for the given state.\n     *\n     * A more elaborate description.\n     *\n     * \\param stateIn the given state\n     * \\param stateKey the key to the state. An array of dimension getNumDimensions()\n     *\n     */\n    int getStateKey (State& stateIn, double* stateKey);\n    \n    /*!\n     * \\brief Returns true of the given state reaches the target.\n     *\n     * A more elaborate description.\n     */\n    bool isReachingTarget (State& stateIn);\n    \n    /*!\n     * \\brief Returns a sample state.\n     *\n     * A more elaborate description.\n     *\n     * \\param randomStateOut\n     *\n     */\n    int sampleState (State& randomStateOut, double (&node)[2],double px, double py);\n    \n    \n    /*!\n     * \\brief Returns a the cost of the trajectory that connects stateFromIn and\n     *        stateTowardsIn. The trajectory is also returned in trajectoryOut.\n     *\n     * A more elaborate description.\n     * \n     * \\param stateFromIn Initial state\n     * \\param stateTowardsIn Final state\n     * \\param trajectoryOut Trajectory that starts the from the initial state and \n     *                      reaches near the final state.\n     * \\param exactConnectionOut Set to true if the initial and the final states\n     *                           can be connected exactly.\n     *\n     */\n    int extendTo (State& stateFromIn, State& stateTowardsIn, \n                          Trajectory& trajectoryOut, bool& exactConnectionOut);\n    \n    /*!\n     * \\brief Returns the cost of the trajectory that connects stateFromIn and StateTowardsIn.\n     *\n     * A more elaborate description.\n     *\n     * \\param stateFromIn Initial state\n     * \\param stateTowardsIn Final state\n     * \\param exactConnectionOut Set to true if the initial and the final states\n     *                           can be connected exactly.\n     *\n     */\n    double evaluateExtensionCost 
(State& stateFromIn, State& stateTowardsIn, bool& exactConnectionOut);\n    \n    /*!\n     * \\brief Returns a lower bound on the cost to go starting from stateIn\n     *\n     * A more elaborate description.\n     *\n     * \\param stateIn Starting state\n     *\n     */\n    double evaluateCostToGo (State& stateIn);\n    \n    /*!\n     * \\brief Returns the trajectory as a list of double arrays, each with dimension getNumDimensions.\n     *\n     * A more elaborate description.\n     *\n     * \\param stateFromIn Initial state\n     * \\param stateToIn Final state\n     * \\param trajectoryOut The list of double arrays that represent the trajectory\n     *\n     */\n    int getTrajectory (State& stateFromIn, State& stateToIn, std::list< double* > & trajectoryOut);\n\n\t// p-rrt*\n    int get_pfstates (State& state);\n};\n\n#endif\n"
  },
  {
    "path": "data_generation/rrtstar/src/system_single_integrator.cpp",
    "content": "#include \"system_single_integrator.h\"\n#include <cmath>\n#include <cstdlib>\n\n#include <iostream>\n\nusing namespace std;\nusing namespace SingleIntegrator;\n\n#define DISCRETIZATION_STEP 0.01\nint s=0;\nregion::region () {\n    \n    numDimensions = 0;\n    \n    center = NULL;\n    size = NULL;\n    radius=0;\n}\n\n\nregion::~region () {\n    \n    if (center)\n        delete [] center;\n    if (size)\n        delete [] size;\n    \n}\n\n\nint region::setNumDimensions (int numDimensionsIn) {\n    \n    numDimensions = numDimensionsIn;\n    \n    if (center)\n        delete [] center;\n    center = new double[numDimensions];\n    \n    if (size)\n        delete [] size;\n    size = new double[numDimensions];\n    \n    return 1;\n    \n}\n\n\nState::State () {\n    \n    numDimensions = 0;\n\n    x = NULL;\n}\n\n\nState::~State () {\n    \n    if (x)\n        delete [] x;\n}\n\n\nState::State (const State &stateIn) {\n    \n    numDimensions = stateIn.numDimensions;\n    \n    if (numDimensions > 0) {\n        x = new double[numDimensions];\n        \n        for (int i = 0; i < numDimensions; i++) \n            x[i] = stateIn.x[i];\n    }\n    else {\n        x = NULL;\n    }\n}\n\n\nState& State::operator=(const State &stateIn){\n    \n    if (this == &stateIn)\n        return *this;\n    \n    if (numDimensions != stateIn.numDimensions) {\n        if (x) \n            delete [] x;\n        numDimensions = stateIn.numDimensions;\n        if (numDimensions > 0)\n            x = new double[numDimensions];\n    }\n    \n    for (int i = 0; i < numDimensions; i++) \n        x[i] = stateIn.x[i];\n    \n    return *this;\n}\n\n\nint State::setNumDimensions (int numDimensionsIn) {\n    \n    if (x)\n        delete [] x;\n    \n    if (numDimensions < 0)\n        return 0;\n    \n    numDimensions = numDimensionsIn;\n    \n    if (numDimensions > 0)\n        x = new double[numDimensions];\n    \n    return 1;\n}\n\n\nTrajectory::Trajectory () {\n    
\n    endState = NULL;\n}\n\n\nTrajectory::~Trajectory () {\n    \n    if (endState)\n        delete endState;\n}\n\n\nTrajectory::Trajectory (const Trajectory &trajectoryIn) {\n    \n    endState = new State (trajectoryIn.getEndState()); \n\n\n}\n\n\nTrajectory& Trajectory::operator=(const Trajectory &trajectoryIn) {\n    \n    if (this == &trajectoryIn)\n        return *this;\n    \n    if (endState)\n        delete endState;\n    \n    \n    endState = new State (trajectoryIn.getEndState());\n    \n    totalVariation = trajectoryIn.totalVariation;\n    \n    return *this;\n}\n\n\ndouble Trajectory::evaluateCost () {\n    \n    return totalVariation;\n}\n\n\nSystem::System () {\n    \n    numDimensions = 0;\n}\n\n\nSystem::~System () {\n    \n}\n\n\nint System::setNumDimensions (int numDimensionsIn) {\n    \n    if (numDimensions < 0)\n        return 0;\n    \n    numDimensions = numDimensionsIn;\n    \n    rootState.setNumDimensions (numDimensions);\n    \n    return 1;\n}\n\n\nint System::getStateKey (State& stateIn, double* stateKey) {\n    \n    for (int i = 0; i < numDimensions; i++){\n    \n    \tstateKey[i] =  stateIn.x[i] / regionOperating.size[i];\n\n    }\n    return 1;\n}\n\n/***********************************************My new addition****************************************************/\n\n\n// Implementation of Pontential function-based sampling heuristic (https://link.springer.com/article/10.1007/s10514-015-9518-0)\nint System::pfstates(State& rstout){\n    \n   int k = 50; \n   double lamda = 0.1; //lamda step size\n   double prev_state[numDimensions];\n   for(int j=0;j<numDimensions;j++)\n     prev_state[j]=rstout.x[j];\n      \n   for(int n = 0; n<k; n++)\n    {\n \n      if(IsInCollision(rstout.x))\n\t{\n            for(int j=0;j<numDimensions;j++)\n                rstout.x[j]=prev_state[j];\n\t  return 1;\n\n\t }\n\n\tfor(int j = 0; j<numDimensions; j++)\n \t {\n\t\t//Find out the direction of goal w.r.t sample or vice versa\t\n\t 
if(rstout.x[j] - regionGoal.center[j] > 0) \n          {\t//as Potential is zero in goal region\n\t\t  if(isReachingTarget(rstout) == false)\n\t\t    {\t \t\n                          prev_state[j]=rstout.x[j];\n\t\t\t  rstout.x[j] = rstout.x[j] - lamda;\n\t  \t    }\n\t  }else if(rstout.x[j] - regionGoal.center[j] < 0) \n\t\t{   //as Potential is zero in goal region\n\t\t    if(isReachingTarget(rstout) == false) \n\t\t      {   \n                           prev_state[j]=rstout.x[j];\n                           rstout.x[j] = rstout.x[j] + lamda;\n\t\t      }\n\n\t\t}\t\t\n\t\n\t }\n\t\n       }\n\n   return 1;\n    \n}\n\n\n\nbool System::isReachingTarget (State &stateIn) {\n    \n    \n    for (int i = 0; i < numDimensions; i++) {\n        \n        if (fabs(stateIn.x[i] - regionGoal.center[i]) > regionGoal.size[i]/2.0 ) \n            return false;\n    }\n    \n    return true;\n}\n\n\nbool System::IsInCollision (double *stateIn) {\n\n\t for (list<region*>::iterator iter = obstacles.begin(); iter != obstacles.end(); iter++) {\n\n\t        region *obstacleCurr = *iter;\n\t        bool collisionFound = true;\n\n\n\n\t       for (int i = 0; i < numDimensions; i++)\n\t            if (fabs(obstacleCurr->center[i] - stateIn[i]) > obstacleCurr->size[i]/2.0 ) {\n\t                collisionFound = false;\n\n\t                break;\n\n\t            }\n\n\t        if (collisionFound) {\n\n\t        \treturn true;\n\t        }\n\n\t    }\n\n\t    return false;\n}\n\n\nint System::sampleState (State &randomStateOut, double (&node)[2], double px, double py) {\n   \n\n\n\trandomStateOut.setNumDimensions (numDimensions);\n\tif (px==-1){\n\t\t\n\t\t\n\t\t\n\t\tfor (int i = 0; i < numDimensions; i++) {\n\t\t    \n\t\t    randomStateOut.x[i] = (double)rand()/(RAND_MAX + 1.0)*regionOperating.size[i] \n\t\t    - regionOperating.size[i]/2.0 + regionOperating.center[i];\n\n\t\t}\n\t}\n\telse{\n\t\t\n\t\trandomStateOut.x[0]=px;\n\t\trandomStateOut.x[1]=py;\n\t\t\n\t}\n\t   \n\t\t\n\t   
\n\tif (IsInCollision (randomStateOut.x)){\n\t\tnode[0]= 0;\n\t\tnode[1]=0;\n\t\treturn 0;\n\t\t     \n\t }\n\t \n\t// node contain sample from obstacle-free space (see rrts_main.cpp for random obstacle-free nodes generation)   \n\t node[0]= randomStateOut.x[0];\n\t node[1]=randomStateOut.x[1]; \n\t// comment it if you would like to use simple rrtstar.\n\t pfstates(randomStateOut);\n\t\t\n\t if (IsInCollision (randomStateOut.x))\n\t\t      return 0;\n\t return 1;\n}\n\n\nint System::steerTo (State &stateFromIn, State &stateTowardsIn) {\n\n    double *dists = new double[numDimensions];\n    for (int i = 0; i < numDimensions; i++)\n        dists[i] = stateTowardsIn.x[i] - stateFromIn.x[i];\n\n    double distTotal = 0.0;\n    for (int i = 0; i < numDimensions; i++)\n        distTotal += dists[i]*dists[i];\n    distTotal = sqrt (distTotal);\n\n    double incrementTotal = distTotal/DISCRETIZATION_STEP;\n\n    // normalize the distance according to the disretization step\n    for (int i = 0; i < numDimensions; i++)\n        dists[i] /= incrementTotal;\n\n    int numSegments = (int)floor(incrementTotal);\n\n    double *stateCurr = new double[numDimensions];\n    for (int i = 0; i < numDimensions; i++)\n        stateCurr[i] = stateFromIn.x[i];\n\n    for (int i = 0; i < numSegments; i++) {\n\n        if (IsInCollision (stateCurr))\n            return 0;\n\n\n        for (int i = 0; i < numDimensions; i++)\n            stateCurr[i] += dists[i];\n    }\n\n    if (IsInCollision (stateTowardsIn.x))\n        return 0;\n\n    delete [] dists;\n    delete [] stateCurr;\n\n\n    return 1;\n}\n\nint System::extendTo (State &stateFromIn, State &stateTowardsIn, Trajectory &trajectoryOut, bool &exactConnectionOut) {\n    \n    double *dists = new double[numDimensions];\n    for (int i = 0; i < numDimensions; i++) \n        dists[i] = stateTowardsIn.x[i] - stateFromIn.x[i];\n    \n    double distTotal = 0.0;\n    for (int i = 0; i < numDimensions; i++) \n        distTotal += 
dists[i]*dists[i];\n    distTotal = sqrt (distTotal);\n    \n    double incrementTotal = distTotal/DISCRETIZATION_STEP;\n    \n    // normalize the distance according to the disretization step\n    for (int i = 0; i < numDimensions; i++)\n        dists[i] /= incrementTotal;\n    \n    int numSegments = (int)floor(incrementTotal);\n    \n    double *stateCurr = new double[numDimensions];\n    for (int i = 0; i < numDimensions; i++) \n        stateCurr[i] = stateFromIn.x[i];\n\n    for (int i = 0; i < numSegments; i++) {\n\n        if (IsInCollision (stateCurr))  \n            return 0;\n\n\n        for (int i = 0; i < numDimensions; i++)\n            stateCurr[i] += dists[i];\n    }\n\n    if (IsInCollision (stateTowardsIn.x))\n        return 0;\n    \n    trajectoryOut.endState = new State (stateTowardsIn);\n    trajectoryOut.totalVariation = distTotal;\n    \n    delete [] dists;\n    delete [] stateCurr;\n    \n    exactConnectionOut = true;\n    \n    return 1;\n}\n\ndouble System::evaluateExtensionCost (State& stateFromIn, State& stateTowardsIn, bool &exactConnectionOut) {\n    \n    \n    exactConnectionOut = true;\n\n    double distTotal = 0.0;\n    for (int i = 0; i < numDimensions; i++) {\n        double distCurr = stateTowardsIn.x[i] - stateFromIn.x[i];\n        distTotal += distCurr*distCurr;\n    }\n\n    return sqrt(distTotal);\n\n}\n\n\nint System::getTrajectory (State& stateFromIn, State& stateToIn, list<double*>& trajectoryOut) {\n    \n    double *stateArr = new double[numDimensions];\n    for (int i = 0; i < numDimensions; i++)\n        stateArr[i] = stateToIn[i];\n    trajectoryOut.push_front (stateArr);\n    return 1;\n    \n}\n\n\ndouble System::evaluateCostToGo (State& stateIn) {\n    \n    double radius = 0.0;\n    for (int i = 0; i < numDimensions; i++) \n        radius += regionGoal.size[i] * regionGoal.size[i];\n    radius = sqrt(radius);\n    \n    double dist = 0.0;\n    for (int i = 0; i < numDimensions; i++) \n        dist += 
(stateIn[i] - regionGoal.center[i])*(stateIn[i] - regionGoal.center[i]);\n    dist = sqrt(dist);\n    \n    return dist - radius;\n}\n"
  },
  {
    "path": "data_generation/rrtstar/src/system_single_integrator.h",
    "content": "/*! \n * \\file system_single_integrator.h \n */ \n\n#ifndef __RRTS_SYSTEM_SINGLE_INTEGRATOR_H_\n#define __RRTS_SYSTEM_SINGLE_INTEGRATOR_H_\n\n#include <list>\n\n\n\nnamespace SingleIntegrator {\n\n    \n    /*!\n     * \\brief region class\n     *\n     * More elaborate description\n     */\n    class region {\n        \n        int numDimensions;\n        \n    public:    \n        \n        /*!\n         * \\brief Cartesian coordinates of the center of the region\n         *\n         * More elaborate description\n         */\n        double *center;\n        \n        /*!\n         * \\brief Size of the region in cartesian coordinates\n         *\n         * More elaborate description\n         */\n        double *size;\n        double radius;\n        /*!\n         * \\brief region constructor\n         *\n         * More elaborate description\n         */\n        region ();\n        \n        /*!\n         * \\brief region destructor\n         *\n         * More elaborate description\n         */\n        ~region ();\n        \n        /*!\n         * \\brief Sets the dimensionality of the region\n         *\n         * More elaborate description\n         *\n         * \\param numDimensionsIn New number of dimensions.\n         *\n         */\n        int setNumDimensions (int numDimensionsIn);\n    };\n    \n\n    \n    /*!\n     * \\brief State Class.\n     *\n     * A more elaborate description of the State class\n     */\n    class State {\n        \n        int numDimensions;\n        double *x;\n\n        int setNumDimensions (int numDimensions);\n        \n    public:\n\n        /*!\n         * \\brief State constructor\n         *\n         * More elaborate description\n         */\n        State ();\n        \n        /*!\n         * \\brief State desctructor\n         *\n         * More elaborate description\n         */\n        ~State ();\n        \n        /*!\n         * \\brief State copy constructor\n         *\n         * 
More elaborate description\n         */\n        State (const State& stateIn);\n        \n        /*!\n         * \\brief State assignment operator\n         *\n         * More elaborate description\n         */\n        State& operator= (const State& stateIn);\n        \n        /*!\n         * \\brief State bracket operator\n         *\n         * More elaborate description\n         */\n        double& operator[] (const int i) {return x[i];}\n        \n        friend class System;\n        friend class Trajectory;\n    };\n    \n    \n    \n    /*!\n     * \\brief Trajectory Class.\n     *\n     * A more elaborate description of the State class\n     */\n    class Trajectory {\n        \n        State *endState; \n        double totalVariation;  \n        \n    public:    \n\n        /*!\n         * \\brief Trajectory constructor\n         *\n         * More elaborate description\n         */\n        Trajectory ();\n        \n        /*!\n         * \\brief Trajectory destructor\n         *\n         * More elaborate description\n         */\n        ~Trajectory ();\n        \n        /*!\n         * \\brief Trajectory copy constructor\n         *\n         * More elaborate description\n         *\n         * \\param trajectoryIn The trajectory to be copied.\n         *\n         */\n        Trajectory (const Trajectory& trajectoryIn);\n        \n        /*!\n         * \\brief Trajectory assignment constructor\n         *\n         * More elaborate description\n         *\n         * \\param trajectoryIn the trajectory to be copied.\n         *\n         */\n        Trajectory& operator= (const Trajectory& trajectoryIn);\n        \n        /*!\n         * \\brief Returns a reference to the end state of this trajectory.\n         *\n         * More elaborate description\n         */\n        State& getEndState () {return *endState;}\n        \n        /*!\n         * \\brief Returns a reference to the end state of this trajectory (constant).\n         *\n       
  * More elaborate description\n         */\n        State& getEndState () const {return *endState;}\n        \n        /*!\n         * \\brief Returns the cost of this trajectory.\n         *\n         * More elaborate description\n         */\n        double evaluateCost ();\n        \n        friend class System;\n    };\n    \n    \n    \n    /*!\n     * \\brief System Class.\n     *\n     * A more elaborate description of the State class\n     */\n    class System {\n        \n        int numDimensions;\n        bool IsInCollision (double *stateIn);\n        \n        State rootState;\n        \n    public:    \n        \n        /*!\n         * \\brief The operating region\n         *\n         * More elaborate description\n         */\n        region regionOperating;\n        \n        /*!\n         * \\brief The goal region\n         *\n         * More elaborate description\n         */\n        region regionGoal;\n        \n        /*!\n         * \\brief The list of all obstacles\n         *\n         * More elaborate description\n         */\n        std::list<region*> obstacles;\n        \n        /*!\n         * \\brief System constructor\n         *\n         * More elaborate description\n         */\n        System ();\n        \n        /*!\n         * \\brief System destructor\n         *\n         * More elaborate description\n         */\n        ~System ();\n        \n        int setNumDimensions (int numDimensionsIn);\n        \n        /*!\n         * \\brief Returns the dimensionality of the Euclidean space.\n         *\n         * A more elaborate description.\n         */\n        int getNumDimensions () {return numDimensions;}\n        \n        /*!\n         * \\brief Returns a reference to the root state.\n         *\n         * A more elaborate description.\n         */\n        State& getRootState () {return rootState;}\n        \n        /*!\n         * \\brief Returns the statekey for the given state.\n         *\n         * A more 
elaborate description.\n         *\n         * \\param stateIn the given state\n         * \\param stateKey the key to the state. An array of dimension getNumDimensions()\n         *\n         */\n        int getStateKey (State &stateIn, double *stateKey);\n        \n        /*!\n         * \\brief Returns true of the given state reaches the target.\n         *\n         * A more elaborate description.\n         */\n        bool isReachingTarget (State &stateIn);\n        \n        /*!\n         * \\brief Returns a sample state.\n         *\n         * A more elaborate description.\n         *\n         * \\param randomStateOut\n         *\n         */\n        int sampleState (State &randomStateOut, double (&node)[2], double px, double py); \n       \n\n        \n        /*!\n         * \\brief Returns a the cost of the trajectory that connects stateFromIn and\n         *        stateTowardsIn. The trajectory is also returned in trajectoryOut.\n         *\n         * A more elaborate description.\n         * \n         * \\param stateFromIn Initial state\n         * \\param stateTowardsIn Final state\n         * \\param trajectoryOut Trajectory that starts the from the initial state and \n         *                      reaches near the final state.\n         * \\param exactConnectionOut Set to true if the initial and the final states\n         *                           can be connected exactly.\n         *\n         */\n        int extendTo (State &stateFromIn, State &stateTowardsIn, \n                      Trajectory &trajectoryOut, bool &exactConnectionOut); \n        int steerTo (State &stateFromIn, State &stateTowardsIn);\n        int optimization (double* stateA, double* stateC);\n        /*!\n         * \\brief Returns the cost of the trajectory that connects stateFromIn and StateTowardsIn.\n         *\n         * A more elaborate description.\n         *\n         * \\param stateFromIn Initial state\n         * \\param stateTowardsIn Final state\n        
 * \\param exactConnectionOut Set to true if the initial and the final states\n         *                           can be connected exactly.\n         *\n         */\n        double evaluateExtensionCost (State &stateFromIn, State &stateTowardsIn, bool &exactConnectionOut);\n        \n        /*!\n         * \\brief Returns a lower bound on the cost to go starting from stateIn\n         *\n         * A more elaborate description.\n         *\n         * \\param stateIn Starting state\n         *\n         */\n        double evaluateCostToGo (State& stateIn);\n        \n        /*!\n         * \\brief Returns the trajectory as a list of double arrays, each with dimension getNumDimensions.\n         *\n         * A more elaborate description.\n         *\n         * \\param stateFromIn Initial state\n         * \\param stateToIn Final state\n         * \\param trajectoryOut The list of double arrays that represent the trajectory\n         *\n         */\n        int getTrajectory (State& stateFromIn, State& stateToIn, std::list<double*>& trajectoryOut);\n        \n\n\t\t// p-rrt*\n        int pfstates (State& state);\n    };\n}\n\n\n#endif\n"
  },
  {
    "path": "data_generation/tobuild.txt",
    "content": "# list of collections to build, one on each line.  Empty lines\n# and lines starting with '#' are ignored\nlcmtypes\nviewer\nrrtstar\n"
  },
  {
    "path": "data_generation/viewer/CMakeLists.txt",
    "content": "cmake_minimum_required(VERSION 2.6.0)\n\n# pull in the pods macros. See cmake/pods.cmake for documentation\nset(POD_NAME viewer)\ninclude(cmake/pods.cmake)\n\nfind_package(PkgConfig REQUIRED)\nfind_package(OpenGL REQUIRED)\n\nlist(APPEND OPENGL_LIBRARIES GL )\nset(GLUT_CFLAGS \"\")\nset(GLUT_LIBRARIES -lglut)\nset(ZLIB_LIBRARIES -lz)\n\npkg_check_modules(LCM REQUIRED lcm)\npkg_check_modules(GTK2 REQUIRED gtk+-2.0)\npkg_check_modules(BOT2_VIS REQUIRED bot2-vis)\n\n\n\n#tell cmake to build these subdirectories\nadd_subdirectory(src/renderers)\nadd_subdirectory(src)\n"
  },
  {
    "path": "data_generation/viewer/Makefile",
    "content": "# Default makefile distributed with pods version: 11.03.11\n\ndefault_target: all\n\n# Default to a less-verbose build.  If you want all the gory compiler output,\n# run \"make VERBOSE=1\"\n$(VERBOSE).SILENT:\n\n# Figure out where to build the software.\n#   Use BUILD_PREFIX if it was passed in.\n#   If not, search up to four parent directories for a 'build' directory.\n#   Otherwise, use ./build.\nifeq \"$(BUILD_PREFIX)\" \"\"\nBUILD_PREFIX:=$(shell for pfx in ./ .. ../.. ../../.. ../../../..; do d=`pwd`/$$pfx/build;\\\n               if [ -d $$d ]; then echo $$d; exit 0; fi; done; echo `pwd`/build)\nendif\n# create the build directory if needed, and normalize its path name\nBUILD_PREFIX:=$(shell mkdir -p $(BUILD_PREFIX) && cd $(BUILD_PREFIX) && echo `pwd`)\n\n# Default to a release build.  If you want to enable debugging flags, run\n# \"make BUILD_TYPE=Debug\"\nifeq \"$(BUILD_TYPE)\" \"\"\nBUILD_TYPE=\"Release\"\nendif\n\nall: pod-build/Makefile\n\t$(MAKE) -C pod-build all install\n\npod-build/Makefile:\n\t$(MAKE) configure\n\n.PHONY: configure\nconfigure:\n\t@echo \"\\nBUILD_PREFIX: $(BUILD_PREFIX)\\n\\n\"\n\n\t# create the temporary build directory if needed\n\t@mkdir -p pod-build\n\n\t# run CMake to generate and configure the build scripts\n\t@cd pod-build && cmake -DCMAKE_INSTALL_PREFIX=$(BUILD_PREFIX) \\\n\t\t   -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) ..\n\nclean:\n\t-if [ -e pod-build/install_manifest.txt ]; then rm -f `cat pod-build/install_manifest.txt`; fi\n\t-if [ -d pod-build ]; then $(MAKE) -C pod-build clean; rm -rf pod-build; fi\n\n# other (custom) targets are passed through to the cmake-generated Makefile \n%::\n\t$(MAKE) -C pod-build $@"
  },
  {
    "path": "data_generation/viewer/README",
    "content": "This is a default README file, please replace its contents with information\nrelevant to your project.\n\nThis software is constructed according to the Pods software policies and\ntemplates.  The policies and templates can be found at:\n\n  http://sourceforge.net/projects/pods\n\n====\n\nName:         rrtstar-planner\nMaintainers:  FILL-ME-IN\nSummary:      FILL-ME-IN\nDescription:\n  FILL-ME-IN\n\nRequirements: \n  FILL-ME-IN\n\nLicense:      FILL-ME-IN\n"
  },
  {
    "path": "data_generation/viewer/cmake/pods.cmake",
    "content": "# Macros to simplify compliance with the pods build policies.\n#\n# To enable the macros, add the following lines to CMakeLists.txt:\n#   set(POD_NAME <pod-name>)\n#   include(cmake/pods.cmake)\n#\n# If POD_NAME is not set, then the CMake source directory is used as POD_NAME\n#\n# Next, any of the following macros can be used.  See the individual macro\n# definitions in this file for individual documentation.\n#\n# C/C++\n#   pods_install_headers(...)\n#   pods_install_libraries(...)\n#   pods_install_executables(...)\n#   pods_install_pkg_config_file(...)\n#\n#   pods_use_pkg_config_packages(...)\n#\n# Python\n#   pods_install_python_packages(...)\n#   pods_install_python_script(...)\n#\n# Java\n#   None yet\n#\n# ----\n# File: pods.cmake\n# Distributed with pods version: 11.03.11\n\n# pods_install_headers(<header1.h> ... DESTINATION <subdir_name>)\n# \n# Install a (list) of header files.\n#\n# Header files will all be installed to include/<subdir_name>\n#\n# example:\n#   add_library(perception detector.h sensor.h)\n#   pods_install_headers(detector.h sensor.h DESTINATION perception)\n#\nfunction(pods_install_headers)\n    list(GET ARGV -2 checkword)\n    if(NOT checkword STREQUAL DESTINATION)\n        message(FATAL_ERROR \"pods_install_headers missing DESTINATION parameter\")\n    endif()\n\n    list(GET ARGV -1 dest_dir)\n    list(REMOVE_AT ARGV -1)\n    list(REMOVE_AT ARGV -1)\n    #copy the headers to the INCLUDE_OUTPUT_PATH (${CMAKE_BINARY_DIR}/include)\n    foreach(header ${ARGV})\n        get_filename_component(_header_name ${header} NAME)\n        configure_file(${header} ${INCLUDE_OUTPUT_PATH}/${dest_dir}/${_header_name} COPYONLY)\n\tendforeach(header)\n\t#mark them to be installed\n\tinstall(FILES ${ARGV} DESTINATION include/${dest_dir})\n\n\nendfunction(pods_install_headers)\n\n# pods_install_executables(<executable1> ...)\n#\n# Install a (list) of executables to bin/\nfunction(pods_install_executables)\n    install(TARGETS ${ARGV} 
RUNTIME DESTINATION bin)\nendfunction(pods_install_executables)\n\n# pods_install_libraries(<library1> ...)\n#\n# Install a (list) of libraries to lib/\nfunction(pods_install_libraries)\n    install(TARGETS ${ARGV} LIBRARY DESTINATION lib ARCHIVE DESTINATION lib)\nendfunction(pods_install_libraries)\n\n\n# pods_install_pkg_config_file(<package-name> \n#                              [VERSION <version>]\n#                              [DESCRIPTION <description>]\n#                              [CFLAGS <cflag> ...]\n#                              [LIBS <lflag> ...]\n#                              [REQUIRES <required-package-name> ...])\n# \n# Create and install a pkg-config .pc file.\n#\n# example:\n#    add_library(mylib mylib.c)\n#    pods_install_pkg_config_file(mylib LIBS -lmylib REQUIRES glib-2.0)\nfunction(pods_install_pkg_config_file)\n    list(GET ARGV 0 pc_name)\n    # TODO error check\n\n    set(pc_version 0.0.1)\n    set(pc_description ${pc_name})\n    set(pc_requires \"\")\n    set(pc_libs \"\")\n    set(pc_cflags \"\")\n    set(pc_fname \"${PKG_CONFIG_OUTPUT_PATH}/${pc_name}.pc\")\n    \n    set(modewords LIBS CFLAGS REQUIRES VERSION DESCRIPTION)\n    set(curmode \"\")\n\n    # parse function arguments and populate pkg-config parameters\n    list(REMOVE_AT ARGV 0)\n    foreach(word ${ARGV})\n        list(FIND modewords ${word} mode_index)\n        if(${mode_index} GREATER -1)\n            set(curmode ${word})\n        elseif(curmode STREQUAL LIBS)\n            set(pc_libs \"${pc_libs} ${word}\")\n        elseif(curmode STREQUAL CFLAGS)\n            set(pc_cflags \"${pc_cflags} ${word}\")\n        elseif(curmode STREQUAL REQUIRES)\n            set(pc_requires \"${pc_requires} ${word}\")\n        elseif(curmode STREQUAL VERSION)\n            set(pc_version ${word})\n            set(curmode \"\")\n        elseif(curmode STREQUAL DESCRIPTION)\n            set(pc_description \"${word}\")\n            set(curmode \"\")\n        else(${mode_index} GREATER -1)\n  
          message(\"WARNING incorrect use of pods_add_pkg_config (${word})\")\n            break()\n        endif(${mode_index} GREATER -1)\n    endforeach(word)\n\n    # write the .pc file out\n    file(WRITE ${pc_fname}\n        \"prefix=${CMAKE_INSTALL_PREFIX}\\n\"\n        \"exec_prefix=\\${prefix}\\n\"\n        \"libdir=\\${exec_prefix}/lib\\n\"\n        \"includedir=\\${prefix}/include\\n\"\n        \"\\n\"\n        \"Name: ${pc_name}\\n\"\n        \"Description: ${pc_description}\\n\"\n        \"Requires: ${pc_requires}\\n\"\n        \"Version: ${pc_version}\\n\"\n        \"Libs: -L\\${libdir} ${pc_libs}\\n\"\n        \"Cflags: -I\\${includedir} ${pc_cflags}\\n\")\n\n    # mark the .pc file for installation to the lib/pkgconfig directory\n    install(FILES ${pc_fname} DESTINATION lib/pkgconfig)\n    \n    # find targets that this pkg-config file depends on\n    if (pc_libs)\n        string(REPLACE \" \" \";\" split_lib ${pc_libs})\n        foreach(lib ${split_lib})\n            string(REGEX REPLACE \"^-l\" \"\" libname ${lib})\n            get_target_property(IS_TARGET ${libname} LOCATION)\n            if (NOT IS_TARGET STREQUAL \"IS_TARGET-NOTFOUND\")\n                set_property(GLOBAL APPEND PROPERTY \"PODS_PKG_CONFIG_TARGETS-${pc_name}\" ${libname})\n            endif() \n        endforeach()\n    endif()\n    \nendfunction(pods_install_pkg_config_file)\n\n\n# pods_install_python_script(<script_name> <python_module>)\n#\n# Create and install a script that invokes the python interpreter with a\n# specified module.\n#\n# A script will be installed to bin/<script_name>.  
The script simply\n# adds <install-prefix>/lib/pythonX.Y/site-packages to the python path, and\n# then invokes `python -m <python_module>`.\n#\n# example:\n#    pods_install_python_script(run-pdb pdb)\nfunction(pods_install_python_script script_name py_module)\n    find_package(PythonInterp REQUIRED)\n\n    # which python version?\n    execute_process(COMMAND \n        ${PYTHON_EXECUTABLE} -c \"import sys; sys.stdout.write(sys.version[:3])\"\n        OUTPUT_VARIABLE pyversion)\n\n    # where do we install .py files to?\n    set(python_install_dir \n        ${CMAKE_INSTALL_PREFIX}/lib/python${pyversion}/site-packages)\n\n    # write the script file\n    file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/${script_name} \"#!/bin/sh\\n\"\n        \"export PYTHONPATH=${python_install_dir}:\\${PYTHONPATH}\\n\"\n        \"exec ${PYTHON_EXECUTABLE} -m ${py_module} $*\\n\")\n\n    # install it...\n    install(PROGRAMS ${CMAKE_CURRENT_BINARY_DIR}/${script_name} DESTINATION bin)\nendfunction()\n\n# pods_install_python_packages(<src_dir>)\n#\n# Install python packages to lib/pythonX.Y/site-packages, where X.Y refers to\n# the current python version (e.g., 2.6)\n#\n# Recursively searches <src_dir> for .py files, byte-compiles them, and\n# installs them\nfunction(pods_install_python_packages py_src_dir)\n    find_package(PythonInterp REQUIRED)\n\n    # which python version?\n    execute_process(COMMAND \n        ${PYTHON_EXECUTABLE} -c \"import sys; sys.stdout.write(sys.version[:3])\"\n        OUTPUT_VARIABLE pyversion)\n\n    # where do we install .py files to?\n    set(python_install_dir \n        ${CMAKE_INSTALL_PREFIX}/lib/python${pyversion}/site-packages)\n\n    if(ARGC GREATER 1)\n        message(FATAL_ERROR \"NYI\")\n    else()\n        # get a list of all .py files\n        file(GLOB_RECURSE py_files RELATIVE ${py_src_dir} ${py_src_dir}/*.py)\n\n        # add rules for byte-compiling .py --> .pyc\n        foreach(py_file ${py_files})\n            get_filename_component(py_dirname 
${py_file} PATH)\n            add_custom_command(OUTPUT \"${py_src_dir}/${py_file}c\" \n                COMMAND ${PYTHON_EXECUTABLE} -m py_compile ${py_src_dir}/${py_file} \n                DEPENDS ${py_src_dir}/${py_file})\n            list(APPEND pyc_files \"${py_src_dir}/${py_file}c\")\n\n            # install python file and byte-compiled file\n            install(FILES ${py_src_dir}/${py_file} ${py_src_dir}/${py_file}c\n                DESTINATION \"${python_install_dir}/${py_dirname}\")\n#            message(\"${py_src_dir}/${py_file} -> ${python_install_dir}/${py_dirname}\")\n        endforeach()\n        string(REGEX REPLACE \"[^a-zA-Z0-9]\" \"_\" san_src_dir \"${py_src_dir}\")\n        add_custom_target(\"pyc_${san_src_dir}\" ALL DEPENDS ${pyc_files})\n    endif()\nendfunction()\n\n\n# pods_use_pkg_config_packages(<target> <package-name> ...)\n#\n# Convenience macro to get compiler and linker flags from pkg-config and apply them\n# to the specified target.\n#\n# Invokes `pkg-config --cflags-only-I <package-name> ...` and adds the result to the\n# include directories.\n#\n# Additionally, invokes `pkg-config --libs <package-name> ...` and adds the result to\n# the target's link flags (via target_link_libraries)\n#\n# example:\n#   add_executable(myprogram main.c)\n#   pods_use_pkg_config_packages(myprogram glib-2.0 opencv)\nmacro(pods_use_pkg_config_packages target)\n    if(${ARGC} LESS 2)\n        message(WARNING \"Useless invocation of pods_use_pkg_config_packages\")\n        return()\n    endif()\n    find_package(PkgConfig REQUIRED)\n    execute_process(COMMAND \n        ${PKG_CONFIG_EXECUTABLE} --cflags-only-I ${ARGN}\n        OUTPUT_VARIABLE _pods_pkg_include_flags)\n    string(STRIP ${_pods_pkg_include_flags} _pods_pkg_include_flags)\n    string(REPLACE \"-I\" \"\" _pods_pkg_include_flags \"${_pods_pkg_include_flags}\")\n\tseparate_arguments(_pods_pkg_include_flags)\n    #    message(\"include: ${_pods_pkg_include_flags}\")\n    
execute_process(COMMAND \n        ${PKG_CONFIG_EXECUTABLE} --libs ${ARGN}\n        OUTPUT_VARIABLE _pods_pkg_ldflags)\n    string(STRIP ${_pods_pkg_ldflags} _pods_pkg_ldflags)\n    #    message(\"ldflags: ${_pods_pkg_ldflags}\")\n    include_directories(${_pods_pkg_include_flags})\n    target_link_libraries(${target} ${_pods_pkg_ldflags})\n   \n    # make the target depend on libraries being installed by this source build\n    foreach(_pkg ${ARGN})\n        get_property(_has_dependencies GLOBAL PROPERTY \"PODS_PKG_CONFIG_TARGETS-${_pkg}\" SET)\n        if(_has_dependencies)\n            get_property(_dependencies GLOBAL PROPERTY \"PODS_PKG_CONFIG_TARGETS-${_pkg}\")\n            add_dependencies(${target} ${_dependencies})\n            #            message(\"Found dependencies for ${_pkg}: ${dependencies}\")\n        endif()\n        unset(_has_dependencies)\n        unset(_dependencies)\n    endforeach()\n\n    unset(_pods_pkg_include_flags)\n    unset(_pods_pkg_ldflags)\nendmacro()\n\n\n# pods_config_search_paths()\n#\n# Setup include, linker, and pkg-config paths according to the pods core\n# policy.  
This macro is automatically invoked, there is no need to do so\n# manually.\nmacro(pods_config_search_paths)\n    if(NOT DEFINED __pods_setup)\n\t\t#set where files should be output locally\n\t    set(LIBRARY_OUTPUT_PATH ${CMAKE_BINARY_DIR}/lib)\n\t    set(EXECUTABLE_OUTPUT_PATH ${CMAKE_BINARY_DIR}/bin)\n\t    set(INCLUDE_OUTPUT_PATH ${CMAKE_BINARY_DIR}/include)\n\t    set(PKG_CONFIG_OUTPUT_PATH ${CMAKE_BINARY_DIR}/lib/pkgconfig)\n\t\t\n\t\t#set where files should be installed to\n\t    set(LIBRARY_INSTALL_PATH ${CMAKE_INSTALL_PREFIX}/lib)\n\t    set(EXECUTABLE_INSTALL_PATH ${CMAKE_INSTALL_PREFIX}/bin)\n\t    set(INCLUDE_INSTALL_PATH ${CMAKE_INSTALL_PREFIX}/include)\n\t    set(PKG_CONFIG_INSTALL_PATH ${CMAKE_INSTALL_PREFIX}/lib/pkgconfig)\n\n\n        # add build/lib/pkgconfig to the pkg-config search path\n        set(ENV{PKG_CONFIG_PATH} ${PKG_CONFIG_INSTALL_PATH}:$ENV{PKG_CONFIG_PATH})\n        set(ENV{PKG_CONFIG_PATH} ${PKG_CONFIG_OUTPUT_PATH}:$ENV{PKG_CONFIG_PATH})\n\n        # add build/include to the compiler include path\n        include_directories(BEFORE ${INCLUDE_OUTPUT_PATH})\n        include_directories(${INCLUDE_INSTALL_PATH})\n\n        # add build/lib to the link path\n        link_directories(${LIBRARY_OUTPUT_PATH})\n        link_directories(${LIBRARY_INSTALL_PATH})\n        \n\n        # abuse RPATH\n        if(${CMAKE_INSTALL_RPATH})\n            set(CMAKE_INSTALL_RPATH ${LIBRARY_INSTALL_PATH}:${CMAKE_INSTALL_RPATH})\n        else(${CMAKE_INSTALL_RPATH})\n            set(CMAKE_INSTALL_RPATH ${LIBRARY_INSTALL_PATH})\n        endif(${CMAKE_INSTALL_RPATH})\n\n        # for osx, which uses \"install name\" path rather than rpath\n        #set(CMAKE_INSTALL_NAME_DIR ${LIBRARY_OUTPUT_PATH})\n        set(CMAKE_INSTALL_NAME_DIR ${CMAKE_INSTALL_RPATH})\n        \n        # hack to force cmake always create install and clean targets \n        install(FILES DESTINATION)\n        add_custom_target(tmp)\n\n        set(__pods_setup true)\n    endif(NOT DEFINED 
__pods_setup)\nendmacro(pods_config_search_paths)\n\nmacro(enforce_out_of_source)\n    if(CMAKE_BINARY_DIR STREQUAL PROJECT_SOURCE_DIR)\n      message(FATAL_ERROR \n      \"\\n\n      Do not run cmake directly in the pod directory. \n      use the supplied Makefile instead!  You now need to\n      remove CMakeCache.txt and the CMakeFiles directory.\n\n      Then to build, simply type: \n       $ make\n      \")\n    endif()\nendmacro(enforce_out_of_source)\n\n#set the variable POD_NAME to the directory path, and set the cmake PROJECT_NAME\nif(NOT POD_NAME)\n    get_filename_component(POD_NAME ${CMAKE_SOURCE_DIR} NAME)\n    message(STATUS \"POD_NAME is not set... Defaulting to directory name: ${POD_NAME}\") \nendif(NOT POD_NAME)\nproject(${POD_NAME})\n\n#make sure we're running an out-of-source build\nenforce_out_of_source()\n\n#call the function to setup paths\npods_config_search_paths()\n"
  },
  {
    "path": "data_generation/viewer/src/CMakeLists.txt",
    "content": "SET(ENV{PKG_CONFIG_PATH} \"$ENV{PKG_CONFIG_PATH}:/usr/local/lib/pkgconfig:/opt/local/lib/pkgconfig:/usr/local/share/pkgconfig\")\n\npods_install_pkg_config_file(viewer\n    CFLAGS\n    LIBS \n    REQUIRES ${REQUIRED_PACKAGES}\n    VERSION 0.0.1)\n\ninclude_directories(${PROJECT_SOURCE_DIR}/src\n    ${GTK2_INCLUDE_DIRS}\n    ${OPENGL_INCLUDE_DIR}\n    ${GLUT_INCLUDE_DIR}\n    ${LCM_INCLUDE_DIRS}\n    ${BOT2_VIS_INCLUDE_DIRS})\n\nadd_executable(viewer main_viewer.cpp)\n\npods_use_pkg_config_packages(viewer viewer \n\tbot2-core \n\tbot2-vis \n\tbot2-lcmgl-client\n\trenderers)\n\ntarget_link_libraries(viewer\n    ${GTK2_LDFLAGS}\n    ${OPENGL_LIBRARIES}\n    ${GLUT_LIBRARIES}\n    ${LCM_LDFLAGS}\n    ${BOT2_VIS_LDFLAGS})\n\npods_install_executables(viewer)\n"
  },
  {
    "path": "data_generation/viewer/src/main_viewer.cpp",
    "content": "#include <iostream>\n#include <ctime>\n\n//#include <gtk/gtk.h>\n\n#include <bot_core/bot_core.h>\n#include <bot_vis/bot_vis.h>\n\n#ifdef __APPLE__\n#include <OpenGL/gl.h>\n#else\n#include <GL/gl.h>\n#endif\n\n#include <renderers/graph_renderer.h>\n\nusing namespace std;\n\n\ntypedef struct {\n    BotViewer *viewer;\n    lcm_t *lcm;\n} viewer_app_t;\n\n\n\nint main(int argc, char *argv[])\n{\n    \n    gtk_init(&argc, &argv);\n    glutInit(&argc, argv);\n    g_thread_init(NULL);\n    \n    setlinebuf(stdout);\n    \n    viewer_app_t app;\n    memset(&app, 0, sizeof(app));\n    \n    \n    BotViewer *viewer = bot_viewer_new(\"Viewer\");\n    app.viewer = viewer;\n    app.lcm = lcm_create(NULL);\n    bot_glib_mainloop_attach_lcm(app.lcm);\n    \n    // setup renderers\n    //bot_viewer_add_stock_renderer(viewer, BOT_VIEWER_STOCK_RENDERER_GRID, 0); \n    add_graph_renderer_to_viewer (viewer, 1, app.lcm);\n\n    \n    // run the main loop\n    gtk_main();\n\n    // cleanup\n    bot_viewer_unref(viewer);    \n    \n    cout << \"RRTstar is alive\" << endl;\n    \n    return 1;\n}\n\n"
  },
  {
    "path": "data_generation/viewer/src/renderers/CMakeLists.txt",
    "content": "SET(ENV{PKG_CONFIG_PATH} \"$ENV{PKG_CONFIG_PATH}:/usr/local/lib/pkgconfig:/opt/local/lib/pkgconfig:/usr/local/share/pkgconfig\")\n\n\nadd_library(renderers SHARED \n            graph_renderer.cpp)\n\npods_use_pkg_config_packages(renderers bot2-vis lcmtypes)\n\npods_install_headers(graph_renderer.h DESTINATION renderers)\n\n# make the library public\npods_install_libraries(renderers)\n\ntarget_link_libraries(renderers\n    ${GTK2_LDFLAGS}\n    ${OPENGL_LIBRARIES}\n    ${GLUT_LIBRARIES}\n    ${LCM_LDFLAGS}\n    ${BOT2_VIS_LDFLAGS})\n\n# create a pkg-config file for the library, to make it easier for other\n# software to use.\npods_install_pkg_config_file(renderers\n    CFLAGS\n    LIBS -lrenderers\n    REQUIRES bot2-core bot2-vis lcmtypes\n    VERSION 0.0.1)\n"
  },
  {
    "path": "data_generation/viewer/src/renderers/graph_renderer.cpp",
    "content": "#include \"graph_renderer.h\"\n\n#include <lcmtypes/lcmtypes.h>\n\n#include <vector>\n#include <cmath>\n#include <sstream>\n#include <fstream>\n#define RENDERER_NAME \"Graph Visualizer\"\n\nusing namespace std;\n\n\nclass RendererGraph {\npublic:\n    BotRenderer renderer;\n    BotGtkParamWidget *pw;\n    BotViewer *viewer;\n    lcm_t * lcm;\n    \n    lcmtypes_graph_t *graph_last;\n    lcmtypes_environment_t *environment_last;\n   lcmtypes_trajectory_t *trajectory_last;\n    \n    double obstacle_opacity;\n};\n\n\n        \nstatic void graph_message_handler (const lcm_recv_buf_t *rbuf, const char *channel, const lcmtypes_graph_t *msg, void *user) {\n    \n    RendererGraph *self = (RendererGraph *) user;\n    \n    if (self->graph_last) \n        lcmtypes_graph_t_destroy (self->graph_last);\n    \n    self->graph_last = lcmtypes_graph_t_copy (msg);\n    \n    bot_viewer_request_redraw (self->viewer);\n}\n\n\nstatic void environment_message_handler (const lcm_recv_buf_t *rbuf, const char *channel, const lcmtypes_environment_t *msg, void *user) {\n    cout<<\"here\"<<endl;\n    RendererGraph *self = (RendererGraph *) user;\n    \n    if (self->environment_last) \n        lcmtypes_environment_t_destroy (self->environment_last);\n    \n    self->environment_last = lcmtypes_environment_t_copy (msg);\n    \n    bot_viewer_request_redraw (self->viewer);\n}\n\nstatic void trajectory_message_handler (const lcm_recv_buf_t *rbuf, const char *channel, const lcmtypes_trajectory_t *msg, void *user) {\n\n    RendererGraph *self = (RendererGraph *) user;\n\n    if (self->trajectory_last)\n        lcmtypes_trajectory_t_destroy (self->trajectory_last);\n\n    self->trajectory_last = lcmtypes_trajectory_t_copy (msg);\n\n    bot_viewer_request_redraw (self->viewer);\n}\n\n\nstatic void renderer_graph_draw(BotViewer *viewer, BotRenderer *renderer)\n{   \n    \n    RendererGraph *self = (RendererGraph*) renderer;\n\n    \n    glEnable(GL_DEPTH_TEST);\n    glEnable 
(GL_BLEND);\n    glEnable (GL_RESCALE_NORMAL);\n\n    glBlendFunc (GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);\n    glShadeModel (GL_SMOOTH);\n    glEnable (GL_LIGHTING);\n\n    float color_goal[] = { 0.0, 0.0, 1.0,1.0};\n    glMaterialfv (GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, color_goal);\n\tglPushMatrix ();\n\t\n\t// root/start state\n\t//glTranslated (self->environment_last->goal.center[0], self->environment_last->goal.center[1], self->environment_last->goal.center[2]);\n    glTranslated (0.0, -60.0, 0.0);\n    glRotatef (0.0, 0.0, 0.0, 1.0);\n\n    //glScalef (self->environment_last->goal.size[0], self->environment_last->goal.size[1], self->environment_last->goal.size[2]);\n\n    bot_gl_draw_disk (0.0);\n\n    glPopMatrix ();\n    glMaterialfv (GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, color_goal);\n\n\n    glPushMatrix ();\n\n\t// goal-state\n    //glTranslated (self->environment_last->goal.center[0], self->environment_last->goal.center[1], self->environment_last->goal.center[2]);\n    glTranslated (13.2117605209,-7.65872478485, 0.0);\n    glRotatef (0.0, 0.0, 0.0, 1.0);\n\n\n    bot_gl_draw_disk (0.0);\n\n    glPopMatrix ();\n            \n    if (self->trajectory_last)  // if there exist a path solution\n    {\n\n\t\t//Draw generated trajectory\n\n        for (int i = 0; i < self->trajectory_last->num_states-1; i++) {\n\n           \n\t\t    glLineWidth (4.0);\n\t\t    float color_edge[] ={1.0, 0.0, 0.0, 1.0};\n\t\t    glMaterialfv (GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, color_edge);\n\n\n\n\t\t    glBegin (GL_LINE_STRIP);\n\n\t\t    glVertex3f (self->trajectory_last->states[i].x,\n\t\t                self->trajectory_last->states[i].y,\n\t\t                self->trajectory_last->states[i].z);\n\t\t    glVertex3f (self->trajectory_last->states[i+1].x,\n\t\t                self->trajectory_last->states[i+1].y,\n\t\t                self->trajectory_last->states[i+1].z);\n\t\t    glEnd();\n         }\n              // Draw the graph\n         if 
(self->graph_last) {\n\n             // Draw the vertices\n\n             glPointSize(1.0);\n             float color_vertex[] = {0.1, 0.1, 0.8, 1.0};\n             glMaterialfv (GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, color_vertex);\n             glEnable (GL_POINT_SMOOTH);\n             glBegin (GL_POINTS);\n\n             for (int i = 0; i < self->graph_last->num_vertices; i++) {\n\n                 glVertex3f (self->graph_last->vertices[i].state.x,\n                             self->graph_last->vertices[i].state.y,\n                             self->graph_last->vertices[i].state.z);\n             }\n\n             glEnd();\n\n\n             // Draw the edges\n             for (int i = 0; i < self->graph_last->num_edges; i++) {\n\n                 glLineWidth (1.0); \n                 float color_edge[] = {0.8, 0.3, 0.3, 0.8};\n                 glMaterialfv (GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, color_edge);\n\n                 glBegin (GL_LINE_STRIP);\n\n                 glVertex3f (self->graph_last->edges[i].vertex_src.state.x,\n                             self->graph_last->edges[i].vertex_src.state.y,\n                             self->graph_last->edges[i].vertex_src.state.z);\n                 glVertex3f (self->graph_last->edges[i].vertex_dst.state.x,\n                             self->graph_last->edges[i].vertex_dst.state.y,\n                             self->graph_last->edges[i].vertex_dst.state.z);\n                 glEnd();\n             }\n\n           }\n    }\n\n    //Environments\n\n\n    //load obstacle_location file here                        \n\tdouble fnum[20][2];\n    ifstream in(\"obs.dat\", ios::in | ios::binary);\n    in.read((char *) &fnum, sizeof fnum);\n\n\t\n\t//load obstacle permutations here (see rrts_main for more detail on environment generation)\n    int perm[77520][7];\n    ifstream in2(\"obs_perm2.dat\", ios::in | ios::binary);\n    in2.read((char *) &perm, sizeof perm);                            \n\n\n    int i=0; 
//env_no\n\n\tfloat color_obstacles[] = { 0.37, 0.3, 0.3,((double)(self->obstacle_opacity))/100.0};\n\tglMaterialfv (GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, color_obstacles);\n \n\n    glMaterialfv (GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, color_obstacles);\n    glPushMatrix ();\n    glTranslated (fnum[perm[i][0]][0],fnum[perm[i][0]][1], 0);\n    glRotatef (0.0, 0.0, 0.0, 1.0);\n    glScalef (5.0,5.0,0.0);\n    bot_gl_draw_cube ();\n    glPopMatrix ();\n\n    glMaterialfv (GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, color_obstacles);\n    glPushMatrix ();\n    glTranslated (fnum[perm[i][1]][0],fnum[perm[i][1]][1], 0);\n    glRotatef (0.0, 0.0, 0.0, 1.0);\n    glScalef (5.0,5.0,0.0);\n    bot_gl_draw_cube ();\n    glPopMatrix ();\n\n    glMaterialfv (GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, color_obstacles);\n    glPushMatrix ();\n    glTranslated (fnum[perm[i][2]][0],fnum[perm[i][2]][1], 0);\n    glRotatef (0.0, 0.0, 0.0, 1.0);\n    glScalef (5.0,5.0,0.0);\n    bot_gl_draw_cube ();\n    glPopMatrix ();\n\n    glMaterialfv (GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, color_obstacles);\n    glPushMatrix ();\n    glTranslated (fnum[perm[i][3]][0],fnum[perm[i][3]][1], 0);\n    glRotatef (0.0, 0.0, 0.0, 1.0);\n    glScalef (5.0,5.0,0.0);\n    bot_gl_draw_cube ();\n    glPopMatrix ();\n\n\n    glMaterialfv (GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, color_obstacles);\n    glPushMatrix ();\n    glTranslated (fnum[perm[i][4]][0],fnum[perm[i][4]][1], 0);\n    glRotatef (0.0, 0.0, 0.0, 1.0);\n    glScalef (5.0,5.0,0.0);\n    bot_gl_draw_cube ();\n    glPopMatrix ();\n\n    glMaterialfv (GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, color_obstacles);\n    glPushMatrix ();\n    glTranslated (fnum[perm[i][5]][0],fnum[perm[i][5]][1], 0);\n    glRotatef (0.0, 0.0, 0.0, 1.0);\n    glScalef (5.0,5.0,0.0);\n    bot_gl_draw_cube ();\n    glPopMatrix ();\n                            \n    glMaterialfv (GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, color_obstacles);\n    glPushMatrix 
();\n    glTranslated (fnum[perm[i][6]][0],fnum[perm[i][6]][1], 0);\n    glRotatef (0.0, 0.0, 0.0, 1.0);\n    glScalef (5.0,5.0,0.0);\n    bot_gl_draw_cube ();\n    glPopMatrix ();\n\n    return;\n} \n\n\n/*Comment out above renderer_graph_draw function in order to use followinh\n\nFollowing function can be adapted to display: \n-obstacles point-cloud \n-obstacle-free space (random start-goal pairs) \n-MPNet generated paths\n-DeepSMP generated samples  \nTO DO:\n-Load MPNet generated data from files to publish\n-Load oracle (RRT*,P-RRT*) generated paths to publish\n*/\n\n/*\nstatic void renderer_graph_draw(BotViewer *viewer, BotRenderer *renderer)\n{   \n    \n    RendererGraph *self = (RendererGraph*) renderer;\n\n    \n    glEnable(GL_DEPTH_TEST);\n    glEnable (GL_BLEND);\n    glEnable (GL_RESCALE_NORMAL);\n\n    glBlendFunc (GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);\n    glShadeModel (GL_SMOOTH);\n    glEnable (GL_LIGHTING);\n\n    int s=1;\n    int size=50000;\n    double fnum[20][2];\n    ifstream in(\"obs.dat\", ios::in | ios::binary);\n    in.read((char *) &fnum, sizeof fnum);\n\n    int perm[77520][7];\n    ifstream in2(\"obs_perm2.dat\", ios::in | ios::binary);\n    in2.read((char *) &perm, sizeof perm);\n    \n    double obs[size][2];\n    ifstream in3(\"graph/graph50.dat\", ios::in | ios::binary);\n    in3.read((char *) &obs, sizeof obs);\n    \n\t//visualize obstacles point cloud that will be passed on to obstacle space encoder of MPNet\n    //double obs_cloud[1400][2];\n    //ifstream in4(\"obs_cloud/obc10000.dat\", ios::in | ios::binary);\n    //in4.read((char *) &obs_cloud, sizeof obs_cloud);\n    //cout<<\"point cloud\"<<endl;\n    //glPointSize(0.0);\n    //float color_vertex[] = {0.1, 0.1, 0.8, 1.0};\n    //glMaterialfv (GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, color_vertex);\n    //glEnable (GL_POINT_SMOOTH);\n    //glBegin (GL_POINTS);\n\n   \t//for (int i = 0; i <1400; i++) {\n\n      \t//glVertex3f (obs_cloud[i][0],obs_cloud[i][1],0.0);\n   
\t//}\n\n   \t//glEnd();\n   \n\t//load MPNet generated paths here and assign it's x and y coordinates to arrays pathx and path. respectively.\n\t\t\n\tdouble pathx[]=\n\t-12.3745660782,\n\t-7.70831871033,\n\t4.93411445618,\n\t11.2,\n\t13.2117605209};\n\n\tdouble pathy[]=\n\t8.56354236603,\n\t9.5905714035,\n\t2.13003230095,\n\t0.385264575481,\n\t-7.65872478485};\n\t\n\t// load oracle paths (rrtstar/p-rrtstar/BIT*) here and assign their x and y coordinates to px and py respectively\n\n\tdouble px[]={\n\t-12.3745660782,\n\t-7.70831871033,\n\t4.93411445618,\n\t11.2,\n\t13.2117605209};\n\tdouble py[]={\n\t8.56354236603,\n\t9.5905714035,\n\t2.13003230095,\n\t0.385264575481,\n\t-7.65872478485};\n\n\n      \n    // publist MPNet paths     \n    for (int i = 0; i <sizeof(pathx)/sizeof(pathx[0])-1; i++) {\n\n           \n       \tglLineWidth (2.5);\n        float color_edge[] ={1.0, 0.0, 0.0, 1.0};\n        glMaterialfv (GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, color_edge);\n\t\tglPointSize(0.5);\n\t\tglBegin (GL_LINE_STRIP);\n\t\t\tglVertex3f (pathx[i],pathy[i],0.0);\n            glVertex3f (pathx[i+1],pathy[i+1],0.0);\n        glEnd();\n    }\n    //publish oracle paths for comparison     \n    for (int i = 0; i <sizeof(px)/sizeof(px[0])-1; i++) {\n\n           \n        glLineWidth (2.5);\n        float color_edge[] ={0.1, 0.1, 0.8, 1.0};\n        glMaterialfv (GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, color_edge);\n        glBegin (GL_LINE_STRIP);\n        \tglVertex3f (px[i],py[i],0.0);\n        \tglVertex3f (px[i+1],py[i+1],0.0);\n        glEnd();\n    }\n          \n\tint i=8; //env_no\n\n\tfloat color_obstacles[] = { 0.37, 0.3, 0.3,((double)(self->obstacle_opacity))/100.0};\n \n\n    glMaterialfv (GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, color_obstacles);\n    glPushMatrix ();\n    glTranslated (fnum[perm[i][0]][0],fnum[perm[i][0]][1], 0);\n    glRotatef (0.0, 0.0, 0.0, 1.0);\n    glScalef (5.0,5.0,0.0);\n    bot_gl_draw_cube ();\n    glPopMatrix ();\n\n    
glMaterialfv (GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, color_obstacles);\n   \tglPushMatrix ();\n   \tglTranslated (fnum[perm[i][1]][0],fnum[perm[i][1]][1], 0);\n   \tglRotatef (0.0, 0.0, 0.0, 1.0);\n   \tglScalef (5.0,5.0,0.0);\n   \tbot_gl_draw_cube ();\n   \tglPopMatrix ();\n\n    glMaterialfv (GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, color_obstacles);\n    glPushMatrix ();\n    glTranslated (fnum[perm[i][2]][0],fnum[perm[i][2]][1], 0);\n    glRotatef (0.0, 0.0, 0.0, 1.0);\n    glScalef (5.0,5.0,0.0);\n    bot_gl_draw_cube ();\n    glPopMatrix ();\n\n    glMaterialfv (GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, color_obstacles);\n    glPushMatrix ();\n    glTranslated (fnum[perm[i][3]][0],fnum[perm[i][3]][1], 0);\n    glRotatef (0.0, 0.0, 0.0, 1.0);\n    glScalef (5.0,5.0,0.0);\n    bot_gl_draw_cube ();\n    glPopMatrix ();\n\n\n    glMaterialfv (GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, color_obstacles);\n    glPushMatrix ();\n    glTranslated (fnum[perm[i][4]][0],fnum[perm[i][4]][1], 0);\n    glRotatef (0.0, 0.0, 0.0, 1.0);\n    glScalef (5.0,5.0,0.0);\n    bot_gl_draw_cube ();\n    glPopMatrix ();\n\n    glMaterialfv (GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, color_obstacles);\n    glPushMatrix ();\n    glTranslated (fnum[perm[i][5]][0],fnum[perm[i][5]][1], 0);\n    glRotatef (0.0, 0.0, 0.0, 1.0);\n    glScalef (5.0,5.0,0.0);\n    bot_gl_draw_cube ();\n    glPopMatrix ();\n                            \n    glMaterialfv (GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, color_obstacles);\n    glPushMatrix ();\n    glTranslated (fnum[perm[i][6]][0],fnum[perm[i][6]][1], 0);\n    glRotatef (0.0, 0.0, 0.0, 1.0);\n    glScalef (5.0/s,5.0/s,0.0);\n    bot_gl_draw_cube ();\n    glPopMatrix ();\n                   \n\n   return;\n}*/\n\n\nstatic void renderer_graph_free (BotRenderer *renderer)\n{\n    RendererGraph *self = (RendererGraph*) renderer;\n    \n    if (self->graph_last)\n        lcmtypes_graph_t_destroy (self->graph_last);\n    free(self);\n}\n\n\nvoid 
add_graph_renderer_to_viewer (BotViewer* viewer, int render_priority, lcm_t* lcm)\n{\n    \n    RendererGraph *self = new RendererGraph;\n    BotRenderer *renderer = &self->renderer;\n    self->lcm = lcm;\n    self->viewer = viewer;\n    \n    renderer->draw = renderer_graph_draw;\n    renderer->destroy = renderer_graph_free;\n    renderer->widget = gtk_vbox_new(FALSE, 0);\n    renderer->name = (char *) RENDERER_NAME;\n    renderer->user = self;\n    renderer->enabled = 1;\n    \n    \n    self->graph_last = NULL;\n    self->environment_last = NULL;\n    self->trajectory_last = NULL;\n    self->obstacle_opacity = 80;\n    \n    // subscribe to messages\n    lcmtypes_graph_t_subscribe (lcm, \"GRAPH\", graph_message_handler, self);\n    lcmtypes_environment_t_subscribe (lcm, \"ENVIRONMENT\", environment_message_handler, self);\n    lcmtypes_trajectory_t_subscribe (lcm, \"TRAJECTORY\", trajectory_message_handler, self);\n    bot_viewer_add_renderer(viewer, &self->renderer, render_priority);\n}\n"
  },
  {
    "path": "data_generation/viewer/src/renderers/graph_renderer.h",
    "content": "#ifndef RACECAR_ROAD_RENDERER_H_\n#define RACECAR_ROAD_RENDERER_H_\n\n#include <iostream>\n\n#include <bot_vis/bot_vis.h>\n#include <bot_core/bot_core.h>\n\n#include <gtk/gtk.h>\n#include <lcm/lcm.h>\n\n#include <GL/glut.h>\n\n#ifdef __APPLE__\n#include <OpenGL/gl.h>\n#else\n#include <GL/gl.h>\n#endif\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n    \n    void add_graph_renderer_to_viewer (BotViewer* viewer, int render_priority, lcm_t* lcm);\n    \n#ifdef __cplusplus\n}\n#endif\n\n#endif\n"
  },
  {
    "path": "data_generation/viewer/src/renderers/graph_renderer.h~",
    "content": "#ifndef RACECAR_ROAD_RENDERER_H_\n#define RACECAR_ROAD_RENDERER_H_\n\n#include <iostream>\n\n#include <bot_vis/bot_vis.h>\n#include <bot_core/bot_core.h>\n\n#include <gtk/gtk.h>\n#include <lcm/lcm.h>\n\n#include <GLUT/GLUT.h>\n\n#ifdef __APPLE__\n#include <OpenGL/gl.h>\n#else\n#include <GL/gl.h>\n#endif\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n    \n    void add_graph_renderer_to_viewer (BotViewer* viewer, int render_priority, lcm_t* lcm);\n    \n#ifdef __cplusplus\n}\n#endif\n\n#endif\n"
  },
  {
    "path": "readme",
    "content": "\ndata_generation\n-Contains C++ code to generate expert demonstrations in 2D and 3D environments\n- Follow the instructions in README file to compile c++ code.\n\n\nMPNet:\n-AE: Autoencoder codes\n-data_loader: loads expert demonstrations for training and testing\n-model: define path generator dataset\n-neuralplanner: Uses neural models to generate paths\n-Train: training code\nMPNet/AE:\n-data_loader: loads obstacles point-cloud\n-CAE: Contractive AutoEncoder\n\n\n\n\n\n\n"
  },
  {
    "path": "visualizer.py",
    "content": "import matplotlib\n#matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport struct\nimport numpy as np\nimport argparse\n\ndef main(args):\n    # visualize point cloud (obstacles)\n    obs = []\n    temp=np.fromfile(args.obs_file)\n    obs.append(temp)\n    obs = np.array(obs).astype(np.float32).reshape(-1,2)\n    plt.scatter(obs[:,0], obs[:,1], c='blue')\n\n\n\n\n    # visualize path\n    path = np.loadtxt(args.path_file)\n    print(path)\n    path = path.reshape(-1, 2)\n    path_x = []\n    path_y = []\n    for i in range(len(path)):\n        path_x.append(path[i][0])\n        path_y.append(path[i][1])\n\n    plt.plot(path_x, path_y, c='r', marker='o')\n\n    plt.show()\n\n\nparser = argparse.ArgumentParser()\n# for training\nparser.add_argument('--obs_file', type=str, default='./data/obs_cloud/obc0.dat',help='obstacle point cloud file')\nparser.add_argument('--path_file', type=str, default='./results/env_0/path_0.txt',help='path file')\nargs = parser.parse_args()\nprint(args)\nmain(args)\n"
  }
]