[
  {
    "path": ".github/workflows/pylint.yml",
    "content": "name: Pylint\n\non: [push]\n\njobs:\n  build:\n    runs-on: ubuntu-latest\n    strategy:\n      matrix:\n        python-version: [\"3.8\", \"3.9\", \"3.10\"]\n    steps:\n    - uses: actions/checkout@v3\n    - name: Set up Python ${{ matrix.python-version }}\n      uses: actions/setup-python@v3\n      with:\n        python-version: ${{ matrix.python-version }}\n    - name: Install dependencies\n      run: |\n        python -m pip install --upgrade pip\n        pip install pylint\n        pip3 install -r requirements.txt\n    - name: Analysing the code with pylint\n      run: |\n        pylint $(git ls-files '*.py')\n"
  },
  {
    "path": ".github/workflows/python-app.yml",
    "content": "# This workflow will install Python dependencies, run tests and lint with a single version of Python\n# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions\n\nname: Python application\n\non:\n  push:\n    branches: [ \"main\" ]\n  pull_request:\n    branches: [ \"main\" ]\n\npermissions:\n  contents: read\n\njobs:\n  build:\n\n    runs-on: ubuntu-latest\n\n    steps:\n    - uses: actions/checkout@v3\n    - name: Set up Python 3.10\n      uses: actions/setup-python@v3\n      with:\n        python-version: \"3.10\"\n    - name: Install dependencies\n      run: |\n        python -m pip install --upgrade pip\n        pip install flake8 pytest\n        if [ -f requirements.txt ]; then pip install -r requirements.txt; fi\n    - name: Lint with flake8\n      run: |\n        # stop the build if there are Python syntax errors or undefined names\n        flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics\n        # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide\n        flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics\n#     - name: Test with pytest\n#       run: |\n#         pytest src/extract_image_meta_exif.py\n"
  },
  {
    "path": ".github/workflows/python-publish.yml",
    "content": "# This workflow will upload a Python Package using Twine when a release is created\n# For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries\n\n# This workflow uses actions that are not certified by GitHub.\n# They are provided by a third-party and are governed by\n# separate terms of service, privacy policy, and support\n# documentation.\n\nname: Upload Python Package\n\non:\n  release:\n    types: [published]\n\npermissions:\n  contents: read\n\njobs:\n  deploy:\n\n    runs-on: ubuntu-latest\n\n    steps:\n    - uses: actions/checkout@v3\n    - name: Set up Python\n      uses: actions/setup-python@v3\n      with:\n        python-version: '3.10'\n    - name: Install dependencies\n      run: |\n        python -m pip install --upgrade pip\n        pip install build\n    - name: Build package\n      run: python -m build\n    - name: Publish package\n      uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29\n      with:\n        user: __token__\n        password: ${{ secrets.PYPI_API_TOKEN }}\n"
  },
  {
    "path": ".gitignore",
    "content": "devel/\nlogs/\nbuild/\nbin/\nlib/\nmsg_gen/\nsrv_gen/\nmsg/*Action.msg\nmsg/*ActionFeedback.msg\nmsg/*ActionGoal.msg\nmsg/*ActionResult.msg\nmsg/*Feedback.msg\nmsg/*Goal.msg\nmsg/*Result.msg\nmsg/_*.py\nbuild_isolated/\ndevel_isolated/\n\n# Generated by dynamic reconfigure\n*.cfgc\n/cfg/cpp/\n/cfg/*.py\n\n# Ignore generated docs\n*.dox\n*.wikidoc\n\n# eclipse stuff\n.project\n.cproject\n\n# qcreator stuff\nCMakeLists.txt.user\n\nsrv/_*.py\n*.pcd\n*.pyc\nqtcreator-*\n*.user\n\n/planning/cfg\n/planning/docs\n/planning/src\n\n*~\n\n# Emacs\n.#*\n\n# Catkin custom files\nCATKIN_IGNORE\ngit_stuff.txt\n"
  },
  {
    "path": ".gitmodules",
    "content": "[submodule \"src/superglue_lib\"]\n\tpath = src/superglue_lib\n\turl = https://github.com/magicleap/SuperGluePretrainedNetwork.git\n"
  },
  {
    "path": "LICENSE",
    "content": "BSD 3-Clause License\n\nCopyright (c) 2022, TIERS\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n\n3. Neither the name of the copyright holder nor the names of its\n   contributors may be used to endorse or promote products derived from\n   this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "README.md",
    "content": "![ubuntu label](https://img.shields.io/badge/Ubuntu-20.04-brightgreen)\n[![build passing label](https://img.shields.io/badge/build-passing-brightgreen)](https://github.com/TIERS/wildnav/actions/workflows/python-app.yml)\n# WildNav: GNSS-Free Localization in the Wild\n\nCheck out our paper \"Vision-Based GNSS-Free Localization for UAVs in the Wild\" here [DOI](https://doi.org/10.1109/ICMERR56497.2022.10097798) or [UTU](https://research.utu.fi/converis/portal/detail/Publication/179738211?lang=fi_FI). \n<!-- Access a sample [dataset here](https://utufi.sharepoint.com/:f:/s/msteams_0ed7e9/EsXaX0CKlpxIpOzVnUmn8-sB4yvmsxUohqh1d8nWKD9-BA?e=gPca2s).-->\n\n###  Abstract\n\nConsidering the accelerated development of Unmanned Aerial Vehicles (UAVs) applications in both industrial and research scenarios, there is an increasing need for localizing these aerial systems in non-urban environments, using GNSS-Free, vision-based methods. Our paper proposes a vision-based localization algorithm that utilizes deep features to compute geographical coordinates of a UAV flying in the wild. The method is based on matching salient features of RGB photographs captured by the drone camera and sections of a pre-built map consisting of georeferenced open-source satellite images. Experimental results prove that vision-based localization has comparable accuracy with traditional GNSS-based methods, which serve as ground truth. Compared to state-of-the-art Visual Odometry (VO) approaches, our solution is designed for long-distance, high-altitude UAV flights.\n\n### Overview\n\nThe main advantage of Wildnav is its capability of matching drone images (left) with georeferenced satellite images (right). 
The aim is for this to work for drones flying in non-urban areas, in environments with sparse features.\n\n<div align=center>\n<img src=\"assets/overview/project_overview.png\" width=\"800px\">\n<p align=\"center\">Vision-based localization algorithm overview </p>\n</div>\n\n<div align=center>\n<img src=\"assets/overview/good_match_1.png\" width=\"800px\">\n<p align=\"center\">Robust against rotation </p>\n</div>\n\n<div align=center>\n<img src=\"assets/overview/good_match_2.png\" width=\"800px\">\n<p align=\"center\"> Able to visually identify the drone pose even in environments with sparse features </p>\n</div>\n\n<div align=center>\n<img src=\"assets/overview/good_match_3.png\" width=\"800px\">\n<p align=\"center\"> The drone image (left) can be taken from a very different perspective, compared to the matched satellite image (right) </p>\n</div>\n\n<div align=center>\n<img src=\"assets/overview/good_match_4.png\" width=\"800px\">\n<p align=\"center\"> Successful visual-based localization in non-urban areas </p>\n</div>\n\n\n\n## Run it yourself\n\nThe algorithm was tested on Ubuntu 20.04 with Python 3.10. Nevertheless, it should work with other versions as well.\n\n\n   0. **(Highly recommended)** Create a new python3 virtual environment\n      ```\n      python3 -m venv env \n      ```\n      Activate it\n      ```\n      source env/bin/activate\n      ```\n   1. Clone the repo\n      ```\n      git clone git@github.com:TIERS/wildnav.git\n      ```\n   2. Install superglue dependencies:\n      ```\n      cd wildnav\n      git submodule update --init --recursive\n      ```\n   3. Install python dependencies\n      ```\n      pip3 install -r requirements.txt\n      ```\n   4. 
Run the localization script (see below to add a dataset, but you can try with a few samples already in the repo)\n      ```\n      cd src\n      python3 wildnav.py\n      ```\n\n\n## Add your own drone image dataset\n\nBefore running the code, you need to have a dataset, reference images (map) \n\n   1. Add your drone photos to ```assets/query```. Feel free to use our dataset from [here](https://utufi.sharepoint.com/:f:/s/msteams_0ed7e9/EsXaX0CKlpxIpOzVnUmn8-sB4yvmsxUohqh1d8nWKD9-BA?e=gPca2s).\n\n   2. Add your satellite map images to ```assets/map``` together with a csv file containing geodata for the images (see ```assets/map/map.csv```) \n\n   3. Run python script to generate csv file containing photo metadata with GNSS coordinates\n      ```\n      python3 extract_image_meta_exif.py\n      ```\n   4. Run wildnav algorithm\n      ```\n      python3 wildnav.py\n      ```\n   \n## Common problems and fixes\n\n1. Runtime error due to incompatible version of ```torch``` installed\nError message: \"NVIDIA GeForce RTX 3070 with CUDA capability sm_86 is not compatible with the current PyTorch installation. The current PyTorch install supports CUDA capabilities sm_37 sm_50 sm_60 sm_70.If you want to use the NVIDIA GeForce RTX 3070 GPU with PyTorch.\"\n\n**Fix:**\n\nUninstall current torch installation:\n```\npip3 uninstall torch\n```\nFollow instructions on the official [pytorch](https://pytorch.org/get-started/locally/) website to install the right version of torch for your system (it depends on your graphics card and CUDA version).\n\n2. No dedicated GPU available on your system.\n\n**Fix:**\n\n\nThe algorithm can run, albeit much slower, on CPU. Simply change ```force_cpu``` flag in ```src/superglue_utils.py``` to ```True```.\n\n**NOTE**: If you encounter any problems which are not listed here, please open a new issue in this repository. 
We will try to fix it as soon as possible.\n\n## Datasets\n\nPhotographs used for experimental validation of the algorithm can be found [here](https://utufi.sharepoint.com/:f:/s/msteams_0ed7e9/EsXaX0CKlpxIpOzVnUmn8-sB4yvmsxUohqh1d8nWKD9-BA?e=gPca2s).\n\n<div align=center>\n<img src=\"assets/overview/experiments_flight_area.png\" width=\"800px\">\n<p align=\"center\">Satellite view of the flight zone (highlighted rectangle). The yellow pin is\nlocated at 60.403091° latitude and 22.461824° longitude </p>\n</div>\n\n\n## Results\n\n<div align=center>\n\n|           \t| Total \t| Localized \t| MAE (m) \t|\n|:---------:\t|:-----:\t|:---------:\t|:-------:\t|\n| Dataset 1 \t|  124  \t|  77 (62%) \t|  15.82  \t|\n| Dataset 2 \t|   78  \t|  44 (56%) \t|  26.58  \t|\n\n</div>\n\n\n\n<div align=center>\n<img src=\"assets/overview/dataset_1_abs_coord.png\" width=\"800px\">\n<p align=\"center\">Dataset 1 absolute coordinates of localized photographs </p>\n</div>\n\n<div align=center>\n<img src=\"assets/overview/dataset_1_error.png\" width=\"800px\">\n<p align=\"center\">Dataset 1 localization error </p>\n</div>\n\n<div align=center>\n<img src=\"assets/overview/dataset_2_abs_coord.png\" width=\"800px\">\n<p align=\"center\">Dataset 2 absolute coordinates of localized photographs</p>\n</div>\n\n<div align=center>\n<img src=\"assets/overview/dataset_2_error.png\" width=\"800px\">\n<p align=\"center\">Dataset 2 localization error </p>\n</div>\n\n<div align=center>\n<img src=\"assets/overview/error_comparison.png\" width=\"800px\">\n<p align=\"center\">Error comparison </p>\n</div>\n\n## Contact\nFeel free to send me an email at mmgurg@utu.fi if you have any questions about the project.\n"
  },
  {
    "path": "assets/map/map.csv",
    "content": "Filename, Top_left_lat,Top_left_lon,Bottom_right_lat,Bottom_right_long\n\"sat_map_00.png\",   60.403962,    22.460441,   60.402409,    22.464059\n\"sat_map_01.png\",  60.403963,  22.464054,  60.402409,  22.467672\n\"sat_map_02.png\",  60.402410,  22.460440,  60.400857,  22.464058\n\"sat_map_03.png\",  60.402412,  22.464056,  60.400859,  22.467674\n\"sat_map_04.png\",  60.402412,  22.467673,  60.400859,  22.471291\n\"sat_map_05.png\",  60.403962,  22.467672,  60.402408,  22.471290\n\"sat_map_06.png\",  60.405516,  22.460443,  60.403962,  22.464061\n\"sat_map_07.png\",  60.405514,  22.464053,  60.403961,  22.467671\n\"sat_map_08.png\",  60.405512,  22.467667,  60.403959,  22.471285\n\"sat_map_09.png\",  60.407068,  22.460446,  60.405515,  22.464064\n\"sat_map_10.png\",  60.407067,  22.464064,  60.405514,  22.467682\n\"sat_map_11.png\",  60.407064,  22.467667,  60.405511,  22.471285\n\"sat_map_12.png\",  60.408617,  22.460445,  60.407064,  22.464063\n\"sat_map_13.png\",  60.408619,  22.464056,  60.407066,  22.467674\n\"sat_map_14.png\",  60.408616,  22.467670,  60.407063,  22.471288"
  },
  {
    "path": "assets/query/photo_metadata.csv",
    "content": "Filename,Latitude,Longitude,Altitude,Gimball_Roll,Gimball_Yaw,Gimball_Pitch,Flight_Roll,Flight_Yaw,Flight_Pitch\ndrone_image_1.jpg,60.403241666666666,22.462133333333334,+119.98,+0.00,-4.70,-90.30,-8.50,+1.60,-11.00\ndrone_image_2.jpg,60.40225833333333,22.465155555555555,+120.01,+0.00,-92.60,-90.30,-1.00,-86.80,-10.90"
  },
  {
    "path": "requirements.txt",
    "content": "haversine==2.5.1\nmatplotlib>=3.6.1\nnumpy>=1.23.3\nopencv_python==4.5.5.64\npandas==1.4.2\nrequests>=2.27.1\nscikit_learn==1.1.2\nseaborn>=0.11.2\ntorch==1.12.1\n"
  },
  {
    "path": "src/build_map.py",
    "content": "\"\"\"Module used for automatically building satellite maps with Google Maps API\"\"\"\nimport csv\nimport math\nimport shutil\nimport requests\n\n\n\n############################################################################################\n# Satellite map building script\n# Requires access to Google Static Maps API,\n#   see https://developers.google.com/maps/documentation/maps-static/overview\n# It will not work with current API_KEY, you need to get your own\n############################################################################################\n\n# Path to the folder where the map will be saved\nMAP_PATH = \"../assets/maps/map_2/\"\nclass FlightZone:\n    \"\"\"A rectangle shaped flight area defined by 2 points (latitudes and longitudes)\"\"\"\n    def __init__(self, top_left_lat, top_left_lon,\\\n         bottom_right_lat, bottom_right_lon, filename = \"default\"):\n        self.filename = filename\n        self.top_left_lat = top_left_lat\n        self.top_left_lon = top_left_lon\n        self.bottom_right_lat = bottom_right_lat\n        self.bottom_right_lon = bottom_right_lon\n\n    def __str__(self):\n        return f\"{self.__class__.__name__}; \\n{self.top_left_lat}: %f \\nt{self.top_left_lon}: \\\n            \\n{self.bottom_right_lat}: %f \\n{self.bottom_right_lon} %f\" \nclass PatchSize:\n    \"\"\"Size of a map patch in decimal latitude and longitude\"\"\"\n    def __init__(self, lat, lon):\n        self.lat = lat\n        self.lon = lon\n\n    def __str__(self):\n        return f\"{self.__class__.__name__}; \\n{self.lat}: %f \\n{self.lon}\"\n\ndef csv_write_image_location():\n    \"\"\"Writes a csv file with the geographical location of the downloaded satellite images\"\"\"\n    header = [\"Filename, Top_left_lat,Top_left_lon,Bottom_right_lat,Bottom_right_long\"]\n    file_path = MAP_PATH + 'map.csv'\n    with open(file_path, 'w', encoding='UTF8') as file:\n        writer = csv.writer(file)\n        writer.writerow(header)\n 
       for photo in photo_list:\n            line = [photo.filename, str(photo.top_left_lat), str(photo.top_left_lon), \\\n                   str(photo.bottom_right_lat), str(photo.bottom_right_lon)]\n            writer.writerow(line)\n\n# These 2 variables determine the number of images that form the map\n\n#define as a pair of coordinates determining a rectangle in which the satellite photos will be taken\nflight_zone = FlightZone(60.408615, 22.460445, 60.400855, 22.471289)\n\n#define as height (latitude) and width (longitude) of the patch to be taken\npatch_size = PatchSize(0.001676, 0.00341)\n\n# Number of satellite patches needed to build the map\nwidth = math.floor((flight_zone.bottom_right_lon -  flight_zone.top_left_lon) / patch_size.lon)\nheight = math.floor((flight_zone.top_left_lat - flight_zone.bottom_right_lat) / patch_size.lat)\n\ntotal = width * height\nprint(f\"The script will download : {total} images. Do you want to continue? [Y/N]\")\nanswer = input()\n\nwhile answer not in ('Y', 'N', 'y', 'n'):\n    print(\"Please answer with Y or N\")\n    answer = input()\n\nif answer in ('Y', 'y'):\n    print(\"Downloading images...\")\nelif answer in ('N', 'n'):\n    print(\"Exiting...\")\n    exit(1)\n\ncurrent_center = PatchSize(flight_zone.top_left_lat - patch_size.lat / 2, \\\n     flight_zone.top_left_lon + patch_size.lon / 2)\ncurrent_top_left = PatchSize(flight_zone.top_left_lat, flight_zone.top_left_lon)\ncurrent_bottom_right = PatchSize(current_top_left.lat - patch_size.lat, \\\n     current_top_left.lon + patch_size.lon)\n\ncenter = str(current_center.lat) + \",\" + str(current_center.lon)\nzoom = \"18\" # optimized for the perspective of a wide angle camera at 120 m altitude\nsize = \"640x640\"  # maximum allowed size\nmaptype = \"satellite\"\nscale = \"2\" # maximum allowed scale\n#restricted by IP address, so you will have to generate your own\nAPI_KEY = \"AIzaSyAclBCbWo0rwQIaezGcXM6X3S_Otv-hHOQ\"\n\nURL = 
\"https://maps.googleapis.com/maps/api/staticmap?\" # base URL for the Google Maps API\n\n#defining a params dict for the parameters to be sent to the API\nPARAMS = {'center':center,\n          'zoom':zoom,\n          'size':size,\n          'scale':scale,\n          'maptype':maptype,\n          'key':API_KEY\n        }\n\nphoto_list = [] # list of all the photos that will be downloaded\nindex = 0 # index of the current image\n\n# build map by downloading and stitching together satellite images\nfor i in range(0, height):\n    for j in range(0, width):\n        photo_name = 'sat_patch' + '_' + f\"{index:04d}\"+ '.png'\n\n        #send GET request to the API which returns a satellite image upon success\n        r = requests.get(url = URL, params = PARAMS, stream=True, timeout=10)\n\n\n        # if request successful, write image to file\n        if r.status_code == 200:\n            with open(MAP_PATH + photo_name, 'wb') as out_file:\n                shutil.copyfileobj(r.raw, out_file)\n                print(\"image \"+ str(index) + \" downloaded\")\n        else:\n            print(\"Error \" + r.status_code + \" downloading image \" + str(index))\n        \n        current_patch = FlightZone(current_top_left.lat, current_top_left.lon, \\\n             current_bottom_right.lat, current_bottom_right.lon, photo_name)\n        photo_list.append(current_patch)\n        index += 1\n        current_center.lon = current_center.lon + patch_size.lon\n        current_top_left.lon += patch_size.lon\n        current_bottom_right.lon += patch_size.lon\n        PARAMS['center'] = str(current_center.lat) + \",\" + str(current_center.lon)\n    current_top_left.lat -= patch_size.lat\n    current_top_left.lon = flight_zone.top_left_lon\n    current_bottom_right.lat -= patch_size.lat\n    current_bottom_right.lon = flight_zone.top_left_lon + patch_size.lon\n    current_center.lat = current_center.lat - patch_size.lat\n    current_center.lon = flight_zone.top_left_lon + 
patch_size.lon / 2\n    PARAMS['center'] = str(current_center.lat) + \",\" + str(current_center.lon)\n\nprint(\"Map images downloaded, writing csv file\")\ncsv_write_image_location()\nprint(\"Map built successfully, check the map folder: \" + MAP_PATH)\n"
  },
  {
    "path": "src/extract_image_meta_exif.py",
    "content": "\"\"\"Script that reads the EXIF data from the drone images and extracts the GNSS coordinates to a csv file\"\"\"\nimport subprocess\nimport re\nimport os\n\n############################################################################################################\n# Requires exiftool to be installed: https://exiftool.org/\n# You might have to modify the scripts to match the EXIF metadata of your drone photos \n# Use https://www.metadata2go.com/ to easily check the metadata of your images\n############################################################################################################\n\ncsv_filename = 'photo_metadata.csv' # csv file with drone image metadata containing GNSS location\nphoto_folder = '../assets/query/' # folder with drone images\n \ncsv_filename = photo_folder + csv_filename\n\ndef convert_gnss_coord(lat_or_lon):\n    \"\"\"\n    Convert GNSS coordinate to decimal degrees instead of minutes and seconds\n    \"\"\"\n    deg, deg_string, minutes,discard_1,  seconds, discard_2, direction =  re.split('[\\s°\\'\"]', lat_or_lon)\n    converted = (float(deg) + float(minutes)/60 + float(seconds)/(60*60)) * (-1 if direction in ['W', 'S'] else 1)\n    return str(converted)\n\ndef load_images_from_folder(folder):\n    images_list = []\n    print(\"Loading images. Please wait...\")\n    for filename in os.listdir(folder):\n        if filename.endswith((\".JPG\", \".jpg\", \".png\")):\n            images_list.append(filename)\n    print(\"Done loading \" +  str(len(images_list)) + \" images.\")\n\n    images_list.sort()\n    return images_list\n\nimages_list = load_images_from_folder(photo_folder)\n\nf = open(csv_filename, \"a\")\n\nfilesize = os.path.getsize(csv_filename)\nif filesize == 0:\n    f.write(\"Filename,Latitude,Longitude,Altitude,Gimball_Roll,Gimball_Yaw,Gimball_Pitch,Flight_Roll,Flight_Yaw,Flight_Pitch\")\n\nprint(\"Reading metadata. 
Please wait...\")\nfor image in images_list:\n    infoDict = {} #Creating the dict to get the metadata tags\n    exifToolPath = 'exiftool'\n    imgPath = photo_folder + image\n    process = subprocess.Popen([exifToolPath,imgPath],stdout=subprocess.PIPE, stderr=subprocess.STDOUT,universal_newlines=True) \n    #get the tags in dict \n    for tag in process.stdout:\n        line = tag.strip().split(':')\n        infoDict[line[0].strip()] = line[-1].strip()\n\n    #Default values\n    altitude = \"NaN\"\n    gimball_roll = \"NaN\"\n    gimball_yaw = \"NaN\"\n    gimball_pitch = \"NaN\"\n    flight_roll = \"NaN\"\n    flight_yaw = \"NaN\"\n    flight_pitch = \"NaN\"\n\n    if 'File Name' in infoDict:\n        filename = infoDict['File Name']\n    if 'GPS Latitude' in infoDict:\n        lat = infoDict['GPS Latitude']\n    if 'GPS Longitude' in infoDict:\n        lon = infoDict['GPS Longitude']\n    if 'Relative Altitude' in infoDict:\n        altitude = infoDict['Relative Altitude']\n    elif 'GPS Altitude' in infoDict:\n        altitude = infoDict['GPS Altitude']\n        altitude =  re.search(r'\\d+', altitude).group() #keep only the number value, discard text\n\n    if 'Gimbal Roll Degree' in infoDict:\n        gimball_roll = infoDict['Gimbal Roll Degree']\n    if 'Gimbal Yaw Degree' in infoDict:\n        gimball_yaw = infoDict['Gimbal Yaw Degree']\n    if 'Gimbal Pitch Degree' in infoDict:\n        gimball_pitch = infoDict['Gimbal Pitch Degree']\n    if 'Flight Roll Degree' in infoDict:\n        flight_roll = infoDict['Flight Roll Degree']\n    if 'Flight Yaw Degree' in infoDict:\n        flight_yaw = infoDict['Flight Yaw Degree']\n    if 'Flight Pitch Degree' in infoDict:    \n        flight_pitch = infoDict['Flight Pitch Degree']\n\n    lat = convert_gnss_coord(lat)\n    lon = convert_gnss_coord(lon)\n\n    f.write('\\n' + filename + ',' + lat + ',' + lon + ',' + altitude + ',' + gimball_roll + ',' + gimball_yaw + ',' + gimball_pitch + ',' + flight_roll + ','  + 
flight_yaw + ','+ flight_pitch)\n\nprint(\"Done reading metadata. Metadata saved to \" + csv_filename)\n\nf.close()\n\n\n"
  },
  {
    "path": "src/plot_data.py",
    "content": "\"\"\"Plots data output from wildnav\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom matplotlib import rcParams\nfrom sklearn.metrics import mean_squared_error \nfrom sklearn.metrics import mean_absolute_error\n\n############################################################################\n# Script that plots ground truth coordinates and calculated coordinates \n############################################################################\n\ndef mse(actual, predicted):\n    return np.square(np.subtract(np.array(actual), np.array(predicted))).mean()\n\ndef dif(actual, predicted):\n    return np.subtract(np.array(actual), np.array(predicted))\n\n\nsns.set_style(\"whitegrid\")\nsns.set(font_scale = 2)\nplt.rcParams['font.sans-serif'] = ['Arial']\nplt.rcParams['axes.edgecolor'] = 'white'\nplt.rcParams['axes.unicode_minus'] = False\nfilename = \"calculated_coordinates_real_data_1.csv\"\n\n\n\ndata = pd.read_csv(filename)\ntotal_images = data.shape[0]\ndata = data.loc[data['Calculated_Latitude'] != -1]\ndata = data.loc[data['Meters_Error'] < 50]\n\nlocated_images = data.shape[0]\n\ndataset_list = []\nfor i in range(0,data.shape[0]):\n    dataset_list.append(\"Dataset 1\")\n\ndata['Dataset'] = dataset_list\n\npd_zeros = pd.DataFrame(np.zeros(data.shape[0]))\n \nprint(\"Total images \", total_images) \nprint(\"Located images \", located_images)\nprint(\"Located images %\", located_images *100 / total_images)\n\n# Using DataFrame.mean() method to get column average\ndf2 = data[\"Meters_Error\"].mean()\nprint(data['Meters_Error'])\nprint(\"Mean meter error:\", df2)\nprint(\"RMS meter error:\", mse(pd_zeros, data['Meters_Error']))\nmse_sci_kit = mean_squared_error(pd_zeros, data['Meters_Error'], squared=False)\nmae_sci_kit = mean_absolute_error(pd_zeros, data['Meters_Error'], )\nprint(\"RMS meter error Sci_kit: \", mse_sci_kit)\nprint(\"MAE meter error Sci_kit: \", mae_sci_kit)\n\nprint(\"Filename:\", 
filename)\nprint(\"RMS Latitude:\", mse(data['Latitude'], data['Calculated_Latitude']))\n\nprint(\"RMS Longitude:\", mse(data['Longitude'], data['Calculated_Longitude']))\n\n\nprint(data)\n\n\nfig=plt.figure(figsize=(15,8))\nax=plt.gca()\nax.spines['top'].set_visible(False)\nax.spines['right'].set_visible(False)\nax.patch.set_facecolor('lightgray')\nax.patch.set_alpha(0.4)\nsns.lineplot(data = data, x = data.index , y = \"Meters_Error\", marker=\"o\", color='r')\nax.set_xlabel(\"Photograph Index\")\nax.set_ylabel(\"Localization Error [m]\")\n\nplt.savefig(\"mean_error_dataset_1.png\")\nplt.savefig(\"mean_error_dataset_1.pdf\")\n\n\nplt.show()\n\n\n\nprint(\"RMS Latitude:\", mse(data['Latitude'], data['Calculated_Latitude']))\n\nprint(\"RMS Longitude:\", mse(data['Longitude'], data['Calculated_Longitude']))\n\nerror = dif(data['Latitude'], data['Calculated_Latitude'])\nprint(error)\n\n\n\n\n\n\nfilename = \"calculated_coordinates_real_data_2.csv\"\n\n\n\ndata_2 = pd.read_csv(filename)\ntotal_images = data_2.shape[0]\ndata_2 = data_2.loc[data_2['Calculated_Latitude'] != -1]\ndata_2 = data_2.loc[data_2['Meters_Error'] < 50]\n\nlocated_images = data_2.shape[0]\n\ndataset_list = []\nfor i in range(0,data_2.shape[0]):\n    dataset_list.append(\"Dataset 2\")\n\ndata_2['Dataset'] = dataset_list\n\nconcatenated = pd.concat([data, data_2])\n\nfig=plt.figure(figsize=(15,8))\nax=plt.gca()\nax.spines['top'].set_visible(False)\nax.spines['right'].set_visible(False)\nax.patch.set_facecolor('lightgray')\nax.patch.set_alpha(0.4)\nsns.boxplot(x = \"Dataset\",y=\"Meters_Error\", data=concatenated)\n#ax.set_xlabel(\"Photograph Index\")\n#ax.set_ylabel(\"Localization Error [m]\")\n\nplt.savefig(\"boxplot_error_dataset_1.png\")\nplt.savefig(\"boxplot_error_dataset_1.pdf\")\n\n\nplt.show()\n\n\n\n\n\n\n\n\n"
  },
  {
    "path": "src/superglue_utils.py",
    "content": "from pathlib import Path\nimport cv2\nimport matplotlib.cm as cm\nimport torch\nimport numpy as np\n\n\nfrom superglue_lib.models.matching import Matching\nfrom superglue_lib.models.utils import (AverageTimer, VideoStreamer,\n                          make_matching_plot_fast, frame2tensor)\n\ntorch.set_grad_enabled(False)\n\n\ndef match_image(): \n    \"\"\"\n    Wrapper function for matching two images, provides an interface to superglue model\n    \"\"\"\n    center = None\n    input = '../assets/map/'\n    output_dir = \"../results\"\n    image_glob = ['*.png', '*.jpg', '*.jpeg', '*.JPG']\n    skip = 1\n    max_length = 1000000\n\n\n    # Important parameters to modify if you wish to improve the feature matching performance. \n    resize = [800] # Resize the image to this size before processing. Set to None to disable resizing.\n    superglue = 'outdoor' # The SuperGlue model to use. Either 'indoor' or 'outdoor'.\n    max_keypoints = -1 # -1 keep all keypoints  \n    keypoint_threshold = 0.01 # Remove keypoints with low confidence. Set to -1 to keep all keypoints.\n    nms_radius = 4 # Non-maxima suppression: keypoints with similar responses in a small neighborhood are removed.\n    sinkhorn_iterations = 20 # Number of Sinkhorn iterations for matching.\n    match_threshold = 0.5 # Remove matches with low confidence. Set to -1 to keep all matches.\n    show_keypoints = True # Show the detected keypoints.\n    no_display = False\n    force_cpu = False # Force CPU mode. 
It is significantly slower, but allows the model to run on systems withou dedicated GPU.\n    \n   \n    if len(resize) == 2 and resize[1] == -1:\n        resize = resize[0:1]\n    if len(resize) == 2:\n        print('Will resize to {}x{} (WxH)'.format(\n            resize[0], resize[1]))\n    elif len(resize) == 1 and resize[0] > 0:\n        print('Will resize max dimension to {}'.format(resize[0]))\n    elif len(resize) == 1:\n        print('Will not resize images')\n    else:\n        raise ValueError('Cannot specify more than two integers for --resize')\n\n    \n    device = 'cuda' if torch.cuda.is_available() and not force_cpu else 'cpu'\n    print('Running inference on device \\\"{}\\\"'.format(device))\n    config = {\n        'superpoint': {\n            'nms_radius': nms_radius,\n            'keypoint_threshold': keypoint_threshold,\n            'max_keypoints': max_keypoints\n        },\n        'superglue': {\n            'weights': superglue,\n            'sinkhorn_iterations': sinkhorn_iterations,\n            'match_threshold': match_threshold,\n        }\n    }\n    matching = Matching(config).eval().to(device)\n    keys = ['keypoints', 'scores', 'descriptors']\n\n    vs = VideoStreamer(input, resize, skip,\n                       image_glob, max_length)\n    frame, ret = vs.next_frame()\n    assert ret, 'Error when reading the first frame (try different --input?)'\n\n    frame_tensor = frame2tensor(frame, device)\n    last_data = matching.superpoint({'image': frame_tensor})\n    last_data = {k+'0': last_data[k] for k in keys}\n    last_data['image0'] = frame_tensor\n    last_frame = frame\n    last_image_id = 0\n\n    if output_dir is not None:\n        print('==> Will write outputs to {}'.format(output_dir))\n        Path(output_dir).mkdir(exist_ok=True)\n\n    # Create a window to display the demo.\n    if not no_display:\n        cv2.namedWindow('SuperGlue matches', cv2.WINDOW_NORMAL)\n        cv2.resizeWindow('SuperGlue matches', 640*2*2, 
480*2)\n    else:\n        print('Skipping visualization, will not show a GUI.')\n\n    timer = AverageTimer()\n\n    satellite_map_index = None # index of the satellite photo in the map where the best match was found\n    index = 0 # index of the satellite photo if a match was found\n    max_matches = -1 # max number of matches, used to keep track of the best match\n    MATCHED = False # flag to indicate if a match was found\n    located_image = None # the image of the satellite photo where the best match was found\n    features_mean = [0,0] # mean values of feature pixel coordinates \n\n    while True:\n        \n        #current satellite image to be matched\n        frame, ret = vs.next_frame()\n        if not ret:\n            print('Finished demo_superglue.py')\n            break\n        timer.update('data')\n        stem0, stem1 = last_image_id, vs.i - 1\n\n\n        \n\n        frame_tensor = frame2tensor(frame, device)\n        pred = matching({**last_data, 'image1': frame_tensor})\n        kpts0 = last_data['keypoints0'][0].cpu().numpy()\n        kpts1 = pred['keypoints1'][0].cpu().numpy()\n        matches = pred['matches0'][0].cpu().numpy()\n        confidence = pred['matching_scores0'][0].cpu().numpy()\n        timer.update('forward')\n\n        valid = matches > -1\n        mkpts0 = kpts0[valid]\n        mkpts1 = kpts1[matches[valid]]\n\n        \"\"\"\n        Find image in satellite map with findHomography        \n        \"\"\"\n        #At least 4 matched features are needed to compute homography\n        MATCHED = False\n        print(\"Matches found:\", len(mkpts1))\n        if (len(mkpts1) >= 4): \n            perspective_tranform_error = False           \n            M, mask = cv2.findHomography(mkpts0, mkpts1, cv2.RANSAC,5.0)\n            h,w = last_frame.shape\n            pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)\n            try: \n                dst = cv2.perspectiveTransform(pts,M)\n            except:\n     
           print(\"Perspective transform error. Abort matching.\")\n                perspective_tranform_error = True    \n\n            if (len(mkpts1) > max_matches) and not perspective_tranform_error: \n                frame = cv2.polylines(frame,[np.int32(dst)],True,255,3, cv2.LINE_AA) \n                moments = cv2.moments(dst)\n                cX = int(moments[\"m10\"] / moments[\"m00\"])\n                cY = int(moments[\"m01\"] / moments[\"m00\"])\n                center = (cX  ,cY) #shape[0] is Y coord, shape[1] is X coord\n                #use ratio here instead of pixels because image is reshaped in superglue\n                features_mean = np.mean(mkpts0, axis = 0)\n\n                #Draw the center of the area which has been matched\n                cv2.circle(frame, center, radius = 10, color = (255, 0, 255), thickness = 5)\n                cv2.circle(last_frame, (int(features_mean[0]), int(features_mean[1])), radius = 10, color = (255, 0, 0), thickness = 2)\n                center = (cX / frame.shape[1] ,cY /frame.shape[0] )\n                satellite_map_index = index\n                max_matches = len(mkpts1)\n                MATCHED = True\n\n        else:\n            print(\"Photos were NOT matched\")\n      \n        color = cm.jet(confidence[valid])\n        k_thresh = matching.superpoint.config['keypoint_threshold']\n        m_thresh = matching.superglue.config['match_threshold']\n        small_text = [\n            'Keypoint Threshold: {:.4f}'.format(k_thresh),\n            'Match Threshold: {:.2f}'.format(m_thresh),\n            'Image Pair: {:06}:{:06}'.format(stem0, stem1),\n        ]\n     \n        out = make_matching_plot_fast(\n            last_frame, frame, kpts0, kpts1, mkpts0, mkpts1, color, text='',\n            path=None, show_keypoints=show_keypoints, small_text='')\n\n        if MATCHED == True:\n            located_image = out\n        \n\n        if not no_display:\n            cv2.imshow('SuperGlue matches', out)\n       
     key = chr(cv2.waitKey(1) & 0xFF)\n            if key == 'q':\n                vs.cleanup()\n                print('Exiting (via q) demo_superglue.py')\n                break\n            elif key == 'n':  # set the current frame as anchor\n                last_data = {k+'0': pred[k+'1'] for k in keys}\n                last_data['image0'] = frame_tensor\n                last_frame = frame\n                last_image_id = (vs.i - 1)\n            elif key in ['e', 'r']:\n                # Increase/decrease keypoint threshold by 10% each keypress.\n                d = 0.1 * (-1 if key == 'e' else 1)\n                matching.superpoint.config['keypoint_threshold'] = min(max(\n                    0.0001, matching.superpoint.config['keypoint_threshold']*(1+d)), 1)\n                print('\\nChanged the keypoint threshold to {:.4f}'.format(\n                    matching.superpoint.config['keypoint_threshold']))\n            elif key in ['d', 'f']:\n                # Increase/decrease match threshold by 0.05 each keypress.\n                d = 0.05 * (-1 if key == 'd' else 1)\n                matching.superglue.config['match_threshold'] = min(max(\n                    0.05, matching.superglue.config['match_threshold']+d), .95)\n                print('\\nChanged the match threshold to {:.2f}'.format(\n                    matching.superglue.config['match_threshold']))\n            elif key == 'k':\n                show_keypoints = not show_keypoints\n\n        timer.update('viz')\n        timer.print()  \n        index += 1  \n\n        if output_dir is not None:\n            #stem = 'matches_{:06}_{:06}'.format(last_image_id, vs.i-1)\n            stem = 'matches_{:06}_{:06}'.format(stem0, stem1)\n            out_file = str(Path(output_dir, stem + '.png'))\n            print('\\nWriting image to {}'.format(out_file))\n            cv2.imwrite(out_file, out)\n\n\n    cv2.destroyAllWindows()\n    vs.cleanup()\n    \n    return satellite_map_index, center, located_image, 
features_mean, last_frame, max_matches\n    \n\n"
  },
  {
    "path": "src/wildnav.py",
    "content": "\"\"\"Core module. Contains the main functions for the project.\"\"\"\nimport csv\nimport cv2\nimport haversine as hs\nfrom haversine import Unit\nimport superglue_utils\n\n############################################################################################################\n# Important variables\n############################################################################################################\n\nmap_path = \"../assets/map/\"\nmap_filename = \"../assets/map/map.csv\" #  csv file with the sattelite geo tagged images\ndrone_photos_filename = \"../assets/query/photo_metadata.csv\" # csv file with the geo tagged drone images;\n                                                             # the geo coordinates are only used to compare\n                                                             # the calculated coordinates with the real ones\n                                                             # after the feature matching\n\n############################################################################################################\n# Class definitios\n############################################################################################################\nclass GeoPhotoDrone:\n    \"\"\"Stores a drone photo together with GNSS location\n    and camera rotation parameters\n    \"\"\"\n    def __init__(self,filename, photo=0, latitude=0, longitude = 0 ,\\\n         altitude = 0 ,gimball_roll = 0, gimball_yaw = 0, gimball_pitch = 0, flight_roll = 0, flight_yaw = 0, flight_pitch = 0):\n        self.filename = filename\n        self.photo = photo\n        self.latitude = latitude\n        self.longitude = longitude\n        self.latitude_calculated = -1\n        self.longitude_calculated = -1\n        self.altitude = altitude\n        self.gimball_roll = gimball_roll\n        self.gimball_yaw = gimball_yaw\n        self.gimball_pitch = gimball_pitch\n        self.flight_roll = flight_roll\n        self.flight_yaw = flight_yaw\n  
      self.flight_pitch = flight_pitch\n        self.corrected = False\n        self.matched = False\n\n    def __str__(self):\n        return \"%s; \\nlatitude: %f \\nlongitude: %f \\naltitude: %f \\ngimball_roll: %f \\ngimball_yaw: %f \\ngimball_pitch: %f \\nflight_roll: %f \\nflight_yaw: %f \\nflight_pitch: %f\" % (self.filename, self.latitude, self.longitude, self.altitude, self.gimball_roll, self.gimball_yaw, self.gimball_pitch, self.flight_roll, self.flight_yaw, self.flight_pitch )\n        \nclass GeoPhoto:\n    \"\"\"Stores a satellite photo together with (latitude, longitude) for top_left and bottom_right_corner\n    \"\"\"\n    def __init__(self, filename, photo, geo_top_left, geo_bottom_right):\n        self.filename = filename\n        self.photo = photo\n        self.top_left_coord = geo_top_left\n        self.bottom_right_coord = geo_bottom_right\n\n    def __lt__(self, other):\n         return self.filename < other.filename\n\n    def __str__(self):\n        return \"%s; \\n\\ttop_left_latitude: %f \\n\\ttop_left_lon: %f \\n\\tbottom_right_lat: %f \\n\\tbottom_right_lon %f \" % (self.filename, self.top_left_coord[0], self.top_left_coord[1], self.bottom_right_coord[0], self.bottom_right_coord[1])\n\n\n############################################################################################################\n# Functions for data writing and reading csv files\n############################################################################################################\ndef csv_read_drone_images(filename):\n    \"\"\"Builds a list with drone geo tagged photos by reading a csv file with this format:\n    Filename, Top_left_lat,Top_left_lon,Bottom_right_lat,Bottom_right_long\n    \"photo_name.png\",60.506787,22.311631,60.501037,22.324467\n    \"\"\"\n    geo_list_drone = []\n    photo_path = \"../assets/query/\"\n    with open(filename) as csv_file:\n        csv_reader = csv.reader(csv_file, delimiter=',')\n        line_count = 0\n        for row in 
csv_reader:\n            if line_count == 0:\n                print(f'Column names are {\", \".join(row)}')\n                line_count += 1\n            else:                \n                #img = cv2.imread(photo_path + row[0],0)\n                geo_photo = GeoPhotoDrone(photo_path + row[0], 0, float(row[1]), float(row[2]), float(row[3]), float(row[4]), float(row[5]), float(row[6]), float(row[7]), float(row[8]), float(row[9]))\n                geo_list_drone.append(geo_photo)\n                line_count += 1\n\n        print(f'Processed {line_count} lines.')\n        return geo_list_drone\n\ndef csv_read_sat_map(filename):\n    \"\"\"Builds a list with satellite geo tagged photos by reading a csv file with this format:\n    Filename, Top_left_lat,Top_left_lon,Bottom_right_lat,Bottom_right_long\n    \"photo_name.png\",60.506787,22.311631,60.501037,22.324467\n    \"\"\"\n    geo_list = []\n    photo_path = \"../assets/map/\"\n    print(\"opening: \",filename)\n    with open(filename) as csv_file:\n        csv_reader = csv.reader(csv_file, delimiter=',')\n        line_count = 0\n        for row in csv_reader:\n            if line_count == 0:\n                print(f'Column names are {\", \".join(row)}')\n                line_count += 1\n            else:            \n                img = cv2.imread(photo_path + row[0],0)\n                geo_photo = GeoPhoto(photo_path + row[0],img,(float(row[1]),float(row[2])), (float(row[3]), float(row[4])))\n                geo_list.append(geo_photo)\n                line_count += 1\n\n        print(f'Processed {line_count} lines.')\n        geo_list.sort() # sort alphabetically by filename to ensure that the feature matcher return the right index of the matched sat image\n        return geo_list\n    \ndef csv_write_image_location(photo):\n    header = ['Filename', 'Latitude', 'Longitude', 'Calculated_Latitude', 'Calculated_Longitude', 'Latitude_Error', 'Longitude_Error', 'Meters_Error', 'Corrected', 'Matched']\n    with 
open('../results/calculated_coordinates.csv', 'a', encoding='UTF8') as f:\n        writer = csv.writer(f)        \n        photo_name = photo.filename.split(\"/\")[-1]\n        loc1 = ( photo.latitude, photo.longitude)\n        loc2 = ( photo.latitude_calculated, photo.longitude_calculated)\n        dist_error =  hs.haversine(loc1,loc2,unit=Unit.METERS)\n        lat_error = photo.latitude - photo.latitude_calculated\n        lon_error = photo.longitude - photo.longitude_calculated\n        line = [photo_name, str(photo.latitude), str(photo.longitude), str(photo.latitude_calculated), str(photo.longitude_calculated), str(lat_error), str(lon_error), str(dist_error), str(drone_image.corrected), str(drone_image.matched), str(drone_image.gimball_yaw + drone_image.flight_yaw - 15)]\n        writer.writerow(line)\n\n\ndef calculate_geo_pose(geo_photo, center, features_mean,  shape):\n    \"\"\"\n    Calculates the geographical location of the drone image.\n    Input: satellite geotagged image, relative pixel center of the drone image, \n    (center with x = 0.5 and y = 0.5 means the located features are in the middle of the sat image)\n    pixel coordinates (horizontal and vertical) of where the features are located in the sat image, shape of the sat image\n    \"\"\"\n    #use ratio here instead of pixels because image is reshaped in superglue    \n    latitude = geo_photo.top_left_coord[0] + abs( center[1])  * ( geo_photo.bottom_right_coord[0] - geo_photo.top_left_coord[0])\n    longitude = geo_photo.top_left_coord[1] + abs(center[0])  * ( geo_photo.bottom_right_coord[1] - geo_photo.top_left_coord[1])\n    \n    return latitude, longitude\n\n\n\n#######################################\n# MAIN\n#######################################\n\n#Read all the geo tagged images that make up the satellite map used for reference\ngeo_images_list = csv_read_sat_map(map_filename)\n\n#Read all the geo tagged drone images that will be located in the map\ndrone_images_list = 
csv_read_drone_images(drone_photos_filename)\n\nlatitude_truth = []\nlongitude_truth = []\nlatitude_calculated = []\nlongitude_calculated = []\n\nprint(str(len(drone_images_list)) + \" drone photos were loaded.\")\n\n\n# Iterate through all the drone images\nfor drone_image in drone_images_list:\n    latitude_truth.append(drone_image.latitude) # ground truth from drone image metadata for later comparison\n    longitude_truth.append(drone_image.longitude) # ground truth for later comparison\n    photo =  cv2.imread(drone_image.filename) # read the drone image\n    \n\n    max_features = 0 # keep track of the best match, more features = better match\n    located = False # flag to indicate if the drone image was located in the map\n    center = None # center of the drone image in the map\n\n\n    rotations = [20] # list of rotations to try\n                     # keep in mind GNSS metadata could have wrong rotation angle\n                     # so we try to match the image with different (manually established) rotations\n\n    # Iterate through all the rotations, in this case only one rotation\n    for rot in rotations:\n        \n        # Write the query photo to the map folder\n        cv2.imwrite(map_path + \"1_query_image.png\", photo)\n\n        #Call superglue wrapper function to match the query image to the map\n        satellite_map_index_new, center_new, located_image_new, features_mean_new, query_image_new, feature_number = superglue_utils.match_image()\n        \n        # If the drone image was located in the map and the number of features is greater than the previous best match, then update the best match\n        # Sometimes the pixel center returned by the perspective transform exceeds 1, discard the results in that case\n        if (feature_number > max_features and center_new[0] < 1 and center_new[1] < 1):\n            satellite_map_index = satellite_map_index_new\n            center = center_new\n            located_image = located_image_new\n        
    features_mean = features_mean_new\n            query_image = query_image_new\n            max_features = feature_number\n            located = True\n    photo_name = drone_image.filename.split(\"/\")[-1]\n\n    # If the drone image was located in the map, calculate the geographical location of the drone image\n    if center != None and located:        \n        current_location = calculate_geo_pose(geo_images_list[satellite_map_index], center, features_mean, query_image.shape )\n        \n        # Write the results to the image result file with the best match\n        cv2.putText(located_image, \"Calculated: \" + str(current_location), org = (10,625),fontFace =  cv2.FONT_HERSHEY_DUPLEX, fontScale = 0.8,  color = (0, 0, 0))\n        cv2.putText(located_image, \"Ground truth: \" + str(drone_image.latitude) + \", \" + str(drone_image.longitude), org = (10,655),fontFace =  cv2.FONT_HERSHEY_DUPLEX, fontScale = 0.8,  color = (0, 0, 0))\n        cv2.imwrite(\"../results/\" + photo_name + \"_located.png\", located_image)\n        \n        print(\"Image \" + str(photo_name) + \" was successfully located in the map\")\n        print(\"Calculated location: \", str(current_location[0:2]))\n        print(\"Ground Truth: \", drone_image.latitude, drone_image.longitude)   \n        \n        # Save the calculated location for later comparison with the ground truth\n        drone_image.matched = True\n        drone_image.latitude_calculated = current_location[0]\n        drone_image.longitude_calculated = current_location[1]\n        \n        latitude_calculated.append(drone_image.latitude_calculated)\n        longitude_calculated.append(drone_image.longitude_calculated)\n\n    else:\n        print(\"NOT MATCHED:\", photo_name)\n\n    # Write the results to the csv file    \n    csv_write_image_location(drone_image)\n\n"
  }
]