Repository: clovaai/TedEval
Branch: master
Commit: f24e003a97dd
Files: 22
Total size: 164.4 KB
Directory structure:
gitextract_ex9hoqm4/
├── .gitignore
├── LICENSE
├── NOTICE
├── README.MD
├── config/
│ └── config.py
├── rrc_evaluation_funcs.py
├── script.py
├── static/
│ ├── funcs.js
│ ├── jquery-mousewheel.js
│ ├── jquery.form-3.51.js
│ ├── ranking.js
│ ├── style.css
│ ├── visualization.css
│ └── visualization_default.js
├── static_custom/
│ ├── contents.txt
│ ├── visualization_TL_iou.css
│ └── visualization_TL_iou.js
├── views/
│ ├── index.tpl
│ ├── method.tpl
│ ├── sample.tpl
│ └── upload.tpl
└── web.py
================================================
FILE CONTENTS
================================================
================================================
FILE: .gitignore
================================================
__pycache__*/
.idea/
output/*
._*
*/._*
.pyc
.DS_Store
*.swp
================================================
FILE: LICENSE
================================================
Copyright (c) 2019-present NAVER Corp.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
================================================
FILE: NOTICE
================================================
TedEval
Copyright (c) 2019-present NAVER Corp.
This project contains subcomponents with separate copyright notices and license terms.
Your use of the source code for these subcomponents is subject to the terms and conditions of the following licenses.
=====================
TedEval solves the drawbacks of previous metrics such as IoU and DetEval.
This code is based on ICDAR15 official evaluation code from https://rrc.cvc.uab.es/.
=====================
jquery/jquery from http://jquery.com/
=====================
Copyright jQuery Foundation and other contributors, https://jquery.org/
This software consists of voluntary contributions made by many
individuals. For exact contribution history, see the revision history
available at https://github.com/jquery/jquery
The following license applies to all parts of this software except as
documented below:
====
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
====
All files located in the node_modules and external directories are
externally maintained libraries used by this software which have their
own licenses; we recommend you read them, as their terms may differ from
the terms above.
=====================
jquery/jquery-ui from https://github.com/jquery/jquery-ui
=====================
Copyright jQuery Foundation and other contributors, https://jquery.org/
This software consists of voluntary contributions made by many
individuals. For exact contribution history, see the revision history
available at https://github.com/jquery/jquery-ui
The following license applies to all parts of this software except as
documented below:
====
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
====
Copyright and related rights for sample code are waived via CC0. Sample
code is defined as all source code contained within the demos directory.
CC0: http://creativecommons.org/publicdomain/zero/1.0/
====
All files located in the node_modules and external directories are
externally maintained libraries used by this software which have their
own licenses; we recommend you read them, as their terms may differ from
the terms above.
=====================
malsup/form from https://github.com/malsup/form
=====================
Copyright 2006-2013 (c) M. Alsup
All versions, present and past, of the jQuery Form plugin are dual licensed under the MIT and GPL licenses:
MIT
GPL
You may use either license. The MIT License is recommended for most projects because it is simple and easy to understand and it places almost no restrictions on what you can do with the plugin.
If the GPL suits your project better you are also free to use the plugin under that license.
You don't have to do anything special to choose one license or the other and you don't have to notify anyone which license you are using. You are free to use the jQuery Form Plugin in commercial projects as long as the copyright header is left intact.
-----
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
=====
================================================
FILE: README.MD
================================================
# TedEval: A Fair Evaluation Metric for Scene Text Detectors
Official Python 3 implementation of TedEval | [paper](https://arxiv.org/abs/1907.01227) | [slides](https://docs.google.com/presentation/d/1EFK_WjpdLExZVDPt4C7yCcxjpXNvIyAOL9zUnKx1VoY/edit?usp=sharing)
**[Chae Young Lee](mailto:cylee7133@gmail.com), Youngmin Baek, and Hwalsuk Lee.**
Clova AI Research, NAVER Corp.
### Overview
We propose a new evaluation metric for scene text detectors called TedEval. Through separate instance-level matching policy and character-level scoring policy, TedEval solves the drawbacks of previous metrics such as IoU and DetEval. This code is based on [ICDAR15 official evaluation code](http://rrc.cvc.uab.es/).
## Methodology
### 1. Matching Policy
- Non-exclusively gathers all possible matches of not only one-to-one but also one-to-many and many-to-one.
- The threshold of both area recall and area precision are set to 0.4.
- Multiline is identified and rejected when _|min(theta, 180 - theta)| > 45_ from Fig. 2.
### 2. Scoring Policy
We compute Pseudo Character Center (PCC) from word-level bounding boxes and penalize matches when PCCs are missing or overlapping.
### Sample Evaluation
## Experiments
We evaluated state-of-the-art scene text detectors with TedEval on two benchmark datasets: ICDAR 2013 Focused Scene Text (IC13) and ICDAR 2015 Incidental Scene Text (IC15). Detectors are listed in the order of published dates.
### ICDAR 2013
| Detector | Date (YY/MM/DD) | Recall (%) | Precision (%) | H-mean (%) |
| :-----------------------------------------------------: | :-------------: | :--------: | :-----------: | :--------: |
| [CTPN](https://arxiv.org/pdf/1609.03605.pdf) | 16/09/12 | 82.1 | 92.7 | 87.6 |
| [RRPN](https://arxiv.org/pdf/1703.01086.pdf) | 17/03/03 | 89.0 | 94.2 | 91.6 |
| [SegLink](https://arxiv.org/pdf/1703.06520.pdf) | 17/03/19 | 65.6 | 74.9 | 70.0 |
| [EAST](https://arxiv.org/pdf/1704.03155.pdf) | 17/04/11 | 77.7 | 87.1 | 82.5 |
| [WordSup](https://arxiv.org/pdf/1708.06720) | 17/08/22 | 87.5 | 92.2 | 90.2 |
| [PixelLink](https://arxiv.org/pdf/1801.01315.pdf) | 18/01/04 | 84.0 | 87.2 | 86.1 |
| [FOTS](https://arxiv.org/pdf/1801.01671.pdf) | 18/01/05 | 91.5 | 93.0 | 92.6 |
| [TextBoxes++](https://arxiv.org/pdf/1801.02765.pdf) | 18/01/09 | 87.4 | 92.3 | 90.0 |
| [MaskTextSpotter](https://arxiv.org/pdf/1807.02242.pdf) | 18/07/06 | 90.2 | 95.4 | 92.9 |
| [PMTD](https://arxiv.org/pdf/1903.11800.pdf) | 19/03/28 | 94.0 | 95.2 | 94.7 |
| [CRAFT](https://arxiv.org/pdf/1904.01941.pdf) | 19/04/03 | 93.6 | 96.5 | 95.1 |
### ICDAR 2015
| Detector | Date (YY/MM/DD) | Recall (%) | Precision (%) | H-mean (%) |
| :-----------------------------------------------------: | :-------------: | :--------: | :-----------: | :--------: |
| [CTPN](https://arxiv.org/pdf/1609.03605.pdf) | 16/09/12 | 85.0 | 81.1 | 67.8 |
| [RRPN](https://arxiv.org/pdf/1703.01086.pdf) | 17/03/03 | 79.5 | 85.9 | 82.6 |
| [SegLink](https://arxiv.org/pdf/1703.06520.pdf) | 17/03/19 | 77.1 | 83.9 | 80.6 |
| [EAST](https://arxiv.org/pdf/1704.03155.pdf) | 17/04/11 | 82.5 | 90.0 | 86.3 |
| [WordSup](https://arxiv.org/pdf/1708.06720) | 17/08/22 | 83.2 | 87.1 | 85.2 |
| [PixelLink](https://arxiv.org/pdf/1801.01315.pdf) | 18/01/04 | 85.7 | 86.1 | 86.0 |
| [FOTS](https://arxiv.org/pdf/1801.01671.pdf) | 18/01/05 | 89.0 | 93.4 | 91.2 |
| [TextBoxes++](https://arxiv.org/pdf/1801.02765.pdf) | 18/01/09 | 82.4 | 90.8 | 86.5 |
| [MaskTextSpotter](https://arxiv.org/pdf/1807.02242.pdf) | 18/07/06 | 82.5 | 91.8 | 86.9 |
| [PMTD](https://arxiv.org/pdf/1903.11800.pdf) | 19/03/28 | 89.2 | 92.8 | 91.0 |
| [CRAFT](https://arxiv.org/pdf/1904.01941.pdf) | 19/04/03 | 88.5 | 93.1 | 90.9 |
### Frequency
## Getting Started
### Clone repository
`git clone https://github.com/clovaai/TedEval.git`
### Requirements
- python 3
- Python packages: Polygon3, Bottle, Pillow
```python3
# install
pip3 install Polygon3 bottle Pillow
```
### Supported Annotation Type
- LTRB (xmin, ymin, xmax, ymax)
- QUAD (x1, y1, x2, y2, x3, y3, x4, y4)
## Evaluation
### Prepare data
The ground truth and the result data should be text files, one for each sample. Note that the default naming rule of each text file is that there must be `img_{number}` in the filename and that the number indicates the image sample (this can be changed in `default_evaluation_params()` in `script.py`).
```
# gt/gt_img_38.txt
644,101,932,113,932,168,643,156,concierge@L3
477,138,487,139,488,149,477,148,###
344,131,398,130,398,149,344,149,###
1195,148,1277,138,1277,177,1194,187,###
23,270,128,267,128,282,23,284,###
# result/res_img_38.txt
644,101,932,113,932,168,643,156,{Transcription},{Confidence}
477,138,487,139,488,149,477,148
344,131,398,130,398,149,344,149
1195,148,1277,138,1277,177,1194,187
23,270,128,267,128,282,23,284
```
Compress these text files without the parent directory.
```python3
zip gt.zip gt/*
zip result.zip result/*
```
Refer to `gt/result.zip` and `gt/gt_*.zip` for examples.
### Run stand-alone evaluation
```python3
python script.py -g=gt/gt.zip -s=result/result.zip
```
For evaluation setup, please refer to the following parameter list to edit `default_evaluation_params()` in `script.py`.
### Important Parameters
| name | type | default | description |
| ------------------------- | --------- | ------- | ------------------------------------------------------------- |
| AREA_RECALL_CONSTRAINT | `float` | `0.4` | area recall constraint (0 <= R <= 1) |
| AREA_PRECISION_CONSTRAINT | `float` | `0.4` | area precision constraint (0 <= P <= 1) |
| GT_LTRB | `boolean` | `False` | GT file annotation type (True if LTRB, False if QUAD) |
| DET_LTRB | `boolean` | `False` | prediction file annotation type (True if LTRB, False if QUAD) |
| TRANSCRIPTION | `boolean` | `False` | set True if result file has transcription |
| CONFIDENCES | `boolean` | `False` | set True if result file has confidence |
### Run Visualizer
```python3
python web.py
```
- Place the zip file of images and GTs of the dataset named `images.zip` and `gt.zip`, respectively, in the `gt` directory.
- Create an empty directory named `output`. This is where the DB, submission files, and result files will be created.
- You can change the host and port number in the final line of `web.py`.
The file structure should then be:
```
.
├── gt
│ ├── gt.zip
│ └── images.zip
├── output # empty dir
├── script.py
├── web.py
├── README.md
└── ...
```
## Citation
```
@article{lee2019tedeval,
title={TedEval: A Fair Evaluation Metric for Scene Text Detectors},
author={Lee, Chae Young and Baek, Youngmin and Lee, Hwalsuk},
journal={arXiv preprint arXiv:1907.01227},
year={2019}
}
```
## Contact us
We welcome any feedback on our metric. Please contact the authors via `{cylee7133, youngmin.baek, hwalsuk.lee}@gmail.com`. In case of code errors, open an issue and we will get back to you.
## License
```
Copyright (c) 2019-present NAVER Corp.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
```
================================================
FILE: config/config.py
================================================
#!/usr/bin/env python3
#encoding: UTF-8
import json

# Name of the script used for the evaluation.
evaluation_script = 'script'

# Upload instructions shown to the user.
instructions = """
A single zip file is expected, containing a set of text files.
No directory structure within the zip file is permitted, just the set of text files.
The containing text files should be named as res_img_#.txt, where # is the number of the corresponding test-set image.
Each text file should contain as many lines as text bounding boxes found. Each line should contain eight comma separated values only. The values should correspond to the coordinates of the four corners of the bounding quadrilateral of the word.
New lines in the text files should be indicated with the windows CR/LF termination.
The submitted zip file is automatically checked at the time of submission, and a submission log is presented to the user along with a confirmation of the submission. The checks performed are the following:
That the file submitted is a valid zip file, it can be opened and the contents can be extracted.
That the names of the text files contained are correct and the image numbers are within the bounds of the test set.
That each text file contains eight comma separated values per line.
That the coordinates passed are within the bounds of the image and that the coordinates are in clocwise order
"""

# Extension of the GT file: gt.[extension]
gt_ext = "zip"

# Acronym for the task. It's used to cache the images.
acronym = "IST-T1"

# Title of the task.
title = "Incidental Scene Text - Task 1 Text Localization TEST DATASET (evaluation:IoU)"

# Custom JavaScript for the visualization.
customJS = 'visualization_TL_iou.js'

# Custom CSS for the visualization.
customCSS = 'visualization_TL_iou.css'

# Parameters used to show the results of a method and the method's ranking.
method_params = json.loads("""{"recall":{"long_name":"Recall","type":"double","order":"","grafic":"1","format":"perc"},"precision":{"long_name":"Precision","type":"double","order":"","grafic":"1","format":"perc"},"hmean":{"long_name":"Hmean","type":"double","order":"desc","grafic":"1","format":"perc"}}""")

# Parameters to show for each sample.
sample_params = json.loads("""{"recall":{"long_name":"Recall","type":"double","order":"","grafic":"","format":"perc"},"precision":{"long_name":"Precision","type":"double","order":"","grafic":"","format":"perc"},"hmean":{"long_name":"Hmean","type":"double","order":"desc","grafic":"","format":"perc"}}""")

# Parameters to ask for each submission.
submit_params = json.loads("""{}""")

# Regular expression to get the Sample ID from the image name; the ID must be
# the first capturing group.
# BUGFIX: the previous pattern '*([0-9]+)*.(jpg|gif|png)' was not a valid
# regular expression (a leading '*' has nothing to repeat and re.compile
# raises). The pattern below extracts the trailing number before the extension.
image_name_to_id_str = r'.*?([0-9]+).*\.(jpg|gif|png)$'
================================================
FILE: rrc_evaluation_funcs.py
================================================
#!/usr/bin/env python3
#encoding: UTF-8
import json
import sys;sys.path.append('./')
import zipfile
import re
import sys
import os
import codecs
import importlib
from io import StringIO
def print_help():
    """Write CLI usage to stdout and terminate the process with status 2."""
    usage = f'Usage: python {sys.argv[0]}.py -g= -s= [-o= -p=]'
    sys.stdout.write(usage)
    sys.exit(2)
def load_zip_file_keys(file,fileNameRegExp=''):
    """
    List the entries of a ZIP archive that match a regular expression.

    Each returned key is the first capturing group of the match when the
    pattern defines one, otherwise the entry name itself. With an empty
    pattern every entry name is returned.
    """
    try:
        archive = zipfile.ZipFile(file, mode='r', allowZip64=True)
    except Exception:
        raise Exception('Error loading the ZIP archive.')

    keys = []
    for entry in archive.namelist():
        if not fileNameRegExp:
            keys.append(entry)
            continue
        match = re.match(fileNameRegExp, entry)
        if match is None:
            continue  # entry filtered out by the pattern
        keys.append(match.group(1) if match.groups() else entry)
    return keys
def load_zip_file(file,fileNameRegExp='',allEntries=False):
    """
    Return a dict mapping sample keys to the raw contents of a ZIP file.

    Every entry is accepted; its key is the entry name with the 'gt_'/'res_'
    prefixes and the '.txt' extension stripped (e.g. 'gt_img_1.txt' -> 'img_1').

    NOTE: fileNameRegExp and allEntries are kept for interface compatibility
    but are ignored — the previous regexp filtering was commented out, which
    made the allEntries rejection branch unreachable; that dead code has been
    removed without changing behavior.

    Raises an Exception when the archive cannot be opened.
    """
    try:
        archive = zipfile.ZipFile(file, mode='r', allowZip64=True)
    except Exception:
        raise Exception('Error loading the ZIP archive')

    pairs = []
    for name in archive.namelist():
        keyName = name.replace('gt_', '').replace('res_', '').replace('.txt', '')
        pairs.append([keyName, archive.read(name)])
    return dict(pairs)
def decode_utf8(raw):
    """
    Decode *raw* (a bytes-like object) as UTF-8 and return a str, or None on
    failure (e.g. *raw* is not bytes-like).

    A leading UTF-8 BOM is stripped; undecodable byte sequences are replaced
    with U+FFFD. This replaces the original decode -> re-encode -> BOM-strip
    -> decode round-trip with a single equivalent pass.
    """
    try:
        if raw.startswith(codecs.BOM_UTF8):
            raw = raw[len(codecs.BOM_UTF8):]
        return raw.decode('utf-8', 'replace')
    except Exception:
        return None
def validate_lines_in_file(fileName,file_contents,CRLF=True,LTRB=True,withTranscription=False,withConfidence=False,imWidth=0,imHeight=0):
    """
    Check that the contents of a sample file decode as UTF-8.

    Per-line format validation is currently disabled (the original call to
    validate_tl_line is commented out upstream), so only the encoding is
    enforced; the remaining parameters are kept for interface compatibility.

    Raises an Exception when the file is not valid UTF-8.
    """
    decoded = decode_utf8(file_contents)
    if decoded is None:
        raise Exception("The file %s is not UTF-8" %fileName)
    # Lines are split on the configured terminator but not validated further.
    decoded.split("\r\n" if CRLF else "\n")
def validate_tl_line(line,LTRB=True,withTranscription=True,withConfidence=True,imWidth=0,imHeight=0):
    """
    Validate the format of the line. If the line is not valid an exception will be raised.
    If imWidth and imHeight are specified, all points must be inside the image bounds.
    Possible values are:
    LTRB=True: xmin,ymin,xmax,ymax[,confidence][,transcription]
    LTRB=False: x1,y1,x2,y2,x3,y3,x4,y4[,confidence][,transcription]
    """
    # Delegates entirely to get_tl_line_values; the parsed values are discarded
    # and only the validation side effect (raising on bad input) is used.
    get_tl_line_values(line,LTRB,withTranscription,withConfidence,imWidth,imHeight)
def get_tl_line_values(line,LTRB=True,withTranscription=False,withConfidence=False,imWidth=0,imHeight=0):
    """
    Validate the format of the line and extract its values. If the line is not
    valid an exception will be raised.
    If imWidth and imHeight are specified, all points must be inside the image bounds.
    Possible formats are:
    LTRB=True: xmin,ymin,xmax,ymax[,confidence][,transcription]
    LTRB=False: x1,y1,x2,y2,x3,y3,x4,y4[,confidence][,transcription]
    Returns values from a textline: (points, confidence, transcription).
    """
    confidence = 0.0
    transcription = ""
    points = []

    if LTRB:
        numPoints = 4
        if withTranscription and withConfidence:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-1].?[0-9]*)\s*,(.*)$',line)
            if m == None :
                raise Exception("Format incorrect. Should be: xmin,ymin,xmax,ymax,confidence,transcription")
        elif withConfidence:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-1].?[0-9]*)\s*$',line)
            if m == None :
                raise Exception("Format incorrect. Should be: xmin,ymin,xmax,ymax,confidence")
        elif withTranscription:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,(.*)$',line)
            if m == None :
                raise Exception("Format incorrect. Should be: xmin,ymin,xmax,ymax,transcription")
        else:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,?\s*$',line)
            if m == None :
                raise Exception("Format incorrect. Should be: xmin,ymin,xmax,ymax")

        xmin = int(m.group(1))
        ymin = int(m.group(2))
        xmax = int(m.group(3))
        ymax = int(m.group(4))
        # BUGFIX: `points` was never populated on the LTRB path, so callers
        # always received an empty list; mirror the QUAD branch instead.
        points = [ float(m.group(i)) for i in range(1, (numPoints+1) ) ]
        # BUGFIX: the guard read `if(xmax0 and imHeight>0):` — `xmax0` is an
        # undefined name (NameError at runtime). The intended condition,
        # matching the QUAD branch below, is the image-bounds guard.
        if (imWidth>0 and imHeight>0):
            validate_point_inside_bounds(xmin,ymin,imWidth,imHeight)
            validate_point_inside_bounds(xmax,ymax,imWidth,imHeight)
    else:
        numPoints = 8
        if withTranscription and withConfidence:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-1].?[0-9]*)\s*,(.*)$',line)
            if m == None :
                raise Exception("Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4,confidence,transcription")
        elif withConfidence:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-1].?[0-9]*)\s*$',line)
            if m == None :
                raise Exception("Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4,confidence")
        elif withTranscription:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,(.*)$',line)
            if m == None :
                raise Exception("Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4,transcription")
        else:
            # A single trailing comma is tolerated. BUGFIX: guard against an
            # empty line before indexing line[-1] (was an IndexError).
            if line and line[-1] == ',' : line = line[:-1]
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*$',line)
            if m == None :
                raise Exception("Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4")

        points = [ float(m.group(i)) for i in range(1, (numPoints+1) ) ]
        validate_clockwise_points(points)
        if (imWidth>0 and imHeight>0):
            validate_point_inside_bounds(points[0],points[1],imWidth,imHeight)
            validate_point_inside_bounds(points[2],points[3],imWidth,imHeight)
            validate_point_inside_bounds(points[4],points[5],imWidth,imHeight)
            validate_point_inside_bounds(points[6],points[7],imWidth,imHeight)

    if withConfidence:
        try:
            confidence = float(m.group(numPoints+1))
        except ValueError:
            raise Exception("Confidence value must be a float")

    if withTranscription:
        posTranscription = numPoints + (2 if withConfidence else 1)
        transcription = m.group(posTranscription)
        m2 = re.match(r'^\s*\"(.*)\"\s*$',transcription)
        if m2 != None : # Transcription with double quotes: extract the value and unescape \" and \\
            transcription = m2.group(1).replace("\\\\", "\\").replace("\\\"", "\"")

    return points,confidence,transcription
def validate_point_inside_bounds(x,y,imWidth,imHeight):
    """
    Raise an Exception when the point (x, y) lies outside the image rectangle
    [0, imWidth] x [0, imHeight].
    """
    if(x<0 or x>imWidth):
        # BUGFIX: the message formatted with the undefined name `xmin`,
        # which itself raised a NameError instead of the intended Exception.
        raise Exception("X value (%s) not valid. Image dimensions: (%s,%s)" %(x,imWidth,imHeight))
    if(y<0 or y>imHeight):
        # BUGFIX: the message referenced the undefined name `ymin` and had
        # two extra %s placeholders with no matching arguments.
        raise Exception("Y value (%s) not valid. Image dimensions: (%s,%s)" %(y,imWidth,imHeight))
def validate_clockwise_points(points):
    """
    Check that *points* describes a quadrilateral: exactly 8 coordinates
    (x1,y1,...,x4,y4). Raises an Exception otherwise.

    The actual clockwise-order assertion is intentionally disabled; the signed
    edge sum is still computed for reference but never acted upon.
    """
    if len(points) != 8:
        raise Exception("Points list not valid." + str(len(points)))

    corners = [(int(points[i]), int(points[i + 1])) for i in range(0, 8, 2)]
    # Shoelace-style signed sum over the closed polygon; positive would mean
    # counter-clockwise in image coordinates (origin top-left, y down).
    summatory = sum(
        (nxt[0] - cur[0]) * (nxt[1] + cur[1])
        for cur, nxt in zip(corners, corners[1:] + corners[:1])
    )
    # NOTE: the clockwise rejection (`if summatory > 0: raise ...`) is
    # deliberately disabled upstream; only the point count is enforced.
def get_tl_line_values_from_file_contents(content,CRLF=True,LTRB=True,withTranscription=False,withConfidence=False,imWidth=0,imHeight=0,sort_by_confidences=True):
    """
    Parse every non-empty line of *content* and collect its values. Valid line formats:
    xmin,ymin,xmax,ymax,[confidence],[transcription]
    x1,y1,x2,y2,x3,y3,x4,y4,[confidence],[transcription]

    Returns (pointsList, confidencesList, transcriptionsList). When confidences
    are parsed and sorting is requested, all three lists are reordered by
    descending confidence.
    """
    pointsList = []
    transcriptionsList = []
    confidencesList = []

    terminator = "\r\n" if CRLF else "\n"
    for raw in content.split(terminator):
        stripped = raw.replace("\r","").replace("\n","")
        if stripped == "":
            continue
        pts, conf, txt = get_tl_line_values(stripped,LTRB,withTranscription,withConfidence,imWidth,imHeight)
        pointsList.append(pts)
        transcriptionsList.append(txt)
        confidencesList.append(conf)

    if withConfidence and confidencesList and sort_by_confidences:
        import numpy as np
        order = np.argsort(-np.array(confidencesList))
        pointsList = [pointsList[i] for i in order]
        confidencesList = [confidencesList[i] for i in order]
        transcriptionsList = [transcriptionsList[i] for i in order]

    return pointsList,confidencesList,transcriptionsList
def main_evaluation(p,default_evaluation_params_fn,validate_data_fn,evaluate_method_fn,show_result=True,per_sample=True):
    """
    This process validates a method, evaluates it and, if it succeeds, generates a ZIP file with a JSON entry for each sample.
    Params:
    p: Dictionary of parameters with the GT/submission locations ('g'/'s', optionally 'o' output dir and 'p' overrides). If None is passed, the parameters sent by the system (sys.argv) are used.
    default_evaluation_params_fn: points to a function that returns a dictionary with the default parameters used for the evaluation
    validate_data_fn: points to a method that validates the correct format of the submission
    evaluate_method_fn: points to a function that evaluates the submission and returns a Dictionary with the results
    show_result: when True, writes a summary to stdout/stderr
    per_sample: when True, one JSON entry per sample is added to the results ZIP
    Returns the results dictionary.
    """
    # Fall back to command-line flags of the form -k=value when no explicit
    # parameter dict was supplied.
    if (p == None):
        p = dict([s[1:].split('=') for s in sys.argv[1:]])
        if(len(sys.argv)<3):
            print_help()

    evalParams = default_evaluation_params_fn()
    # Optional overrides: 'p' may be a dict or a JSON string wrapped in one
    # extra pair of delimiter characters (hence the [1:-1] slice).
    if 'p' in p.keys():
        evalParams.update( p['p'] if isinstance(p['p'], dict) else json.loads(p['p'][1:-1]) )

    resDict={'calculated':True,'Message':'','method':'{}','per_sample':'{}'}
    validate_data_fn(p['g'], p['s'], evalParams)
    evalData = evaluate_method_fn(p['g'], p['s'], evalParams)
    resDict.update(evalData)

    # When an output directory was given, persist the method-level summary to
    # <o>/results.zip. Per-sample and output items are stripped from the
    # summary JSON (they are written as separate ZIP entries below).
    # NOTE(review): `outZip` is only bound inside this branch; later uses are
    # guarded by the same `'o' in p` condition.
    if 'o' in p:
        if not os.path.exists(p['o']):
            os.makedirs(p['o'])
        resultsOutputname = p['o'] + '/results.zip'
        outZip = zipfile.ZipFile(resultsOutputname, mode='w', allowZip64=True)
        # Deleting from resDict does not affect evalData, which is read below.
        del resDict['per_sample']
        if 'output_items' in resDict.keys():
            del resDict['output_items']
        outZip.writestr('method.json',json.dumps(resDict))

    # Early exit on a failed evaluation ('calculated' can only be False when
    # evaluate_method_fn overrides it).
    if not resDict['calculated']:
        if show_result:
            sys.stderr.write('Error!\n'+ resDict['Message'])
        if 'o' in p:
            outZip.close()
        return resDict

    # Success path: write one JSON entry per sample plus any raw output items.
    if 'o' in p:
        if per_sample == True:
            for k,v in evalData['per_sample'].items():
                outZip.writestr( k + '.json',json.dumps(v))
            if 'output_items' in evalData.keys():
                for k, v in evalData['output_items'].items():
                    outZip.writestr( k,v)
        outZip.close()

    if show_result:
        sys.stdout.write("Calculated!")
        sys.stdout.write(json.dumps(resDict['method']))

    return resDict
def main_validation(default_evaluation_params_fn,validate_data_fn):
    """
    Validate a submission using the command-line arguments and exit.

    Params:
    default_evaluation_params_fn: returns the default evaluation parameters dict
    validate_data_fn: validates the correct format of the submission

    Reads -g=/-s= (and optional -p=) flags from sys.argv, prints 'SUCCESS' and
    exits with status 0 when validation passes; validate_data_fn raises otherwise.
    """
    params = dict(arg[1:].split('=') for arg in sys.argv[1:])
    evalParams = default_evaluation_params_fn()
    if 'p' in params:
        override = params['p']
        # 'p' may already be a dict, or a JSON string wrapped in one extra
        # pair of delimiter characters (hence the [1:-1] slice).
        evalParams.update(override if isinstance(override, dict) else json.loads(override[1:-1]))
    validate_data_fn(params['g'], params['s'], evalParams)
    print ('SUCCESS')
    sys.exit(0)
================================================
FILE: script.py
================================================
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import namedtuple
import rrc_evaluation_funcs
import importlib
import math
def evaluation_imports():
    """
    Map each python module required by the evaluation to the alias under
    which the evaluation code imports it (key = module name, value = alias).
    """
    return dict(Polygon='plg', numpy='np')
def default_evaluation_params():
    """
    default_evaluation_params: Default parameters to use for the validation and evaluation.
    """
    return dict(
        AREA_RECALL_CONSTRAINT=0.4,
        AREA_PRECISION_CONSTRAINT=0.4,
        EV_PARAM_IND_CENTER_DIFF_THR=1,
        GT_SAMPLE_NAME_2_ID='.*([0-9]+).*',
        DET_SAMPLE_NAME_2_ID='.*([0-9]+).*',
        GT_LTRB=False,             # LTRB: 2points(left,top,right,bottom) or 4 points(x1,y1,x2,y2,x3,y3,x4,y4)
        GT_CRLF=False,             # Lines are delimited by Windows CRLF format
        DET_LTRB=False,            # LTRB: 2points(left,top,right,bottom) or 4 points(x1,y1,x2,y2,x3,y3,x4,y4)
        DET_CRLF=False,            # Lines are delimited by Windows CRLF format
        CONFIDENCES=False,         # Detections must include confidence value. AP will be calculated
        TRANSCRIPTION=False,       # Does prediction has transcription or not
        PER_SAMPLE_RESULTS=True,   # Generate per sample results and produce data for visualization
    )
def validate_data(gtFilePath, submFilePath, evaluationParams):
    """
    Method validate_data: validates that all files in the results folder are correct (have the correct name contents).
    Validates also that there are no missing files in the folder.
    If some error detected, the method raises the error
    """
    gt = rrc_evaluation_funcs.load_zip_file(gtFilePath, evaluationParams['GT_SAMPLE_NAME_2_ID'])
    subm = rrc_evaluation_funcs.load_zip_file(submFilePath, evaluationParams['DET_SAMPLE_NAME_2_ID'], True)

    # Every ground-truth file must parse under the GT format flags
    # (transcriptions are mandatory in GT, hence the trailing True).
    for sampleId in gt:
        rrc_evaluation_funcs.validate_lines_in_file(
            sampleId, gt[sampleId],
            evaluationParams['GT_CRLF'], evaluationParams['GT_LTRB'], True)

    # Every submitted sample must exist in the GT and parse under the DET flags.
    for sampleId in subm:
        if sampleId not in gt:
            raise Exception("The sample %s not present in GT" % sampleId)
        rrc_evaluation_funcs.validate_lines_in_file(
            sampleId, subm[sampleId],
            evaluationParams['DET_CRLF'], evaluationParams['DET_LTRB'],
            evaluationParams['TRANSCRIPTION'], evaluationParams['CONFIDENCES'])
def evaluate_method(gtFilePath, submFilePath, evaluationParams):
    """
    Method evaluate_method: evaluate method and returns the results
    Results. Dictionary with the following values:
    - method (required) Global method metrics. Ex: { 'Precision':0.8,'Recall':0.9 }
    - samples (optional) Per sample metrics. Ex: {'sample1' : { 'Precision':0.8,'Recall':0.9 } , 'sample2' : { 'Precision':0.8,'Recall':0.9 }

    Implements the TedEval protocol: polygons are matched one-to-one,
    one-to-many and many-to-one; recall/precision are then scored at the
    pseudo-character level (character centers interpolated along each GT box).
    """
    # Bind the evaluation dependencies (Polygon -> plg, numpy -> np) into
    # module globals so the nested helpers below can use the aliases.
    for module,alias in evaluation_imports().items():
        globals()[alias] = importlib.import_module(module)

    def polygon_from_points(points):
        """
        Returns a Polygon object to use with the Polygon2 class from a list of 8 points: x1,y1,x2,y2,x3,y3,x4,y4
        """
        resBoxes=np.empty([1,8],dtype='int32')
        # Layout: first 4 slots are x coords, last 4 are y coords, so the
        # reshape below yields a (4,2) point matrix.
        resBoxes[0,0]=int(points[0])
        resBoxes[0,4]=int(points[1])
        resBoxes[0,1]=int(points[2])
        resBoxes[0,5]=int(points[3])
        resBoxes[0,2]=int(points[4])
        resBoxes[0,6]=int(points[5])
        resBoxes[0,3]=int(points[6])
        resBoxes[0,7]=int(points[7])
        pointMat = resBoxes[0].reshape([2,4]).T
        return plg.Polygon( pointMat)

    def rectangle_to_polygon(rect):
        # Convert an axis-aligned Rectangle namedtuple into a 4-corner polygon.
        resBoxes=np.empty([1,8],dtype='int32')
        resBoxes[0,0]=int(rect.xmin)
        resBoxes[0,4]=int(rect.ymin)
        resBoxes[0,1]=int(rect.xmax)
        resBoxes[0,5]=int(rect.ymin)
        resBoxes[0,2]=int(rect.xmax)
        resBoxes[0,6]=int(rect.ymax)
        resBoxes[0,3]=int(rect.xmin)
        resBoxes[0,7]=int(rect.ymax)
        pointMat = resBoxes[0].reshape([2,4]).T
        return plg.Polygon( pointMat)

    def rectangle_to_points(rect):
        # Rectangle -> flat 8-value point list (clockwise from bottom-left).
        points = [int(rect.xmin), int(rect.ymax), int(rect.xmax), int(rect.ymax), int(rect.xmax), int(rect.ymin), int(rect.xmin), int(rect.ymin)]
        return points

    def polygon_to_points(pol):
        # Flatten all contours of a Polygon into a single [x1,y1,x2,y2,...] list.
        pointMat = []
        for p in pol:
            for i in range(len(p)):
                pointMat.extend(p[i])
        return pointMat

    def get_intersection(pD,pG):
        # Area of the clipped intersection of two polygons; 0 when disjoint.
        pInt = pD & pG
        if len(pInt) == 0:
            return 0
        return pInt.area()

    def compute_ap(confList, matchList,numGtCare):
        """Average precision over detections sorted by descending confidence."""
        correct = 0
        AP = 0
        if len(confList)>0:
            confList = np.array(confList)
            matchList = np.array(matchList)
            sorted_ind = np.argsort(-confList)
            confList = confList[sorted_ind]
            matchList = matchList[sorted_ind]
            for n in range(len(confList)):
                match = matchList[n]
                if match:
                    correct += 1
                    AP += float(correct)/(n + 1)
            if numGtCare>0:
                AP /= numGtCare
        return AP

    def point_distance(a, b):
        # Euclidean distance between two (x, y) points.
        distx = math.fabs(a[0] - b[0])
        disty = math.fabs(a[1] - b[1])
        return math.sqrt(distx * distx + disty * disty)

    def diag(points):
        # Mean of the two diagonals of an 8-value quadrilateral.
        diag1 = point_distance((points[0], points[1]), (points[4], points[5]))
        diag2 = point_distance((points[2], points[3]), (points[6], points[7]))
        return (diag1 + diag2) / 2

    def center_distance(p1, p2):
        # Distance between the centroids of two polygons.
        return point_distance(p1.center(), p2.center())

    def get_midpoints(p1,p2):
        return ((p1[0]+p2[0])/2, (p1[1]+p2[1])/2)

    def get_angle_3pt(a, b, c):
        """Counterclockwise angle in degrees by turning from a to c around b

        Returns a float between 0.0 and 360.0"""
        ang = math.degrees(
            math.atan2(c[1]-b[1], c[0]-b[0]) - math.atan2(a[1]-b[1], a[0]-b[0]))
        return ang + 360 if ang < 0 else ang

    def gtBoxtoChars(num, points):
        """Interpolate `num` pseudo-character centers along the box midline."""
        chars = []
        assert len(points) == 8
        # Midline endpoints: midpoints of the two "vertical" edges.
        p1 = get_midpoints([points[0],points[1]], [points[6],points[7]])
        p2 = get_midpoints([points[2],points[3]], [points[4],points[5]])
        unitx = (p2[0] - p1[0]) / num
        unity = (p2[1] - p1[1]) / num
        for i in range(num):
            # Centers sit at the middle of each equal-length segment.
            x = p1[0] + unitx/2 + unitx * i
            y = p1[1] + unity/2 + unity * i
            chars.append((x,y))
        return chars

    def char_fill(detNums, matchMat):
        # Mark (gt, det, char) triples whose pseudo-character center falls
        # inside a matched detection polygon.
        for detNum in detNums:
            detPol = detPols[detNum]
            for gtNum, gtChars in enumerate(gtCharPoints):
                if matchMat[gtNum, detNum] == 1:
                    for gtCharNum, gtChar in enumerate(gtChars):
                        if detPol.isInside(gtChar[0], gtChar[1]):
                            gtCharCounts[gtNum][detNum][gtCharNum] = 1

    def one_to_one_match(row, col):
        # True when (row, col) is the unique pair in its row AND column that
        # passes both area constraints.
        cont = 0
        for j in range(len(recallMat[0])):
            if recallMat[row,j] >= evaluationParams['AREA_RECALL_CONSTRAINT'] and precisionMat[row,j] >= evaluationParams['AREA_PRECISION_CONSTRAINT'] :
                cont = cont +1
        if (cont != 1):
            return False
        cont = 0
        for i in range(len(recallMat)):
            if recallMat[i,col] >= evaluationParams['AREA_RECALL_CONSTRAINT'] and precisionMat[i,col] >= evaluationParams['AREA_PRECISION_CONSTRAINT'] :
                cont = cont +1
        if (cont != 1):
            return False
        if recallMat[row,col] >= evaluationParams['AREA_RECALL_CONSTRAINT'] and precisionMat[row,col] >= evaluationParams['AREA_PRECISION_CONSTRAINT'] :
            return True
        return False

    def one_to_many_match(gtNum):
        # One GT split across several detections: their recall sum must pass
        # the recall constraint and the detections must be roughly collinear
        # (pairwise pivot angle < 45 degrees).
        many_sum = 0
        detRects = []
        for detNum in range(len(recallMat[0])):
            if detNum not in detDontCarePolsNum and gtExcludeMat[gtNum] == 0 and detExcludeMat[detNum] == 0:
                if precisionMat[gtNum,detNum] >= evaluationParams['AREA_PRECISION_CONSTRAINT']:
                    many_sum += recallMat[gtNum,detNum]
                    detRects.append(detNum)
        if many_sum >= evaluationParams['AREA_RECALL_CONSTRAINT'] and len(detRects) >= 2:
            pivots = []
            for matchDet in detRects:
                pD = polygon_from_points(detPolPoints[matchDet])
                # [edge midpoint, centroid] pair per detection; assumes the
                # contour's points 0 and 3 bound the left edge — TODO confirm.
                pivots.append([get_midpoints(pD[0][0], pD[0][3]), pD.center()])
            for i in range(len(pivots)):
                for k in range(len(pivots)):
                    if k == i:
                        continue
                    angle = get_angle_3pt(pivots[i][0], pivots[k][1], pivots[i][1])
                    if angle > 180:
                        angle = 360 - angle
                    if min(angle, 180 - angle) >= 45:
                        return False, []
            return True, detRects
        else:
            return False, []

    def many_to_one_match(detNum):
        # One detection covering several GT boxes: mirror of one_to_many_match
        # with precision summed under the recall constraint.
        many_sum = 0
        gtRects = []
        for gtNum in range(len(recallMat)):
            if gtNum not in gtDontCarePolsNum and gtExcludeMat[gtNum] == 0 and detExcludeMat[detNum] == 0:
                if recallMat[gtNum,detNum] >= evaluationParams['AREA_RECALL_CONSTRAINT']:
                    many_sum += precisionMat[gtNum,detNum]
                    gtRects.append(gtNum)
        if many_sum >= evaluationParams['AREA_PRECISION_CONSTRAINT'] and len(gtRects) >= 2:
            pivots = []
            for matchGt in gtRects:
                pG = gtPols[matchGt]
                pivots.append([get_midpoints(pG[0][0], pG[0][3]), pG.center()])
            for i in range(len(pivots)):
                for k in range(len(pivots)):
                    if k == i:
                        continue
                    angle = get_angle_3pt(pivots[i][0], pivots[k][1], pivots[i][1])
                    if angle > 180:
                        angle = 360 - angle
                    if min(angle, 180 - angle) >= 45:
                        return False, []
            return True, gtRects
        else:
            return False, []

    perSampleMetrics = {}

    methodRecallSum = 0
    methodPrecisionSum = 0

    Rectangle = namedtuple('Rectangle', 'xmin ymin xmax ymax')

    gt = rrc_evaluation_funcs.load_zip_file(gtFilePath,evaluationParams['GT_SAMPLE_NAME_2_ID'])
    subm = rrc_evaluation_funcs.load_zip_file(submFilePath,evaluationParams['DET_SAMPLE_NAME_2_ID'],True)

    numGlobalCareGt = 0;
    numGlobalCareDet = 0;

    arrGlobalConfidences = [];
    arrGlobalMatches = [];

    for resFile in gt:

        gtFile = rrc_evaluation_funcs.decode_utf8(gt[resFile])
        recall = 0
        precision = 0
        hmean = 0
        recallAccum = 0.
        precisionAccum = 0.
        detMatched = 0
        numGtCare = 0
        numDetCare = 0
        recallMat = np.empty([1,1])
        precisionMat = np.empty([1,1])
        matchMat = np.zeros([1,1])
        gtPols = []
        detPols = []
        gtPolPoints = []
        detPolPoints = []

        # pseudo character centers
        gtCharPoints = []
        gtCharCounts = []

        # visualization
        charCounts = np.zeros([1,1])
        recallScore = list()
        precisionScore = list()

        #Array of Ground Truth Polygons' keys marked as don't Care
        gtDontCarePolsNum = []
        #Array of Detected Polygons' matched with a don't Care GT
        detDontCarePolsNum = []

        pairs = []
        detMatchedNums = []
        gtExcludeNums = []

        arrSampleConfidences = [];
        arrSampleMatch = [];
        sampleAP = 0;

        evaluationLog = ""

        pointsList,_,transcriptionsList = rrc_evaluation_funcs.get_tl_line_values_from_file_contents(gtFile, evaluationParams['GT_CRLF'], evaluationParams['GT_LTRB'], True, False)
        for n in range(len(pointsList)):
            points = pointsList[n]
            transcription = transcriptionsList[n]
            # "###" is the don't-care marker in the GT transcription field.
            dontCare = transcription == "###"
            if evaluationParams['GT_LTRB']:
                gtRect = Rectangle(*points)
                gtPol = rectangle_to_polygon(gtRect)
                points = polygon_to_points(gtPol)
            else:
                gtPol = polygon_from_points(points)
            gtPols.append(gtPol)
            if dontCare:
                gtDontCarePolsNum.append( len(gtPols)-1 )
                gtPolPoints.append(points)
                gtCharPoints.append([])
            else:
                gtCharSize = len(transcription)
                aspect_ratio = gtPol.aspectRatio()
                # Tall boxes (aspect ratio > 1.5) are treated as vertical text:
                # rotate the point order so pseudo-chars run top-to-bottom.
                if aspect_ratio > 1.5:
                    points_ver = [points[6], points[7], points[0], points[1], points[2], points[3], points[4], points[5]]
                    gtPolPoints.append(points_ver)
                    gtCharPoints.append(gtBoxtoChars(gtCharSize, points_ver))
                else:
                    gtCharPoints.append(gtBoxtoChars(gtCharSize, points))
                    gtPolPoints.append(points)

        evaluationLog += "GT polygons: " + str(len(gtPols)) + (" (" + str(len(gtDontCarePolsNum)) + " don't care)\n" if len(gtDontCarePolsNum)>0 else "\n")

        # GT Don't Care overlap: subtract care regions from overlapping
        # don't-care polygons so they cannot absorb valid detections.
        for DontCare in gtDontCarePolsNum:
            for gtNum in list(set(range(len(gtPols))) - set(gtDontCarePolsNum)):
                if get_intersection(gtPols[gtNum], gtPols[DontCare]) > 0:
                    gtPols[DontCare] -= gtPols[gtNum]

        if resFile in subm:
            detFile = rrc_evaluation_funcs.decode_utf8(subm[resFile])
            pointsList,confidencesList,_ = rrc_evaluation_funcs.get_tl_line_values_from_file_contents(detFile,evaluationParams['DET_CRLF'],evaluationParams['DET_LTRB'],evaluationParams['TRANSCRIPTION'],evaluationParams['CONFIDENCES'])
            for n in range(len(pointsList)):
                points = pointsList[n]
                if evaluationParams['DET_LTRB']:
                    detRect = Rectangle(*points)
                    detPol = rectangle_to_polygon(detRect)
                    points = polygon_to_points(detPol)
                else:
                    detPol = polygon_from_points(points)
                detPols.append(detPol)
                detPolPoints.append(points)

            evaluationLog += "DET polygons: " + str(len(detPols))

            if len(gtPols)>0 and len(detPols)>0:
                #Calculate IoU and precision matrixs
                outputShape=[len(gtPols),len(detPols)]
                recallMat = np.empty(outputShape)
                precisionMat = np.empty(outputShape)
                matchMat = np.zeros(outputShape)
                gtRectMat = np.zeros(len(gtPols),np.int8)
                detRectMat = np.zeros(len(detPols),np.int8)
                gtExcludeMat = np.zeros(len(gtPols),np.int8)
                detExcludeMat = np.zeros(len(detPols),np.int8)
                for gtNum in range(len(gtPols)):
                    detCharCounts = []
                    for detNum in range(len(detPols)):
                        pG = gtPols[gtNum]
                        pD = detPols[detNum]
                        intersected_area = get_intersection(pD,pG)
                        recallMat[gtNum,detNum] = 0 if pG.area()==0 else intersected_area / pG.area()
                        precisionMat[gtNum,detNum] = 0 if pD.area()==0 else intersected_area / pD.area()
                        detCharCounts.append(np.zeros(len(gtCharPoints[gtNum])))
                    gtCharCounts.append(detCharCounts)

                # Find detection Don't Care
                if len(gtDontCarePolsNum)>0 :
                    for detNum in range(len(detPols)):
                        # many-to-one
                        many_sum = 0
                        for gtNum in gtDontCarePolsNum:
                            if recallMat[gtNum, detNum] > evaluationParams['AREA_RECALL_CONSTRAINT']:
                                many_sum += precisionMat[gtNum, detNum]
                        if many_sum >= evaluationParams['AREA_PRECISION_CONSTRAINT']:
                            detDontCarePolsNum.append(detNum)
                        else:
                            # single don't-care overlap is enough to exclude
                            for gtNum in gtDontCarePolsNum:
                                if precisionMat[gtNum, detNum] > evaluationParams['AREA_PRECISION_CONSTRAINT']:
                                    detDontCarePolsNum.append(detNum)
                                    break

                        # many-to-one for mixed DC and non-DC: clip away the
                        # don't-care area from partially-overlapping detections.
                        for gtNum in gtDontCarePolsNum:
                            if recallMat[gtNum, detNum] > 0:
                                detPols[detNum] -= gtPols[gtNum]

                evaluationLog += " (" + str(len(detDontCarePolsNum)) + " don't care)\n" if len(detDontCarePolsNum)>0 else "\n"

                # Recalculate matrices (detection polygons may have been clipped above)
                for gtNum in range(len(gtPols)):
                    for detNum in range(len(detPols)):
                        pG = gtPols[gtNum]
                        pD = detPols[detNum]
                        intersected_area = get_intersection(pD,pG)
                        recallMat[gtNum,detNum] = 0 if pG.area()==0 else intersected_area / pG.area()
                        precisionMat[gtNum,detNum] = 0 if pD.area()==0 else intersected_area / pD.area()

                # Find many-to-one matches
                evaluationLog += "Find many-to-one matches\n"
                for detNum in range(len(detPols)):
                    if detNum not in detDontCarePolsNum:
                        match, matchesGt = many_to_one_match(detNum)
                        if match:
                            pairs.append({'gt':matchesGt, 'det':[detNum], 'type':'MO'})
                            evaluationLog += "Match GT #" + str(matchesGt) + " with Det #" + str(detNum) + "\n"

                # Find one-to-one matches
                evaluationLog += "Find one-to-one matches\n"
                for gtNum in range(len(gtPols)):
                    for detNum in range(len(detPols)):
                        if gtNum not in gtDontCarePolsNum and detNum not in detDontCarePolsNum :
                            match = one_to_one_match(gtNum, detNum)
                            if match:
                                # Extra gate: centroid distance normalized by
                                # the mean diagonals must stay under threshold.
                                normDist = center_distance(gtPols[gtNum], detPols[detNum]);
                                normDist /= diag(gtPolPoints[gtNum]) + diag(detPolPoints[detNum]);
                                normDist *= 2.0;
                                if normDist < evaluationParams['EV_PARAM_IND_CENTER_DIFF_THR'] :
                                    pairs.append({'gt':[gtNum],'det':[detNum],'type':'OO'})
                                    evaluationLog += "Match GT #" + str(gtNum) + " with Det #" + str(detNum) + "\n"

                # Find one-to-many matches
                evaluationLog += "Find one-to-many matches\n"
                for gtNum in range(len(gtPols)):
                    if gtNum not in gtDontCarePolsNum:
                        match, matchesDet = one_to_many_match(gtNum)
                        if match:
                            pairs.append({'gt':[gtNum], 'det':matchesDet, 'type':'OM'})
                            evaluationLog += "Match Gt #" + str(gtNum) + " with Det #" + str(matchesDet) + "\n"

                # Fill match matrix
                for pair in pairs:
                    matchMat[pair['gt'],pair['det']] = 1

                # Fill character matrix (only detections that matched something)
                char_fill(np.where(matchMat.sum(axis=0) > 0)[0], matchMat)

                # Recall score: fraction of pseudo-chars covered exactly once per matched GT
                for gtNum in range(len(gtRectMat)):
                    if matchMat.sum(axis=1)[gtNum] > 0:
                        recallAccum += len(np.where(sum(gtCharCounts[gtNum]) == 1)[0]) / len(gtCharPoints[gtNum])
                        # NOTE(review): the two branches below build identical
                        # strings — markup appears stripped by extraction.
                        if len(np.where(sum(gtCharCounts[gtNum]) == 1)[0]) / len(gtCharPoints[gtNum]) < 1:
                            recallScore.append("" + str(len(np.where(sum(gtCharCounts[gtNum]) == 1)[0])) + "/" + str(len(gtCharPoints[gtNum])) + "")
                        else: recallScore.append(str(len(np.where(sum(gtCharCounts[gtNum]) == 1)[0])) + "/" + str(len(gtCharPoints[gtNum])))
                    else: recallScore.append("")

                # Precision score: fraction of claimed pseudo-chars actually contained
                for detNum in range(len(detRectMat)):
                    if matchMat.sum(axis=0)[detNum] > 0:
                        detTotal = 0; detContain = 0
                        for gtNum in range(len(gtRectMat)):
                            if matchMat[gtNum, detNum] > 0:
                                detTotal += len(gtCharCounts[gtNum][detNum])
                                detContain += len(np.where(gtCharCounts[gtNum][detNum] == 1)[0])
                        precisionAccum += detContain / detTotal
                        if detContain / detTotal < 1:
                            precisionScore.append("" + str(detContain) + "/" + str(detTotal) + "")
                        else: precisionScore.append(str(detContain) + "/" + str(detTotal))
                    else:
                        precisionScore.append("")

                # Visualization: per-(gt, det) covered character counts
                charCounts = np.zeros((len(gtRectMat), len(detRectMat)))
                for gtNum in range(len(gtRectMat)):
                    for detNum in range(len(detRectMat)):
                        charCounts[gtNum][detNum] = sum(gtCharCounts[gtNum][detNum])

            if evaluationParams['CONFIDENCES']:
                for detNum in range(len(detPols)):
                    if detNum not in detDontCarePolsNum :
                        # NOTE(review): detMatchedNums is never populated in
                        # this function, so `match` is always False here — verify.
                        match = detNum in detMatchedNums

                        arrSampleConfidences.append(confidencesList[detNum])
                        arrSampleMatch.append(match)

                        arrGlobalConfidences.append(confidencesList[detNum]);
                        arrGlobalMatches.append(match);

        numGtCare = (len(gtPols) - len(gtDontCarePolsNum))
        numDetCare = (len(detPols) - len(detDontCarePolsNum))
        if numGtCare == 0:
            # No care GT: recall is trivially 1; precision is 1 only when
            # there are no care detections either.
            recall = float(1)
            precision = float(0) if numDetCare >0 else float(1)
            sampleAP = precision
        else:
            recall = float(recallAccum) / numGtCare
            precision = float(0) if numDetCare==0 else float(precisionAccum) / numDetCare
            if evaluationParams['CONFIDENCES'] and evaluationParams['PER_SAMPLE_RESULTS']:
                sampleAP = compute_ap(arrSampleConfidences, arrSampleMatch, numGtCare )

        hmean = 0 if (precision + recall)==0 else 2.0 * precision * recall / (precision + recall)
        evaluationLog += "Recall = " + str(round(recallAccum,2)) + " / " + str(numGtCare) + " = " + str(round(recall,2)) + "\n"
        evaluationLog += "Precision = " + str(round(precisionAccum,2)) + " / " + str(numDetCare) + " = "+ str(round(precision,2)) + "\n"

        methodRecallSum += recallAccum
        methodPrecisionSum += precisionAccum
        numGlobalCareGt += numGtCare
        numGlobalCareDet += numDetCare

        if evaluationParams['PER_SAMPLE_RESULTS']:
            perSampleMetrics[resFile] = {
                'precision':precision,
                'recall':recall,
                'hmean':hmean,
                'pairs':pairs,
                'AP':sampleAP,
                # Matrices are omitted for very large samples to bound payload size.
                'recallMat':[] if len(detPols)>100 else recallMat.tolist(),
                'precisionMat':[] if len(detPols)>100 else precisionMat.tolist(),
                'gtPolPoints':gtPolPoints,
                'detPolPoints':detPolPoints,
                'gtCharPoints':gtCharPoints,
                'gtCharCounts':[sum(k).tolist() for k in gtCharCounts],
                'charCounts': charCounts.tolist(),
                'recallScore': recallScore,
                'precisionScore': precisionScore,
                'gtDontCare':gtDontCarePolsNum,
                'detDontCare':detDontCarePolsNum,
                'evaluationParams': evaluationParams,
                'evaluationLog': evaluationLog
            }

    # Compute MAP and MAR
    AP = 0
    if evaluationParams['CONFIDENCES']:
        AP = compute_ap(arrGlobalConfidences, arrGlobalMatches, numGlobalCareGt)

    # Global metrics: character-level sums normalized by care counts.
    methodRecall = 0 if numGlobalCareGt == 0 else methodRecallSum/numGlobalCareGt
    methodPrecision = 0 if numGlobalCareDet == 0 else methodPrecisionSum/numGlobalCareDet
    methodHmean = 0 if methodRecall + methodPrecision==0 else 2* methodRecall * methodPrecision / (methodRecall + methodPrecision)

    methodMetrics = {'recall':methodRecall, 'precision':methodPrecision, 'hmean':methodHmean, 'AP':AP }

    resDict = {'calculated':True,'Message':'','method': methodMetrics,'per_sample': perSampleMetrics}

    return resDict;
if __name__=='__main__':
    # Delegate CLI handling (argument parsing, validation, zip output and
    # result printing) to the shared evaluation runner.
    rrc_evaluation_funcs.main_evaluation(None, default_evaluation_params, validate_data, evaluate_method)
================================================
FILE: static/funcs.js
================================================
// Return the value of query-string parameter `sParam`, `true` for a bare
// flag (present without '='), or undefined when the parameter is absent.
var getUrlParameter = function getUrlParameter(sParam) {
    var query = decodeURIComponent(window.location.search.substring(1));
    var pairs = query.split('&');
    for (var idx = 0; idx < pairs.length; idx++) {
        var parts = pairs[idx].split('=');
        if (parts[0] === sParam) {
            return parts[1] === undefined ? true : parts[1];
        }
    }
};
================================================
FILE: static/jquery-mousewheel.js
================================================
/**
*
* credits for this plugin go to brandonaaron.net
*
* unfortunately his site is down
*
* @param {Object} up
* @param {Object} down
* @param {Object} preventDefault
*/
// jQuery plugin surface: bind/unbind mouse-wheel handlers on matched elements.
jQuery.fn.extend({
    // Attach `up`/`down` callbacks while the pointer hovers the element;
    // focus bookkeeping is delegated to jQuery.event.mousewheel below.
    mousewheel: function(up, down, preventDefault) {
        return this.hover(
            function() {
                jQuery.event.mousewheel.giveFocus(this, up, down, preventDefault);
            },
            function() {
                jQuery.event.mousewheel.removeFocus(this);
            }
        );
    },
    // Convenience wrappers that react to only one scroll direction.
    mousewheeldown: function(fn, preventDefault) {
        return this.mousewheel(function(){}, fn, preventDefault);
    },
    mousewheelup: function(fn, preventDefault) {
        return this.mousewheel(fn, function(){}, preventDefault);
    },
    // Remove hover handlers and any wheel handler installed by giveFocus.
    unmousewheel: function() {
        return this.each(function() {
            jQuery(this).unmouseover().unmouseout();
            jQuery.event.mousewheel.removeFocus(this);
        });
    },
    unmousewheeldown: jQuery.fn.unmousewheel,
    unmousewheelup: jQuery.fn.unmousewheel
});
// Low-level wheel-event management shared by the jQuery.fn wrappers above.
jQuery.event.mousewheel = {
    // Install a wheel handler for `el`; `up`/`down` receive (event, delta).
    giveFocus: function(el, up, down, preventDefault) {
        if (el._handleMousewheel) jQuery(el).unmousewheel();

        // Allow calling giveFocus(el, up, preventDefault): shift args when
        // the third argument is not a function.
        if (preventDefault == window.undefined && down && down.constructor != Function) {
            preventDefault = down;
            down = null;
        }

        el._handleMousewheel = function(event) {
            if (!event) event = window.event;
            if (preventDefault)
                if (event.preventDefault) event.preventDefault();
                else event.returnValue = false;
            // Normalize the scroll delta across legacy browser event models
            // (wheelDelta vs. DOMMouseScroll's detail; Opera inverts sign).
            var delta = 0;
            if (event.wheelDelta) {
                delta = event.wheelDelta/120;
                if (window.opera) delta = -delta;
            } else if (event.detail) {
                delta = -event.detail/3;
            }
            if (up && (delta > 0 || !down))
                up.apply(el, [event, delta]);
            else if (down && delta < 0)
                down.apply(el, [event, delta]);
        };

        // NOTE(review): handlers are registered on `window`/`document`, not
        // on `el`, and the onmousewheel assignment runs even when
        // addEventListener exists (no `else`) — presumably intentional
        // legacy-browser fallback; confirm before changing.
        if (window.addEventListener)
            window.addEventListener('DOMMouseScroll', el._handleMousewheel, false);
        window.onmousewheel = document.onmousewheel = el._handleMousewheel;
    },

    // Detach the handler installed by giveFocus and clear the marker.
    removeFocus: function(el) {
        if (!el._handleMousewheel) return;

        if (window.removeEventListener)
            window.removeEventListener('DOMMouseScroll', el._handleMousewheel, false);
        window.onmousewheel = document.onmousewheel = null;
        el._handleMousewheel = null;
    }
};
================================================
FILE: static/jquery.form-3.51.js
================================================
/*!
* jQuery Form Plugin
* version: 3.51.0-2014.06.20
* Requires jQuery v1.5 or later
* Copyright (c) 2014 M. Alsup
* Examples and documentation at: http://malsup.com/jquery/form/
* Project repository: https://github.com/malsup/form
* Dual licensed under the MIT and GPL licenses.
* https://github.com/malsup/form#copyright-and-license
*/
!function(e){"use strict";"function"==typeof define&&define.amd?define(["jquery"],e):e("undefined"!=typeof jQuery?jQuery:window.Zepto)}(function(e){"use strict";function t(t){var r=t.data;t.isDefaultPrevented()||(t.preventDefault(),e(t.target).ajaxSubmit(r))}function r(t){var r=t.target,a=e(r);if(!a.is("[type=submit],[type=image]")){var n=a.closest("[type=submit]");if(0===n.length)return;r=n[0]}var i=this;if(i.clk=r,"image"==r.type)if(void 0!==t.offsetX)i.clk_x=t.offsetX,i.clk_y=t.offsetY;else if("function"==typeof e.fn.offset){var o=a.offset();i.clk_x=t.pageX-o.left,i.clk_y=t.pageY-o.top}else i.clk_x=t.pageX-r.offsetLeft,i.clk_y=t.pageY-r.offsetTop;setTimeout(function(){i.clk=i.clk_x=i.clk_y=null},100)}function a(){if(e.fn.ajaxSubmit.debug){var t="[jquery.form] "+Array.prototype.join.call(arguments,"");window.console&&window.console.log?window.console.log(t):window.opera&&window.opera.postError&&window.opera.postError(t)}}var n={};n.fileapi=void 0!==e("").get(0).files,n.formdata=void 0!==window.FormData;var i=!!e.fn.prop;e.fn.attr2=function(){if(!i)return this.attr.apply(this,arguments);var e=this.prop.apply(this,arguments);return e&&e.jquery||"string"==typeof e?e:this.attr.apply(this,arguments)},e.fn.ajaxSubmit=function(t){function r(r){var a,n,i=e.param(r,t.traditional).split("&"),o=i.length,s=[];for(a=0;o>a;a++)i[a]=i[a].replace(/\+/g," "),n=i[a].split("="),s.push([decodeURIComponent(n[0]),decodeURIComponent(n[1])]);return s}function o(a){for(var n=new FormData,i=0;i').val(m.extraData[d].value).appendTo(w)[0]:e('').val(m.extraData[d]).appendTo(w)[0]);m.iframeTarget||v.appendTo("body"),g.attachEvent?g.attachEvent("onload",s):g.addEventListener("load",s,!1),setTimeout(t,15);try{w.submit()}catch(h){var x=document.createElement("form").submit;x.apply(w)}}finally{w.setAttribute("action",i),w.setAttribute("enctype",c),r?w.setAttribute("target",r):f.removeAttr("target"),e(l).remove()}}function s(t){if(!x.aborted&&!F){if(M=n(g),M||(a("cannot access response 
document"),t=k),t===D&&x)return x.abort("timeout"),void S.reject(x,"timeout");if(t==k&&x)return x.abort("server abort"),void S.reject(x,"error","server abort");if(M&&M.location.href!=m.iframeSrc||T){g.detachEvent?g.detachEvent("onload",s):g.removeEventListener("load",s,!1);var r,i="success";try{if(T)throw"timeout";var o="xml"==m.dataType||M.XMLDocument||e.isXMLDoc(M);if(a("isXml="+o),!o&&window.opera&&(null===M.body||!M.body.innerHTML)&&--O)return a("requeing onLoad callback, DOM not available"),void setTimeout(s,250);var u=M.body?M.body:M.documentElement;x.responseText=u?u.innerHTML:null,x.responseXML=M.XMLDocument?M.XMLDocument:M,o&&(m.dataType="xml"),x.getResponseHeader=function(e){var t={"content-type":m.dataType};return t[e.toLowerCase()]},u&&(x.status=Number(u.getAttribute("status"))||x.status,x.statusText=u.getAttribute("statusText")||x.statusText);var c=(m.dataType||"").toLowerCase(),l=/(json|script|text)/.test(c);if(l||m.textarea){var f=M.getElementsByTagName("textarea")[0];if(f)x.responseText=f.value,x.status=Number(f.getAttribute("status"))||x.status,x.statusText=f.getAttribute("statusText")||x.statusText;else if(l){var p=M.getElementsByTagName("pre")[0],h=M.getElementsByTagName("body")[0];p?x.responseText=p.textContent?p.textContent:p.innerText:h&&(x.responseText=h.textContent?h.textContent:h.innerText)}}else"xml"==c&&!x.responseXML&&x.responseText&&(x.responseXML=X(x.responseText));try{E=_(x,c,m)}catch(y){i="parsererror",x.error=r=y||i}}catch(y){a("error caught: ",y),i="error",x.error=r=y||i}x.aborted&&(a("upload aborted"),i=null),x.status&&(i=x.status>=200&&x.status<300||304===x.status?"success":"error"),"success"===i?(m.success&&m.success.call(m.context,E,"success",x),S.resolve(x.responseText,"success",x),d&&e.event.trigger("ajaxSuccess",[x,m])):i&&(void 
0===r&&(r=x.statusText),m.error&&m.error.call(m.context,x,i,r),S.reject(x,"error",r),d&&e.event.trigger("ajaxError",[x,m,r])),d&&e.event.trigger("ajaxComplete",[x,m]),d&&!--e.active&&e.event.trigger("ajaxStop"),m.complete&&m.complete.call(m.context,x,i),F=!0,m.timeout&&clearTimeout(j),setTimeout(function(){m.iframeTarget?v.attr("src",m.iframeSrc):v.remove(),x.responseXML=null},100)}}}var c,l,m,d,p,v,g,x,y,b,T,j,w=f[0],S=e.Deferred();if(S.abort=function(e){x.abort(e)},r)for(l=0;l'),v.css({position:"absolute",top:"-1000px",left:"-1000px"})),g=v[0],x={aborted:0,responseText:null,responseXML:null,status:0,statusText:"n/a",getAllResponseHeaders:function(){},getResponseHeader:function(){},setRequestHeader:function(){},abort:function(t){var r="timeout"===t?"timeout":"aborted";a("aborting upload... "+r),this.aborted=1;try{g.contentWindow.document.execCommand&&g.contentWindow.document.execCommand("Stop")}catch(n){}v.attr("src",m.iframeSrc),x.error=r,m.error&&m.error.call(m.context,x,r,t),d&&e.event.trigger("ajaxError",[x,m,r]),m.complete&&m.complete.call(m.context,x,r)}},d=m.global,d&&0===e.active++&&e.event.trigger("ajaxStart"),d&&e.event.trigger("ajaxSend",[x,m]),m.beforeSend&&m.beforeSend.call(m.context,x,m)===!1)return m.global&&e.active--,S.reject(),S;if(x.aborted)return S.reject(),S;y=w.clk,y&&(b=y.name,b&&!y.disabled&&(m.extraData=m.extraData||{},m.extraData[b]=y.value,"image"==y.type&&(m.extraData[b+".x"]=w.clk_x,m.extraData[b+".y"]=w.clk_y)));var D=1,k=2,A=e("meta[name=csrf-token]").attr("content"),L=e("meta[name=csrf-param]").attr("content");L&&A&&(m.extraData=m.extraData||{},m.extraData[L]=A),m.forceSync?o():setTimeout(o,10);var E,M,F,O=50,X=e.parseXML||function(e,t){return window.ActiveXObject?(t=new ActiveXObject("Microsoft.XMLDOM"),t.async="false",t.loadXML(e)):t=(new DOMParser).parseFromString(e,"text/xml"),t&&t.documentElement&&"parsererror"!=t.documentElement.nodeName?t:null},C=e.parseJSON||function(e){return window.eval("("+e+")")},_=function(t,r,a){var 
n=t.getResponseHeader("content-type")||"",i="xml"===r||!r&&n.indexOf("xml")>=0,o=i?t.responseXML:t.responseText;return i&&"parsererror"===o.documentElement.nodeName&&e.error&&e.error("parsererror"),a&&a.dataFilter&&(o=a.dataFilter(o,r)),"string"==typeof o&&("json"===r||!r&&n.indexOf("json")>=0?o=C(o):("script"===r||!r&&n.indexOf("javascript")>=0)&&e.globalEval(o)),o};return S}if(!this.length)return a("ajaxSubmit: skipping submit process - no element selected"),this;var u,c,l,f=this;"function"==typeof t?t={success:t}:void 0===t&&(t={}),u=t.type||this.attr2("method"),c=t.url||this.attr2("action"),l="string"==typeof c?e.trim(c):"",l=l||window.location.href||"",l&&(l=(l.match(/^([^#]+)/)||[])[1]),t=e.extend(!0,{url:l,success:e.ajaxSettings.success,type:u||e.ajaxSettings.type,iframeSrc:/^https/i.test(window.location.href||"")?"javascript:false":"about:blank"},t);var m={};if(this.trigger("form-pre-serialize",[this,t,m]),m.veto)return a("ajaxSubmit: submit vetoed via form-pre-serialize trigger"),this;if(t.beforeSerialize&&t.beforeSerialize(this,t)===!1)return a("ajaxSubmit: submit aborted via beforeSerialize callback"),this;var d=t.traditional;void 0===d&&(d=e.ajaxSettings.traditional);var p,h=[],v=this.formToArray(t.semantic,h);if(t.data&&(t.extraData=t.data,p=e.param(t.data,d)),t.beforeSubmit&&t.beforeSubmit(v,this,t)===!1)return a("ajaxSubmit: submit aborted via beforeSubmit callback"),this;if(this.trigger("form-submit-validate",[v,this,t,m]),m.veto)return a("ajaxSubmit: submit vetoed via form-submit-validate trigger"),this;var g=e.param(v,d);p&&(g=g?g+"&"+p:p),"GET"==t.type.toUpperCase()?(t.url+=(t.url.indexOf("?")>=0?"&":"?")+g,t.data=null):t.data=g;var x=[];if(t.resetForm&&x.push(function(){f.resetForm()}),t.clearForm&&x.push(function(){f.clearForm(t.includeHidden)}),!t.dataType&&t.target){var y=t.success||function(){};x.push(function(r){var a=t.replaceTarget?"replaceWith":"html";e(t.target)[a](r).each(y,arguments)})}else 
t.success&&x.push(t.success);if(t.success=function(e,r,a){for(var n=t.context||this,i=0,o=x.length;o>i;i++)x[i].apply(n,[e,r,a||f,f])},t.error){var b=t.error;t.error=function(e,r,a){var n=t.context||this;b.apply(n,[e,r,a,f])}}if(t.complete){var T=t.complete;t.complete=function(e,r){var a=t.context||this;T.apply(a,[e,r,f])}}var j=e("input[type=file]:enabled",this).filter(function(){return""!==e(this).val()}),w=j.length>0,S="multipart/form-data",D=f.attr("enctype")==S||f.attr("encoding")==S,k=n.fileapi&&n.formdata;a("fileAPI :"+k);var A,L=(w||D)&&!k;t.iframe!==!1&&(t.iframe||L)?t.closeKeepAlive?e.get(t.closeKeepAlive,function(){A=s(v)}):A=s(v):A=(w||D)&&k?o(v):e.ajax(t),f.removeData("jqxhr").data("jqxhr",A);for(var E=0;Ec;c++)if(d=u[c],f=d.name,f&&!d.disabled)if(t&&o.clk&&"image"==d.type)o.clk==d&&(a.push({name:f,value:e(d).val(),type:d.type}),a.push({name:f+".x",value:o.clk_x},{name:f+".y",value:o.clk_y}));else if(m=e.fieldValue(d,!0),m&&m.constructor==Array)for(r&&r.push(d),l=0,h=m.length;h>l;l++)a.push({name:f,value:m[l]});else if(n.fileapi&&"file"==d.type){r&&r.push(d);var v=d.files;if(v.length)for(l=0;li;i++)r.push({name:a,value:n[i]});else null!==n&&"undefined"!=typeof n&&r.push({name:this.name,value:n})}}),e.param(r)},e.fn.fieldValue=function(t){for(var r=[],a=0,n=this.length;n>a;a++){var i=this[a],o=e.fieldValue(i,t);null===o||"undefined"==typeof o||o.constructor==Array&&!o.length||(o.constructor==Array?e.merge(r,o):r.push(o))}return r},e.fieldValue=function(t,r){var a=t.name,n=t.type,i=t.tagName.toLowerCase();if(void 0===r&&(r=!0),r&&(!a||t.disabled||"reset"==n||"button"==n||("checkbox"==n||"radio"==n)&&!t.checked||("submit"==n||"image"==n)&&t.form&&t.form.clk!=t||"select"==i&&-1==t.selectedIndex))return null;if("select"==i){var o=t.selectedIndex;if(0>o)return null;for(var s=[],u=t.options,c="select-one"==n,l=c?o+1:u.length,f=c?o:0;l>f;f++){var m=u[f];if(m.selected){var 
d=m.value;if(d||(d=m.attributes&&m.attributes.value&&!m.attributes.value.specified?m.text:m.value),c)return d;s.push(d)}}return s}return e(t).val()},e.fn.clearForm=function(t){return this.each(function(){e("input,select,textarea",this).clearFields(t)})},e.fn.clearFields=e.fn.clearInputs=function(t){var r=/^(?:color|date|datetime|email|month|number|password|range|search|tel|text|time|url|week)$/i;return this.each(function(){var a=this.type,n=this.tagName.toLowerCase();r.test(a)||"textarea"==n?this.value="":"checkbox"==a||"radio"==a?this.checked=!1:"select"==n?this.selectedIndex=-1:"file"==a?/MSIE/.test(navigator.userAgent)?e(this).replaceWith(e(this).clone(!0)):e(this).val(""):t&&(t===!0&&/hidden/.test(a)||"string"==typeof t&&e(this).is(t))&&(this.value="")})},e.fn.resetForm=function(){return this.each(function(){("function"==typeof this.reset||"object"==typeof this.reset&&!this.reset.nodeType)&&this.reset()})},e.fn.enable=function(e){return void 0===e&&(e=!0),this.each(function(){this.disabled=!e})},e.fn.selected=function(t){return void 0===t&&(t=!0),this.each(function(){var r=this.type;if("checkbox"==r||"radio"==r)this.checked=t;else if("option"==this.tagName.toLowerCase()){var a=e(this).parent("select");t&&a[0]&&"select-one"==a[0].type&&a.find("option").selected(!1),this.selected=t}})},e.fn.ajaxSubmit.debug=!1});
================================================
FILE: static/ranking.js
================================================
/* global web, google */
// Default Google Charts configuration shared by the per-task ranking graphs.
var ranking_task_graphic_options_default = {
    // Canvas geometry.
    height: 240,
    width: 400,
    chartArea: {left: 50, top: 20, width: 350, height: 180},
    // Rendering behaviour.
    fontSize: 12,
    animation: {'duration': 0},  // redraw instantly, no transition
    title: '',
    backgroundColor: 'transparent',
    focusTarget: 'category',     // tooltip covers the whole category on hover
    legend: {position: 'bottom'},
    // Axis styling.
    vAxis: {
        format: 'none',
        textStyle: {fontSize: 8},
        //title:'%',
        titleTextStyle: {fontSize: 12, fontStyle: 'bold'},
        textPosition: 'out'
    },
    hAxis: {
        textStyle: {fontSize: 8}
    }
};
function delete_methods(){
    // Bulk delete: the server call runs only after explicit user confirmation.
    if (confirm("Are you sure to delete all methods?")) {
        $.post("/delete_all", function (response) {
            // Reload so the emptied method list is rendered.
            document.location.reload();
        });
    }
}
function delete_method(id){
    // Remove a single method by id, but only after the user confirms.
    if (confirm("Are you sure to delete the method?")) {
        $.post("/delete_method", {"id": id}, function (response) {
            // Refresh the page so the deleted entry disappears from the table.
            document.location.reload();
        });
    }
}
function edit_method(id, el){
    // Pre-fill the prompt with the method's current title, read from the
    // table row containing the clicked element.
    var currentTitle = $(el).closest("tr").find("span.title").text();
    var newName = prompt("Enter the method's name", currentTitle);
    if (newName == null) {
        return; // user cancelled the prompt
    }
    $.post("/edit_method", {"id": id, "name": newName}, function (response) {
        document.location.reload();
    });
}
function upload_subm(){
    // Trigger submission of the page's form element(s).
    $("form").submit();
}
// Appends a full-page "please wait" overlay to the body, optionally
// including a message.
// NOTE(review): the HTML markup inside the append() string literal appears
// to have been stripped by the repository extraction, leaving the literal
// broken across the lines below — restore the markup from the upstream file
// before relying on this function.
function wait_screen(msg){
$("body").append("
" + (msg!=undefined? "
" + msg + "
" : "") + "
");
}
function close_overlay(){
    // Dismiss any overlay element(s) currently attached to the page.
    $("div.overlay").remove();
}
// Replaces the overlay's content with an error message, creating the
// overlay container first when none exists yet.
// NOTE(review): the overlay HTML markup in the string literals appears to
// have been stripped by the repository extraction (the html() literal is
// broken across the lines below) — restore the markup from the upstream
// file before relying on this function.
function show_error(msg){
if(!$("div.overlay").length){
$("body").append("");
}
$("div.overlay").html("
" + msg + "
");
}
function show_info(msg){
if(!$("div.overlay").length){
$("body").append("");
}
$("div.overlay").html("
% result = json.loads(results.read('method.json'))
% if result==None:
Submit your method
% elif result['calculated']==False:
The method has not been calculated
{{result['Message']}}
% else:
Method summary
Title: {{methodTitle}} [{{submitId}}]
Submit date: {{submitDate}}
<% for k,v in method_params.items():
colValue = result['method'][k]
if v['format'] == "perc" :
value = str(round(colValue*100,2)) + " %"
elif v['type'] == "double" :
value = str(round(colValue*100,2))
else:
value = colValue
end
%>
{{v['long_name']}}: {{value}}
% end
%num_pages = int(math.ceil(float(len(images)) / 20))
% if page>1:
< previous
% end
Page {{page}} of {{num_pages}}
% if pagenext >
% end
<%
for index, name in enumerate(images[(page-1)*20:page*20]):
sampleId = web.image_name_to_id(name)
values = json.loads(results.read( sampleId + '.json'))
sample = (page-1)*20+index+1
%>
<% for k,v in sample_params.items():
colValue = values[k]
if v['format'] == "perc" :
value = str(round(colValue*100,2)) + " %"
elif v['type'] == "double" :
value = str(round(colValue*100,2))
else:
value = colValue
end
%>
{{v['long_name']}}: {{value}}
% end
<%
end
results.close()
%>
% end
================================================
FILE: views/sample.tpl
================================================
{{title}}
{{title}}
% submitId, methodTitle, submitDate, methodResultJson = subm_data
% import math
% page = 1
% if int(sample)>1:
%page = (0 if sample % 20 == 0 else 1) + int(math.ceil(sample/20))
% end
<%
num_column = -1
num_column_order = -1
for k,v in sample_params.items():
num_column+=1
if v['order'] != "":
num_column_order = num_column
sort_order = v['order']
end
%>
{{v['long_name']}}
% end
<%
samplesData = []
for row in samplesValues:
sampleData = [row['id'],row['title']]
for k,v in sample_params.items():
sampleData.append(row[k])
end
samplesData.append(sampleData)
end
samplesData = sorted(samplesData, key=lambda sample: sample[num_column_order],reverse=sort_order=="desc")
for row in samplesData:
methodClass = "current" if row[0]==submitId else "other"
%>
{{row[1]}}
<% index = 2 #omit fields id,title
for k,v in sample_params.items():
colValue = row[index]
if v['format'] == "perc" :
value = str(round(colValue*100,2)) + " %"
elif v['type'] == "double" :
value = str(round(colValue*100,2))
else:
value = colValue
end
index = index+1
%>